LLVM 22.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
46#include "llvm/Support/ModRef.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <optional>
52#include <vector>
53
54using namespace llvm;
55
57 "disable-i2p-p2i-opt", cl::init(false),
58 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59
60//===----------------------------------------------------------------------===//
61// AllocaInst Class
62//===----------------------------------------------------------------------===//
63
// Return the number of bytes this alloca reserves, if statically computable:
// the alloc size of the allocated type, scaled by the array count for array
// allocations. Returns std::nullopt when the array count is not a constant
// integer or the byte-count multiplication overflows.
// NOTE(review): this extraction is missing original lines 65 and 68 — the
// signature tail (AllocaInst::getAllocationSize(const DataLayout &DL) const {)
// and the definition of 'C' (the array-size operand dyn_cast to ConstantInt).
// Recover them from the upstream source before compiling.
64std::optional<TypeSize>
66 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
67 if (isArrayAllocation()) {
// 'C' is the array-size operand as a ConstantInt (defining line missing here).
69 if (!C)
70 return std::nullopt;
// Array elements are never scalable, so the product below is a fixed size.
71 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
72 auto CheckedProd =
73 checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
74 if (!CheckedProd)
75 return std::nullopt;
76 return TypeSize::getFixed(*CheckedProd);
77 }
78 return Size;
79}
80
// Same as getAllocationSize, but expressed in bits (bytes * 8), again
// returning std::nullopt on a non-constant array count or on overflow.
// NOTE(review): original line 82 (the signature tail,
// AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {) is
// missing from this extraction.
81std::optional<TypeSize>
83 std::optional<TypeSize> Size = getAllocationSize(DL);
84 if (!Size)
85 return std::nullopt;
// Scale bytes to bits with overflow checking.
86 auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
87 static_cast<TypeSize::ScalarTy>(8));
88 if (!CheckedProd)
89 return std::nullopt;
// Preserve scalability of the underlying size.
90 return TypeSize::get(*CheckedProd, Size->isScalable());
91}
92
93//===----------------------------------------------------------------------===//
94// SelectInst Class
95//===----------------------------------------------------------------------===//
96
97/// areInvalidOperands - Return a string if the specified operands are invalid
98/// for a select operation, otherwise return null.
///
/// Checks, in order: the two selected values agree in type and are not
/// tokens; for a vector condition, the condition elements are i1 and the
/// selected values are vectors of the same element count; otherwise the
/// condition must be scalar i1.
99const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
100 if (Op1->getType() != Op2->getType())
101 return "both values to select must have same type";
102
103 if (Op1->getType()->isTokenTy())
104 return "select values cannot have token type";
105
106 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
107 // Vector select.
108 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
109 return "vector select condition element type must be i1";
// NOTE(review): original line 110 is missing here — it defines 'ET'
// (VectorType *ET = dyn_cast<VectorType>(Op1->getType());).
111 if (!ET)
112 return "selected values for vector select must be vectors";
113 if (ET->getElementCount() != VT->getElementCount())
114 return "vector select requires selected vectors to have "
115 "the same vector length as select condition";
116 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
117 return "select condition must be i1 or <n x i1>";
118 }
119 return nullptr;
120}
121
122//===----------------------------------------------------------------------===//
123// PHINode Class
124//===----------------------------------------------------------------------===//
125
// Copy constructor: clones the incoming values and incoming blocks of PN.
// NOTE(review): original lines 129-130 and 133 are missing from this
// extraction — presumably the hung-off use allocation
// (allocHungoffUses(PN.getNumOperands());) and the SubclassOptionalData
// copy; confirm against the upstream source.
126PHINode::PHINode(const PHINode &PN)
127 : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
128 ReservedSpace(PN.getNumOperands()) {
131 std::copy(PN.op_begin(), PN.op_end(), op_begin());
132 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
134}
135
136// removeIncomingValue - Remove an incoming value. This is useful if a
137// predecessor basic block is deleted.
//
// Returns the removed value. Operands after Idx are shifted down one slot
// (keeping value/block pairs aligned), and the now-unused trailing slot is
// cleared. If the phi ends up with zero entries and DeletePHIIfEmpty is set,
// the phi itself is destroyed.
138Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
139 Value *Removed = getIncomingValue(Idx);
140
141 // Move everything after this operand down.
142 //
143 // FIXME: we could just swap with the end of the list, then erase. However,
144 // clients might not expect this to happen. The code as it is thrashes the
145 // use/def lists, which is kinda lame.
146 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
147 copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);
148
149 // Nuke the last value.
150 Op<-1>().set(nullptr);
// NOTE(review): original line 151 is missing — it shrinks the operand count
// (setNumHungOffUseOperands(getNumOperands() - 1);).
152
153 // If the PHI node is dead, because it has zero entries, nuke it now.
154 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
155 // If anyone is using this PHI, make them use a dummy value instead...
// NOTE(review): original lines 156-157 are missing — the replace-all-uses
// with a dummy value and the eraseFromParent() call.
158 }
159 return Removed;
160}
161
// Bulk removal variant: removes every incoming value/block pair whose index
// satisfies Predicate, in a single compaction pass over the operand and block
// lists. If the phi becomes empty and DeletePHIIfEmpty is set, the phi is
// destroyed.
162void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
163 bool DeletePHIIfEmpty) {
// Collect the doomed indices first so the predicate sees stable indices.
164 SmallDenseSet<unsigned> RemoveIndices;
165 for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
166 if (Predicate(Idx))
167 RemoveIndices.insert(Idx);
168
169 if (RemoveIndices.empty())
170 return;
171
172 // Remove operands.
173 auto NewOpEnd = remove_if(operands(), [&](Use &U) {
174 return RemoveIndices.contains(U.getOperandNo());
175 });
// Clear the compacted-away trailing uses so they drop their references.
176 for (Use &U : make_range(NewOpEnd, op_end()))
177 U.set(nullptr);
178
179 // Remove incoming blocks.
180 (void)std::remove_if(const_cast<block_iterator>(block_begin()),
181 const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
182 return RemoveIndices.contains(&BB - block_begin());
183 });
184
185 setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());
186
187 // If the PHI node is dead, because it has zero entries, nuke it now.
188 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
189 // If anyone is using this PHI, make them use a dummy value instead...
// NOTE(review): original lines 190-191 are missing — the replace-all-uses
// with a dummy value and the eraseFromParent() call.
192 }
193}
194
195/// growOperands - grow operands - This grows the operand list in response
196/// to a push_back style of operation. This grows the number of ops by 1.5
197/// times.
198///
199void PHINode::growOperands() {
200 unsigned e = getNumOperands();
201 unsigned NumOps = e + e / 2;
202 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
203
204 ReservedSpace = NumOps;
205 growHungoffUses(ReservedSpace, /* IsPhi */ true);
206}
207
208/// hasConstantValue - If the specified PHI node always merges together the same
209/// value, return the value, otherwise return null.
// NOTE(review): the original signature line 210 is missing from this
// extraction (Value *PHINode::hasConstantValue() const {).
211 // Exploit the fact that phi nodes always have at least one entry.
212 Value *ConstantValue = getIncomingValue(0);
213 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
214 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
215 if (ConstantValue != this)
216 return nullptr; // Incoming values not all the same.
217 // The case where the first value is this PHI.
218 ConstantValue = getIncomingValue(i);
219 }
// All incoming values were the phi itself: any value is as good as another.
220 if (ConstantValue == this)
221 return PoisonValue::get(getType());
222 return ConstantValue;
223}
224
225/// hasConstantOrUndefValue - Whether the specified PHI node always merges
226/// together the same value, assuming that undefs result in the same value as
227/// non-undefs.
228/// Unlike \ref hasConstantValue, this does not return a value because the
229/// unique non-undef incoming value need not dominate the PHI node.
// NOTE(review): original lines 230 and 233 are missing from this extraction —
// the signature (bool PHINode::hasConstantOrUndefValue() const {) and the
// definition of 'Incoming' (Value *Incoming = getIncomingValue(i);).
231 Value *ConstantValue = nullptr;
232 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
// Self-references and undefs are ignored; everything else must agree.
234 if (Incoming != this && !isa<UndefValue>(Incoming)) {
235 if (ConstantValue && ConstantValue != Incoming)
236 return false;
237 ConstantValue = Incoming;
238 }
239 }
240 return true;
241}
242
243//===----------------------------------------------------------------------===//
244// LandingPadInst Implementation
245//===----------------------------------------------------------------------===//
246
// Construct a landingpad with room reserved for NumReservedValues clauses;
// init() allocates the hung-off uses, names the instruction, and clears the
// cleanup flag.
247LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
248 const Twine &NameStr,
249 InsertPosition InsertBefore)
250 : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
251 init(NumReservedValues, NameStr);
252}
253
254LandingPadInst::LandingPadInst(const LandingPadInst &LP)
255 : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
256 ReservedSpace(LP.getNumOperands()) {
259 Use *OL = getOperandList();
260 const Use *InOL = LP.getOperandList();
261 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
262 OL[I] = InOL[I];
263
264 setCleanup(LP.isCleanup());
265}
266
// Factory wrapper around the constructor; clauses beyond NumReservedClauses
// can still be added later via addClause (the operand list grows on demand).
267LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
268 const Twine &NameStr,
269 InsertPosition InsertBefore) {
270 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
271}
272
273void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
274 ReservedSpace = NumReservedValues;
276 allocHungoffUses(ReservedSpace);
277 setName(NameStr);
278 setCleanup(false);
279}
280
281/// growOperands - grow operands - This grows the operand list in response to a
282/// push_back style of operation. This grows the number of ops by 2 times.
283void LandingPadInst::growOperands(unsigned Size) {
284 unsigned e = getNumOperands();
285 if (ReservedSpace >= e + Size) return;
286 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
287 growHungoffUses(ReservedSpace);
288}
289
291 unsigned OpNo = getNumOperands();
292 growOperands(1);
293 assert(OpNo < ReservedSpace && "Growing didn't work!");
295 getOperandList()[OpNo] = Val;
296}
297
298//===----------------------------------------------------------------------===//
299// CallBase Implementation
300//===----------------------------------------------------------------------===//
301
303 InsertPosition InsertPt) {
304 switch (CB->getOpcode()) {
305 case Instruction::Call:
306 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
307 case Instruction::Invoke:
308 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
309 case Instruction::CallBr:
310 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
311 default:
312 llvm_unreachable("Unknown CallBase sub-class!");
313 }
314}
315
317 InsertPosition InsertPt) {
319 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
320 auto ChildOB = CI->getOperandBundleAt(i);
321 if (ChildOB.getTagName() != OpB.getTag())
322 OpDefs.emplace_back(ChildOB);
323 }
324 OpDefs.emplace_back(OpB);
325 return CallBase::Create(CI, OpDefs, InsertPt);
326}
327
329
331 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
332 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
333}
334
336 const Value *V = getCalledOperand();
337 if (isa<Function>(V) || isa<Constant>(V))
338 return false;
339 return !isInlineAsm();
340}
341
342/// Tests if this call site must be tail call optimized. Only a CallInst can
343/// be tail call optimized.
345 if (auto *CI = dyn_cast<CallInst>(this))
346 return CI->isMustTailCall();
347 return false;
348}
349
350/// Tests if this call site is marked as a tail call.
352 if (auto *CI = dyn_cast<CallInst>(this))
353 return CI->isTailCall();
354 return false;
355}
356
359 return F->getIntrinsicID();
361}
362
364 FPClassTest Mask = Attrs.getRetNoFPClass();
365
366 if (const Function *F = getCalledFunction())
367 Mask |= F->getAttributes().getRetNoFPClass();
368 return Mask;
369}
370
372 FPClassTest Mask = Attrs.getParamNoFPClass(i);
373
374 if (const Function *F = getCalledFunction())
375 Mask |= F->getAttributes().getParamNoFPClass(i);
376 return Mask;
377}
378
379std::optional<ConstantRange> CallBase::getRange() const {
380 Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
382 if (const Function *F = getCalledFunction())
383 FnAttr = F->getRetAttribute(Attribute::Range);
384
385 if (CallAttr.isValid() && FnAttr.isValid())
386 return CallAttr.getRange().intersectWith(FnAttr.getRange());
387 if (CallAttr.isValid())
388 return CallAttr.getRange();
389 if (FnAttr.isValid())
390 return FnAttr.getRange();
391 return std::nullopt;
392}
393
395 if (hasRetAttr(Attribute::NonNull))
396 return true;
397
398 if (getRetDereferenceableBytes() > 0 &&
400 return true;
401
402 return false;
403}
404
406 unsigned Index;
407
408 if (Attrs.hasAttrSomewhere(Kind, &Index))
409 return getArgOperand(Index - AttributeList::FirstArgIndex);
410 if (const Function *F = getCalledFunction())
411 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
412 return getArgOperand(Index - AttributeList::FirstArgIndex);
413
414 return nullptr;
415}
416
417/// Determine whether the argument or parameter has the given attribute.
///
/// Checks the call-site attribute list first, then falls back to the callee's
/// declaration. Memory-effect attributes inherited from the callee are
/// additionally weakened by the presence of operand bundles, which may read
/// or clobber memory.
418bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
419 assert(ArgNo < arg_size() && "Param index out of bounds!");
420
421 if (Attrs.hasParamAttr(ArgNo, Kind))
422 return true;
423
// Indirect call with no call-site attribute: nothing more to consult.
424 const Function *F = getCalledFunction();
425 if (!F)
426 return false;
427
428 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
429 return false;
430
431 // Take into account mod/ref by operand bundles.
432 switch (Kind) {
433 case Attribute::ReadNone:
// NOTE(review): original lines 434 and 436 are missing from this extraction —
// the return statements for the ReadNone and ReadOnly cases (which also
// consult hasClobberingOperandBundles()); recover them from upstream.
435 case Attribute::ReadOnly:
437 case Attribute::WriteOnly:
438 return !hasReadingOperandBundles();
439 default:
440 return true;
441 }
442}
443
445 bool AllowUndefOrPoison) const {
447 "Argument must be a pointer");
448 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
449 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
450 return true;
451
452 if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
454 getCaller(),
456 return true;
457
458 return false;
459}
460
461bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
463 return F->getAttributes().hasFnAttr(Kind);
464
465 return false;
466}
467
468bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
470 return F->getAttributes().hasFnAttr(Kind);
471
472 return false;
473}
474
475template <typename AK>
476Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
477 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
478 // getMemoryEffects() correctly combines memory effects from the call-site,
479 // operand bundles and function.
480 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
481 }
482
484 return F->getAttributes().getFnAttr(Kind);
485
486 return Attribute();
487}
488
489template LLVM_ABI Attribute
490CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
491template LLVM_ABI Attribute
492CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
493
494template <typename AK>
495Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
496 AK Kind) const {
498
499 if (auto *F = dyn_cast<Function>(V))
500 return F->getAttributes().getParamAttr(ArgNo, Kind);
501
502 return Attribute();
503}
504template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
505 unsigned ArgNo, Attribute::AttrKind Kind) const;
506template LLVM_ABI Attribute
507CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
508
511 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
513}
514
517 const unsigned BeginIndex) {
518 auto It = op_begin() + BeginIndex;
519 for (auto &B : Bundles)
520 It = std::copy(B.input_begin(), B.input_end(), It);
521
522 auto *ContextImpl = getContext().pImpl;
523 auto BI = Bundles.begin();
524 unsigned CurrentIndex = BeginIndex;
525
526 for (auto &BOI : bundle_op_infos()) {
527 assert(BI != Bundles.end() && "Incorrect allocation?");
528
529 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
530 BOI.Begin = CurrentIndex;
531 BOI.End = CurrentIndex + BI->input_size();
532 CurrentIndex = BOI.End;
533 BI++;
534 }
535
536 assert(BI == Bundles.end() && "Incorrect allocation?");
537
538 return It;
539}
540
// NOTE(review): original lines 541, 545, 554, and 562-563 are missing from
// this extraction — the function signature
// (CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx)),
// the small-count guard opening the linear-search branch, the first half of
// the range assert, and the definitions of the Begin/End bundle iterators.
// Recover them from upstream before compiling.
542 /// When there isn't many bundles, we do a simple linear search.
543 /// Else fallback to a binary-search that use the fact that bundles usually
544 /// have similar number of argument to get faster convergence.
// Linear scan: return the first bundle whose [Begin, End) covers OpIdx.
546 for (auto &BOI : bundle_op_infos())
547 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
548 return BOI;
549
550 llvm_unreachable("Did not find operand bundle for operand!");
551 }
552
// Binary-search path: OpIdx must lie past the arguments and inside the
// overall bundle-operand range.
553 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
555 OpIdx < std::prev(bundle_op_info_end())->End &&
556 "The Idx isn't in the operand bundle");
557
558 /// We need a decimal number below and to prevent using floating point numbers
559 /// we use an integral value multiplied by this constant.
560 constexpr unsigned NumberScaling = 1024;
561
564 bundle_op_iterator Current = Begin;
565
// Interpolation-style narrowing: estimate which bundle holds OpIdx from the
// average operands-per-bundle in the remaining range, then shrink the range
// on the side that cannot contain it.
566 while (Begin != End) {
567 unsigned ScaledOperandPerBundle =
568 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
569 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
570 ScaledOperandPerBundle);
// The estimate can overshoot the last bundle; clamp it back into range.
571 if (Current >= End)
572 Current = std::prev(End);
573 assert(Current < End && Current >= Begin &&
574 "the operand bundle doesn't cover every value in the range");
575 if (OpIdx >= Current->Begin && OpIdx < Current->End)
576 break;
577 if (OpIdx >= Current->End)
578 Begin = Current + 1;
579 else
580 End = Current;
581 }
582
583 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
584 "the operand bundle doesn't cover every value in the range");
585 return *Current;
586}
587
590 InsertPosition InsertPt) {
591 if (CB->getOperandBundle(ID))
592 return CB;
593
595 CB->getOperandBundlesAsDefs(Bundles);
596 Bundles.push_back(OB);
597 return Create(CB, Bundles, InsertPt);
598}
599
601 InsertPosition InsertPt) {
603 bool CreateNew = false;
604
605 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
606 auto Bundle = CB->getOperandBundleAt(I);
607 if (Bundle.getTagID() == ID) {
608 CreateNew = true;
609 continue;
610 }
611 Bundles.emplace_back(Bundle);
612 }
613
614 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
615}
616
618 // Implementation note: this is a conservative implementation of operand
619 // bundle semantics, where *any* non-assume operand bundle (other than
620 // ptrauth) forces a callsite to be at least readonly.
625 getIntrinsicID() != Intrinsic::assume;
626}
627
636
// NOTE(review): original lines 637, 643, 645, and 650 are missing from this
// extraction — the function signature (MemoryEffects
// CallBase::getMemoryEffects() const {), the guards selecting readOnly vs
// writeOnly bundle effects, and the volatile inaccessible-memory effect line.
// Recover them from upstream; as shown here the two FnME |= lines would apply
// unconditionally, which is NOT the original behavior.
// Computes the call's memory effects as the intersection of call-site
// attributes and (for direct calls) callee effects, widened by operand-bundle
// and volatility effects.
638 MemoryEffects ME = getAttributes().getMemoryEffects();
639 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
640 MemoryEffects FnME = Fn->getMemoryEffects();
641 if (hasOperandBundles()) {
642 // TODO: Add a method to get memory effects for operand bundles instead.
644 FnME |= MemoryEffects::readOnly();
646 FnME |= MemoryEffects::writeOnly();
647 }
648 if (isVolatile()) {
649 // Volatile operations also access inaccessible memory.
651 }
// Call-site effects can only refine (never widen) the callee's effects.
652 ME &= FnME;
653 }
654 return ME;
655}
659
660/// Determine if the function does not access memory.
667
668/// Determine if the function does not access or only reads memory.
675
676/// Determine if the function does not access or only writes memory.
683
684/// Determine if the call can access memmory only using pointers based
685/// on its arguments.
692
693/// Determine if the function may only access memory that is
694/// inaccessible from the IR.
701
702/// Determine if the function may only access memory that is
703/// either inaccessible from the IR or pointed to by its arguments.
711
713 if (OpNo < arg_size()) {
714 // If the argument is passed byval, the callee does not have access to the
715 // original pointer and thus cannot capture it.
716 if (isByValArgument(OpNo))
717 return CaptureInfo::none();
718
720 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
721 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
722 return CI;
723 }
724
725 // Bundles on assumes are captures(none).
726 if (getIntrinsicID() == Intrinsic::assume)
727 return CaptureInfo::none();
728
729 // deopt operand bundles are captures(none)
730 auto &BOI = getBundleOpInfoForOperand(OpNo);
731 auto OBU = operandBundleFromBundleOpInfo(BOI);
732 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
733}
734
736 for (unsigned I = 0, E = arg_size(); I < E; ++I) {
738 continue;
739
741 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
742 CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
744 return true;
745 }
746 return false;
747}
748
749//===----------------------------------------------------------------------===//
750// CallInst Implementation
751//===----------------------------------------------------------------------===//
752
753void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
754 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
755 this->FTy = FTy;
756 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
757 "NumOperands not set up?");
758
759#ifndef NDEBUG
760 assert((Args.size() == FTy->getNumParams() ||
761 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
762 "Calling a function with bad signature!");
763
764 for (unsigned i = 0; i != Args.size(); ++i)
765 assert((i >= FTy->getNumParams() ||
766 FTy->getParamType(i) == Args[i]->getType()) &&
767 "Calling a function with a bad signature!");
768#endif
769
770 // Set operands in order of their index to match use-list-order
771 // prediction.
772 llvm::copy(Args, op_begin());
773 setCalledOperand(Func);
774
775 auto It = populateBundleOperandInfos(Bundles, Args.size());
776 (void)It;
777 assert(It + 1 == op_end() && "Should add up!");
778
779 setName(NameStr);
780}
781
// Zero-argument form: record the callee type, install the callee operand
// (the sole operand slot), and name the instruction. Only valid for callees
// whose signature declares no parameters.
782void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
783 this->FTy = FTy;
784 assert(getNumOperands() == 1 && "NumOperands not set up?");
785 setCalledOperand(Func);
786
787 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
788
789 setName(NameStr);
790}
791
792CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
793 AllocInfo AllocInfo, InsertPosition InsertBefore)
794 : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
795 InsertBefore) {
796 init(Ty, Func, Name);
797}
798
799CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
800 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
802 "Wrong number of operands allocated");
803 setTailCallKind(CI.getTailCallKind());
805
806 std::copy(CI.op_begin(), CI.op_end(), op_begin());
807 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
810}
811
813 InsertPosition InsertPt) {
814 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
815
816 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
817 Args, OpB, CI->getName(), InsertPt);
818 NewCI->setTailCallKind(CI->getTailCallKind());
819 NewCI->setCallingConv(CI->getCallingConv());
820 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
821 NewCI->setAttributes(CI->getAttributes());
822 NewCI->setDebugLoc(CI->getDebugLoc());
823 return NewCI;
824}
825
826// Update profile weight for call instruction by scaling it using the ratio
827// of S/T. The meaning of "branch_weights" meta data for call instruction is
828// transfered to represent call count.
830 if (T == 0) {
831 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
832 "div by 0. Ignoring. Likely the function "
833 << getParent()->getParent()->getName()
834 << " has 0 entry count, and contains call instructions "
835 "with non-zero prof info.");
836 return;
837 }
838 scaleProfData(*this, S, T);
839}
840
841//===----------------------------------------------------------------------===//
842// InvokeInst Implementation
843//===----------------------------------------------------------------------===//
844
845void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
846 BasicBlock *IfException, ArrayRef<Value *> Args,
848 const Twine &NameStr) {
849 this->FTy = FTy;
850
852 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
853 "NumOperands not set up?");
854
855#ifndef NDEBUG
856 assert(((Args.size() == FTy->getNumParams()) ||
857 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
858 "Invoking a function with bad signature");
859
860 for (unsigned i = 0, e = Args.size(); i != e; i++)
861 assert((i >= FTy->getNumParams() ||
862 FTy->getParamType(i) == Args[i]->getType()) &&
863 "Invoking a function with a bad signature!");
864#endif
865
866 // Set operands in order of their index to match use-list-order
867 // prediction.
868 llvm::copy(Args, op_begin());
869 setNormalDest(IfNormal);
870 setUnwindDest(IfException);
872
873 auto It = populateBundleOperandInfos(Bundles, Args.size());
874 (void)It;
875 assert(It + 3 == op_end() && "Should add up!");
876
877 setName(NameStr);
878}
879
880InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
881 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
882 assert(getNumOperands() == II.getNumOperands() &&
883 "Wrong number of operands allocated");
884 setCallingConv(II.getCallingConv());
885 std::copy(II.op_begin(), II.op_end(), op_begin());
886 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
888 SubclassOptionalData = II.SubclassOptionalData;
889}
890
892 InsertPosition InsertPt) {
893 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
894
895 auto *NewII = InvokeInst::Create(
896 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
897 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
898 NewII->setCallingConv(II->getCallingConv());
899 NewII->SubclassOptionalData = II->SubclassOptionalData;
900 NewII->setAttributes(II->getAttributes());
901 NewII->setDebugLoc(II->getDebugLoc());
902 return NewII;
903}
904
906 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
907}
908
910 if (T == 0) {
911 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
912 "div by 0. Ignoring. Likely the function "
913 << getParent()->getParent()->getName()
914 << " has 0 entry count, and contains call instructions "
915 "with non-zero prof info.");
916 return;
917 }
918 scaleProfData(*this, S, T);
919}
920
921//===----------------------------------------------------------------------===//
922// CallBrInst Implementation
923//===----------------------------------------------------------------------===//
924
925void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
926 ArrayRef<BasicBlock *> IndirectDests,
929 const Twine &NameStr) {
930 this->FTy = FTy;
931
932 assert(getNumOperands() == ComputeNumOperands(Args.size(),
933 IndirectDests.size(),
934 CountBundleInputs(Bundles)) &&
935 "NumOperands not set up?");
936
937#ifndef NDEBUG
938 assert(((Args.size() == FTy->getNumParams()) ||
939 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
940 "Calling a function with bad signature");
941
942 for (unsigned i = 0, e = Args.size(); i != e; i++)
943 assert((i >= FTy->getNumParams() ||
944 FTy->getParamType(i) == Args[i]->getType()) &&
945 "Calling a function with a bad signature!");
946#endif
947
948 // Set operands in order of their index to match use-list-order
949 // prediction.
950 llvm::copy(Args, op_begin());
951 NumIndirectDests = IndirectDests.size();
952 setDefaultDest(Fallthrough);
953 for (unsigned i = 0; i != NumIndirectDests; ++i)
954 setIndirectDest(i, IndirectDests[i]);
956
957 auto It = populateBundleOperandInfos(Bundles, Args.size());
958 (void)It;
959 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
960
961 setName(NameStr);
962}
963
964CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
965 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
966 AllocInfo) {
968 "Wrong number of operands allocated");
970 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
971 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
974 NumIndirectDests = CBI.NumIndirectDests;
975}
976
977CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
978 InsertPosition InsertPt) {
979 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
980
981 auto *NewCBI = CallBrInst::Create(
982 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
983 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
984 NewCBI->setCallingConv(CBI->getCallingConv());
985 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
986 NewCBI->setAttributes(CBI->getAttributes());
987 NewCBI->setDebugLoc(CBI->getDebugLoc());
988 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
989 return NewCBI;
990}
991
992//===----------------------------------------------------------------------===//
993// ReturnInst Implementation
994//===----------------------------------------------------------------------===//
995
996ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
997 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
998 AllocInfo) {
1000 "Wrong number of operands allocated");
1001 if (RI.getNumOperands())
1002 Op<0>() = RI.Op<0>();
1004}
1005
1006ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
1007 InsertPosition InsertBefore)
1008 : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
1009 InsertBefore) {
1010 if (retVal)
1011 Op<0>() = retVal;
1012}
1013
1014//===----------------------------------------------------------------------===//
1015// ResumeInst Implementation
1016//===----------------------------------------------------------------------===//
1017
1018ResumeInst::ResumeInst(const ResumeInst &RI)
1019 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
1020 AllocMarker) {
1021 Op<0>() = RI.Op<0>();
1022}
1023
1024ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
1025 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1026 AllocMarker, InsertBefore) {
1027 Op<0>() = Exn;
1028}
1029
1030//===----------------------------------------------------------------------===//
1031// CleanupReturnInst Implementation
1032//===----------------------------------------------------------------------===//
1033
1034CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
1036 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1038 "Wrong number of operands allocated");
1039 setSubclassData<Instruction::OpaqueField>(
1041 Op<0>() = CRI.Op<0>();
1042 if (CRI.hasUnwindDest())
1043 Op<1>() = CRI.Op<1>();
1044}
1045
1046void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1047 if (UnwindBB)
1048 setSubclassData<UnwindDestField>(true);
1049
1050 Op<0>() = CleanupPad;
1051 if (UnwindBB)
1052 Op<1>() = UnwindBB;
1053}
1054
1055CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1057 InsertPosition InsertBefore)
1058 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1059 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1060 init(CleanupPad, UnwindBB);
1061}
1062
1063//===----------------------------------------------------------------------===//
1064// CatchReturnInst Implementation
1065//===----------------------------------------------------------------------===//
1066void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1067 Op<0>() = CatchPad;
1068 Op<1>() = BB;
1069}
1070
1071CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1072 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1073 AllocMarker) {
1074 Op<0>() = CRI.Op<0>();
1075 Op<1>() = CRI.Op<1>();
1076}
1077
1078CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1079 InsertPosition InsertBefore)
1080 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1081 AllocMarker, InsertBefore) {
1082 init(CatchPad, BB);
1083}
1084
1085//===----------------------------------------------------------------------===//
1086// CatchSwitchInst Implementation
1087//===----------------------------------------------------------------------===//
1088
// Construct a catchswitch with space reserved for NumReservedValues handlers.
// One extra slot is always added for the parent-pad operand, and another when
// an unwind destination is present.
1089CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1090 unsigned NumReservedValues,
1091 const Twine &NameStr,
1092 InsertPosition InsertBefore)
1093 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
1094 InsertBefore) {
1095 if (UnwindDest)
1096 ++NumReservedValues;
1097 init(ParentPad, UnwindDest, NumReservedValues + 1);
1098 setName(NameStr);
1099}
1100
// Copy constructor: replicates CSI's hung-off operand list (parent pad,
// optional unwind destination, and all handlers).
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  // init() already stored operand 0 (the parent pad); copy the rest.
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}
1111
// Set up the hung-off operand list: operand 0 is the parent pad and, when
// present, the unwind destination occupies the next slot. Space for
// NumReservedValues operands (handlers included) is allocated up front.
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  // Start with one live operand (parent pad), or two if there is an unwind
  // destination.
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setUnwindDest(UnwindDest);
  }
}
1126
1127/// growOperands - grow operands - This grows the operand list in response to a
1128/// push_back style of operation. This grows the number of ops by 2 times.
1129void CatchSwitchInst::growOperands(unsigned Size) {
1130 unsigned NumOperands = getNumOperands();
1131 assert(NumOperands >= 1);
1132 if (ReservedSpace >= NumOperands + Size)
1133 return;
1134 ReservedSpace = (NumOperands + Size / 2) * 2;
1135 growHungoffUses(ReservedSpace);
1136}
1137
  // Append Handler in the next free slot, growing the hung-off use list
  // first if the reserved space is exhausted.
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  getOperandList()[OpNo] = Handler;
}
1145
  // Compact the list: move every handler after HI down one slot, overwriting
  // the removed one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

}
1156
1157//===----------------------------------------------------------------------===//
1158// FuncletPadInst Implementation
1159//===----------------------------------------------------------------------===//
1160void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1161 const Twine &NameStr) {
1162 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1163 llvm::copy(Args, op_begin());
1164 setParentPad(ParentPad);
1165 setName(NameStr);
1166}
1167
// Copy constructor: duplicates FPI's operands (funclet args + parent pad)
// into the newly allocated instruction, preserving the opcode.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
    : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
         "Wrong number of operands allocated");
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
}
1175
// Construct a funclet pad (catchpad or cleanuppad, selected by Op). The
// result type is the parent pad's type; operand setup goes through init().
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
  init(ParentPad, Args, NameStr);
}
1183
1184//===----------------------------------------------------------------------===//
1185// UnreachableInst Implementation
1186//===----------------------------------------------------------------------===//
1187
                                 InsertPosition InsertBefore)
    // No operands; the instruction's type is void.
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
                  AllocMarker, InsertBefore) {}
1192
1193//===----------------------------------------------------------------------===//
1194// BranchInst Implementation
1195//===----------------------------------------------------------------------===//
1196
1197void BranchInst::AssertOK() {
1198 if (isConditional())
1199 assert(getCondition()->getType()->isIntegerTy(1) &&
1200 "May only branch on boolean predicates!");
1201}
1202
// Unconditional branch. The sole operand (stored at the end of the operand
// array, hence Op<-1>) is the destination block.
BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1210
// Conditional branch. Operands are laid out from the end of the operand
// array: Op<-3> = condition, Op<-2> = false successor, Op<-1> = true
// successor.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       AllocInfo AllocInfo, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1223
// Copy constructor: a branch has either 1 operand (unconditional) or 3
// (condition + both successors).
BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  AllocInfo) {
         "Wrong number of operands allocated");
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
}
1238
                 "Cannot swap successors of an unconditional branch");
  // Successors live at the end of the operand list; exchanging the two Uses
  // swaps the taken/not-taken targets.
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
}
1248
1249//===----------------------------------------------------------------------===//
1250// AllocaInst Implementation
1251//===----------------------------------------------------------------------===//
1252
1253static Value *getAISize(LLVMContext &Context, Value *Amt) {
1254 if (!Amt)
1255 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1256 else {
1257 assert(!isa<BasicBlock>(Amt) &&
1258 "Passed basic block into allocation size parameter! Use other ctor");
1259 assert(Amt->getType()->isIntegerTy() &&
1260 "Allocation array size is not an integer!");
1261 }
1262 return Amt;
1263}
1264
  // Default alloca alignment is the preferred alignment of the allocated
  // type, taken from the enclosing function's DataLayout.
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}
1274
// Convenience form: scalar (non-array) alloca with default alignment.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1278
// Convenience form: derives the default (preferred) alignment from the
// insertion position's DataLayout.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}
1284
// Primary constructor. The result type is a pointer in AddrSpace; a null
// ArraySize is canonicalized to i32 1 by getAISize().
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       InsertPosition InsertBefore)
    : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
1295
    // A constant size of exactly 1 is not an array allocation.
    return !CI->isOne();
  return true;
}
1301
/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  // inalloca allocations are materialized dynamically, so they are never
  // "static" even when sitting in the entry block.
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
1313
1314//===----------------------------------------------------------------------===//
1315// LoadInst Implementation
1316//===----------------------------------------------------------------------===//
1317
void LoadInst::AssertOK() {
  // Sanity-check the (sole) pointer operand.
         "Ptr must have pointer type.");
}
1322
  // Default load/store alignment is the ABI alignment of the accessed type,
  // taken from the enclosing function's DataLayout.
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getABITypeAlign(Ty);
}
1332
// Convenience form: non-volatile load with default (ABI) alignment.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1336
// Convenience form: derives the ABI alignment from the insertion position.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1341
// Convenience form: non-atomic load at the given alignment.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}
1346
// Primary constructor: records ordering/scope via setAtomic() and validates
// the pointer operand in debug builds.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
1357
1358//===----------------------------------------------------------------------===//
1359// StoreInst Implementation
1360//===----------------------------------------------------------------------===//
1361
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  // Operand 1 is the address being stored to.
         "Ptr must have pointer type!");
}
1367
    // Convenience form: non-volatile store.
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
                     InsertPosition InsertBefore)
    // Convenience form: derives the ABI alignment of the stored value's
    // type from the insertion position.
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}
1376
                     InsertPosition InsertBefore)
    // Convenience form: non-atomic store at the given alignment.
                SyncScope::System, InsertBefore) {}
1381
                     AtomicOrdering Order, SyncScope::ID SSID,
                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
                  InsertBefore) {
  // Operand 0 is the stored value, operand 1 the address.
  Op<0>() = val;
  Op<1>() = addr;
  setAtomic(Order, SSID);
  AssertOK();
}
1394
1395//===----------------------------------------------------------------------===//
1396// AtomicCmpXchgInst Implementation
1397//===----------------------------------------------------------------------===//
1398
// Shared operand/attribute setup for cmpxchg: operands are (ptr, expected,
// new value); orderings, scope and alignment are stored in subclass data.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}
1418
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     InsertPosition InsertBefore)
    // The result is a {value, success-flag} pair: a literal struct of the
    // compare type and i1.
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, AllocMarker, InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1430
1431//===----------------------------------------------------------------------===//
1432// AtomicRMWInst Implementation
1433//===----------------------------------------------------------------------===//
1434
// Shared setup for atomicrmw: operands are (ptr, value); ordering, scope
// and alignment live in subclass data.
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
         "Ptr must have pointer type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}
1455
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, InsertPosition InsertBefore)
    // The result type of atomicrmw is the type of the operand value.
    : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
1462
  // Map each atomicrmw binop to its textual mnemonic as printed in IR.
  switch (Op) {
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
    return "umax";
    return "umin";
    return "fadd";
    return "fsub";
    return "fmax";
    return "fmin";
    return "fmaximum";
    return "fminimum";
    return "uinc_wrap";
    return "udec_wrap";
    return "usub_cond";
    return "usub_sat";
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}
1513
1514//===----------------------------------------------------------------------===//
1515// FenceInst Implementation
1516//===----------------------------------------------------------------------===//
1517
                     SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
  // A fence has no operands; ordering and scope are kept in subclass data.
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
1524
1525//===----------------------------------------------------------------------===//
1526// GetElementPtrInst Implementation
1527//===----------------------------------------------------------------------===//
1528
1529void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1530 const Twine &Name) {
1531 assert(getNumOperands() == 1 + IdxList.size() &&
1532 "NumOperands not initialized?");
1533 Op<0>() = Ptr;
1534 llvm::copy(IdxList, op_begin() + 1);
1535 setName(Name);
1536}
1537
// Copy constructor: clones the source/result element types and all operands
// (base pointer + indices).
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
    : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  assert(getNumOperands() == GEPI.getNumOperands() &&
         "Wrong number of operands allocated");
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
}
1548
  // Structs are indexed by field number; the index must name a valid field.
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  // Arrays and vectors accept any integer (or integer-vector) index and
  // always step to their element type; anything else is not indexable.
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}
1563
  // Constant-index form: struct indices are bounds-checked; array/vector
  // indices are not range-checked here.
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}
1576
// Walk a GEP index list, refining Ty one step per index. The first index
// steps over the pointer without changing the type, so it is skipped via
// slice(1); a null refinement aborts the walk.
template <typename IndexTy>
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    if (!Ty)
      return Ty;
  }
  return Ty;
}
1588
1592
1594 ArrayRef<Constant *> IdxList) {
1595 return getIndexedTypeInternal(Ty, IdxList);
1596}
1597
1601
/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
  // Operand 0 is the base pointer; indices start at operand 1.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
      if (!CI->isZero()) return false;
    } else {
      // A non-constant index cannot be known to be zero.
      return false;
    }
  }
  return true;
}
1615
/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
  // Operand 0 is the base pointer; indices start at operand 1.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
      return false;
  }
  return true;
}
1626
1630
  // Rebuild the flag set with inbounds toggled, preserving the remaining
  // no-wrap flags.
  GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
  if (B)
  else
    NW = NW.withoutInBounds();
  setNoWrapFlags(NW);
}
1639
  // These accessors all delegate to the GEPOperator view of this
  // instruction.
  return cast<GEPOperator>(this)->getNoWrapFlags();
}

  return cast<GEPOperator>(this)->isInBounds();
}

  return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
}

  return cast<GEPOperator>(this)->hasNoUnsignedWrap();
}
1655
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation; Offset is
  // accumulated in place.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}
1661
    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  // Delegate to the generic GEPOperator implementation, which splits the
  // total offset into constant and per-variable parts.
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                ConstantOffset);
}
1670
1671//===----------------------------------------------------------------------===//
1672// ExtractElementInst Implementation
1673//===----------------------------------------------------------------------===//
1674
// Construct an extractelement; the result type is Val's element type.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       InsertPosition InsertBef)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
1686
1687bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1688 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1689 return false;
1690 return true;
1691}
1692
1693//===----------------------------------------------------------------------===//
1694// InsertElementInst Implementation
1695//===----------------------------------------------------------------------===//
1696
// Construct an insertelement; the result has the same type as the input
// vector.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     InsertPosition InsertBef)
    : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
1708
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false; // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false;// Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false; // Third operand of insertelement must be an integer.
  return true;
}
1721
1722//===----------------------------------------------------------------------===//
1723// ShuffleVectorInst Implementation
1724//===----------------------------------------------------------------------===//
1725
  // Placeholder operands let a shufflevector be built before its real inputs
  // are known; poison of V's type is always type-correct.
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(V->getType());
}
1730
                                     InsertPosition InsertBefore)
    // Single-input convenience form: the second vector is a placeholder.
                  InsertBefore) {}

                                     const Twine &Name,
                                     InsertPosition InsertBefore)
                  InsertBefore) {}
1741
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, AllocMarker, InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  // Decode the constant mask into the integer form stored on the
  // instruction.
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
1759
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    // The result element count comes from the mask; scalability follows V1.
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, AllocMarker, InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
1774
  // Commuting swaps the two input vectors, so every mask element must be
  // re-pointed at the same lane of the other operand.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    if (MaskElt == PoisonMaskElem) {
      NewMask[i] = PoisonMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    // Shift first-operand lanes into the second operand's range and vice
    // versa.
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}
1792
                                        ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
      return false;

  // Only a splat of element zero (or an all-poison mask) passes this check.
  if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
    return false;

  return true;
}
1812
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32, and must be the same kind of vector as the
  // input vectors
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
    return false;

  // Check to see if Mask is valid.
    return true;

  // NOTE: Through vector ConstantInt we have the potential to support more
  // than just zero splat masks but that requires a LangRef change.
  if (isa<ScalableVectorType>(MaskTy))
    return false;

  unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();

  if (const auto *CI = dyn_cast<ConstantInt>(Mask))
    return !CI->uge(V1Size * 2);

  // Each defined mask lane must index within the two concatenated inputs;
  // undef lanes are always acceptable.
  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  // Any other constant form is not a valid mask.
  return false;
}
1862
                                       SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

  // Undef masks decode to all -1 (undefined lane); aggregate-zero decodes
  // to all zeros. This path also covers scalable vectors.
  if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    Result.append(EC.getKnownMinValue(), MaskVal);
    return;
  }

  assert(!EC.isScalable() &&
         "Scalable vector shuffle mask must be undef or zeroinitializer");

  unsigned NumElts = EC.getFixedValue();

  Result.reserve(NumElts);

  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  // General constant vector: undef lanes become -1, others their integer
  // value.
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}
1891
  // Keep the integer mask and its bitcode-serializable constant form in
  // sync.
  ShuffleMask.assign(Mask.begin(), Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}
1896
                                                          Type *ResultTy) {
  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
  // Scalable results can only encode an all-zero (splat) or all-poison mask.
  if (isa<ScalableVectorType>(ResultTy)) {
    assert(all_equal(Mask) && "Unexpected shuffle");
    Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
    if (Mask[0] == 0)
      return Constant::getNullValue(VecTy);
    return PoisonValue::get(VecTy);
  }
  // Fixed-width result: build an i32 constant per lane, poison for -1.
  for (int Elem : Mask) {
    if (Elem == PoisonMaskElem)
    else
      MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
  }
  return ConstantVector::get(MaskConst);
}
1916
1917static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1918 assert(!Mask.empty() && "Shuffle mask must contain elements");
1919 bool UsesLHS = false;
1920 bool UsesRHS = false;
1921 for (int I : Mask) {
1922 if (I == -1)
1923 continue;
1924 assert(I >= 0 && I < (NumOpElts * 2) &&
1925 "Out-of-bounds shuffle mask element");
1926 UsesLHS |= (I < NumOpElts);
1927 UsesRHS |= (I >= NumOpElts);
1928 if (UsesLHS && UsesRHS)
1929 return false;
1930 }
1931 // Allow for degenerate case: completely undef mask means neither source is used.
1932 return UsesLHS || UsesRHS;
1933}
1934
  // Public wrapper: we don't have vector operand size information, so assume
  // the operands are the same size as the mask.
  return isSingleSourceMaskImpl(Mask, NumSrcElts);
}
1940
1941static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1942 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1943 return false;
1944 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1945 if (Mask[i] == -1)
1946 continue;
1947 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1948 return false;
1949 }
1950 return true;
1951}
1952
  // An identity requires the output and source lane counts to match.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, NumSrcElts);
}
1960
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // A reverse must read all its lanes from a single source.
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;

  // The number of elements in the mask must be at least 2.
  if (NumSrcElts < 2)
    return false;

  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    // Lane I must come from mirrored lane NumSrcElts-1-I of either operand.
    if (Mask[I] != (NumSrcElts - 1 - I) &&
        Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
      return false;
  }
  return true;
}
1980
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;
  // Every defined lane must read element 0 of one of the operands.
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != 0 && Mask[I] != NumSrcElts)
      return false;
  }
  return true;
}
1994
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Select is differentiated from identity. It requires using both sources.
  if (isSingleSourceMask(Mask, NumSrcElts))
    return false;
  // Each defined lane I must take lane I from one of the two operands.
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
      return false;
  }
  return true;
}
2009
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int Sz = Mask.size();
  if (Sz < 2 || !isPowerOf2_32(Sz))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumSrcElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int I = 2; I < Sz; ++I) {
    int MaskEltVal = Mask[I];
    // Undef lanes cannot participate in a transpose pattern.
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[I - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}
2045
                                     int &Index) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;

    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
        return false;

      StartIndex = MaskEltVal - I;
      continue;
    }

    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }

  // An all-undef mask never establishes a start index.
  if (StartIndex == -1)
    return false;

  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}
2079
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    // All defined lanes must agree on a single starting offset.
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  // The extracted subvector must lie entirely within the source.
  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}
2108
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  // Classify every lane as undef / from src0 / from src1, tracking whether
  // each source's lanes sit in identity positions.
  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
2181
2183 // FIXME: Not currently possible to express a shuffle mask for a scalable
2184 // vector for this case.
2186 return false;
2187
2188 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2189 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2190 if (NumMaskElts <= NumOpElts)
2191 return false;
2192
2193 // The first part of the mask must choose elements from exactly 1 source op.
2195 if (!isIdentityMaskImpl(Mask, NumOpElts))
2196 return false;
2197
2198 // All extending must be with undef elements.
2199 for (int i = NumOpElts; i < NumMaskElts; ++i)
2200 if (Mask[i] != -1)
2201 return false;
2202
2203 return true;
2204}
2205
2207 // FIXME: Not currently possible to express a shuffle mask for a scalable
2208 // vector for this case.
2210 return false;
2211
2212 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2213 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2214 if (NumMaskElts >= NumOpElts)
2215 return false;
2216
2217 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2218}
2219
2221 // Vector concatenation is differentiated from identity with padding.
2223 return false;
2224
2225 // FIXME: Not currently possible to express a shuffle mask for a scalable
2226 // vector for this case.
2228 return false;
2229
2230 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2231 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2232 if (NumMaskElts != NumOpElts * 2)
2233 return false;
2234
2235 // Use the mask length rather than the operands' vector lengths here. We
2236 // already know that the shuffle returns a vector twice as long as the inputs,
2237 // and neither of the inputs are undef vectors. If the mask picks consecutive
2238 // elements from both inputs, then this is a concatenation of the inputs.
2239 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2240}
2241
2243 int ReplicationFactor, int VF) {
2244 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2245 "Unexpected mask size.");
2246
2247 for (int CurrElt : seq(VF)) {
2248 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2249 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2250 "Run out of mask?");
2251 Mask = Mask.drop_front(ReplicationFactor);
2252 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2253 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2254 }))
2255 return false;
2256 }
2257 assert(Mask.empty() && "Did not consume the whole mask?");
2258
2259 return true;
2260}
2261
2263 int &ReplicationFactor, int &VF) {
2264 // undef-less case is trivial.
2265 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2266 ReplicationFactor =
2267 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2268 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2269 return false;
2270 VF = Mask.size() / ReplicationFactor;
2271 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2272 }
2273
2274 // However, if the mask contains undef's, we have to enumerate possible tuples
2275 // and pick one. There are bounds on replication factor: [1, mask size]
2276 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2277 // Additionally, mask size is a replication factor multiplied by vector size,
2278 // which further significantly reduces the search space.
2279
2280 // Before doing that, let's perform basic correctness checking first.
2281 int Largest = -1;
2282 for (int MaskElt : Mask) {
2283 if (MaskElt == PoisonMaskElem)
2284 continue;
2285 // Elements must be in non-decreasing order.
2286 if (MaskElt < Largest)
2287 return false;
2288 Largest = std::max(Largest, MaskElt);
2289 }
2290
2291 // Prefer larger replication factor if all else equal.
2292 for (int PossibleReplicationFactor :
2293 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2294 if (Mask.size() % PossibleReplicationFactor != 0)
2295 continue;
2296 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2297 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2298 PossibleVF))
2299 continue;
2300 ReplicationFactor = PossibleReplicationFactor;
2301 VF = PossibleVF;
2302 return true;
2303 }
2304
2305 return false;
2306}
2307
// Decompose this shuffle's mask into a (ReplicationFactor, VF) pair: true if
// the mask replicates each of VF consecutive source elements
// ReplicationFactor times in a row. Both out-parameters are only meaningful
// when the function returns true.
bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
                                          int &VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
    return false;

  // Fixed-width vectors: candidate VF is the first operand's element count.
  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  // A replication mask's length must be an exact multiple of VF.
  if (ShuffleMask.size() % VF != 0)
    return false;
  ReplicationFactor = ShuffleMask.size() / VF;

  // Verify the mask really matches the single candidate (factor, VF) pair.
  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
}
2322
2324 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2325 Mask.size() % VF != 0)
2326 return false;
2327 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2328 ArrayRef<int> SubMask = Mask.slice(K, VF);
2329 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2330 continue;
2331 SmallBitVector Used(VF, false);
2332 for (int Idx : SubMask) {
2333 if (Idx != PoisonMaskElem && Idx < VF)
2334 Used.set(Idx);
2335 }
2336 if (!Used.all())
2337 return false;
2338 }
2339 return true;
2340}
2341
2342/// Return true if this shuffle mask is a replication mask.
2344 // Not possible to express a shuffle mask for a scalable vector for this
2345 // case.
2347 return false;
2348 if (!isSingleSourceMask(ShuffleMask, VF))
2349 return false;
2350
2351 return isOneUseSingleSourceMask(ShuffleMask, VF);
2352}
2353
// Return true if this shuffle interleaves its source operands with the given
// interleaving factor. NOTE(review): OpTy is presumably the first operand's
// type as a FixedVectorType — its defining line is not visible here; confirm.
bool ShuffleVectorInst::isInterleave(unsigned Factor) {
  // shuffle_vector can only interleave fixed length vectors - for scalable
  // vectors, see the @llvm.vector.interleave2 intrinsic
  if (!OpTy)
    return false;
  unsigned OpNumElts = OpTy->getNumElements();

  // Two source operands contribute, so the combined input width is twice the
  // per-operand element count.
  return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
}
2364
2366 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2367 SmallVectorImpl<unsigned> &StartIndexes) {
2368 unsigned NumElts = Mask.size();
2369 if (NumElts % Factor)
2370 return false;
2371
2372 unsigned LaneLen = NumElts / Factor;
2373 if (!isPowerOf2_32(LaneLen))
2374 return false;
2375
2376 StartIndexes.resize(Factor);
2377
2378 // Check whether each element matches the general interleaved rule.
2379 // Ignore undef elements, as long as the defined elements match the rule.
2380 // Outer loop processes all factors (x, y, z in the above example)
2381 unsigned I = 0, J;
2382 for (; I < Factor; I++) {
2383 unsigned SavedLaneValue;
2384 unsigned SavedNoUndefs = 0;
2385
2386 // Inner loop processes consecutive accesses (x, x+1... in the example)
2387 for (J = 0; J < LaneLen - 1; J++) {
2388 // Lane computes x's position in the Mask
2389 unsigned Lane = J * Factor + I;
2390 unsigned NextLane = Lane + Factor;
2391 int LaneValue = Mask[Lane];
2392 int NextLaneValue = Mask[NextLane];
2393
2394 // If both are defined, values must be sequential
2395 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2396 LaneValue + 1 != NextLaneValue)
2397 break;
2398
2399 // If the next value is undef, save the current one as reference
2400 if (LaneValue >= 0 && NextLaneValue < 0) {
2401 SavedLaneValue = LaneValue;
2402 SavedNoUndefs = 1;
2403 }
2404
2405 // Undefs are allowed, but defined elements must still be consecutive:
2406 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2407 // Verify this by storing the last non-undef followed by an undef
2408 // Check that following non-undef masks are incremented with the
2409 // corresponding distance.
2410 if (SavedNoUndefs > 0 && LaneValue < 0) {
2411 SavedNoUndefs++;
2412 if (NextLaneValue >= 0 &&
2413 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2414 break;
2415 }
2416 }
2417
2418 if (J < LaneLen - 1)
2419 return false;
2420
2421 int StartMask = 0;
2422 if (Mask[I] >= 0) {
2423 // Check that the start of the I range (J=0) is greater than 0
2424 StartMask = Mask[I];
2425 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2426 // StartMask defined by the last value in lane
2427 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2428 } else if (SavedNoUndefs > 0) {
2429 // StartMask defined by some non-zero value in the j loop
2430 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2431 }
2432 // else StartMask remains set to 0, i.e. all elements are undefs
2433
2434 if (StartMask < 0)
2435 return false;
2436 // We must stay within the vectors; This case can happen with undefs.
2437 if (StartMask + LaneLen > NumInputElts)
2438 return false;
2439
2440 StartIndexes[I] = StartMask;
2441 }
2442
2443 return true;
2444}
2445
2446/// Check if the mask is a DE-interleave mask of the given factor
2447/// \p Factor like:
2448/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2450 unsigned Factor,
2451 unsigned &Index) {
2452 // Check all potential start indices from 0 to (Factor - 1).
2453 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2454 unsigned I = 0;
2455
2456 // Check that elements are in ascending order by Factor. Ignore undef
2457 // elements.
2458 for (; I < Mask.size(); I++)
2459 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2460 break;
2461
2462 if (I == Mask.size()) {
2463 Index = Idx;
2464 return true;
2465 }
2466 }
2467
2468 return false;
2469}
2470
2471/// Try to lower a vector shuffle as a bit rotation.
2472///
2473/// Look for a repeated rotation pattern in each sub group.
2474/// Returns an element-wise left bit rotation amount or -1 if failed.
2475static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2476 int NumElts = Mask.size();
2477 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2478
2479 int RotateAmt = -1;
2480 for (int i = 0; i != NumElts; i += NumSubElts) {
2481 for (int j = 0; j != NumSubElts; ++j) {
2482 int M = Mask[i + j];
2483 if (M < 0)
2484 continue;
2485 if (M < i || M >= i + NumSubElts)
2486 return -1;
2487 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2488 if (0 <= RotateAmt && Offset != RotateAmt)
2489 return -1;
2490 RotateAmt = Offset;
2491 }
2492 }
2493 return RotateAmt;
2494}
2495
2497 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2498 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2499 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2500 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2501 if (EltRotateAmt < 0)
2502 continue;
2503 RotateAmt = EltRotateAmt * EltSizeInBits;
2504 return true;
2505 }
2506
2507 return false;
2508}
2509
2510//===----------------------------------------------------------------------===//
2511// InsertValueInst Class
2512//===----------------------------------------------------------------------===//
2513
// Initialize the operands, index chain, and name of a freshly allocated
// insertvalue instruction. Agg is the aggregate, Val the value inserted at
// the position described by Idxs.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  // Record the index chain and name the instruction.
  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2532
// Copy constructor: duplicates IVI's aggregate/value operands and its index
// chain.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue, AllocMarker),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
}
2540
2541//===----------------------------------------------------------------------===//
2542// ExtractValueInst Class
2543//===----------------------------------------------------------------------===//
2544
2545void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2546 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2547
2548 // There's no fundamental reason why we require at least one index.
2549 // But there's no present need to support it.
2550 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2551
2552 Indices.append(Idxs.begin(), Idxs.end());
2553 setName(Name);
2554}
2555
// Copy constructor: duplicates EVI's aggregate operand and index chain.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
                       (BasicBlock *)nullptr),
      Indices(EVI.Indices) {
}
2562
2563// getIndexedType - Returns the type of the element that would be extracted
2564// with an extractvalue instruction with the specified parameters.
2565//
2566// A null type is returned if the indices are invalid for the specified
2567// pointer type.
2568//
2570 ArrayRef<unsigned> Idxs) {
2571 for (unsigned Index : Idxs) {
2572 // We can't use CompositeType::indexValid(Index) here.
2573 // indexValid() always returns true for arrays because getelementptr allows
2574 // out-of-bounds indices. Since we don't allow those for extractvalue and
2575 // insertvalue we need to check array indexing manually.
2576 // Since the only other types we can index into are struct types it's just
2577 // as easy to check those manually as well.
2578 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2579 if (Index >= AT->getNumElements())
2580 return nullptr;
2581 Agg = AT->getElementType();
2582 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2583 if (Index >= ST->getNumElements())
2584 return nullptr;
2585 Agg = ST->getElementType(Index);
2586 } else {
2587 // Not a valid type to index into.
2588 return nullptr;
2589 }
2590 }
2591 return Agg;
2592}
2593
2594//===----------------------------------------------------------------------===//
2595// UnaryOperator Class
2596//===----------------------------------------------------------------------===//
2597
2599 const Twine &Name, InsertPosition InsertBefore)
2600 : UnaryInstruction(Ty, iType, S, InsertBefore) {
2601 Op<0>() = S;
2602 setName(Name);
2603 AssertOK();
2604}
2605
2607 InsertPosition InsertBefore) {
2608 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2609}
2610
2611void UnaryOperator::AssertOK() {
2612 Value *LHS = getOperand(0);
2613 (void)LHS; // Silence warnings.
2614#ifndef NDEBUG
2615 switch (getOpcode()) {
2616 case FNeg:
2617 assert(getType() == LHS->getType() &&
2618 "Unary operation should return same type as operand!");
2619 assert(getType()->isFPOrFPVectorTy() &&
2620 "Tried to create a floating-point operation on a "
2621 "non-floating-point type!");
2622 break;
2623 default: llvm_unreachable("Invalid opcode provided");
2624 }
2625#endif
2626}
2627
2628//===----------------------------------------------------------------------===//
2629// BinaryOperator Class
2630//===----------------------------------------------------------------------===//
2631
2633 const Twine &Name, InsertPosition InsertBefore)
2634 : Instruction(Ty, iType, AllocMarker, InsertBefore) {
2635 Op<0>() = S1;
2636 Op<1>() = S2;
2637 setName(Name);
2638 AssertOK();
2639}
2640
// Verify (in debug builds) that the operands and result type of a newly
// constructed binary operator satisfy the opcode's type constraints:
// matching operand types, a result type equal to the operand type, and the
// integer/floating-point class the opcode requires.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  // Integer arithmetic.
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  // Floating-point arithmetic.
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  // Integer division.
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  // Floating-point division.
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  // Integer remainder.
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  // Floating-point remainder.
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  // Shifts.
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  // Bitwise logic.
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
2708
2710 const Twine &Name,
2711 InsertPosition InsertBefore) {
2712 assert(S1->getType() == S2->getType() &&
2713 "Cannot create binary operator with two operands of differing type!");
2714 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2715}
2716
2718 InsertPosition InsertBefore) {
2719 Value *Zero = ConstantInt::get(Op->getType(), 0);
2720 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2721 InsertBefore);
2722}
2723
2725 InsertPosition InsertBefore) {
2726 Value *Zero = ConstantInt::get(Op->getType(), 0);
2727 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2728}
2729
2731 InsertPosition InsertBefore) {
2732 Constant *C = Constant::getAllOnesValue(Op->getType());
2733 return new BinaryOperator(Instruction::Xor, Op, C,
2734 Op->getType(), Name, InsertBefore);
2735}
2736
2737// Exchange the two operands to this instruction. This instruction is safe to
2738// use on any binary instruction and does not modify the semantics of the
2739// instruction.
2741 if (!isCommutative())
2742 return true; // Can't commute operands
2743 Op<0>().swap(Op<1>());
2744 return false;
2745}
2746
2747//===----------------------------------------------------------------------===//
2748// FPMathOperator Class
2749//===----------------------------------------------------------------------===//
2750
2752 const MDNode *MD =
2753 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2754 if (!MD)
2755 return 0.0;
2757 return Accuracy->getValueAPF().convertToFloat();
2758}
2759
2760//===----------------------------------------------------------------------===//
2761// CastInst Class
2762//===----------------------------------------------------------------------===//
2763
2764// Just determine if this cast only deals with integral->integral conversion.
2766 switch (getOpcode()) {
2767 default: return false;
2768 case Instruction::ZExt:
2769 case Instruction::SExt:
2770 case Instruction::Trunc:
2771 return true;
2772 case Instruction::BitCast:
2773 return getOperand(0)->getType()->isIntegerTy() &&
2774 getType()->isIntegerTy();
2775 }
2776}
2777
2778/// This function determines if the CastInst does not require any bits to be
2779/// changed in order to effect the cast. Essentially, it identifies cases where
2780/// no code gen is necessary for the cast, hence the name no-op cast. For
2781/// example, the following are all no-op casts:
2782/// # bitcast i32* %x to i8*
2783/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
2785/// Determine if the described cast is a no-op.
2787 Type *SrcTy,
2788 Type *DestTy,
2789 const DataLayout &DL) {
2790 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2791 switch (Opcode) {
2792 default: llvm_unreachable("Invalid CastOp");
2793 case Instruction::Trunc:
2794 case Instruction::ZExt:
2795 case Instruction::SExt:
2796 case Instruction::FPTrunc:
2797 case Instruction::FPExt:
2798 case Instruction::UIToFP:
2799 case Instruction::SIToFP:
2800 case Instruction::FPToUI:
2801 case Instruction::FPToSI:
2802 case Instruction::AddrSpaceCast:
2803 // TODO: Target informations may give a more accurate answer here.
2804 return false;
2805 case Instruction::BitCast:
2806 return true; // BitCast never modifies bits.
2807 case Instruction::PtrToAddr:
2808 case Instruction::PtrToInt:
2809 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2810 DestTy->getScalarSizeInBits();
2811 case Instruction::IntToPtr:
2812 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2813 SrcTy->getScalarSizeInBits();
2814 }
2815}
2816
2818 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2819}
2820
2821/// This function determines if a pair of casts can be eliminated and what
2822/// opcode should be used in the elimination. This assumes that there are two
2823/// instructions like this:
2824/// * %F = firstOpcode SrcTy %x to MidTy
2825/// * %S = secondOpcode MidTy %F to DstTy
2826/// The function returns a resultOpcode so these two casts can be replaced with:
2827/// * %Replacement = resultOpcode %SrcTy %x to DstTy
2828/// If no such cast is permitted, the function returns 0.
2830 Instruction::CastOps secondOp,
2831 Type *SrcTy, Type *MidTy, Type *DstTy,
2832 const DataLayout *DL) {
2833 // Define the 144 possibilities for these two cast instructions. The values
2834 // in this matrix determine what to do in a given situation and select the
2835 // case in the switch below. The rows correspond to firstOp, the columns
2836 // correspond to secondOp. In looking at the table below, keep in mind
2837 // the following cast properties:
2838 //
2839 // Size Compare Source Destination
2840 // Operator Src ? Size Type Sign Type Sign
2841 // -------- ------------ ------------------- ---------------------
2842 // TRUNC > Integer Any Integral Any
2843 // ZEXT < Integral Unsigned Integer Any
2844 // SEXT < Integral Signed Integer Any
2845 // FPTOUI n/a FloatPt n/a Integral Unsigned
2846 // FPTOSI n/a FloatPt n/a Integral Signed
2847 // UITOFP n/a Integral Unsigned FloatPt n/a
2848 // SITOFP n/a Integral Signed FloatPt n/a
2849 // FPTRUNC > FloatPt n/a FloatPt n/a
2850 // FPEXT < FloatPt n/a FloatPt n/a
2851 // PTRTOINT n/a Pointer n/a Integral Unsigned
2852 // PTRTOADDR n/a Pointer n/a Integral Unsigned
2853 // INTTOPTR n/a Integral Unsigned Pointer n/a
2854 // BITCAST = FirstClass n/a FirstClass n/a
2855 // ADDRSPCST n/a Pointer n/a Pointer n/a
2856 //
2857 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2858 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2859 // into "fptoui double to i64", but this loses information about the range
2860 // of the produced value (we no longer know the top-part is all zeros).
2861 // Further this conversion is often much more expensive for typical hardware,
2862 // and causes issues when building libgcc. We disallow fptosi+sext for the
2863 // same reason.
2864 const unsigned numCastOps =
2865 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2866 // clang-format off
2867 static const uint8_t CastResults[numCastOps][numCastOps] = {
2868 // T F F U S F F P P I B A -+
2869 // R Z S P P I I T P 2 2 N T S |
2870 // U E E 2 2 2 2 R E I A T C C +- secondOp
2871 // N X X U S F F N X N D 2 V V |
2872 // C T T I I P P C T T R P T T -+
2873 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc -+
2874 { 8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt |
2875 { 8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt |
2876 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI |
2877 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI |
2878 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP +- firstOp
2879 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP |
2880 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc |
2881 { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt |
2882 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt |
2883 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
2884 { 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr |
2885 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast |
2886 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2887 };
2888 // clang-format on
2889
2890 // TODO: This logic could be encoded into the table above and handled in the
2891 // switch below.
2892 // If either of the casts are a bitcast from scalar to vector, disallow the
2893 // merging. However, any pair of bitcasts are allowed.
2894 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2895 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2896 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2897
2898 // Check if any of the casts convert scalars <-> vectors.
2899 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2900 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2901 if (!AreBothBitcasts)
2902 return 0;
2903
2904 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2905 [secondOp-Instruction::CastOpsBegin];
2906 switch (ElimCase) {
2907 case 0:
2908 // Categorically disallowed.
2909 return 0;
2910 case 1:
2911 // Allowed, use first cast's opcode.
2912 return firstOp;
2913 case 2:
2914 // Allowed, use second cast's opcode.
2915 return secondOp;
2916 case 3:
2917 // No-op cast in second op implies firstOp as long as the DestTy
2918 // is integer and we are not converting between a vector and a
2919 // non-vector type.
2920 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2921 return firstOp;
2922 return 0;
2923 case 4:
2924 // No-op cast in second op implies firstOp as long as the DestTy
2925 // matches MidTy.
2926 if (DstTy == MidTy)
2927 return firstOp;
2928 return 0;
2929 case 5:
2930 // No-op cast in first op implies secondOp as long as the SrcTy
2931 // is an integer.
2932 if (SrcTy->isIntegerTy())
2933 return secondOp;
2934 return 0;
2935 case 7: {
2936 // Disable inttoptr/ptrtoint optimization if enabled.
2937 if (DisableI2pP2iOpt)
2938 return 0;
2939
2940 // Cannot simplify if address spaces are different!
2941 if (SrcTy != DstTy)
2942 return 0;
2943
2944 // Cannot simplify if the intermediate integer size is smaller than the
2945 // pointer size.
2946 unsigned MidSize = MidTy->getScalarSizeInBits();
2947 if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy))
2948 return 0;
2949
2950 return Instruction::BitCast;
2951 }
2952 case 8: {
2953 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
2954 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2955 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2956 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2957 unsigned DstSize = DstTy->getScalarSizeInBits();
2958 if (SrcTy == DstTy)
2959 return Instruction::BitCast;
2960 if (SrcSize < DstSize)
2961 return firstOp;
2962 if (SrcSize > DstSize)
2963 return secondOp;
2964 return 0;
2965 }
2966 case 9:
2967 // zext, sext -> zext, because sext can't sign extend after zext
2968 return Instruction::ZExt;
2969 case 11: {
2970 // inttoptr, ptrtoint/ptrtoaddr -> integer cast
2971 if (!DL)
2972 return 0;
2973 unsigned MidSize = secondOp == Instruction::PtrToAddr
2974 ? DL->getAddressSizeInBits(MidTy)
2975 : DL->getPointerTypeSizeInBits(MidTy);
2976 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2977 unsigned DstSize = DstTy->getScalarSizeInBits();
2978 // If the middle size is smaller than both source and destination,
2979 // an additional masking operation would be required.
2980 if (MidSize < SrcSize && MidSize < DstSize)
2981 return 0;
2982 if (DstSize < SrcSize)
2983 return Instruction::Trunc;
2984 if (DstSize > SrcSize)
2985 return Instruction::ZExt;
2986 return Instruction::BitCast;
2987 }
2988 case 12:
2989 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2990 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2991 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2992 return Instruction::AddrSpaceCast;
2993 return Instruction::BitCast;
2994 case 13:
2995 // FIXME: this state can be merged with (1), but the following assert
2996 // is useful to check the correcteness of the sequence due to semantic
2997 // change of bitcast.
2998 assert(
2999 SrcTy->isPtrOrPtrVectorTy() &&
3000 MidTy->isPtrOrPtrVectorTy() &&
3001 DstTy->isPtrOrPtrVectorTy() &&
3002 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3003 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3004 "Illegal addrspacecast, bitcast sequence!");
3005 // Allowed, use first cast's opcode
3006 return firstOp;
3007 case 14:
3008 // bitcast, addrspacecast -> addrspacecast
3009 return Instruction::AddrSpaceCast;
3010 case 15:
3011 // FIXME: this state can be merged with (1), but the following assert
3012 // is useful to check the correcteness of the sequence due to semantic
3013 // change of bitcast.
3014 assert(
3015 SrcTy->isIntOrIntVectorTy() &&
3016 MidTy->isPtrOrPtrVectorTy() &&
3017 DstTy->isPtrOrPtrVectorTy() &&
3018 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3019 "Illegal inttoptr, bitcast sequence!");
3020 // Allowed, use first cast's opcode
3021 return firstOp;
3022 case 16:
3023 // FIXME: this state can be merged with (2), but the following assert
3024 // is useful to check the correcteness of the sequence due to semantic
3025 // change of bitcast.
3026 assert(
3027 SrcTy->isPtrOrPtrVectorTy() &&
3028 MidTy->isPtrOrPtrVectorTy() &&
3029 DstTy->isIntOrIntVectorTy() &&
3030 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3031 "Illegal bitcast, ptrtoint sequence!");
3032 // Allowed, use second cast's opcode
3033 return secondOp;
3034 case 17:
3035 // (sitofp (zext x)) -> (uitofp x)
3036 return Instruction::UIToFP;
3037 case 99:
3038 // Cast combination can't happen (error in input). This is for all cases
3039 // where the MidTy is not the same for the two cast instructions.
3040 llvm_unreachable("Invalid Cast Combination");
3041 default:
3042 llvm_unreachable("Error in CastResults table!!!");
3043 }
3044}
3045
3047 const Twine &Name, InsertPosition InsertBefore) {
// Factory dispatch: construct the concrete CastInst subclass for `op`.
3048 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3049 // Construct and return the appropriate CastInst subclass
3050 switch (op) {
3051 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3052 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3053 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3054 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3055 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3056 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3057 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3058 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3059 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3060 case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
3061 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3062 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3063 case BitCast:
3064 return new BitCastInst(S, Ty, Name, InsertBefore);
3065 case AddrSpaceCast:
3066 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3067 default:
3068 llvm_unreachable("Invalid opcode provided");
3069 }
3070 }
3071
3073 InsertPosition InsertBefore) {
// Bitcast when source and destination scalar widths already match,
// otherwise zero-extend.
3074 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3075 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3076 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3077 }
3078
3080 InsertPosition InsertBefore) {
// Bitcast when the scalar widths already match, otherwise sign-extend.
3081 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3082 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3083 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3084 }
3085
3087 InsertPosition InsertBefore) {
// Bitcast when the scalar widths already match, otherwise truncate.
3088 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3089 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3090 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3091 }
3092
3093 /// Create a BitCast or a PtrToInt cast instruction
3095 InsertPosition InsertBefore) {
// Integer destination -> ptrtoint; pointer destination -> bitcast or
// addrspacecast. Asserts enforce pointer source and matching vector shape.
3096 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3097 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3098 "Invalid cast");
3099 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3100 assert((!Ty->isVectorTy() ||
3101 cast<VectorType>(Ty)->getElementCount() ==
3102 cast<VectorType>(S->getType())->getElementCount()) &&
3103 "Invalid cast");
3104
3105 if (Ty->isIntOrIntVectorTy())
3106 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3107
3108 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3109 }
3110
3112 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
// addrspacecast when the address spaces differ, plain bitcast otherwise.
3113 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3114 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3115
3116 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3117 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3118
3119 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3120 }
3121
3123 const Twine &Name,
3124 InsertPosition InsertBefore) {
// ptr<->int conversions become ptrtoint/inttoptr; anything else is a bitcast.
3125 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3126 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3127 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3128 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3129
3130 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3131 }
3132
3134 const Twine &Name,
3135 InsertPosition InsertBefore) {
3136 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3137 "Invalid integer cast");
3138 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3139 unsigned DstBits = Ty->getScalarSizeInBits();
// Pick bitcast/trunc/sext/zext from the relative scalar widths; `isSigned`
// only matters when widening.
3140 Instruction::CastOps opcode =
3141 (SrcBits == DstBits ? Instruction::BitCast :
3142 (SrcBits > DstBits ? Instruction::Trunc :
3143 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3144 return Create(opcode, C, Ty, Name, InsertBefore);
3145 }
3146
3148 InsertPosition InsertBefore) {
// Width decides the opcode: equal -> bitcast, narrower -> fptrunc,
// wider -> fpext.
3149 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3150 "Invalid cast");
3151 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3152 unsigned DstBits = Ty->getScalarSizeInBits();
3153 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3154 Instruction::CastOps opcode =
3155 (SrcBits == DstBits ? Instruction::BitCast :
3156 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3157 return Create(opcode, C, Ty, Name, InsertBefore);
3158 }
3159
3160bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3161 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3162 return false;
3163
3164 if (SrcTy == DestTy)
3165 return true;
3166
3167 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3168 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3169 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3170 // An element by element cast. Valid if casting the elements is valid.
3171 SrcTy = SrcVecTy->getElementType();
3172 DestTy = DestVecTy->getElementType();
3173 }
3174 }
3175 }
3176
3177 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3178 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3179 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3180 }
3181 }
3182
3183 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3184 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3185
3186 // Could still have vectors of pointers if the number of elements doesn't
3187 // match
3188 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3189 return false;
3190
3191 if (SrcBits != DestBits)
3192 return false;
3193
3194 return true;
3195}
3196
3198 const DataLayout &DL) {
// ptr<->int is accepted only when the integer width equals the pointer's
// DataLayout size and the pointer type is integral; everything else defers
// to isBitCastable.
3199 // ptrtoint and inttoptr are not allowed on non-integral pointers
3200 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3201 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3202 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3203 !DL.isNonIntegralPointerType(PtrTy));
3204 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3205 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3206 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3207 !DL.isNonIntegralPointerType(PtrTy));
3208
3209 return isBitCastable(SrcTy, DestTy);
3210 }
3211
3212 // Provide a way to get a "cast" where the cast opcode is inferred from the
3213 // types and size of the operand. This, basically, is a parallel of the
3214 // logic in the castIsValid function below. This axiom should hold:
3215 // castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3216 // should not assert in castIsValid. In other words, this produces a "correct"
3217 // casting opcode for the arguments passed to it.
3220 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
// Note: the signedness flags only influence int<->int (sext vs zext) and
// int<->fp (sitofp/uitofp, fptosi/fptoui) choices.
3221 Type *SrcTy = Src->getType();
3222
3223 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3224 "Only first class types are castable!");
3225
3226 if (SrcTy == DestTy)
3227 return BitCast;
3228
3229 // FIXME: Check address space sizes here
3230 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3231 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3232 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3233 // An element by element cast. Find the appropriate opcode based on the
3234 // element types.
3235 SrcTy = SrcVecTy->getElementType();
3236 DestTy = DestVecTy->getElementType();
3237 }
3238
3239 // Get the bit sizes, we'll need these
3240 // FIXME: This doesn't work for scalable vector types with different element
3241 // counts that don't call getElementType above.
3242 unsigned SrcBits =
3243 SrcTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3244 unsigned DestBits =
3245 DestTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3246
3247 // Run through the possibilities ...
3248 if (DestTy->isIntegerTy()) { // Casting to integral
3249 if (SrcTy->isIntegerTy()) { // Casting from integral
3250 if (DestBits < SrcBits)
3251 return Trunc; // int -> smaller int
3252 else if (DestBits > SrcBits) { // its an extension
3253 if (SrcIsSigned)
3254 return SExt; // signed -> SEXT
3255 else
3256 return ZExt; // unsigned -> ZEXT
3257 } else {
3258 return BitCast; // Same size, No-op cast
3259 }
3260 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3261 if (DestIsSigned)
3262 return FPToSI; // FP -> sint
3263 else
3264 return FPToUI; // FP -> uint
3265 } else if (SrcTy->isVectorTy()) {
3266 assert(DestBits == SrcBits &&
3267 "Casting vector to integer of different width");
3268 return BitCast; // Same size, no-op cast
3269 } else {
3270 assert(SrcTy->isPointerTy() &&
3271 "Casting from a value that is not first-class type");
3272 return PtrToInt; // ptr -> int
3273 }
3274 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3275 if (SrcTy->isIntegerTy()) { // Casting from integral
3276 if (SrcIsSigned)
3277 return SIToFP; // sint -> FP
3278 else
3279 return UIToFP; // uint -> FP
3280 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3281 if (DestBits < SrcBits) {
3282 return FPTrunc; // FP -> smaller FP
3283 } else if (DestBits > SrcBits) {
3284 return FPExt; // FP -> larger FP
3285 } else {
3286 return BitCast; // same size, no-op cast
3287 }
3288 } else if (SrcTy->isVectorTy()) {
3289 assert(DestBits == SrcBits &&
3290 "Casting vector to floating point of different width");
3291 return BitCast; // same size, no-op cast
3292 }
3293 llvm_unreachable("Casting pointer or non-first class to float");
3294 } else if (DestTy->isVectorTy()) {
3295 assert(DestBits == SrcBits &&
3296 "Illegal cast to vector (wrong type or size)");
3297 return BitCast;
3298 } else if (DestTy->isPointerTy()) {
3299 if (SrcTy->isPointerTy()) {
3300 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3301 return AddrSpaceCast;
3302 return BitCast; // ptr -> ptr
3303 } else if (SrcTy->isIntegerTy()) {
3304 return IntToPtr; // int -> ptr
3305 }
3306 llvm_unreachable("Casting pointer to other than pointer or int");
3307 }
3308 llvm_unreachable("Casting to type that is not first-class");
3309 }
3310
3311//===----------------------------------------------------------------------===//
3312// CastInst SubClass Constructors
3313//===----------------------------------------------------------------------===//
3314
3315 /// Check that the construction parameters for a CastInst are correct. This
3316 /// could be broken out into the separate constructors but it is useful to have
3317 /// it in one place and to eliminate the redundant code for getting the sizes
3318 /// of the types involved.
3319 bool
3321 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3322 SrcTy->isAggregateType() || DstTy->isAggregateType())
3323 return false;
3324
3325 // Get the size of the types in bits, and whether we are dealing
3326 // with vector types, we'll need this later.
3327 bool SrcIsVec = isa<VectorType>(SrcTy);
3328 bool DstIsVec = isa<VectorType>(DstTy);
3329 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3330 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3331
3332 // If these are vector types, get the lengths of the vectors (using zero for
3333 // scalar types means that checking that vector lengths match also checks that
3334 // scalars are not being converted to vectors or vectors to scalars).
3335 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3337 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3339
3340 // Switch on the opcode provided
// Each case validates the operand/result type kinds, element counts, and
// (where relevant) the relative scalar bit widths for that opcode.
3341 switch (op) {
3342 default: return false; // This is an input error
3343 case Instruction::Trunc:
3344 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3345 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3346 case Instruction::ZExt:
3347 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3348 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3349 case Instruction::SExt:
3350 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3351 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3352 case Instruction::FPTrunc:
3353 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3354 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3355 case Instruction::FPExt:
3356 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3357 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3358 case Instruction::UIToFP:
3359 case Instruction::SIToFP:
3360 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3361 SrcEC == DstEC;
3362 case Instruction::FPToUI:
3363 case Instruction::FPToSI:
3364 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3365 SrcEC == DstEC;
3366 case Instruction::PtrToAddr:
3367 case Instruction::PtrToInt:
3368 if (SrcEC != DstEC)
3369 return false;
3370 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3371 case Instruction::IntToPtr:
3372 if (SrcEC != DstEC)
3373 return false;
3374 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3375 case Instruction::BitCast: {
3376 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3377 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3378
3379 // BitCast implies a no-op cast of type only. No bits change.
3380 // However, you can't cast pointers to anything but pointers.
3381 if (!SrcPtrTy != !DstPtrTy)
3382 return false;
3383
3384 // For non-pointer cases, the cast is okay if the source and destination bit
3385 // widths are identical.
3386 if (!SrcPtrTy)
3387 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3388
3389 // If both are pointers then the address spaces must match.
3390 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3391 return false;
3392
3393 // A vector of pointers must have the same number of elements.
3394 if (SrcIsVec && DstIsVec)
3395 return SrcEC == DstEC;
3396 if (SrcIsVec)
3397 return SrcEC == ElementCount::getFixed(1);
3398 if (DstIsVec)
3399 return DstEC == ElementCount::getFixed(1);
3400
3401 return true;
3402 }
3403 case Instruction::AddrSpaceCast: {
3404 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3405 if (!SrcPtrTy)
3406 return false;
3407
3408 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3409 if (!DstPtrTy)
3410 return false;
3411
3412 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3413 return false;
3414
3415 return SrcEC == DstEC;
3416 }
3417 }
3418 }
3419
3421 InsertPosition InsertBefore)
3422 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3423 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3424 }
3425
3426 ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
3427 InsertPosition InsertBefore)
3428 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3429 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3430 }
3431
3432 SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
3433 InsertPosition InsertBefore)
3434 : CastInst(Ty, SExt, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3435 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3436 }
3437
3439 InsertPosition InsertBefore)
3440 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3441 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3442 }
3443
3445 InsertPosition InsertBefore)
3446 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3447 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3448 }
3449
3451 InsertPosition InsertBefore)
3452 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3453 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3454 }
3455
3457 InsertPosition InsertBefore)
3458 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3459 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3460 }
3461
3463 InsertPosition InsertBefore)
3464 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3465 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3466 }
3467
3469 InsertPosition InsertBefore)
3470 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3471 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3472 }
3473
3475 InsertPosition InsertBefore)
3476 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3477 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3478 }
3479
3481 InsertPosition InsertBefore)
3482 : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3483 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3484 }
3485
3487 InsertPosition InsertBefore)
3488 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3489 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3490 }
3491
3493 InsertPosition InsertBefore)
3494 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3495 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3496 }
3497
3499 InsertPosition InsertBefore)
3500 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
// Construction just forwards to CastInst; legality is asserted here.
3501 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3502 }
3503
3504//===----------------------------------------------------------------------===//
3505// CmpInst Classes
3506//===----------------------------------------------------------------------===//
3507
3509 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3510 Instruction *FlagsSource)
3511 : Instruction(ty, op, AllocMarker, InsertBefore) {
// Wire up the two operands, predicate and name; optionally copy IR flags
// from FlagsSource.
3512 Op<0>() = LHS;
3513 Op<1>() = RHS;
3514 setPredicate(predicate);
3515 setName(Name);
3516 if (FlagsSource)
3517 copyIRFlags(FlagsSource);
3518 }
3519
3521 const Twine &Name, InsertPosition InsertBefore) {
// Factory: builds an ICmpInst for Instruction::ICmp, otherwise an FCmpInst.
// The InsertBefore position is only forwarded when it is valid.
3522 if (Op == Instruction::ICmp) {
3523 if (InsertBefore.isValid())
3524 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3525 S1, S2, Name);
3526 else
3527 return new ICmpInst(CmpInst::Predicate(predicate),
3528 S1, S2, Name);
3529 }
3530
3531 if (InsertBefore.isValid())
3532 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3533 S1, S2, Name);
3534 else
3535 return new FCmpInst(CmpInst::Predicate(predicate),
3536 S1, S2, Name);
3537 }
3538
3540 Value *S2,
3541 const Instruction *FlagsSource,
3542 const Twine &Name,
3543 InsertPosition InsertBefore) {
// Same as Create, then copies IR flags from FlagsSource onto the new compare.
3544 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3545 Inst->copyIRFlags(FlagsSource);
3546 return Inst;
3547 }
3548
// Forward to the concrete subclass (ICmpInst or FCmpInst) implementation.
3550 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3551 IC->swapOperands();
3552 else
3553 cast<FCmpInst>(this)->swapOperands();
3554 }
3555
// Forward to the concrete subclass (ICmpInst or FCmpInst) implementation.
3557 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3558 return IC->isCommutative();
3559 return cast<FCmpInst>(this)->isCommutative();
3560 }
3561
// Dispatch on predicate kind: integer predicates go to ICmpInst::isEquality,
// FP predicates to FCmpInst::isEquality.
3564 return ICmpInst::isEquality(P);
3566 return FCmpInst::isEquality(P);
3567 llvm_unreachable("Unsupported predicate kind");
3568 }
3569
3570// Returns true if either operand of CmpInst is a provably non-zero
3571// floating-point constant.
3572static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3573 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3574 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3575 if (auto *Const = LHS ? LHS : RHS) {
3576 using namespace llvm::PatternMatch;
3577 return match(Const, m_NonZeroNotDenormalFP());
3578 }
3579 return false;
3580}
3581
3582 // Floating-point equality is not an equivalence when comparing +0.0 with
3583 // -0.0, when comparing NaN with another value, or when flushing
3584 // denormals-to-zero.
3585 bool CmpInst::isEquivalence(bool Invert) const {
// When Invert is set, classify the inverse predicate instead.
3586 switch (Invert ? getInversePredicate() : getPredicate()) {
3588 return true;
3590 if (!hasNoNaNs())
3591 return false;
3592 [[fallthrough]];
3594 return hasNonZeroFPOperands(this);
3595 default:
3596 return false;
3597 }
3598 }
3599
// Map each predicate to its logical negation (e.g. EQ <-> NE, OGT <-> ULE).
3601 switch (pred) {
3602 default: llvm_unreachable("Unknown cmp predicate!");
3603 case ICMP_EQ: return ICMP_NE;
3604 case ICMP_NE: return ICMP_EQ;
3605 case ICMP_UGT: return ICMP_ULE;
3606 case ICMP_ULT: return ICMP_UGE;
3607 case ICMP_UGE: return ICMP_ULT;
3608 case ICMP_ULE: return ICMP_UGT;
3609 case ICMP_SGT: return ICMP_SLE;
3610 case ICMP_SLT: return ICMP_SGE;
3611 case ICMP_SGE: return ICMP_SLT;
3612 case ICMP_SLE: return ICMP_SGT;
3613
3614 case FCMP_OEQ: return FCMP_UNE;
3615 case FCMP_ONE: return FCMP_UEQ;
3616 case FCMP_OGT: return FCMP_ULE;
3617 case FCMP_OLT: return FCMP_UGE;
3618 case FCMP_OGE: return FCMP_ULT;
3619 case FCMP_OLE: return FCMP_UGT;
3620 case FCMP_UEQ: return FCMP_ONE;
3621 case FCMP_UNE: return FCMP_OEQ;
3622 case FCMP_UGT: return FCMP_OLE;
3623 case FCMP_ULT: return FCMP_OGE;
3624 case FCMP_UGE: return FCMP_OLT;
3625 case FCMP_ULE: return FCMP_OGT;
3626 case FCMP_ORD: return FCMP_UNO;
3627 case FCMP_UNO: return FCMP_ORD;
3628 case FCMP_TRUE: return FCMP_FALSE;
3629 case FCMP_FALSE: return FCMP_TRUE;
3630 }
3631 }
3632
// Printable mnemonic for a predicate; "unknown" for out-of-range values.
3634 switch (Pred) {
3635 default: return "unknown";
3636 case FCmpInst::FCMP_FALSE: return "false";
3637 case FCmpInst::FCMP_OEQ: return "oeq";
3638 case FCmpInst::FCMP_OGT: return "ogt";
3639 case FCmpInst::FCMP_OGE: return "oge";
3640 case FCmpInst::FCMP_OLT: return "olt";
3641 case FCmpInst::FCMP_OLE: return "ole";
3642 case FCmpInst::FCMP_ONE: return "one";
3643 case FCmpInst::FCMP_ORD: return "ord";
3644 case FCmpInst::FCMP_UNO: return "uno";
3645 case FCmpInst::FCMP_UEQ: return "ueq";
3646 case FCmpInst::FCMP_UGT: return "ugt";
3647 case FCmpInst::FCMP_UGE: return "uge";
3648 case FCmpInst::FCMP_ULT: return "ult";
3649 case FCmpInst::FCMP_ULE: return "ule";
3650 case FCmpInst::FCMP_UNE: return "une";
3651 case FCmpInst::FCMP_TRUE: return "true";
3652 case ICmpInst::ICMP_EQ: return "eq";
3653 case ICmpInst::ICMP_NE: return "ne";
3654 case ICmpInst::ICMP_SGT: return "sgt";
3655 case ICmpInst::ICMP_SGE: return "sge";
3656 case ICmpInst::ICMP_SLT: return "slt";
3657 case ICmpInst::ICMP_SLE: return "sle";
3658 case ICmpInst::ICMP_UGT: return "ugt";
3659 case ICmpInst::ICMP_UGE: return "uge";
3660 case ICmpInst::ICMP_ULT: return "ult";
3661 case ICmpInst::ICMP_ULE: return "ule";
3662 }
3663 }
3664
// Stream a predicate's mnemonic via getPredicateName.
3666 OS << CmpInst::getPredicateName(Pred);
3667 return OS;
3668 }
3669
// Unsigned relational predicates become their signed counterparts; equality
// and already-signed predicates pass through unchanged.
3671 switch (pred) {
3672 default: llvm_unreachable("Unknown icmp predicate!");
3673 case ICMP_EQ: case ICMP_NE:
3674 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3675 return pred;
3676 case ICMP_UGT: return ICMP_SGT;
3677 case ICMP_ULT: return ICMP_SLT;
3678 case ICMP_UGE: return ICMP_SGE;
3679 case ICMP_ULE: return ICMP_SLE;
3680 }
3681 }
3682
// Signed relational predicates become their unsigned counterparts; equality
// and already-unsigned predicates pass through unchanged.
3684 switch (pred) {
3685 default: llvm_unreachable("Unknown icmp predicate!");
3686 case ICMP_EQ: case ICMP_NE:
3687 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3688 return pred;
3689 case ICMP_SGT: return ICMP_UGT;
3690 case ICMP_SLT: return ICMP_ULT;
3691 case ICMP_SGE: return ICMP_UGE;
3692 case ICMP_SLE: return ICMP_ULE;
3693 }
3694 }
3695
// Predicate that yields the same result when the two operands are exchanged
// (e.g. SGT <-> SLT); symmetric predicates map to themselves.
3697 switch (pred) {
3698 default: llvm_unreachable("Unknown cmp predicate!");
3699 case ICMP_EQ: case ICMP_NE:
3700 return pred;
3701 case ICMP_SGT: return ICMP_SLT;
3702 case ICMP_SLT: return ICMP_SGT;
3703 case ICMP_SGE: return ICMP_SLE;
3704 case ICMP_SLE: return ICMP_SGE;
3705 case ICMP_UGT: return ICMP_ULT;
3706 case ICMP_ULT: return ICMP_UGT;
3707 case ICMP_UGE: return ICMP_ULE;
3708 case ICMP_ULE: return ICMP_UGE;
3709
3710 case FCMP_FALSE: case FCMP_TRUE:
3711 case FCMP_OEQ: case FCMP_ONE:
3712 case FCMP_UEQ: case FCMP_UNE:
3713 case FCMP_ORD: case FCMP_UNO:
3714 return pred;
3715 case FCMP_OGT: return FCMP_OLT;
3716 case FCMP_OLT: return FCMP_OGT;
3717 case FCMP_OGE: return FCMP_OLE;
3718 case FCMP_OLE: return FCMP_OGE;
3719 case FCMP_UGT: return FCMP_ULT;
3720 case FCMP_ULT: return FCMP_UGT;
3721 case FCMP_UGE: return FCMP_ULE;
3722 case FCMP_ULE: return FCMP_UGE;
3723 }
3724 }
3725
// True for relational predicates that include equality (>=, <=).
3727 switch (pred) {
3728 case ICMP_SGE:
3729 case ICMP_SLE:
3730 case ICMP_UGE:
3731 case ICMP_ULE:
3732 case FCMP_OGE:
3733 case FCMP_OLE:
3734 case FCMP_UGE:
3735 case FCMP_ULE:
3736 return true;
3737 default:
3738 return false;
3739 }
3740 }
3741
// True for strict orderings (>, <) that exclude equality.
3743 switch (pred) {
3744 case ICMP_SGT:
3745 case ICMP_SLT:
3746 case ICMP_UGT:
3747 case ICMP_ULT:
3748 case FCMP_OGT:
3749 case FCMP_OLT:
3750 case FCMP_UGT:
3751 case FCMP_ULT:
3752 return true;
3753 default:
3754 return false;
3755 }
3756 }
3757
// Convert >= / <= into > / <; other predicates are returned unchanged.
3759 switch (pred) {
3760 case ICMP_SGE:
3761 return ICMP_SGT;
3762 case ICMP_SLE:
3763 return ICMP_SLT;
3764 case ICMP_UGE:
3765 return ICMP_UGT;
3766 case ICMP_ULE:
3767 return ICMP_ULT;
3768 case FCMP_OGE:
3769 return FCMP_OGT;
3770 case FCMP_OLE:
3771 return FCMP_OLT;
3772 case FCMP_UGE:
3773 return FCMP_UGT;
3774 case FCMP_ULE:
3775 return FCMP_ULT;
3776 default:
3777 return pred;
3778 }
3779 }
3780
// Convert > / < into >= / <=; other predicates are returned unchanged.
3782 switch (pred) {
3783 case ICMP_SGT:
3784 return ICMP_SGE;
3785 case ICMP_SLT:
3786 return ICMP_SLE;
3787 case ICMP_UGT:
3788 return ICMP_UGE;
3789 case ICMP_ULT:
3790 return ICMP_ULE;
3791 case FCMP_OGT:
3792 return FCMP_OGE;
3793 case FCMP_OLT:
3794 return FCMP_OLE;
3795 case FCMP_UGT:
3796 return FCMP_UGE;
3797 case FCMP_ULT:
3798 return FCMP_ULE;
3799 default:
3800 return pred;
3801 }
3802 }
3803
// Toggle between the strict and non-strict form of a relational predicate.
3805 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3806
3807 if (isStrictPredicate(pred))
3808 return getNonStrictPredicate(pred);
3809 if (isNonStrictPredicate(pred))
3810 return getStrictPredicate(pred);
3811
3812 llvm_unreachable("Unknown predicate!");
3813 }
3814
// NOTE(review): only ICMP_UGE is visible in this case group; presumably the
// remaining unsigned relational predicates (UGT/ULT/ULE) share it — confirm.
3816 switch (predicate) {
3817 default: return false;
3819 case ICmpInst::ICMP_UGE: return true;
3820 }
3821 }
3822
// NOTE(review): only ICMP_SGE is visible in this case group; presumably the
// remaining signed relational predicates (SGT/SLT/SLE) share it — confirm.
3824 switch (predicate) {
3825 default: return false;
3827 case ICmpInst::ICMP_SGE: return true;
3828 }
3829 }
3830
3831 bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3832 ICmpInst::Predicate Pred) {
// Constant-fold an integer compare: each predicate maps onto the matching
// APInt comparison helper.
3833 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3834 switch (Pred) {
3836 return LHS.eq(RHS);
3838 return LHS.ne(RHS);
3840 return LHS.ugt(RHS);
3842 return LHS.uge(RHS);
3844 return LHS.ult(RHS);
3846 return LHS.ule(RHS);
3848 return LHS.sgt(RHS);
3850 return LHS.sge(RHS);
3852 return LHS.slt(RHS);
3854 return LHS.sle(RHS);
3855 default:
3856 llvm_unreachable("Unexpected non-integer predicate.");
3857 };
3858 }
3859
3860 bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3861 FCmpInst::Predicate Pred) {
// Constant-fold an FP compare: every predicate is decided from the single
// APFloat comparison result R (cmpUnordered covers the NaN cases).
3862 APFloat::cmpResult R = LHS.compare(RHS);
3863 switch (Pred) {
3864 default:
3865 llvm_unreachable("Invalid FCmp Predicate");
3867 return false;
3869 return true;
3870 case FCmpInst::FCMP_UNO:
3871 return R == APFloat::cmpUnordered;
3872 case FCmpInst::FCMP_ORD:
3873 return R != APFloat::cmpUnordered;
3874 case FCmpInst::FCMP_UEQ:
3875 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3876 case FCmpInst::FCMP_OEQ:
3877 return R == APFloat::cmpEqual;
3878 case FCmpInst::FCMP_UNE:
3879 return R != APFloat::cmpEqual;
3880 case FCmpInst::FCMP_ONE:
3882 case FCmpInst::FCMP_ULT:
3883 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3884 case FCmpInst::FCMP_OLT:
3885 return R == APFloat::cmpLessThan;
3886 case FCmpInst::FCMP_UGT:
3888 case FCmpInst::FCMP_OGT:
3889 return R == APFloat::cmpGreaterThan;
3890 case FCmpInst::FCMP_ULE:
3891 return R != APFloat::cmpGreaterThan;
3892 case FCmpInst::FCMP_OLE:
3893 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3894 case FCmpInst::FCMP_UGE:
3895 return R != APFloat::cmpLessThan;
3896 case FCmpInst::FCMP_OGE:
3897 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3898 }
3899 }
3900
3901std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3902 const KnownBits &RHS,
3903 ICmpInst::Predicate Pred) {
3904 switch (Pred) {
3905 case ICmpInst::ICMP_EQ:
3906 return KnownBits::eq(LHS, RHS);
3907 case ICmpInst::ICMP_NE:
3908 return KnownBits::ne(LHS, RHS);
3909 case ICmpInst::ICMP_UGE:
3910 return KnownBits::uge(LHS, RHS);
3911 case ICmpInst::ICMP_UGT:
3912 return KnownBits::ugt(LHS, RHS);
3913 case ICmpInst::ICMP_ULE:
3914 return KnownBits::ule(LHS, RHS);
3915 case ICmpInst::ICMP_ULT:
3916 return KnownBits::ult(LHS, RHS);
3917 case ICmpInst::ICMP_SGE:
3918 return KnownBits::sge(LHS, RHS);
3919 case ICmpInst::ICMP_SGT:
3920 return KnownBits::sgt(LHS, RHS);
3921 case ICmpInst::ICMP_SLE:
3922 return KnownBits::sle(LHS, RHS);
3923 case ICmpInst::ICMP_SLT:
3924 return KnownBits::slt(LHS, RHS);
3925 default:
3926 llvm_unreachable("Unexpected non-integer predicate.");
3927 }
3928}
3929
// Equality predicates are signedness-neutral; relational predicates swap
// between the signed and unsigned families.
3931 if (CmpInst::isEquality(pred))
3932 return pred;
3933 if (isSigned(pred))
3934 return getUnsignedPredicate(pred);
3935 if (isUnsigned(pred))
3936 return getSignedPredicate(pred);
3937
3938 llvm_unreachable("Unknown predicate!");
3939 }
3940
// NOTE(review): only FCMP_ORD is visible in this case group; presumably the
// other ordered FP predicates share it — confirm against the header.
3942 switch (predicate) {
3943 default: return false;
3946 case FCmpInst::FCMP_ORD: return true;
3947 }
3948 }
3949
// NOTE(review): only FCMP_UNO is visible in this case group; presumably the
// other unordered FP predicates share it — confirm against the header.
3951 switch (predicate) {
3952 default: return false;
3955 case FCmpInst::FCMP_UNO: return true;
3956 }
3957 }
3958
// Predicates that evaluate to true when both operands compare equal.
3960 switch(predicate) {
3961 default: return false;
3962 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3963 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3964 }
3965 }
3966
// Predicates that evaluate to false when both operands compare equal.
3968 switch(predicate) {
3969 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3970 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3971 default: return false;
3972 }
3973 }
3974
// NOTE(review): per the name, the implication assumes Pred1 and Pred2 are
// applied to the same operand pair — confirm with the header documentation.
3976 // If the predicates match, then we know the first condition implies the
3977 // second is true.
3978 if (CmpPredicate::getMatching(Pred1, Pred2))
3979 return true;
3980
3981 if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3983 else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3985
3986 switch (Pred1) {
3987 default:
3988 break;
3989 case CmpInst::ICMP_EQ:
3990 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3991 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3992 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3993 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3994 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3995 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3996 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3997 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3998 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3999 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
4000 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
4001 }
4002 return false;
4003 }
4004
4006 CmpPredicate Pred2) {
// presumably forwards to isImpliedTrueByMatchingCmp with Pred2 inverted
// (A implies !B iff A implies the inverse of B) — confirm.
4007 return isImpliedTrueByMatchingCmp(Pred1,
4009 }
4010
4012 CmpPredicate Pred2) {
// Three-valued result: true/false when implication is decided either way,
// std::nullopt when nothing can be concluded.
4013 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4014 return true;
4015 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4016 return false;
4017 return std::nullopt;
4018 }
4019
4020//===----------------------------------------------------------------------===//
4021// CmpPredicate Implementation
4022//===----------------------------------------------------------------------===//
4023
// Combine two CmpPredicates, reconciling the samesign flag: an exact
// predicate match keeps samesign only if both sides agree; otherwise a
// samesign predicate may match its opposite-signedness twin.
4024 std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4025 CmpPredicate B) {
4026 if (A.Pred == B.Pred)
4027 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4029 return {};
4030 if (A.HasSameSign &&
4032 return B.Pred;
4033 if (B.HasSameSign &&
4035 return A.Pred;
4036 return {};
4037 }
4038
4042
// ICmpInst supplies a full CmpPredicate (via getCmpPredicate); other
// compares fall back to the plain Predicate.
4044 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4045 return ICI->getCmpPredicate();
4046 return Cmp->getPredicate();
4047 }
4048
4052
// Swapped form of the compare instruction's own predicate.
4054 return getSwapped(get(Cmp));
4055 }
4056
4057//===----------------------------------------------------------------------===//
4058// SwitchInst Implementation
4059//===----------------------------------------------------------------------===//
4060
4061 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
// Operand 0 is the switch condition, operand 1 the default destination;
// case value/successor pairs occupy the remaining hung-off use slots.
4062 assert(Value && Default && NumReserved);
4063 ReservedSpace = NumReserved;
4065 allocHungoffUses(ReservedSpace);
4066
4067 Op<0>() = Value;
4068 Op<1>() = Default;
4069 }
4070
4071/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4072/// switch on and a default destination. The number of additional cases can
4073/// be specified here to make memory allocation more efficient. This
4074/// constructor can also autoinsert before another instruction.
4075 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4076 InsertPosition InsertBefore)
4077 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4078 AllocMarker, InsertBefore) {
// Reserve 2 slots for (condition, default) plus two per anticipated case.
4079 init(Value, Default, 2+NumCases*2);
4080 }
4081
4082 SwitchInst::SwitchInst(const SwitchInst &SI)
4083 : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
// Copy the condition/default via init, then every case value/successor pair.
4084 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4085 setNumHungOffUseOperands(SI.getNumOperands());
4086 Use *OL = getOperandList();
4087 const Use *InOL = SI.getOperandList();
4088 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
4089 OL[i] = InOL[i];
4090 OL[i+1] = InOL[i+1];
4091 }
4092 SubclassOptionalData = SI.SubclassOptionalData;
4093 }
4094
4095/// addCase - Add an entry to the switch instruction...
4096///
4098 unsigned NewCaseIdx = getNumCases();
4099 unsigned OpNo = getNumOperands();
4100 if (OpNo+2 > ReservedSpace)
4101 growOperands(); // Get more space!
4102 // Initialize some new operands.
4103 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
4105 CaseHandle Case(this, NewCaseIdx);
4106 Case.setValue(OnVal);
4107 Case.setSuccessor(Dest);
4108}
4109
4110/// removeCase - This method removes the specified case and its successor
4111/// from the switch instruction.
4113 unsigned idx = I->getCaseIndex();
4114
4115 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
4116
4117 unsigned NumOps = getNumOperands();
4118 Use *OL = getOperandList();
4119
4120 // Overwrite this case with the end of the list.
4121 if (2 + (idx + 1) * 2 != NumOps) {
4122 OL[2 + idx * 2] = OL[NumOps - 2];
4123 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
4124 }
4125
4126 // Nuke the last value.
4127 OL[NumOps-2].set(nullptr);
4128 OL[NumOps-2+1].set(nullptr);
4130
4131 return CaseIt(this, idx);
4132}
4133
4134/// growOperands - grow operands - This grows the operand list in response
4135/// to a push_back style of operation. This grows the number of ops by 3 times.
4136///
4137void SwitchInst::growOperands() {
4138 unsigned e = getNumOperands();
4139 unsigned NumOps = e*3;
4140
4141 ReservedSpace = NumOps;
4142 growHungoffUses(ReservedSpace);
4143}
4144
4146 MDNode *ProfileData = getBranchWeightMDNode(SI);
4147 if (!ProfileData)
4148 return;
4149
4150 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4151 llvm_unreachable("number of prof branch_weights metadata operands does "
4152 "not correspond to number of succesors");
4153 }
4154
4156 if (!extractBranchWeights(ProfileData, Weights))
4157 return;
4158 this->Weights = std::move(Weights);
4159}
4160
4163 if (Weights) {
4164 assert(SI.getNumSuccessors() == Weights->size() &&
4165 "num of prof branch_weights must accord with num of successors");
4166 Changed = true;
4167 // Copy the last case to the place of the removed one and shrink.
4168 // This is tightly coupled with the way SwitchInst::removeCase() removes
4169 // the cases in SwitchInst::removeCase(CaseIt).
4170 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4171 Weights->pop_back();
4172 }
4173 return SI.removeCase(I);
4174}
4175
4177 auto *DestBlock = I->getCaseSuccessor();
4178 if (Weights) {
4179 auto Weight = getSuccessorWeight(I->getCaseIndex() + 1);
4180 (*Weights)[0] = Weight.value();
4181 }
4182
4183 SI.setDefaultDest(DestBlock);
4184}
4185
4187 ConstantInt *OnVal, BasicBlock *Dest,
4189 SI.addCase(OnVal, Dest);
4190
4191 if (!Weights && W && *W) {
4192 Changed = true;
4193 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4194 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4195 } else if (Weights) {
4196 Changed = true;
4197 Weights->push_back(W.value_or(0));
4198 }
4199 if (Weights)
4200 assert(SI.getNumSuccessors() == Weights->size() &&
4201 "num of prof branch_weights must accord with num of successors");
4202}
4203
4206 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4207 Changed = false;
4208 if (Weights)
4209 Weights->resize(0);
4210 return SI.eraseFromParent();
4211}
4212
4215 if (!Weights)
4216 return std::nullopt;
4217 return (*Weights)[idx];
4218}
4219
4222 if (!W)
4223 return;
4224
4225 if (!Weights && *W)
4226 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4227
4228 if (Weights) {
4229 auto &OldW = (*Weights)[idx];
4230 if (*W != OldW) {
4231 Changed = true;
4232 OldW = *W;
4233 }
4234 }
4235}
4236
4239 unsigned idx) {
4240 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4241 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4242 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4243 ->getValue()
4244 .getZExtValue();
4245
4246 return std::nullopt;
4247}
4248
4249//===----------------------------------------------------------------------===//
4250// IndirectBrInst Implementation
4251//===----------------------------------------------------------------------===//
4252
4253void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4254 assert(Address && Address->getType()->isPointerTy() &&
4255 "Address of indirectbr must be a pointer");
4256 ReservedSpace = 1+NumDests;
4258 allocHungoffUses(ReservedSpace);
4259
4260 Op<0>() = Address;
4261}
4262
4263
4264/// growOperands - grow operands - This grows the operand list in response
4265/// to a push_back style of operation. This grows the number of ops by 2 times.
4266///
4267void IndirectBrInst::growOperands() {
4268 unsigned e = getNumOperands();
4269 unsigned NumOps = e*2;
4270
4271 ReservedSpace = NumOps;
4272 growHungoffUses(ReservedSpace);
4273}
4274
4275IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4276 InsertPosition InsertBefore)
4277 : Instruction(Type::getVoidTy(Address->getContext()),
4278 Instruction::IndirectBr, AllocMarker, InsertBefore) {
4279 init(Address, NumCases);
4280}
4281
4282IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4283 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4284 AllocMarker) {
4285 NumUserOperands = IBI.NumUserOperands;
4286 allocHungoffUses(IBI.getNumOperands());
4287 Use *OL = getOperandList();
4288 const Use *InOL = IBI.getOperandList();
4289 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4290 OL[i] = InOL[i];
4291 SubclassOptionalData = IBI.SubclassOptionalData;
4292}
4293
4294/// addDestination - Add a destination.
4295///
4297 unsigned OpNo = getNumOperands();
4298 if (OpNo+1 > ReservedSpace)
4299 growOperands(); // Get more space!
4300 // Initialize some new operands.
4301 assert(OpNo < ReservedSpace && "Growing didn't work!");
4303 getOperandList()[OpNo] = DestBB;
4304}
4305
4306/// removeDestination - This method removes the specified successor from the
4307/// indirectbr instruction.
4309 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4310
4311 unsigned NumOps = getNumOperands();
4312 Use *OL = getOperandList();
4313
4314 // Replace this value with the last one.
4315 OL[idx+1] = OL[NumOps-1];
4316
4317 // Nuke the last value.
4318 OL[NumOps-1].set(nullptr);
4320}
4321
4322//===----------------------------------------------------------------------===//
4323// FreezeInst Implementation
4324//===----------------------------------------------------------------------===//
4325
4326FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
4327 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4328 setName(Name);
4329}
4330
4331//===----------------------------------------------------------------------===//
4332// cloneImpl() implementations
4333//===----------------------------------------------------------------------===//
4334
4335// Define these methods here so vtables don't get emitted into every translation
4336// unit that uses these classes.
4337
4338GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4340 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4341}
4342
4346
4350
4352 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4353}
4354
4356 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4357}
4358
4359ExtractValueInst *ExtractValueInst::cloneImpl() const {
4360 return new ExtractValueInst(*this);
4361}
4362
4363InsertValueInst *InsertValueInst::cloneImpl() const {
4364 return new InsertValueInst(*this);
4365}
4366
4369 getOperand(0), getAlign());
4370 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4371 Result->setSwiftError(isSwiftError());
4372 return Result;
4373}
4374
4376 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4378}
4379
4384
4389 Result->setVolatile(isVolatile());
4390 Result->setWeak(isWeak());
4391 return Result;
4392}
4393
4395 AtomicRMWInst *Result =
4398 Result->setVolatile(isVolatile());
4399 return Result;
4400}
4401
4405
4407 return new TruncInst(getOperand(0), getType());
4408}
4409
4411 return new ZExtInst(getOperand(0), getType());
4412}
4413
4415 return new SExtInst(getOperand(0), getType());
4416}
4417
4419 return new FPTruncInst(getOperand(0), getType());
4420}
4421
4423 return new FPExtInst(getOperand(0), getType());
4424}
4425
4427 return new UIToFPInst(getOperand(0), getType());
4428}
4429
4431 return new SIToFPInst(getOperand(0), getType());
4432}
4433
4435 return new FPToUIInst(getOperand(0), getType());
4436}
4437
4439 return new FPToSIInst(getOperand(0), getType());
4440}
4441
4443 return new PtrToIntInst(getOperand(0), getType());
4444}
4445
4449
4451 return new IntToPtrInst(getOperand(0), getType());
4452}
4453
4455 return new BitCastInst(getOperand(0), getType());
4456}
4457
4461
4462CallInst *CallInst::cloneImpl() const {
4463 if (hasOperandBundles()) {
4467 return new (AllocMarker) CallInst(*this, AllocMarker);
4468 }
4470 return new (AllocMarker) CallInst(*this, AllocMarker);
4471}
4472
4473SelectInst *SelectInst::cloneImpl() const {
4475}
4476
4478 return new VAArgInst(getOperand(0), getType());
4479}
4480
4481ExtractElementInst *ExtractElementInst::cloneImpl() const {
4483}
4484
4485InsertElementInst *InsertElementInst::cloneImpl() const {
4487}
4488
4492
4493PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4494
4495LandingPadInst *LandingPadInst::cloneImpl() const {
4496 return new LandingPadInst(*this);
4497}
4498
4499ReturnInst *ReturnInst::cloneImpl() const {
4501 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4502}
4503
4504BranchInst *BranchInst::cloneImpl() const {
4506 return new (AllocMarker) BranchInst(*this, AllocMarker);
4507}
4508
4509SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4510
4511IndirectBrInst *IndirectBrInst::cloneImpl() const {
4512 return new IndirectBrInst(*this);
4513}
4514
4515InvokeInst *InvokeInst::cloneImpl() const {
4516 if (hasOperandBundles()) {
4520 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4521 }
4523 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4524}
4525
4526CallBrInst *CallBrInst::cloneImpl() const {
4527 if (hasOperandBundles()) {
4531 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4532 }
4534 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4535}
4536
4537ResumeInst *ResumeInst::cloneImpl() const {
4538 return new (AllocMarker) ResumeInst(*this);
4539}
4540
4541CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4543 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4544}
4545
4546CatchReturnInst *CatchReturnInst::cloneImpl() const {
4547 return new (AllocMarker) CatchReturnInst(*this);
4548}
4549
4550CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4551 return new CatchSwitchInst(*this);
4552}
4553
4554FuncletPadInst *FuncletPadInst::cloneImpl() const {
4556 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4557}
4558
4560 LLVMContext &Context = getContext();
4561 return new UnreachableInst(Context);
4562}
4563
4564bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4565 bool NoTrapAfterNoreturn) const {
4566 if (!TrapUnreachable)
4567 return false;
4568
4569 // We may be able to ignore unreachable behind a noreturn call.
4571 Call && Call->doesNotReturn()) {
4572 if (NoTrapAfterNoreturn)
4573 return false;
4574 // Do not emit an additional trap instruction.
4575 if (Call->isNonContinuableTrap())
4576 return false;
4577 }
4578
4579 if (getFunction()->hasFnAttribute(Attribute::Naked))
4580 return false;
4581
4582 return true;
4583}
4584
4586 return new FreezeInst(getOperand(0));
4587}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ Default
static bool isSigned(unsigned int Opcode)
#define op(i)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:334
LLVM_ABI float convertToFloat() const
Converts this APFloat to host float value.
Definition APFloat.cpp:6143
Class for arbitrary precision integers.
Definition APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1331
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1640
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1599
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
This class represents a conversion between pointers from one address space to another.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
LLVM_ABI AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
Class to represent array types.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
friend class Instruction
Iterator for Instructions in a `BasicBlock.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ Nand
*p = ~(old & v)
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
BinOp getOperation() const
LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM_ABI CaptureInfo getCaptureInfo() const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
static LLVM_ABI Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
LLVM_ABI bool swapOperands()
Exchange the two operands to this instruction.
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition InstrTypes.h:181
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
LLVM_ABI BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM_ABI BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
LLVM_ABI BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
LLVM_ABI BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
LLVM_ABI BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
LLVM_ABI MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI bool doesNotAccessMemory() const
Determine if the call does not access memory.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
LLVM_ABI void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
LLVM_ABI void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
LLVM_ABI bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
BundleOpInfo * bundle_op_iterator
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
LLVM_ABI bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
LLVM_ABI void setOnlyReadsMemory()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
LLVM_ABI bool hasArgumentWithAdditionalReturnCaptureComponents() const
Returns whether the call has an argument that has an attribute like captures(ret: address,...
CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
Value * getCalledOperand() const
LLVM_ABI void setOnlyWritesMemory()
LLVM_ABI op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
LLVM_ABI bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
FunctionType * FTy
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
LLVM_ABI void setOnlyAccessesInaccessibleMemory()
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
LLVM_ABI bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
LLVM_ABI bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
LLVM_ABI bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition ModRef.h:359
CaptureComponents getOtherComponents() const
Get components potentially captured through locations other than the return value.
Definition ModRef.h:391
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition ModRef.h:372
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition ModRef.h:375
CaptureComponents getRetComponents() const
Get components potentially captured by the return value.
Definition ModRef.h:387
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
CastInst(Type *Ty, unsigned iType, Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics for subclasses.
Definition InstrTypes.h:451
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ABI bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static LLVM_ABI CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
LLVM_ABI CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
LLVM_ABI void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
LLVM_ABI CatchSwitchInst * cloneImpl() const
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
LLVM_ABI void removeHandler(handler_iterator HI)
LLVM_ABI CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition InstrTypes.h:915
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:768
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
LLVM_ABI bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition InstrTypes.h:930
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition InstrTypes.h:852
LLVM_ABI void swapOperands()
This is just a convenience that dispatches to the subclasses.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
Definition InstrTypes.h:923
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
static LLVM_ABI StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
bool isStrictPredicate() const
Definition InstrTypes.h:843
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition InstrTypes.h:893
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
LLVM_ABI CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition InstrTypes.h:936
LLVM_ABI bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
static LLVM_ABI CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:277
const APFloat & getValueAPF() const
Definition Constants.h:320
This is the shared class of boolean and integer constants.
Definition Constants.h:87
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
LLVM_ABI ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
LLVM_ABI ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
LLVM_ABI FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
LLVM_ABI FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
LLVM_ABI FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
LLVM_ABI FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
LLVM_ABI FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
LLVM_ABI FenceInst * cloneImpl() const
friend class Instruction
Iterator for Instructions in a BasicBlock.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
LLVM_ABI FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
LLVM_ABI FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool isVarArg() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static LLVM_ABI Predicate getFlippedSignednessPredicate(Predicate Pred)
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI IndirectBrInst * cloneImpl() const
LLVM_ABI InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition Instruction.h:62
BasicBlock * getBasicBlock()
Definition Instruction.h:63
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
LLVM_ABI bool isVolatile() const LLVM_READONLY
Return true if this instruction has a volatile memory access.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Bitfield::Element< uint16_t, 0, 15 > OpaqueField
Instruction(const Instruction &)=delete
friend class BasicBlock
Various leaf nodes.
void setSubclassData(typename BitfieldElement::Type Value)
This class represents a cast from an integer to a pointer.
LLVM_ABI IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
LLVM_ABI InvokeInst * cloneImpl() const
LLVM_ABI LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVMContextImpl *const pImpl
Definition LLVMContext.h:70
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LLVM_ABI LandingPadInst * cloneImpl() const
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MemoryEffectsBase readOnly()
Definition ModRef.h:130
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:226
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:220
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:140
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:146
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition ModRef.h:239
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition ModRef.h:229
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:223
static MemoryEffectsBase writeOnly()
Definition ModRef.h:135
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:163
static MemoryEffectsBase none()
Definition ModRef.h:125
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition ModRef.h:250
StringRef getTag() const
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
LLVM_ABI void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
BasicBlock ** block_iterator
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
LLVM_ABI bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
LLVM_ABI Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
LLVM_ABI PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
PtrToAddrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
This class represents a cast from a pointer to an integer.
LLVM_ABI PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
LLVM_ABI ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
LLVM_ABI ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
LLVM_ABI SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
LLVM_ABI SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
LLVM_ABI SelectInst * cloneImpl() const
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static LLVM_ABI bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
LLVM_ABI bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static LLVM_ABI bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
LLVM_ABI bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
LLVM_ABI ShuffleVectorInst * cloneImpl() const
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
LLVM_ABI void setShuffleMask(ArrayRef< int > Mask)
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static LLVM_ABI Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static LLVM_ABI bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition DenseSet.h:291
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Align getAlign() const
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI StoreInst * cloneImpl() const
LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI void replaceDefaultDest(SwitchInst::CaseIt I)
Replace the default destination by given case.
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
LLVM_ABI SwitchInst * cloneImpl() const
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
LLVM_ABI CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
LLVM_ABI TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition TypeSize.h:340
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition Type.cpp:249
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:234
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
This class represents a cast unsigned integer to floating point.
LLVM_ABI UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryInstruction(Type *Ty, unsigned iType, Value *V, InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:62
static LLVM_ABI UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
LLVM_ABI UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
LLVM_ABI UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition InstrTypes.h:154
LLVM_ABI UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable, bool NoTrapAfterNoreturn) const
friend class Instruction
Iterator for Instructions in a `BasicBlock.
LLVM_ABI UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:905
Use * op_iterator
Definition User.h:279
const Use * getOperandList() const
Definition User.h:225
op_range operands()
Definition User.h:292
op_iterator op_begin()
Definition User.h:284
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition User.h:265
Use & Op()
Definition User.h:196
LLVM_ABI void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
Definition User.cpp:70
LLVM_ABI void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition User.cpp:53
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
op_iterator op_end()
Definition User.h:286
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
unsigned NumUserOperands
Definition Value.h:109
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
LLVM_ABI ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
size_type size() const
Definition DenseSet.h:87
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
typename base_list_type::iterator iterator
Definition ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
auto seq_inclusive(T Begin, T End)
Iterate over an integral type from Begin to End inclusive.
Definition Sequence.h:325
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1655
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:360
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:301
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:354
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1770
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1835
constexpr unsigned BitWidth
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1897
bool capturesAnything(CaptureComponents CC)
Definition ModRef.h:324
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
Definition STLExtras.h:2108
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Summary of memprof metadata on allocations.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Indicates this User has operands co-allocated.
Definition User.h:60
Indicates this User has operands and a descriptor co-allocated .
Definition User.h:66