LLVM 17.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
22#include "llvm/IR/Constants.h"
23#include "llvm/IR/DataLayout.h"
25#include "llvm/IR/Function.h"
26#include "llvm/IR/InstrTypes.h"
27#include "llvm/IR/Instruction.h"
28#include "llvm/IR/Intrinsics.h"
29#include "llvm/IR/LLVMContext.h"
30#include "llvm/IR/MDBuilder.h"
31#include "llvm/IR/Metadata.h"
32#include "llvm/IR/Module.h"
33#include "llvm/IR/Operator.h"
35#include "llvm/IR/Type.h"
36#include "llvm/IR/Value.h"
41#include "llvm/Support/ModRef.h"
43#include <algorithm>
44#include <cassert>
45#include <cstdint>
46#include <optional>
47#include <vector>
48
49using namespace llvm;
50
52 "disable-i2p-p2i-opt", cl::init(false),
53 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
59std::optional<TypeSize>
61 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
62 if (isArrayAllocation()) {
63 auto *C = dyn_cast<ConstantInt>(getArraySize());
64 if (!C)
65 return std::nullopt;
66 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
67 Size *= C->getZExtValue();
68 }
69 return Size;
70}
71
72std::optional<TypeSize>
74 std::optional<TypeSize> Size = getAllocationSize(DL);
75 if (Size)
76 return *Size * 8;
77 return std::nullopt;
78}
79
80//===----------------------------------------------------------------------===//
81// SelectInst Class
82//===----------------------------------------------------------------------===//
83
84/// areInvalidOperands - Return a string if the specified operands are invalid
85/// for a select operation, otherwise return null.
86const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
87 if (Op1->getType() != Op2->getType())
88 return "both values to select must have same type";
89
90 if (Op1->getType()->isTokenTy())
91 return "select values cannot have token type";
92
93 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
94 // Vector select.
95 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
96 return "vector select condition element type must be i1";
97 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
98 if (!ET)
99 return "selected values for vector select must be vectors";
100 if (ET->getElementCount() != VT->getElementCount())
101 return "vector select requires selected vectors to have "
102 "the same vector length as select condition";
103 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
104 return "select condition must be i1 or <n x i1>";
105 }
106 return nullptr;
107}
108
109//===----------------------------------------------------------------------===//
110// PHINode Class
111//===----------------------------------------------------------------------===//
112
113PHINode::PHINode(const PHINode &PN)
114 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
115 ReservedSpace(PN.getNumOperands()) {
117 std::copy(PN.op_begin(), PN.op_end(), op_begin());
118 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
120}
121
122// removeIncomingValue - Remove an incoming value. This is useful if a
123// predecessor basic block is deleted.
124Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
125 Value *Removed = getIncomingValue(Idx);
126
127 // Move everything after this operand down.
128 //
129 // FIXME: we could just swap with the end of the list, then erase. However,
130 // clients might not expect this to happen. The code as it is thrashes the
131 // use/def lists, which is kinda lame.
132 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
134
135 // Nuke the last value.
136 Op<-1>().set(nullptr);
138
139 // If the PHI node is dead, because it has zero entries, nuke it now.
140 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
141 // If anyone is using this PHI, make them use a dummy value instead...
144 }
145 return Removed;
146}
147
148/// growOperands - grow operands - This grows the operand list in response
149/// to a push_back style of operation. This grows the number of ops by 1.5
150/// times.
151///
152void PHINode::growOperands() {
153 unsigned e = getNumOperands();
154 unsigned NumOps = e + e / 2;
155 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
156
157 ReservedSpace = NumOps;
158 growHungoffUses(ReservedSpace, /* IsPhi */ true);
159}
160
161/// hasConstantValue - If the specified PHI node always merges together the same
162/// value, return the value, otherwise return null.
164 // Exploit the fact that phi nodes always have at least one entry.
165 Value *ConstantValue = getIncomingValue(0);
166 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
167 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
168 if (ConstantValue != this)
169 return nullptr; // Incoming values not all the same.
170 // The case where the first value is this PHI.
171 ConstantValue = getIncomingValue(i);
172 }
173 if (ConstantValue == this)
174 return UndefValue::get(getType());
175 return ConstantValue;
176}
177
178/// hasConstantOrUndefValue - Whether the specified PHI node always merges
179/// together the same value, assuming that undefs result in the same value as
180/// non-undefs.
181/// Unlike \ref hasConstantValue, this does not return a value because the
182/// unique non-undef incoming value need not dominate the PHI node.
184 Value *ConstantValue = nullptr;
185 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
186 Value *Incoming = getIncomingValue(i);
187 if (Incoming != this && !isa<UndefValue>(Incoming)) {
188 if (ConstantValue && ConstantValue != Incoming)
189 return false;
190 ConstantValue = Incoming;
191 }
192 }
193 return true;
194}
195
196//===----------------------------------------------------------------------===//
197// LandingPadInst Implementation
198//===----------------------------------------------------------------------===//
199
// Construct a landingpad with space reserved for NumReservedValues clause
// operands, inserted before the given instruction. The inline operand count
// is 0 because clause operands are hung off the instruction (see init()).
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}
205
// Construct a landingpad with space reserved for NumReservedValues clause
// operands, appended to the given basic block. The inline operand count is 0
// because clause operands are hung off the instruction (see init()).
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}
211
212LandingPadInst::LandingPadInst(const LandingPadInst &LP)
213 : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
214 LP.getNumOperands()),
215 ReservedSpace(LP.getNumOperands()) {
217 Use *OL = getOperandList();
218 const Use *InOL = LP.getOperandList();
219 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
220 OL[I] = InOL[I];
221
222 setCleanup(LP.isCleanup());
223}
224
// Create a landingpad of type RetTy with room reserved for
// NumReservedClauses clauses, inserted before the given instruction.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}
230
// Create a landingpad of type RetTy with room reserved for
// NumReservedClauses clauses, appended to the given basic block.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}
236
237void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
238 ReservedSpace = NumReservedValues;
240 allocHungoffUses(ReservedSpace);
241 setName(NameStr);
242 setCleanup(false);
243}
244
245/// growOperands - grow operands - This grows the operand list in response to a
246/// push_back style of operation. This grows the number of ops by 2 times.
247void LandingPadInst::growOperands(unsigned Size) {
248 unsigned e = getNumOperands();
249 if (ReservedSpace >= e + Size) return;
250 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
251 growHungoffUses(ReservedSpace);
252}
253
255 unsigned OpNo = getNumOperands();
256 growOperands(1);
257 assert(OpNo < ReservedSpace && "Growing didn't work!");
259 getOperandList()[OpNo] = Val;
260}
261
262//===----------------------------------------------------------------------===//
263// CallBase Implementation
264//===----------------------------------------------------------------------===//
265
267 Instruction *InsertPt) {
268 switch (CB->getOpcode()) {
269 case Instruction::Call:
270 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
271 case Instruction::Invoke:
272 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
273 case Instruction::CallBr:
274 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
275 default:
276 llvm_unreachable("Unknown CallBase sub-class!");
277 }
278}
279
281 Instruction *InsertPt) {
283 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
284 auto ChildOB = CI->getOperandBundleAt(i);
285 if (ChildOB.getTagName() != OpB.getTag())
286 OpDefs.emplace_back(ChildOB);
287 }
288 OpDefs.emplace_back(OpB);
289 return CallBase::Create(CI, OpDefs, InsertPt);
290}
291
292
294
296 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
297 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
298}
299
301 const Value *V = getCalledOperand();
302 if (isa<Function>(V) || isa<Constant>(V))
303 return false;
304 return !isInlineAsm();
305}
306
307/// Tests if this call site must be tail call optimized. Only a CallInst can
308/// be tail call optimized.
310 if (auto *CI = dyn_cast<CallInst>(this))
311 return CI->isMustTailCall();
312 return false;
313}
314
315/// Tests if this call site is marked as a tail call.
317 if (auto *CI = dyn_cast<CallInst>(this))
318 return CI->isTailCall();
319 return false;
320}
321
323 if (auto *F = getCalledFunction())
324 return F->getIntrinsicID();
326}
327
330
331 if (const Function *F = getCalledFunction())
332 Mask |= F->getAttributes().getRetNoFPClass();
333 return Mask;
334}
335
338
339 if (const Function *F = getCalledFunction())
340 Mask |= F->getAttributes().getParamNoFPClass(i);
341 return Mask;
342}
343
345 if (hasRetAttr(Attribute::NonNull))
346 return true;
347
348 if (getRetDereferenceableBytes() > 0 &&
349 !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
350 return true;
351
352 return false;
353}
354
356 unsigned Index;
357
358 if (Attrs.hasAttrSomewhere(Kind, &Index))
360 if (const Function *F = getCalledFunction())
361 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
363
364 return nullptr;
365}
366
367/// Determine whether the argument or parameter has the given attribute.
368bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
369 assert(ArgNo < arg_size() && "Param index out of bounds!");
370
371 if (Attrs.hasParamAttr(ArgNo, Kind))
372 return true;
373
374 const Function *F = getCalledFunction();
375 if (!F)
376 return false;
377
378 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
379 return false;
380
381 // Take into account mod/ref by operand bundles.
382 switch (Kind) {
383 case Attribute::ReadNone:
385 case Attribute::ReadOnly:
387 case Attribute::WriteOnly:
388 return !hasReadingOperandBundles();
389 default:
390 return true;
391 }
392}
393
394bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
395 Value *V = getCalledOperand();
396 if (auto *CE = dyn_cast<ConstantExpr>(V))
397 if (CE->getOpcode() == BitCast)
398 V = CE->getOperand(0);
399
400 if (auto *F = dyn_cast<Function>(V))
401 return F->getAttributes().hasFnAttr(Kind);
402
403 return false;
404}
405
406bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
407 Value *V = getCalledOperand();
408 if (auto *CE = dyn_cast<ConstantExpr>(V))
409 if (CE->getOpcode() == BitCast)
410 V = CE->getOperand(0);
411
412 if (auto *F = dyn_cast<Function>(V))
413 return F->getAttributes().hasFnAttr(Kind);
414
415 return false;
416}
417
418template <typename AK>
419Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
420 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
421 // getMemoryEffects() correctly combines memory effects from the call-site,
422 // operand bundles and function.
423 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
424 }
425
427 if (auto *CE = dyn_cast<ConstantExpr>(V))
428 if (CE->getOpcode() == BitCast)
429 V = CE->getOperand(0);
430
431 if (auto *F = dyn_cast<Function>(V))
432 return F->getAttributes().getFnAttr(Kind);
433
434 return Attribute();
435}
436
437template Attribute
438CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
439template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
440
443 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
445}
446
449 const unsigned BeginIndex) {
450 auto It = op_begin() + BeginIndex;
451 for (auto &B : Bundles)
452 It = std::copy(B.input_begin(), B.input_end(), It);
453
454 auto *ContextImpl = getContext().pImpl;
455 auto BI = Bundles.begin();
456 unsigned CurrentIndex = BeginIndex;
457
458 for (auto &BOI : bundle_op_infos()) {
459 assert(BI != Bundles.end() && "Incorrect allocation?");
460
461 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
462 BOI.Begin = CurrentIndex;
463 BOI.End = CurrentIndex + BI->input_size();
464 CurrentIndex = BOI.End;
465 BI++;
466 }
467
468 assert(BI == Bundles.end() && "Incorrect allocation?");
469
470 return It;
471}
472
474 /// When there isn't many bundles, we do a simple linear search.
475 /// Else fallback to a binary-search that use the fact that bundles usually
476 /// have similar number of argument to get faster convergence.
478 for (auto &BOI : bundle_op_infos())
479 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
480 return BOI;
481
482 llvm_unreachable("Did not find operand bundle for operand!");
483 }
484
485 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
487 OpIdx < std::prev(bundle_op_info_end())->End &&
488 "The Idx isn't in the operand bundle");
489
490 /// We need a decimal number below and to prevent using floating point numbers
491 /// we use an intergal value multiplied by this constant.
492 constexpr unsigned NumberScaling = 1024;
493
496 bundle_op_iterator Current = Begin;
497
498 while (Begin != End) {
499 unsigned ScaledOperandPerBundle =
500 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
501 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
502 ScaledOperandPerBundle);
503 if (Current >= End)
504 Current = std::prev(End);
505 assert(Current < End && Current >= Begin &&
506 "the operand bundle doesn't cover every value in the range");
507 if (OpIdx >= Current->Begin && OpIdx < Current->End)
508 break;
509 if (OpIdx >= Current->End)
510 Begin = Current + 1;
511 else
512 End = Current;
513 }
514
515 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
516 "the operand bundle doesn't cover every value in the range");
517 return *Current;
518}
519
522 Instruction *InsertPt) {
523 if (CB->getOperandBundle(ID))
524 return CB;
525
527 CB->getOperandBundlesAsDefs(Bundles);
528 Bundles.push_back(OB);
529 return Create(CB, Bundles, InsertPt);
530}
531
533 Instruction *InsertPt) {
535 bool CreateNew = false;
536
537 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
538 auto Bundle = CB->getOperandBundleAt(I);
539 if (Bundle.getTagID() == ID) {
540 CreateNew = true;
541 continue;
542 }
543 Bundles.emplace_back(Bundle);
544 }
545
546 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
547}
548
550 // Implementation note: this is a conservative implementation of operand
551 // bundle semantics, where *any* non-assume operand bundle (other than
552 // ptrauth) forces a callsite to be at least readonly.
555 getIntrinsicID() != Intrinsic::assume;
556}
557
562 getIntrinsicID() != Intrinsic::assume;
563}
564
567 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
568 MemoryEffects FnME = Fn->getMemoryEffects();
569 if (hasOperandBundles()) {
570 // TODO: Add a method to get memory effects for operand bundles instead.
572 FnME |= MemoryEffects::readOnly();
574 FnME |= MemoryEffects::writeOnly();
575 }
576 ME &= FnME;
577 }
578 return ME;
579}
582}
583
584/// Determine if the function does not access memory.
587}
590}
591
592/// Determine if the function does not access or only reads memory.
595}
598}
599
600/// Determine if the function does not access or only writes memory.
603}
606}
607
608/// Determine if the call can access memmory only using pointers based
609/// on its arguments.
612}
615}
616
617/// Determine if the function may only access memory that is
618/// inaccessible from the IR.
621}
624}
625
626/// Determine if the function may only access memory that is
627/// either inaccessible from the IR or pointed to by its arguments.
630}
634}
635
636//===----------------------------------------------------------------------===//
637// CallInst Implementation
638//===----------------------------------------------------------------------===//
639
// Initialize a call: record the callee's function type, fill in the operand
// list, attach operand bundles, and name the result.
//
// Operand layout is [Args..., bundle inputs..., callee]; the first assert
// checks exactly that total. Order of the stores below matters for
// use-list-order prediction (see the comment inline).
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  // Arg count must match the signature (extra args allowed only for varargs),
  // and each fixed argument must match its declared parameter type.
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  // Bundle inputs are copied starting right after the arguments; the returned
  // iterator points one past the last bundle input, i.e. at the callee slot.
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}
668
669void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
670 this->FTy = FTy;
671 assert(getNumOperands() == 1 && "NumOperands not set up?");
672 setCalledOperand(Func);
673
674 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
675
676 setName(NameStr);
677}
678
// Construct a zero-argument call to Func, inserted before InsertBefore.
// One inline operand slot is allocated (count 1, starting at op_end() - 1)
// to hold the callee.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}
685
// Construct a zero-argument call to Func, appended to the InsertAtEnd block.
// One inline operand slot is allocated (count 1, starting at op_end() - 1)
// to hold the callee.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}
692
693CallInst::CallInst(const CallInst &CI)
694 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
695 OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
696 CI.getNumOperands()) {
697 setTailCallKind(CI.getTailCallKind());
699
700 std::copy(CI.op_begin(), CI.op_end(), op_begin());
701 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
704}
705
707 Instruction *InsertPt) {
708 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
709
710 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
711 Args, OpB, CI->getName(), InsertPt);
712 NewCI->setTailCallKind(CI->getTailCallKind());
713 NewCI->setCallingConv(CI->getCallingConv());
714 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
715 NewCI->setAttributes(CI->getAttributes());
716 NewCI->setDebugLoc(CI->getDebugLoc());
717 return NewCI;
718}
719
720// Update profile weight for call instruction by scaling it using the ratio
721// of S/T. The meaning of "branch_weights" meta data for call instruction is
722// transfered to represent call count.
724 auto *ProfileData = getMetadata(LLVMContext::MD_prof);
725 if (ProfileData == nullptr)
726 return;
727
728 auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
729 if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
730 !ProfDataName->getString().equals("VP")))
731 return;
732
733 if (T == 0) {
734 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
735 "div by 0. Ignoring. Likely the function "
736 << getParent()->getParent()->getName()
737 << " has 0 entry count, and contains call instructions "
738 "with non-zero prof info.");
739 return;
740 }
741
742 MDBuilder MDB(getContext());
744 Vals.push_back(ProfileData->getOperand(0));
745 APInt APS(128, S), APT(128, T);
746 if (ProfDataName->getString().equals("branch_weights") &&
747 ProfileData->getNumOperands() > 0) {
748 // Using APInt::div may be expensive, but most cases should fit 64 bits.
749 APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
750 ->getValue()
751 .getZExtValue());
752 Val *= APS;
753 Vals.push_back(MDB.createConstant(
755 Val.udiv(APT).getLimitedValue(UINT32_MAX))));
756 } else if (ProfDataName->getString().equals("VP"))
757 for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
758 // The first value is the key of the value profile, which will not change.
759 Vals.push_back(ProfileData->getOperand(i));
760 uint64_t Count =
761 mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
762 ->getValue()
763 .getZExtValue();
764 // Don't scale the magic number.
765 if (Count == NOMORE_ICP_MAGICNUM) {
766 Vals.push_back(ProfileData->getOperand(i + 1));
767 continue;
768 }
769 // Using APInt::div may be expensive, but most cases should fit 64 bits.
770 APInt Val(128, Count);
771 Val *= APS;
772 Vals.push_back(MDB.createConstant(
774 Val.udiv(APT).getLimitedValue())));
775 }
776 setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
777}
778
779/// IsConstantOne - Return true only if val is constant int 1
780static bool IsConstantOne(Value *val) {
781 assert(val && "IsConstantOne does not work with nullptr val");
782 const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
783 return CVal && CVal->isOne();
784}
785
787 BasicBlock *InsertAtEnd, Type *IntPtrTy,
788 Type *AllocTy, Value *AllocSize,
789 Value *ArraySize,
791 Function *MallocF, const Twine &Name) {
792 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
793 "createMalloc needs either InsertBefore or InsertAtEnd");
794
795 // malloc(type) becomes:
796 // bitcast (i8* malloc(typeSize)) to type*
797 // malloc(type, arraySize) becomes:
798 // bitcast (i8* malloc(typeSize*arraySize)) to type*
799 if (!ArraySize)
800 ArraySize = ConstantInt::get(IntPtrTy, 1);
801 else if (ArraySize->getType() != IntPtrTy) {
802 if (InsertBefore)
803 ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
804 "", InsertBefore);
805 else
806 ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
807 "", InsertAtEnd);
808 }
809
810 if (!IsConstantOne(ArraySize)) {
811 if (IsConstantOne(AllocSize)) {
812 AllocSize = ArraySize; // Operand * 1 = Operand
813 } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
814 Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
815 false /*ZExt*/);
816 // Malloc arg is constant product of type size and array size
817 AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
818 } else {
819 // Multiply type size by the array size...
820 if (InsertBefore)
821 AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
822 "mallocsize", InsertBefore);
823 else
824 AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
825 "mallocsize", InsertAtEnd);
826 }
827 }
828
829 assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
830 // Create the call to Malloc.
831 BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
832 Module *M = BB->getParent()->getParent();
833 Type *BPTy = Type::getInt8PtrTy(BB->getContext());
834 FunctionCallee MallocFunc = MallocF;
835 if (!MallocFunc)
836 // prototype malloc as "void *malloc(size_t)"
837 MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
838 PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
839 CallInst *MCall = nullptr;
840 Instruction *Result = nullptr;
841 if (InsertBefore) {
842 MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
843 InsertBefore);
844 Result = MCall;
845 if (Result->getType() != AllocPtrType)
846 // Create a cast instruction to convert to the right type...
847 Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
848 } else {
849 MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
850 Result = MCall;
851 if (Result->getType() != AllocPtrType) {
852 MCall->insertInto(InsertAtEnd, InsertAtEnd->end());
853 // Create a cast instruction to convert to the right type...
854 Result = new BitCastInst(MCall, AllocPtrType, Name);
855 }
856 }
857 MCall->setTailCall();
858 if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
859 MCall->setCallingConv(F->getCallingConv());
860 if (!F->returnDoesNotAlias())
861 F->setReturnDoesNotAlias();
862 }
863 assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
864
865 return Result;
866}
867
868/// CreateMalloc - Generate the IR for a call to malloc:
869/// 1. Compute the malloc call's argument as the specified type's size,
870/// possibly multiplied by the array size if the array size is not
871/// constant 1.
872/// 2. Call malloc with that argument.
873/// 3. Bitcast the result of the malloc call to the specified type.
875 Type *IntPtrTy, Type *AllocTy,
876 Value *AllocSize, Value *ArraySize,
877 Function *MallocF,
878 const Twine &Name) {
879 return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
880 ArraySize, std::nullopt, MallocF, Name);
881}
883 Type *IntPtrTy, Type *AllocTy,
884 Value *AllocSize, Value *ArraySize,
886 Function *MallocF,
887 const Twine &Name) {
888 return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
889 ArraySize, OpB, MallocF, Name);
890}
891
892/// CreateMalloc - Generate the IR for a call to malloc:
893/// 1. Compute the malloc call's argument as the specified type's size,
894/// possibly multiplied by the array size if the array size is not
895/// constant 1.
896/// 2. Call malloc with that argument.
897/// 3. Bitcast the result of the malloc call to the specified type.
898/// Note: This function does not add the bitcast to the basic block, that is the
899/// responsibility of the caller.
901 Type *IntPtrTy, Type *AllocTy,
902 Value *AllocSize, Value *ArraySize,
903 Function *MallocF, const Twine &Name) {
904 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
905 ArraySize, std::nullopt, MallocF, Name);
906}
908 Type *IntPtrTy, Type *AllocTy,
909 Value *AllocSize, Value *ArraySize,
911 Function *MallocF, const Twine &Name) {
912 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
913 ArraySize, OpB, MallocF, Name);
914}
915
918 Instruction *InsertBefore,
919 BasicBlock *InsertAtEnd) {
920 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
921 "createFree needs either InsertBefore or InsertAtEnd");
922 assert(Source->getType()->isPointerTy() &&
923 "Can not free something of nonpointer type!");
924
925 BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
926 Module *M = BB->getParent()->getParent();
927
928 Type *VoidTy = Type::getVoidTy(M->getContext());
929 Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
930 // prototype free as "void free(void*)"
931 FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
932 CallInst *Result = nullptr;
933 Value *PtrCast = Source;
934 if (InsertBefore) {
935 if (Source->getType() != IntPtrTy)
936 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
937 Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
938 } else {
939 if (Source->getType() != IntPtrTy)
940 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
941 Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
942 }
943 Result->setTailCall();
944 if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
945 Result->setCallingConv(F->getCallingConv());
946
947 return Result;
948}
949
950/// CreateFree - Generate the IR for a call to the builtin free function.
952 return createFree(Source, std::nullopt, InsertBefore, nullptr);
953}
956 Instruction *InsertBefore) {
957 return createFree(Source, Bundles, InsertBefore, nullptr);
958}
959
960/// CreateFree - Generate the IR for a call to the builtin free function.
961/// Note: This function does not add the call to the basic block, that is the
962/// responsibility of the caller.
964 Instruction *FreeCall =
965 createFree(Source, std::nullopt, nullptr, InsertAtEnd);
966 assert(FreeCall && "CreateFree did not create a CallInst");
967 return FreeCall;
968}
971 BasicBlock *InsertAtEnd) {
972 Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
973 assert(FreeCall && "CreateFree did not create a CallInst");
974 return FreeCall;
975}
976
977//===----------------------------------------------------------------------===//
978// InvokeInst Implementation
979//===----------------------------------------------------------------------===//
980
981void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
982 BasicBlock *IfException, ArrayRef<Value *> Args,
984 const Twine &NameStr) {
985 this->FTy = FTy;
986
987 assert((int)getNumOperands() ==
988 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
989 "NumOperands not set up?");
990
991#ifndef NDEBUG
992 assert(((Args.size() == FTy->getNumParams()) ||
993 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
994 "Invoking a function with bad signature");
995
996 for (unsigned i = 0, e = Args.size(); i != e; i++)
997 assert((i >= FTy->getNumParams() ||
998 FTy->getParamType(i) == Args[i]->getType()) &&
999 "Invoking a function with a bad signature!");
1000#endif
1001
1002 // Set operands in order of their index to match use-list-order
1003 // prediction.
1004 llvm::copy(Args, op_begin());
1005 setNormalDest(IfNormal);
1006 setUnwindDest(IfException);
1007 setCalledOperand(Fn);
1008
1009 auto It = populateBundleOperandInfos(Bundles, Args.size());
1010 (void)It;
1011 assert(It + 3 == op_end() && "Should add up!");
1012
1013 setName(NameStr);
1014}
1015
1016InvokeInst::InvokeInst(const InvokeInst &II)
1017 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
1018 OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
1019 II.getNumOperands()) {
1021 std::copy(II.op_begin(), II.op_end(), op_begin());
1022 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
1025}
1026
// Create a clone of an existing invoke with a (possibly different) operand
// bundle list `OpB`, preserving calling convention, fast-math/optional flags,
// attributes, and debug location.
// NOTE(review): the signature line (original 1027) and the first two Create
// arguments (original 1032: function type + called operand) are missing from
// this extract.
1028                               Instruction *InsertPt) {
1029  std::vector<Value *> Args(II->arg_begin(), II->arg_end());
1030
1031  auto *NewII = InvokeInst::Create(
1033      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
1034  NewII->setCallingConv(II->getCallingConv());
1035  NewII->SubclassOptionalData = II->SubclassOptionalData;
1036  NewII->setAttributes(II->getAttributes());
1037  NewII->setDebugLoc(II->getDebugLoc());
1038  return NewII;
1039}
1040
// Returns the landingpad instruction at the head of this invoke's unwind
// destination (the first non-PHI there is required to be a landingpad).
// NOTE(review): the signature line (original 1041) is missing from this
// extract.
1042  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
1043}
1044
1045//===----------------------------------------------------------------------===//
1046// CallBrInst Implementation
1047//===----------------------------------------------------------------------===//
1048
// Shared initializer for callbr: like InvokeInst::init but with a default
// (fallthrough) destination plus N indirect destinations.
// NOTE(review): the parameter line declaring `Bundles` (original line 1052)
// is missing from this extract.
1049void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
1050                      ArrayRef<BasicBlock *> IndirectDests,
1051                      ArrayRef<Value *> Args,
1053                      const Twine &NameStr) {
1054  this->FTy = FTy;
1055
  // Operand slots were allocated by the ctor: args + indirect dests + bundle
  // inputs + default dest + callee.
1056  assert((int)getNumOperands() ==
1057         ComputeNumOperands(Args.size(), IndirectDests.size(),
1058                            CountBundleInputs(Bundles)) &&
1059         "NumOperands not set up?");
1060
1061#ifndef NDEBUG
  // Debug-only signature checking against the callee's function type.
1062  assert(((Args.size() == FTy->getNumParams()) ||
1063          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
1064         "Calling a function with bad signature");
1065
1066  for (unsigned i = 0, e = Args.size(); i != e; i++)
1067    assert((i >= FTy->getNumParams() ||
1068            FTy->getParamType(i) == Args[i]->getType()) &&
1069           "Calling a function with a bad signature!");
1070#endif
1071
1072  // Set operands in order of their index to match use-list-order
1073  // prediction.
1074  std::copy(Args.begin(), Args.end(), op_begin())
1075  NumIndirectDests = IndirectDests.size();
1076  setDefaultDest(Fallthrough);
1077  for (unsigned i = 0; i != NumIndirectDests; ++i)
1078    setIndirectDest(i, IndirectDests[i]);
1079  setCalledOperand(Fn);
1080
  // Bundle operands sit between the args and the trailing (1 default dest +
  // N indirect dests + callee) operands.
1081  auto It = populateBundleOperandInfos(Bundles, Args.size());
1082  (void)It;
1083  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
1084
1085  setName(NameStr);
1086}
1087
// Copy constructor: clone attributes, operands, bundle info table, and the
// indirect-destination count from the original callbr.
// NOTE(review): original lines 1092 and 1095-1096 (the second std::copy's
// destination, `bundle_op_info_begin()`) are missing from this extract.
1088CallBrInst::CallBrInst(const CallBrInst &CBI)
1089    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
1090               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
1091               CBI.getNumOperands()) {
1093  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
1094  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
1097  NumIndirectDests = CBI.NumIndirectDests;
1098}
1099
// Create a clone of an existing callbr with a (possibly different) operand
// bundle list `OpB`, preserving calling convention, optional flags,
// attributes, debug location, and indirect-destination count.
// NOTE(review): the signature line (original 1100) is missing from this
// extract.
1101                               Instruction *InsertPt) {
1102  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
1103
1104  auto *NewCBI = CallBrInst::Create(
1105      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
1106      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
1107  NewCBI->setCallingConv(CBI->getCallingConv());
1108  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
1109  NewCBI->setAttributes(CBI->getAttributes());
1110  NewCBI->setDebugLoc(CBI->getDebugLoc());
1111  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
1112  return NewCBI;
1113}
1114
1115//===----------------------------------------------------------------------===//
1116// ReturnInst Implementation
1117//===----------------------------------------------------------------------===//
1118
// Copy constructor: a ret has either 0 operands (void return) or 1 (the
// returned value).
// NOTE(review): original line 1125 (presumably copying SubclassOptionalData)
// is missing from this extract.
1119ReturnInst::ReturnInst(const ReturnInst &RI)
1120    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
1121                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
1122                  RI.getNumOperands()) {
1123  if (RI.getNumOperands())
1124    Op<0>() = RI.Op<0>();
1126}
1127
// `!!retVal` yields 0 or 1, so a null retVal allocates a zero-operand ret
// (i.e. `ret void`) and a non-null one allocates exactly one operand slot.
1128ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
1129    : Instruction(Type::getVoidTy(C), Instruction::Ret,
1130                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
1131                  InsertBefore) {
1132  if (retVal)
1133    Op<0>() = retVal;
1134}
1135
// Same as above, but appended to the end of `InsertAtEnd`.
1136ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
1137    : Instruction(Type::getVoidTy(C), Instruction::Ret,
1138                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
1139                  InsertAtEnd) {
1140  if (retVal)
1141    Op<0>() = retVal;
1142}
1143
// `ret void`, appended to the end of `InsertAtEnd`.
1144ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
1145    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
1146                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
1147
1148//===----------------------------------------------------------------------===//
1149// ResumeInst Implementation
1150//===----------------------------------------------------------------------===//
1151
// Copy constructor: resume always has exactly one operand — the exception
// value being propagated.
1152ResumeInst::ResumeInst(const ResumeInst &RI)
1153    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
1154                  OperandTraits<ResumeInst>::op_begin(this), 1) {
1155  Op<0>() = RI.Op<0>();
1156}
1157
// Resume propagation of the in-flight exception `Exn`, inserted before
// `InsertBefore`.
1158ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
1159    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1160                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
1161  Op<0>() = Exn;
1162}
1163
// Same as above, but appended to the end of `InsertAtEnd`.
1164ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
1165    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1166                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
1167  Op<0>() = Exn;
1168}
1169
1170//===----------------------------------------------------------------------===//
1171// CleanupReturnInst Implementation
1172//===----------------------------------------------------------------------===//
1173
// Copy constructor: cleanupret has 1 operand (the cleanuppad) plus an
// optional second operand (the unwind destination block).
// NOTE(review): original line 1180 (the argument to
// setSubclassData<OpaqueField>, copied from CRI) is missing from this
// extract.
1174CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
1175    : Instruction(CRI.getType(), Instruction::CleanupRet,
1176                  OperandTraits<CleanupReturnInst>::op_end(this) -
1177                      CRI.getNumOperands(),
1178                  CRI.getNumOperands()) {
1179  setSubclassData<Instruction::OpaqueField>(
1181  Op<0>() = CRI.Op<0>();
1182  if (CRI.hasUnwindDest())
1183    Op<1>() = CRI.Op<1>();
1184}
1185
// Shared initializer: records whether an explicit unwind destination exists
// (in subclass data) and fills the 1 or 2 operand slots the ctor allocated.
1186void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1187  if (UnwindBB)
1188    setSubclassData<UnwindDestField>(true);
1189
1190  Op<0>() = CleanupPad;
1191  if (UnwindBB)
1192    Op<1>() = UnwindBB;
1193}
1194
// `Values` is 1 (unwind to caller) or 2 (explicit unwind block); it
// determines how many trailing operand slots are allocated.
1195CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1196                                     unsigned Values, Instruction *InsertBefore)
1197    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1198                  Instruction::CleanupRet,
1199                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1200                  Values, InsertBefore) {
1201  init(CleanupPad, UnwindBB);
1202}
1203
// Same as above, but appended to the end of `InsertAtEnd`.
1204CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1205                                     unsigned Values, BasicBlock *InsertAtEnd)
1206    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1207                  Instruction::CleanupRet,
1208                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1209                  Values, InsertAtEnd) {
1210  init(CleanupPad, UnwindBB);
1211}
1212
1213//===----------------------------------------------------------------------===//
1214// CatchReturnInst Implementation
1215//===----------------------------------------------------------------------===//
// Shared initializer: catchret always has exactly two operands — the
// catchpad being exited and the successor block.
1216void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1217  Op<0>() = CatchPad;
1218  Op<1>() = BB;
1219}
1220
// Copy constructor.
1221CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1222    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1223                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
1224  Op<0>() = CRI.Op<0>();
1225  Op<1>() = CRI.Op<1>();
1226}
1227
// Construct a catchret from `CatchPad` to `BB`, inserted before
// `InsertBefore`.
1228CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1229                                 Instruction *InsertBefore)
1230    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1231                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
1232                  InsertBefore) {
1233  init(CatchPad, BB);
1234}
1235
// Same as above, but appended to the end of `InsertAtEnd`.
1236CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1237                                 BasicBlock *InsertAtEnd)
1238    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1239                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
1240                  InsertAtEnd) {
1241  init(CatchPad, BB);
1242}
1243
1244//===----------------------------------------------------------------------===//
1245// CatchSwitchInst Implementation
1246//===----------------------------------------------------------------------===//
1247
// catchswitch uses hung-off (heap-allocated) uses, so the base Instruction
// is constructed with 0 inline operands; init() allocates the storage.
// `NumReservedValues` is the anticipated handler count; +1 below reserves
// the parent-pad slot (and one more if there is an unwind destination).
1248CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1249                                 unsigned NumReservedValues,
1250                                 const Twine &NameStr,
1251                                 Instruction *InsertBefore)
1252    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1253                  InsertBefore) {
1254  if (UnwindDest)
1255    ++NumReservedValues;
1256  init(ParentPad, UnwindDest, NumReservedValues + 1);
1257  setName(NameStr);
1258}
1259
// Same as above, but appended to the end of `InsertAtEnd`.
1260CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1261                                 unsigned NumReservedValues,
1262                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
1263    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1264                  InsertAtEnd) {
1265  if (UnwindDest)
1266    ++NumReservedValues;
1267  init(ParentPad, UnwindDest, NumReservedValues + 1);
1268  setName(NameStr);
1269}
1270
// Copy constructor: re-runs init() for the fixed operands, then copies the
// handler operands (indices >= 1) out of the source's operand list.
1271CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1272    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
1273                  CSI.getNumOperands()) {
1274  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1275  setNumHungOffUseOperands(ReservedSpace);
1276  Use *OL = getOperandList();
1277  const Use *InOL = CSI.getOperandList();
1278  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1279    OL[I] = InOL[I];
1280}
1281
// Allocates the hung-off use storage. Operand 0 is the parent pad; operand 1
// (when present) is the unwind destination; handlers follow.
1282void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1283                           unsigned NumReservedValues) {
1284  assert(ParentPad && NumReservedValues);
1285
1286  ReservedSpace = NumReservedValues;
1287  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1288  allocHungoffUses(ReservedSpace);
1289
1290  Op<0>() = ParentPad;
1291  if (UnwindDest) {
1292    setSubclassData<UnwindDestField>(true);
1293    setUnwindDest(UnwindDest);
1294  }
1295}
1296
1297/// growOperands - grow operands - This grows the operand list in response to a
1298/// push_back style of operation. This grows the number of ops by 2 times.
1299void CatchSwitchInst::growOperands(unsigned Size) {
1300 unsigned NumOperands = getNumOperands();
1301 assert(NumOperands >= 1);
1302 if (ReservedSpace >= NumOperands + Size)
1303 return;
1304 ReservedSpace = (NumOperands + Size / 2) * 2;
1305 growHungoffUses(ReservedSpace);
1306}
1307
// Append a handler block to the catchswitch, growing the hung-off operand
// list if needed.
// NOTE(review): the signature line (original 1308) and original line 1312
// (presumably setNumHungOffUseOperands(getNumOperands() + 1)) are missing
// from this extract.
1309  unsigned OpNo = getNumOperands();
1310  growOperands(1);
1311  assert(OpNo < ReservedSpace && "Growing didn't work!");
1313  getOperandList()[OpNo] = Handler;
1314}
1315
// Remove the handler at iterator HI by shifting all later handlers down one
// slot and dropping the now-dead last use.
// NOTE(review): the signature line (original 1316) and original line 1324
// (presumably setNumHungOffUseOperands(getNumOperands() - 1)) are missing
// from this extract.
1317  // Move all subsequent handlers up one.
1318  Use *EndDst = op_end() - 1;
1319  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1320    *CurDst = *(CurDst + 1);
1321  // Null out the last handler use.
1322  *EndDst = nullptr;
1323
1325}
1326
1327//===----------------------------------------------------------------------===//
1328// FuncletPadInst Implementation
1329//===----------------------------------------------------------------------===//
// Shared initializer for catchpad/cleanuppad: the argument operands come
// first, with the parent pad stored in the final operand slot.
1330void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1331                          const Twine &NameStr) {
1332  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1333  llvm::copy(Args, op_begin());
1334  setParentPad(ParentPad);
1335  setName(NameStr);
1336}
1337
// Copy constructor.
// NOTE(review): original line 1344 (presumably
// setParentPad(FPI.getParentPad())) is missing from this extract.
1338FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
1339    : Instruction(FPI.getType(), FPI.getOpcode(),
1340                  OperandTraits<FuncletPadInst>::op_end(this) -
1341                      FPI.getNumOperands(),
1342                  FPI.getNumOperands()) {
1343  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1345}
1346
// `Values` == Args.size() + 1 (the parent pad); it sizes the trailing
// operand allocation.
1347FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1348                               ArrayRef<Value *> Args, unsigned Values,
1349                               const Twine &NameStr, Instruction *InsertBefore)
1350    : Instruction(ParentPad->getType(), Op,
1351                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1352                  InsertBefore) {
1353  init(ParentPad, Args, NameStr);
1354}
1355
// Same as above, but appended to the end of `InsertAtEnd`.
1356FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1357                               ArrayRef<Value *> Args, unsigned Values,
1358                               const Twine &NameStr, BasicBlock *InsertAtEnd)
1359    : Instruction(ParentPad->getType(), Op,
1360                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1361                  InsertAtEnd) {
1362  init(ParentPad, Args, NameStr);
1363}
1364
1365//===----------------------------------------------------------------------===//
1366// UnreachableInst Implementation
1367//===----------------------------------------------------------------------===//
1368
// unreachable takes no operands and produces no value.
// NOTE(review): the two constructor signature lines (original 1369 and 1373)
// are missing from this extract; the bodies below are the InsertBefore and
// InsertAtEnd forms respectively.
1370                                 Instruction *InsertBefore)
1371    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1372                  0, InsertBefore) {}
1374    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1375                  0, InsertAtEnd) {}
1376
1377//===----------------------------------------------------------------------===//
1378// BranchInst Implementation
1379//===----------------------------------------------------------------------===//
1380
1381void BranchInst::AssertOK() {
1382 if (isConditional())
1383 assert(getCondition()->getType()->isIntegerTy(1) &&
1384 "May only branch on boolean predicates!");
1385}
1386
// Unconditional branch: a single operand holding the destination. Operands
// are stored at the *end* of the allocated slots, hence the negative Op<>
// indices used throughout.
1387BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
1388    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1389                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
1390                  InsertBefore) {
1391  assert(IfTrue && "Branch destination may not be null!");
1392  Op<-1>() = IfTrue;
1393}
1394
// Conditional branch: 3 operands laid out (from lowest index) as
// [cond, false-dest, true-dest].
1395BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1396                       Instruction *InsertBefore)
1397    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1398                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
1399                  InsertBefore) {
1400  // Assign in order of operand index to make use-list order predictable.
1401  Op<-3>() = Cond;
1402  Op<-2>() = IfFalse;
1403  Op<-1>() = IfTrue;
1404#ifndef NDEBUG
1405  AssertOK();
1406#endif
1407}
1408
// Unconditional branch, appended to the end of `InsertAtEnd`.
1409BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
1410    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1411                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
1412  assert(IfTrue && "Branch destination may not be null!");
1413  Op<-1>() = IfTrue;
1414}
1415
// Conditional branch, appended to the end of `InsertAtEnd`.
1416BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1417                       BasicBlock *InsertAtEnd)
1418    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1419                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
1420  // Assign in order of operand index to make use-list order predictable.
1421  Op<-3>() = Cond;
1422  Op<-2>() = IfFalse;
1423  Op<-1>() = IfTrue;
1424#ifndef NDEBUG
1425  AssertOK();
1426#endif
1427}
1428
// Copy constructor: handles both the 1-operand and 3-operand forms.
// NOTE(review): original line 1440 (presumably copying SubclassOptionalData)
// is missing from this extract.
1429BranchInst::BranchInst(const BranchInst &BI)
1430    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1431                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
1432                  BI.getNumOperands()) {
1433  // Assign in order of operand index to make use-list order predictable.
1434  if (BI.getNumOperands() != 1) {
1435    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1436    Op<-3>() = BI.Op<-3>();
1437    Op<-2>() = BI.Op<-2>();
1438  }
1439  Op<-1>() = BI.Op<-1>();
1441}
1442
// Exchange the true and false destinations of a conditional branch.
// NOTE(review): the signature and assert-head lines (original 1443-1444) and
// line 1450 (presumably swapProfMetadata()) are missing from this extract.
1445         "Cannot swap successors of an unconditional branch");
1446  Op<-1>().swap(Op<-2>());
1447
1448  // Update profile metadata if present and it matches our structural
1449  // expectations.
1451}
1452
1453//===----------------------------------------------------------------------===//
1454// AllocaInst Implementation
1455//===----------------------------------------------------------------------===//
1456
// Normalize an alloca's array-size operand: validates a caller-supplied
// count, or substitutes a default when none was given.
// NOTE(review): original line 1459 (the `if (!Amt)` branch body, presumably
// assigning a constant 1 to Amt) is missing from this extract.
1457static Value *getAISize(LLVMContext &Context, Value *Amt) {
1458  if (!Amt)
1460  else {
1461    assert(!isa<BasicBlock>(Amt) &&
1462           "Passed basic block into allocation size parameter! Use other ctor");
1463    assert(Amt->getType()->isIntegerTy() &&
1464           "Allocation array size is not an integer!");
1465  }
1466  return Amt;
1467}
1468
// Default alignment for an alloca of type Ty: the target's *preferred*
// alignment, looked up via the DataLayout of the function containing BB.
// NOTE(review): the two signature lines (original 1469 and 1477) are missing
// from this extract; these are the BasicBlock- and Instruction-based
// overloads of computeAllocaDefaultAlign.
1470  assert(BB && "Insertion BB cannot be null when alignment not provided!");
1471  assert(BB->getParent() &&
1472         "BB must be in a Function when alignment not provided!");
1473  const DataLayout &DL = BB->getModule()->getDataLayout();
1474  return DL.getPrefTypeAlign(Ty);
1475}
1476
// Instruction-position overload: delegates to the BasicBlock overload above.
1478  assert(I && "Insertion position cannot be null when alignment not provided!");
1479  return computeAllocaDefaultAlign(Ty, I->getParent());
1480}
1481
// Convenience ctor: scalar (non-array) alloca with default alignment.
1482AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1483                       Instruction *InsertBefore)
1484  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1485
// Same, appended to the end of `InsertAtEnd`.
1486AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1487                       BasicBlock *InsertAtEnd)
1488  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
1489
// Array alloca with alignment derived from the insertion point's DataLayout.
1490AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1491                       const Twine &Name, Instruction *InsertBefore)
1492    : AllocaInst(Ty, AddrSpace, ArraySize,
1493                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
1494                 InsertBefore) {}
1495
// Same, appended to the end of `InsertAtEnd`.
1496AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1497                       const Twine &Name, BasicBlock *InsertAtEnd)
1498    : AllocaInst(Ty, AddrSpace, ArraySize,
1499                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
1500                 InsertAtEnd) {}
1501
// Fully-specified ctor: the alloca's value type is a pointer into AddrSpace;
// the allocated element type is remembered separately in AllocatedType.
// NOTE(review): original line 1508 (presumably setAlignment(Align)) is
// missing from this extract.
1502AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1503                       Align Align, const Twine &Name,
1504                       Instruction *InsertBefore)
1505    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1506                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
1507      AllocatedType(Ty) {
1509  assert(!Ty->isVoidTy() && "Cannot allocate void!");
1510  setName(Name);
1511}
1512
// Same, appended to the end of `InsertAtEnd`.
// NOTE(review): original line 1518 (presumably setAlignment(Align)) is
// missing from this extract.
1513AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1514                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
1515    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1516                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
1517      AllocatedType(Ty) {
1519  assert(!Ty->isVoidTy() && "Cannot allocate void!");
1520  setName(Name);
1521}
1522
1523
// True unless the array-size operand is the constant 1 (a plain scalar
// alloca). Non-constant sizes conservatively count as array allocations.
// NOTE(review): the signature line (original 1524, isArrayAllocation) is
// missing from this extract.
1525  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1526    return !CI->isOne();
1527  return true;
1528}
1529
1530/// isStaticAlloca - Return true if this alloca is in the entry block of the
1531/// function and is a constant size. If so, the code generator will fold it
1532/// into the prolog/epilog code, so it is basically free.
// NOTE(review): the signature line (original 1533) is missing from this
// extract.
1534  // Must be constant size.
1535  if (!isa<ConstantInt>(getArraySize())) return false;
1536
1537  // Must be in the entry block.
1538  const BasicBlock *Parent = getParent();
1539  return Parent->isEntryBlock() && !isUsedWithInAlloca();
1540}
1541
1542//===----------------------------------------------------------------------===//
1543// LoadInst Implementation
1544//===----------------------------------------------------------------------===//
1545
1546void LoadInst::AssertOK() {
1547 assert(getOperand(0)->getType()->isPointerTy() &&
1548 "Ptr must have pointer type.");
1549}
1550
// Default alignment for a load/store of type Ty: the target's *ABI*
// alignment (cf. computeAllocaDefaultAlign, which uses preferred alignment).
// NOTE(review): the two signature lines (original 1551 and 1559) are missing
// from this extract; these are the BasicBlock- and Instruction-based
// overloads of computeLoadStoreDefaultAlign.
1552  assert(BB && "Insertion BB cannot be null when alignment not provided!");
1553  assert(BB->getParent() &&
1554         "BB must be in a Function when alignment not provided!");
1555  const DataLayout &DL = BB->getModule()->getDataLayout();
1556  return DL.getABITypeAlign(Ty);
1557}
1558
// Instruction-position overload: delegates to the BasicBlock overload above.
1560  assert(I && "Insertion position cannot be null when alignment not provided!");
1561  return computeLoadStoreDefaultAlign(Ty, I->getParent());
1562}
1563
// Delegating ctor chain: each form defaults the parameters the caller
// omitted (volatility -> alignment -> ordering/scope) and forwards to the
// fully-specified ctor at the bottom.
// NOTE(review): the signature lines of the first two ctors (original 1564
// and 1568) are missing from this extract.
1565                   Instruction *InsertBef)
1566    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1567
1569                   BasicBlock *InsertAE)
1570    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}
1571
// Volatile flag given; alignment defaulted from the insertion point.
1572LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1573                   Instruction *InsertBef)
1574    : LoadInst(Ty, Ptr, Name, isVolatile,
1575               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1576
1577LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1578                   BasicBlock *InsertAE)
1579    : LoadInst(Ty, Ptr, Name, isVolatile,
1580               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}
1581
// Alignment given; ordering defaults to a plain non-atomic load.
1582LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1583                   Align Align, Instruction *InsertBef)
1584    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1585               SyncScope::System, InsertBef) {}
1586
1587LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1588                   Align Align, BasicBlock *InsertAE)
1589    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1590               SyncScope::System, InsertAE) {}
1591
// Fully-specified ctor.
// NOTE(review): original line 1593 (the `AtomicOrdering Order, SyncScope::ID
// SSID,` parameter line) and lines 1597-1598 (presumably setVolatile /
// setAlignment) are missing from this extract.
1592LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1594                   Instruction *InsertBef)
1595    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1596  assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
1599  setAtomic(Order, SSID);
1600  AssertOK();
1601  setName(Name);
1602}
1603
// Fully-specified ctor, appended to the end of `InsertAE`.
// NOTE(review): original lines 1605 and 1609-1610 are missing from this
// extract (same gaps as the ctor above).
1604LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1606                   BasicBlock *InsertAE)
1607    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
1608  assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
1611  setAtomic(Order, SSID);
1612  AssertOK();
1613  setName(Name);
1614}
1615
1616//===----------------------------------------------------------------------===//
1617// StoreInst Implementation
1618//===----------------------------------------------------------------------===//
1619
1620void StoreInst::AssertOK() {
1621 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1622 assert(getOperand(1)->getType()->isPointerTy() &&
1623 "Ptr must have pointer type!");
1624 assert(cast<PointerType>(getOperand(1)->getType())
1625 ->isOpaqueOrPointeeTypeMatches(getOperand(0)->getType()) &&
1626 "Ptr must be a pointer to Val type!");
1627}
1628
// Delegating ctor chain mirroring LoadInst: each form defaults the omitted
// parameters (volatility -> alignment -> ordering/scope) and forwards down.
1629StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
1630    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1631
1632StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
1633    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
1634
// Volatile flag given; alignment defaulted from the stored value's type at
// the insertion point.
1635StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1636                     Instruction *InsertBefore)
1637    : StoreInst(val, addr, isVolatile,
1638                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1639                InsertBefore) {}
1640
1641StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1642                     BasicBlock *InsertAtEnd)
1643    : StoreInst(val, addr, isVolatile,
1644                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
1645                InsertAtEnd) {}
1646
// Alignment given; ordering defaults to a plain non-atomic store.
1647StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1648                     Instruction *InsertBefore)
1649    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1650                SyncScope::System, InsertBefore) {}
1651
1652StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1653                     BasicBlock *InsertAtEnd)
1654    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1655                SyncScope::System, InsertAtEnd) {}
1656
// Fully-specified ctor: a store yields void; operand 0 is the value,
// operand 1 the address.
// NOTE(review): original lines 1665-1666 (presumably setVolatile /
// setAlignment) are missing from this extract.
1657StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1658                     AtomicOrdering Order, SyncScope::ID SSID,
1659                     Instruction *InsertBefore)
1660    : Instruction(Type::getVoidTy(val->getContext()), Store,
1661                  OperandTraits<StoreInst>::op_begin(this),
1662                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
1663  Op<0>() = val;
1664  Op<1>() = addr;
1667  setAtomic(Order, SSID);
1668  AssertOK();
1669}
1670
// Fully-specified ctor, appended to the end of `InsertAtEnd`.
// NOTE(review): original lines 1679-1680 are missing (same gaps as above).
1671StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1672                     AtomicOrdering Order, SyncScope::ID SSID,
1673                     BasicBlock *InsertAtEnd)
1674    : Instruction(Type::getVoidTy(val->getContext()), Store,
1675                  OperandTraits<StoreInst>::op_begin(this),
1676                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
1677  Op<0>() = val;
1678  Op<1>() = addr;
1681  setAtomic(Order, SSID);
1682  AssertOK();
1683}
1684
1685
1686//===----------------------------------------------------------------------===//
1687// AtomicCmpXchgInst Implementation
1688//===----------------------------------------------------------------------===//
1689
// Shared initializer for cmpxchg: operands are (ptr, expected, new); the
// orderings, sync scope, and alignment live in subclass data. Trailing
// asserts validate the operand type relationships.
1690void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1691                             Align Alignment, AtomicOrdering SuccessOrdering,
1692                             AtomicOrdering FailureOrdering,
1693                             SyncScope::ID SSID) {
1694  Op<0>() = Ptr;
1695  Op<1>() = Cmp;
1696  Op<2>() = NewVal;
1697  setSuccessOrdering(SuccessOrdering);
1698  setFailureOrdering(FailureOrdering);
1699  setSyncScopeID(SSID);
1700  setAlignment(Alignment);
1701
1702  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1703         "All operands must be non-null!");
1704  assert(getOperand(0)->getType()->isPointerTy() &&
1705         "Ptr must have pointer type!");
1706  assert(cast<PointerType>(getOperand(0)->getType())
1707             ->isOpaqueOrPointeeTypeMatches(getOperand(1)->getType()) &&
1708         "Ptr must be a pointer to Cmp type!");
1709  assert(cast<PointerType>(getOperand(0)->getType())
1710             ->isOpaqueOrPointeeTypeMatches(getOperand(2)->getType()) &&
1711         "Ptr must be a pointer to NewVal type!");
1712  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1713         "Cmp type and NewVal type must be same!");
1714}
1715
// cmpxchg produces { ValueType, i1 }: the loaded value plus a success flag —
// hence the StructType result below.
// NOTE(review): the first signature line of each ctor (original 1716 and
// 1729, declaring Ptr/Cmp/NewVal) is missing from this extract.
1717                                     Align Alignment,
1718                                     AtomicOrdering SuccessOrdering,
1719                                     AtomicOrdering FailureOrdering,
1720                                     SyncScope::ID SSID,
1721                                     Instruction *InsertBefore)
1722    : Instruction(
1723          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1724          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1725          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1726  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1727}
1728
// Same as above, but appended to the end of `InsertAtEnd`.
1730                                     Align Alignment,
1731                                     AtomicOrdering SuccessOrdering,
1732                                     AtomicOrdering FailureOrdering,
1733                                     SyncScope::ID SSID,
1734                                     BasicBlock *InsertAtEnd)
1735    : Instruction(
1736          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1737          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1738          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
1739  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1740}
1741
1742//===----------------------------------------------------------------------===//
1743// AtomicRMWInst Implementation
1744//===----------------------------------------------------------------------===//
1745
// Shared initializer for atomicrmw: operands are (ptr, value); the
// operation kind, ordering, scope, and alignment live in subclass data.
// atomicrmw must have a genuine atomic ordering (not NotAtomic/Unordered).
// NOTE(review): original line 1755 (presumably setOperation(Operation)) is
// missing from this extract.
1746void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1747                         Align Alignment, AtomicOrdering Ordering,
1748                         SyncScope::ID SSID) {
1749  assert(Ordering != AtomicOrdering::NotAtomic &&
1750         "atomicrmw instructions can only be atomic.");
1751  assert(Ordering != AtomicOrdering::Unordered &&
1752         "atomicrmw instructions cannot be unordered.");
1753  Op<0>() = Ptr;
1754  Op<1>() = Val;
1756  setOrdering(Ordering);
1757  setSyncScopeID(SSID);
1758  setAlignment(Alignment);
1759
1760  assert(getOperand(0) && getOperand(1) &&
1761         "All operands must be non-null!");
1762  assert(getOperand(0)->getType()->isPointerTy() &&
1763         "Ptr must have pointer type!");
1764  assert(cast<PointerType>(getOperand(0)->getType())
1765             ->isOpaqueOrPointeeTypeMatches(getOperand(1)->getType()) &&
1766         "Ptr must be a pointer to Val type!");
1767  assert(Ordering != AtomicOrdering::NotAtomic &&
1768         "AtomicRMW instructions must be atomic!");
1769}
1770
// The instruction's result type is the operand value's type (the old value
// at the memory location is returned).
// NOTE(review): the first signature line of each ctor (original 1771 and
// 1780, declaring Operation/Ptr/Val) is missing from this extract.
1772                             Align Alignment, AtomicOrdering Ordering,
1773                             SyncScope::ID SSID, Instruction *InsertBefore)
1774    : Instruction(Val->getType(), AtomicRMW,
1775                  OperandTraits<AtomicRMWInst>::op_begin(this),
1776                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1777  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1778}
1779
// Same as above, but appended to the end of `InsertAtEnd`.
1781                             Align Alignment, AtomicOrdering Ordering,
1782                             SyncScope::ID SSID, BasicBlock *InsertAtEnd)
1783    : Instruction(Val->getType(), AtomicRMW,
1784                  OperandTraits<AtomicRMWInst>::op_begin(this),
1785                  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
1786  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1787}
1788
// Map an atomicrmw BinOp to its IR keyword spelling.
// NOTE(review): this extract dropped the signature line (original 1789) and
// every `case AtomicRMWInst::...:` label line that carried a hyperlink (e.g.
// 1791 Xchg, 1799 Nand, 1809/1811 UMax/UMin, the FP and wrap ops); only the
// `return` lines and a few case labels survive below.
1790  switch (Op) {
1792    return "xchg";
1793  case AtomicRMWInst::Add:
1794    return "add";
1795  case AtomicRMWInst::Sub:
1796    return "sub";
1797  case AtomicRMWInst::And:
1798    return "and";
1800    return "nand";
1801  case AtomicRMWInst::Or:
1802    return "or";
1803  case AtomicRMWInst::Xor:
1804    return "xor";
1805  case AtomicRMWInst::Max:
1806    return "max";
1807  case AtomicRMWInst::Min:
1808    return "min";
1810    return "umax";
1812    return "umin";
1814    return "fadd";
1816    return "fsub";
1818    return "fmax";
1820    return "fmin";
1822    return "uinc_wrap";
1824    return "udec_wrap";
1826    return "<invalid operation>";
1827  }
1828
  // All enumerators are handled above; reaching here is a programmer error.
1829  llvm_unreachable("invalid atomicrmw operation");
1830}
1831
1832//===----------------------------------------------------------------------===//
1833// FenceInst Implementation
1834//===----------------------------------------------------------------------===//
1835
// fence has no operands; its ordering and synchronization scope are stored
// in subclass data.
// NOTE(review): the first signature line of each ctor (original 1836 and
// 1844, declaring the LLVMContext and AtomicOrdering parameters) is missing
// from this extract.
1837                     SyncScope::ID SSID,
1838                     Instruction *InsertBefore)
1839  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1840  setOrdering(Ordering);
1841  setSyncScopeID(SSID);
1842}
1843
// Same as above, but appended to the end of `InsertAtEnd`.
1845                     SyncScope::ID SSID,
1846                     BasicBlock *InsertAtEnd)
1847  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1848  setOrdering(Ordering);
1849  setSyncScopeID(SSID);
1850}
1851
1852//===----------------------------------------------------------------------===//
1853// GetElementPtrInst Implementation
1854//===----------------------------------------------------------------------===//
1855
1856void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1857 const Twine &Name) {
1858 assert(getNumOperands() == 1 + IdxList.size() &&
1859 "NumOperands not initialized?");
1860 Op<0>() = Ptr;
1861 llvm::copy(IdxList, op_begin() + 1);
1862 setName(Name);
1863}
1864
// Copy constructor: clones operands plus the cached source/result element
// types (needed since opaque pointers carry no pointee type).
// NOTE(review): original line 1873 (presumably copying SubclassOptionalData,
// i.e. the inbounds flag) is missing from this extract.
1865GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1866    : Instruction(GEPI.getType(), GetElementPtr,
1867                  OperandTraits<GetElementPtrInst>::op_end(this) -
1868                      GEPI.getNumOperands(),
1869                  GEPI.getNumOperands()),
1870      SourceElementType(GEPI.SourceElementType),
1871      ResultElementType(GEPI.ResultElementType) {
1872  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1874}
1875
// Step one level into an aggregate type using a Value index. Structs require
// a valid (constant) index; arrays and vectors accept any integer index and
// always yield their element type. Returns nullptr for an invalid step.
// NOTE(review): the signature line (original 1876, the Value* overload of
// getTypeAtIndex) is missing from this extract.
1877  if (auto *Struct = dyn_cast<StructType>(Ty)) {
1878    if (!Struct->indexValid(Idx))
1879      return nullptr;
1880    return Struct->getTypeAtIndex(Idx);
1881  }
1882  if (!Idx->getType()->isIntOrIntVectorTy())
1883    return nullptr;
1884  if (auto *Array = dyn_cast<ArrayType>(Ty))
1885    return Array->getElementType();
1886  if (auto *Vector = dyn_cast<VectorType>(Ty))
1887    return Vector->getElementType();
1888  return nullptr;
1889}
1890
// Same as above for a plain uint64_t index (struct indices bounds-checked
// against the field count).
// NOTE(review): the signature line (original 1891, the uint64_t overload) is
// missing from this extract.
1892  if (auto *Struct = dyn_cast<StructType>(Ty)) {
1893    if (Idx >= Struct->getNumElements())
1894      return nullptr;
1895    return Struct->getElementType(Idx);
1896  }
1897  if (auto *Array = dyn_cast<ArrayType>(Ty))
1898    return Array->getElementType();
1899  if (auto *Vector = dyn_cast<VectorType>(Ty))
1900    return Vector->getElementType();
1901  return nullptr;
1902}
1903
// Walk a GEP index list (skipping the first index, which steps *over* the
// pointee rather than *into* it) and return the final indexed type, or
// nullptr if any step is invalid.
// NOTE(review): the template function's signature line (original 1905) and
// the loop-body step line (original 1909, presumably
// `Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);`) are missing from this
// extract, as are the signature lines of the three public getIndexedType
// overloads below (original 1916, 1920 partial, 1925).
1904template <typename IndexTy>
1906  if (IdxList.empty())
1907    return Ty;
1908  for (IndexTy V : IdxList.slice(1)) {
1910    if (!Ty)
1911      return Ty;
1912  }
1913  return Ty;
1914}
1915
// Public overload for ArrayRef<Value *> indices.
1917  return getIndexedTypeInternal(Ty, IdxList);
1918}
1919
// Public overload for ArrayRef<Constant *> indices.
1921                                        ArrayRef<Constant *> IdxList) {
1922  return getIndexedTypeInternal(Ty, IdxList);
1923}
1924
// Public overload for ArrayRef<uint64_t> indices.
1926  return getIndexedTypeInternal(Ty, IdxList);
1927}
1928
1929/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1930/// zeros. If so, the result pointer and the first operand have the same
1931/// value, just potentially different types.
// Operand 0 is the base pointer, so both scans below start at index 1.
// NOTE(review): the signature line (original 1932, hasAllZeroIndices) is
// missing from this extract.
1933  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1934    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1935      if (!CI->isZero()) return false;
1936    } else {
1937      return false;
1938    }
1939  }
1940  return true;
1941}
1942
1943/// hasAllConstantIndices - Return true if all of the indices of this GEP are
1944/// constant integers.  If so, the result pointer and the first operand have
1945/// a constant offset between them.
// NOTE(review): the signature line (original 1946) is missing from this
// extract.
1947  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1948    if (!isa<ConstantInt>(getOperand(i)))
1949      return false;
1950  }
1951  return true;
1952}
1953
// The four members below simply forward to the shared GEPOperator
// implementation (GEPOperator covers both GEP instructions and GEP constant
// expressions).
// NOTE(review): the signature lines (original 1954 setIsInBounds, 1958
// isInBounds, 1962 accumulateConstantOffset, 1968 collectOffset) are missing
// from this extract.
1955  cast<GEPOperator>(this)->setIsInBounds(B);
1956}
1957
1959  return cast<GEPOperator>(this)->isInBounds();
1960}
1961
1963                                                 APInt &Offset) const {
1964  // Delegate to the generic GEPOperator implementation.
1965  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1966}
1967
1969    const DataLayout &DL, unsigned BitWidth,
1970    MapVector<Value *, APInt> &VariableOffsets,
1971    APInt &ConstantOffset) const {
1972  // Delegate to the generic GEPOperator implementation.
1973  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1974                                                ConstantOffset);
1975}
1976
1977//===----------------------------------------------------------------------===//
1978// ExtractElementInst Implementation
1979//===----------------------------------------------------------------------===//
1980
/// Construct an extractelement instruction that reads the element of the
/// vector \p Val selected by the integer \p Index. The new instruction is
/// inserted immediately before \p InsertBef, and its result type is the
/// source vector's element type.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;   // Operand 0: the source vector.
  Op<1>() = Index; // Operand 1: the element index (an integer).
  setName(Name);
}
1994
/// Construct an extractelement instruction that reads the element of the
/// vector \p Val selected by the integer \p Index. The new instruction is
/// appended to the end of basic block \p InsertAE, and its result type is
/// the source vector's element type.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;   // Operand 0: the source vector.
  Op<1>() = Index; // Operand 1: the element index (an integer).
  setName(Name);
}
2009
2011 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
2012 return false;
2013 return true;
2014}
2015
2016//===----------------------------------------------------------------------===//
2017// InsertElementInst Implementation
2018//===----------------------------------------------------------------------===//
2019
/// Construct an insertelement instruction that produces a copy of vector
/// \p Vec with the element at position \p Index replaced by \p Elt. The new
/// instruction is inserted immediately before \p InsertBef; its result type
/// is the same vector type as \p Vec.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;   // Operand 0: the source vector.
  Op<1>() = Elt;   // Operand 1: the new element (vector's element type).
  Op<2>() = Index; // Operand 2: the element index (an integer).
  setName(Name);
}
2033
/// Construct an insertelement instruction that produces a copy of vector
/// \p Vec with the element at position \p Index replaced by \p Elt. The new
/// instruction is appended to the end of basic block \p InsertAE; its result
/// type is the same vector type as \p Vec.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;   // Operand 0: the source vector.
  Op<1>() = Elt;   // Operand 1: the new element (vector's element type).
  Op<2>() = Index; // Operand 2: the element index (an integer).
  setName(Name);
}
2048
2050 const Value *Index) {
2051 if (!Vec->getType()->isVectorTy())
2052 return false; // First operand of insertelement must be vector type.
2053
2054 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
2055 return false;// Second operand of insertelement must be vector element type.
2056
2057 if (!Index->getType()->isIntegerTy())
2058 return false; // Third operand of insertelement must be i32.
2059 return true;
2060}
2061
2062//===----------------------------------------------------------------------===//
2063// ShuffleVectorInst Implementation
2064//===----------------------------------------------------------------------===//
2065
2067 assert(V && "Cannot create placeholder of nullptr V");
2068 return PoisonValue::get(V->getType());
2069}
2070
2072 Instruction *InsertBefore)
2074 InsertBefore) {}
2075
2077 BasicBlock *InsertAtEnd)
2079 InsertAtEnd) {}
2080
2082 const Twine &Name,
2083 Instruction *InsertBefore)
2085 InsertBefore) {}
2086
2088 const Twine &Name, BasicBlock *InsertAtEnd)
2090 InsertAtEnd) {}
2091
2093 const Twine &Name,
2094 Instruction *InsertBefore)
2095 : Instruction(
2096 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2097 cast<VectorType>(Mask->getType())->getElementCount()),
2098 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2099 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2100 assert(isValidOperands(V1, V2, Mask) &&
2101 "Invalid shuffle vector instruction operands!");
2102
2103 Op<0>() = V1;
2104 Op<1>() = V2;
2105 SmallVector<int, 16> MaskArr;
2106 getShuffleMask(cast<Constant>(Mask), MaskArr);
2107 setShuffleMask(MaskArr);
2108 setName(Name);
2109}
2110
2112 const Twine &Name, BasicBlock *InsertAtEnd)
2113 : Instruction(
2114 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2115 cast<VectorType>(Mask->getType())->getElementCount()),
2116 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2117 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2118 assert(isValidOperands(V1, V2, Mask) &&
2119 "Invalid shuffle vector instruction operands!");
2120
2121 Op<0>() = V1;
2122 Op<1>() = V2;
2123 SmallVector<int, 16> MaskArr;
2124 getShuffleMask(cast<Constant>(Mask), MaskArr);
2125 setShuffleMask(MaskArr);
2126 setName(Name);
2127}
2128
2130 const Twine &Name,
2131 Instruction *InsertBefore)
2132 : Instruction(
2133 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2134 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2135 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2136 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2137 assert(isValidOperands(V1, V2, Mask) &&
2138 "Invalid shuffle vector instruction operands!");
2139 Op<0>() = V1;
2140 Op<1>() = V2;
2141 setShuffleMask(Mask);
2142 setName(Name);
2143}
2144
2146 const Twine &Name, BasicBlock *InsertAtEnd)
2147 : Instruction(
2148 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2149 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2150 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2151 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2152 assert(isValidOperands(V1, V2, Mask) &&
2153 "Invalid shuffle vector instruction operands!");
2154
2155 Op<0>() = V1;
2156 Op<1>() = V2;
2157 setShuffleMask(Mask);
2158 setName(Name);
2159}
2160
  // Swapping the two vector operands invalidates the shuffle mask: every
  // mask element that referenced operand 0 must now reference operand 1 and
  // vice versa, so rebuild the mask before exchanging the operands.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    // Poison lanes reference neither operand, so they are unaffected.
    if (MaskElt == PoisonMaskElem) {
      NewMask[i] = PoisonMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    // Move first-operand indices [0, NumOpElts) up into the second operand's
    // range, and second-operand indices down into the first's.
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}
2178
2180 ArrayRef<int> Mask) {
2181 // V1 and V2 must be vectors of the same type.
2182 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
2183 return false;
2184
2185 // Make sure the mask elements make sense.
2186 int V1Size =
2187 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
2188 for (int Elem : Mask)
2189 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
2190 return false;
2191
2192 if (isa<ScalableVectorType>(V1->getType()))
2193 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
2194 return false;
2195
2196 return true;
2197}
2198
2200 const Value *Mask) {
2201 // V1 and V2 must be vectors of the same type.
2202 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
2203 return false;
2204
2205 // Mask must be vector of i32, and must be the same kind of vector as the
2206 // input vectors
2207 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
2208 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
2209 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
2210 return false;
2211
2212 // Check to see if Mask is valid.
2213 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
2214 return true;
2215
2216 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
2217 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2218 for (Value *Op : MV->operands()) {
2219 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
2220 if (CI->uge(V1Size*2))
2221 return false;
2222 } else if (!isa<UndefValue>(Op)) {
2223 return false;
2224 }
2225 }
2226 return true;
2227 }
2228
2229 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2230 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2231 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
2232 i != e; ++i)
2233 if (CDS->getElementAsInteger(i) >= V1Size*2)
2234 return false;
2235 return true;
2236 }
2237
2238 return false;
2239}
2240
2242 SmallVectorImpl<int> &Result) {
2243 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
2244
2245 if (isa<ConstantAggregateZero>(Mask)) {
2246 Result.resize(EC.getKnownMinValue(), 0);
2247 return;
2248 }
2249
2250 Result.reserve(EC.getKnownMinValue());
2251
2252 if (EC.isScalable()) {
2253 assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
2254 "Scalable vector shuffle mask must be undef or zeroinitializer");
2255 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
2256 for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
2257 Result.emplace_back(MaskVal);
2258 return;
2259 }
2260
2261 unsigned NumElts = EC.getKnownMinValue();
2262
2263 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2264 for (unsigned i = 0; i != NumElts; ++i)
2265 Result.push_back(CDS->getElementAsInteger(i));
2266 return;
2267 }
2268 for (unsigned i = 0; i != NumElts; ++i) {
2269 Constant *C = Mask->getAggregateElement(i);
2270 Result.push_back(isa<UndefValue>(C) ? -1 :
2271 cast<ConstantInt>(C)->getZExtValue());
2272 }
2273}
2274
2276 ShuffleMask.assign(Mask.begin(), Mask.end());
2277 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
2278}
2279
2281 Type *ResultTy) {
2282 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
2283 if (isa<ScalableVectorType>(ResultTy)) {
2284 assert(all_equal(Mask) && "Unexpected shuffle");
2285 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
2286 if (Mask[0] == 0)
2287 return Constant::getNullValue(VecTy);
2288 return UndefValue::get(VecTy);
2289 }
2291 for (int Elem : Mask) {
2292 if (Elem == PoisonMaskElem)
2293 MaskConst.push_back(UndefValue::get(Int32Ty));
2294 else
2295 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
2296 }
2297 return ConstantVector::get(MaskConst);
2298}
2299
2300static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2301 assert(!Mask.empty() && "Shuffle mask must contain elements");
2302 bool UsesLHS = false;
2303 bool UsesRHS = false;
2304 for (int I : Mask) {
2305 if (I == -1)
2306 continue;
2307 assert(I >= 0 && I < (NumOpElts * 2) &&
2308 "Out-of-bounds shuffle mask element");
2309 UsesLHS |= (I < NumOpElts);
2310 UsesRHS |= (I >= NumOpElts);
2311 if (UsesLHS && UsesRHS)
2312 return false;
2313 }
2314 // Allow for degenerate case: completely undef mask means neither source is used.
2315 return UsesLHS || UsesRHS;
2316}
2317
2319 // We don't have vector operand size information, so assume operands are the
2320 // same size as the mask.
2321 return isSingleSourceMaskImpl(Mask, Mask.size());
2322}
2323
2324static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2325 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2326 return false;
2327 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2328 if (Mask[i] == -1)
2329 continue;
2330 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2331 return false;
2332 }
2333 return true;
2334}
2335
2337 // We don't have vector operand size information, so assume operands are the
2338 // same size as the mask.
2339 return isIdentityMaskImpl(Mask, Mask.size());
2340}
2341
  // A reverse mask must read all of its elements from a single source.
  if (!isSingleSourceMask(Mask))
    return false;

  // The number of elements in the mask must be at least 2.
  int NumElts = Mask.size();
  if (NumElts < 2)
    return false;

  for (int i = 0; i < NumElts; ++i) {
    // Undefined lanes may appear anywhere in a reverse mask.
    if (Mask[i] == -1)
      continue;
    // Lane i must select the mirrored position from either the first source
    // (NumElts - 1 - i) or the second source (2 * NumElts - 1 - i).
    if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
      return false;
  }
  return true;
}
2359
2361 if (!isSingleSourceMask(Mask))
2362 return false;
2363 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
2364 if (Mask[i] == -1)
2365 continue;
2366 if (Mask[i] != 0 && Mask[i] != NumElts)
2367 return false;
2368 }
2369 return true;
2370}
2371
2373 // Select is differentiated from identity. It requires using both sources.
2374 if (isSingleSourceMask(Mask))
2375 return false;
2376 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
2377 if (Mask[i] == -1)
2378 continue;
2379 if (Mask[i] != i && Mask[i] != (NumElts + i))
2380 return false;
2381 }
2382 return true;
2383}
2384
2386 // Example masks that will return true:
2387 // v1 = <a, b, c, d>
2388 // v2 = <e, f, g, h>
2389 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2390 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2391
2392 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2393 int NumElts = Mask.size();
2394 if (NumElts < 2 || !isPowerOf2_32(NumElts))
2395 return false;
2396
2397 // 2. The first element of the mask must be either a 0 or a 1.
2398 if (Mask[0] != 0 && Mask[0] != 1)
2399 return false;
2400
2401 // 3. The difference between the first 2 elements must be equal to the
2402 // number of elements in the mask.
2403 if ((Mask[1] - Mask[0]) != NumElts)
2404 return false;
2405
2406 // 4. The difference between consecutive even-numbered and odd-numbered
2407 // elements must be equal to 2.
2408 for (int i = 2; i < NumElts; ++i) {
2409 int MaskEltVal = Mask[i];
2410 if (MaskEltVal == -1)
2411 return false;
2412 int MaskEltPrevVal = Mask[i - 2];
2413 if (MaskEltVal - MaskEltPrevVal != 2)
2414 return false;
2415 }
2416 return true;
2417}
2418
2420 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2421 int StartIndex = -1;
2422 for (int I = 0, E = Mask.size(); I != E; ++I) {
2423 int MaskEltVal = Mask[I];
2424 if (MaskEltVal == -1)
2425 continue;
2426
2427 if (StartIndex == -1) {
2428 // Don't support a StartIndex that begins in the second input, or if the
2429 // first non-undef index would access below the StartIndex.
2430 if (MaskEltVal < I || E <= (MaskEltVal - I))
2431 return false;
2432
2433 StartIndex = MaskEltVal - I;
2434 continue;
2435 }
2436
2437 // Splice is sequential starting from StartIndex.
2438 if (MaskEltVal != (StartIndex + I))
2439 return false;
2440 }
2441
2442 if (StartIndex == -1)
2443 return false;
2444
2445 // NOTE: This accepts StartIndex == 0 (COPY).
2446 Index = StartIndex;
2447 return true;
2448}
2449
2451 int NumSrcElts, int &Index) {
2452 // Must extract from a single source.
2453 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2454 return false;
2455
2456 // Must be smaller (else this is an Identity shuffle).
2457 if (NumSrcElts <= (int)Mask.size())
2458 return false;
2459
2460 // Find start of extraction, accounting that we may start with an UNDEF.
2461 int SubIndex = -1;
2462 for (int i = 0, e = Mask.size(); i != e; ++i) {
2463 int M = Mask[i];
2464 if (M < 0)
2465 continue;
2466 int Offset = (M % NumSrcElts) - i;
2467 if (0 <= SubIndex && SubIndex != Offset)
2468 return false;
2469 SubIndex = Offset;
2470 }
2471
2472 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2473 Index = SubIndex;
2474 return true;
2475 }
2476 return false;
2477}
2478
2480 int NumSrcElts, int &NumSubElts,
2481 int &Index) {
2482 int NumMaskElts = Mask.size();
2483
2484 // Don't try to match if we're shuffling to a smaller size.
2485 if (NumMaskElts < NumSrcElts)
2486 return false;
2487
2488 // TODO: We don't recognize self-insertion/widening.
2489 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2490 return false;
2491
2492 // Determine which mask elements are attributed to which source.
2493 APInt UndefElts = APInt::getZero(NumMaskElts);
2494 APInt Src0Elts = APInt::getZero(NumMaskElts);
2495 APInt Src1Elts = APInt::getZero(NumMaskElts);
2496 bool Src0Identity = true;
2497 bool Src1Identity = true;
2498
2499 for (int i = 0; i != NumMaskElts; ++i) {
2500 int M = Mask[i];
2501 if (M < 0) {
2502 UndefElts.setBit(i);
2503 continue;
2504 }
2505 if (M < NumSrcElts) {
2506 Src0Elts.setBit(i);
2507 Src0Identity &= (M == i);
2508 continue;
2509 }
2510 Src1Elts.setBit(i);
2511 Src1Identity &= (M == (i + NumSrcElts));
2512 }
2513 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2514 "unknown shuffle elements");
2515 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2516 "2-source shuffle not found");
2517
2518 // Determine lo/hi span ranges.
2519 // TODO: How should we handle undefs at the start of subvector insertions?
2520 int Src0Lo = Src0Elts.countr_zero();
2521 int Src1Lo = Src1Elts.countr_zero();
2522 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2523 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2524
2525 // If src0 is in place, see if the src1 elements is inplace within its own
2526 // span.
2527 if (Src0Identity) {
2528 int NumSub1Elts = Src1Hi - Src1Lo;
2529 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2530 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2531 NumSubElts = NumSub1Elts;
2532 Index = Src1Lo;
2533 return true;
2534 }
2535 }
2536
2537 // If src1 is in place, see if the src0 elements is inplace within its own
2538 // span.
2539 if (Src1Identity) {
2540 int NumSub0Elts = Src0Hi - Src0Lo;
2541 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2542 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2543 NumSubElts = NumSub0Elts;
2544 Index = Src0Lo;
2545 return true;
2546 }
2547 }
2548
2549 return false;
2550}
2551
2553 if (isa<UndefValue>(Op<2>()))
2554 return false;
2555
2556 // FIXME: Not currently possible to express a shuffle mask for a scalable
2557 // vector for this case.
2558 if (isa<ScalableVectorType>(getType()))
2559 return false;
2560
2561 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2562 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2563 if (NumMaskElts <= NumOpElts)
2564 return false;
2565
2566 // The first part of the mask must choose elements from exactly 1 source op.
2568 if (!isIdentityMaskImpl(Mask, NumOpElts))
2569 return false;
2570
2571 // All extending must be with undef elements.
2572 for (int i = NumOpElts; i < NumMaskElts; ++i)
2573 if (Mask[i] != -1)
2574 return false;
2575
2576 return true;
2577}
2578
2580 if (isa<UndefValue>(Op<2>()))
2581 return false;
2582
2583 // FIXME: Not currently possible to express a shuffle mask for a scalable
2584 // vector for this case.
2585 if (isa<ScalableVectorType>(getType()))
2586 return false;
2587
2588 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2589 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2590 if (NumMaskElts >= NumOpElts)
2591 return false;
2592
2593 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2594}
2595
2597 // Vector concatenation is differentiated from identity with padding.
2598 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
2599 isa<UndefValue>(Op<2>()))
2600 return false;
2601
2602 // FIXME: Not currently possible to express a shuffle mask for a scalable
2603 // vector for this case.
2604 if (isa<ScalableVectorType>(getType()))
2605 return false;
2606
2607 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2608 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2609 if (NumMaskElts != NumOpElts * 2)
2610 return false;
2611
2612 // Use the mask length rather than the operands' vector lengths here. We
2613 // already know that the shuffle returns a vector twice as long as the inputs,
2614 // and neither of the inputs are undef vectors. If the mask picks consecutive
2615 // elements from both inputs, then this is a concatenation of the inputs.
2616 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2617}
2618
2620 int ReplicationFactor, int VF) {
2621 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2622 "Unexpected mask size.");
2623
2624 for (int CurrElt : seq(0, VF)) {
2625 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2626 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2627 "Run out of mask?");
2628 Mask = Mask.drop_front(ReplicationFactor);
2629 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2630 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2631 }))
2632 return false;
2633 }
2634 assert(Mask.empty() && "Did not consume the whole mask?");
2635
2636 return true;
2637}
2638
2640 int &ReplicationFactor, int &VF) {
2641 // undef-less case is trivial.
2642 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2643 ReplicationFactor =
2644 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2645 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2646 return false;
2647 VF = Mask.size() / ReplicationFactor;
2648 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2649 }
2650
2651 // However, if the mask contains undef's, we have to enumerate possible tuples
2652 // and pick one. There are bounds on replication factor: [1, mask size]
2653 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2654 // Additionally, mask size is a replication factor multiplied by vector size,
2655 // which further significantly reduces the search space.
2656
2657 // Before doing that, let's perform basic correctness checking first.
2658 int Largest = -1;
2659 for (int MaskElt : Mask) {
2660 if (MaskElt == PoisonMaskElem)
2661 continue;
2662 // Elements must be in non-decreasing order.
2663 if (MaskElt < Largest)
2664 return false;
2665 Largest = std::max(Largest, MaskElt);
2666 }
2667
2668 // Prefer larger replication factor if all else equal.
2669 for (int PossibleReplicationFactor :
2670 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2671 if (Mask.size() % PossibleReplicationFactor != 0)
2672 continue;
2673 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2674 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2675 PossibleVF))
2676 continue;
2677 ReplicationFactor = PossibleReplicationFactor;
2678 VF = PossibleVF;
2679 return true;
2680 }
2681
2682 return false;
2683}
2684
2685bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2686 int &VF) const {
2687 // Not possible to express a shuffle mask for a scalable vector for this
2688 // case.
2689 if (isa<ScalableVectorType>(getType()))
2690 return false;
2691
2692 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2693 if (ShuffleMask.size() % VF != 0)
2694 return false;
2695 ReplicationFactor = ShuffleMask.size() / VF;
2696
2697 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2698}
2699
2701 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2702 Mask.size() % VF != 0)
2703 return false;
2704 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2705 ArrayRef<int> SubMask = Mask.slice(K, VF);
2706 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2707 continue;
2708 SmallBitVector Used(VF, false);
2709 for_each(SubMask, [&Used, VF](int Idx) {
2710 if (Idx != PoisonMaskElem && Idx < VF)
2711 Used.set(Idx);
2712 });
2713 if (!Used.all())
2714 return false;
2715 }
2716 return true;
2717}
2718
2719/// Return true if this shuffle mask is a replication mask.
2721 // Not possible to express a shuffle mask for a scalable vector for this
2722 // case.
2723 if (isa<ScalableVectorType>(getType()))
2724 return false;
2725 if (!isSingleSourceMask(ShuffleMask))
2726 return false;
2727
2728 return isOneUseSingleSourceMask(ShuffleMask, VF);
2729}
2730
2731bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2732 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2733 // shuffle_vector can only interleave fixed length vectors - for scalable
2734 // vectors, see the @llvm.experimental.vector.interleave2 intrinsic
2735 if (!OpTy)
2736 return false;
2737 unsigned OpNumElts = OpTy->getNumElements();
2738
2739 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2740}
2741
2743 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2744 SmallVectorImpl<unsigned> &StartIndexes) {
2745 unsigned NumElts = Mask.size();
2746 if (NumElts % Factor)
2747 return false;
2748
2749 unsigned LaneLen = NumElts / Factor;
2750 if (!isPowerOf2_32(LaneLen))
2751 return false;
2752
2753 StartIndexes.resize(Factor);
2754
2755 // Check whether each element matches the general interleaved rule.
2756 // Ignore undef elements, as long as the defined elements match the rule.
2757 // Outer loop processes all factors (x, y, z in the above example)
2758 unsigned I = 0, J;
2759 for (; I < Factor; I++) {
2760 unsigned SavedLaneValue;
2761 unsigned SavedNoUndefs = 0;
2762
2763 // Inner loop processes consecutive accesses (x, x+1... in the example)
2764 for (J = 0; J < LaneLen - 1; J++) {
2765 // Lane computes x's position in the Mask
2766 unsigned Lane = J * Factor + I;
2767 unsigned NextLane = Lane + Factor;
2768 int LaneValue = Mask[Lane];
2769 int NextLaneValue = Mask[NextLane];
2770
2771 // If both are defined, values must be sequential
2772 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2773 LaneValue + 1 != NextLaneValue)
2774 break;
2775
2776 // If the next value is undef, save the current one as reference
2777 if (LaneValue >= 0 && NextLaneValue < 0) {
2778 SavedLaneValue = LaneValue;
2779 SavedNoUndefs = 1;
2780 }
2781
2782 // Undefs are allowed, but defined elements must still be consecutive:
2783 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2784 // Verify this by storing the last non-undef followed by an undef
2785 // Check that following non-undef masks are incremented with the
2786 // corresponding distance.
2787 if (SavedNoUndefs > 0 && LaneValue < 0) {
2788 SavedNoUndefs++;
2789 if (NextLaneValue >= 0 &&
2790 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2791 break;
2792 }
2793 }
2794
2795 if (J < LaneLen - 1)
2796 return false;
2797
2798 int StartMask = 0;
2799 if (Mask[I] >= 0) {
2800 // Check that the start of the I range (J=0) is greater than 0
2801 StartMask = Mask[I];
2802 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2803 // StartMask defined by the last value in lane
2804 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2805 } else if (SavedNoUndefs > 0) {
2806 // StartMask defined by some non-zero value in the j loop
2807 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2808 }
2809 // else StartMask remains set to 0, i.e. all elements are undefs
2810
2811 if (StartMask < 0)
2812 return false;
2813 // We must stay within the vectors; This case can happen with undefs.
2814 if (StartMask + LaneLen > NumInputElts)
2815 return false;
2816
2817 StartIndexes[I] = StartMask;
2818 }
2819
2820 return true;
2821}
2822
2823//===----------------------------------------------------------------------===//
2824// InsertValueInst Class
2825//===----------------------------------------------------------------------===//
2826
2827void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2828 const Twine &Name) {
2829 assert(getNumOperands() == 2 && "NumOperands not initialized?");
2830
2831 // There's no fundamental reason why we require at least one index
2832 // (other than weirdness with &*IdxBegin being invalid; see
2833 // getelementptr's init routine for example). But there's no
2834 // present need to support it.
2835 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2836
2838 Val->getType() && "Inserted value must match indexed type!");
2839 Op<0>() = Agg;
2840 Op<1>() = Val;
2841
2842 Indices.append(Idxs.begin(), Idxs.end());
2843 setName(Name);
2844}
2845
2846InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2847 : Instruction(IVI.getType(), InsertValue,
2848 OperandTraits<InsertValueInst>::op_begin(this), 2),
2849 Indices(IVI.Indices) {
2850 Op<0>() = IVI.getOperand(0);
2851 Op<1>() = IVI.getOperand(1);
2853}
2854
2855//===----------------------------------------------------------------------===//
2856// ExtractValueInst Class
2857//===----------------------------------------------------------------------===//
2858
/// Record the constant index list for this extractvalue and name the result.
/// The single aggregate operand is established by the constructor; init()
/// only validates and stores the indices.
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2869
2870ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2871 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
2872 Indices(EVI.Indices) {
2874}
2875
2876// getIndexedType - Returns the type of the element that would be extracted
2877// with an extractvalue instruction with the specified parameters.
2878//
2879// A null type is returned if the indices are invalid for the specified
2880// pointer type.
2881//
2883 ArrayRef<unsigned> Idxs) {
2884 for (unsigned Index : Idxs) {
2885 // We can't use CompositeType::indexValid(Index) here.
2886 // indexValid() always returns true for arrays because getelementptr allows
2887 // out-of-bounds indices. Since we don't allow those for extractvalue and
2888 // insertvalue we need to check array indexing manually.
2889 // Since the only other types we can index into are struct types it's just
2890 // as easy to check those manually as well.
2891 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2892 if (Index >= AT->getNumElements())
2893 return nullptr;
2894 Agg = AT->getElementType();
2895 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2896 if (Index >= ST->getNumElements())
2897 return nullptr;
2898 Agg = ST->getElementType(Index);
2899 } else {
2900 // Not a valid type to index into.
2901 return nullptr;
2902 }
2903 }
2904 return const_cast<Type*>(Agg);
2905}
2906
2907//===----------------------------------------------------------------------===//
2908// UnaryOperator Class
2909//===----------------------------------------------------------------------===//
2910
2912 Type *Ty, const Twine &Name,
2913 Instruction *InsertBefore)
2914 : UnaryInstruction(Ty, iType, S, InsertBefore) {
2915 Op<0>() = S;
2916 setName(Name);
2917 AssertOK();
2918}
2919
2921 Type *Ty, const Twine &Name,
2922 BasicBlock *InsertAtEnd)
2923 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
2924 Op<0>() = S;
2925 setName(Name);
2926 AssertOK();
2927}
2928
2930 const Twine &Name,
2931 Instruction *InsertBefore) {
2932 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2933}
2934
2936 const Twine &Name,
2937 BasicBlock *InsertAtEnd) {
2938 UnaryOperator *Res = Create(Op, S, Name);
2939 Res->insertInto(InsertAtEnd, InsertAtEnd->end());
2940 return Res;
2941}
2942
/// Verify the type invariants of a just-constructed unary operator. In
/// release builds (NDEBUG) this compiles to a no-op.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    // FNeg must preserve its operand's type, and that type must be
    // floating-point (scalar or vector).
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
2959
2960//===----------------------------------------------------------------------===//
2961// BinaryOperator Class
2962//===----------------------------------------------------------------------===//
2963
2965 Type *Ty, const Twine &Name,
2966 Instruction *InsertBefore)
2967 : Instruction(Ty, iType,
2968 OperandTraits<BinaryOperator>::op_begin(this),
2969 OperandTraits<BinaryOperator>::operands(this),
2970 InsertBefore) {
2971 Op<0>() = S1;
2972 Op<1>() = S2;
2973 setName(Name);
2974 AssertOK();
2975}
2976
2978 Type *Ty, const Twine &Name,
2979 BasicBlock *InsertAtEnd)
2980 : Instruction(Ty, iType,
2981 OperandTraits<BinaryOperator>::op_begin(this),
2982 OperandTraits<BinaryOperator>::operands(this),
2983 InsertAtEnd) {
2984 Op<0>() = S1;
2985 Op<1>() = S2;
2986 setName(Name);
2987 AssertOK();
2988}
2989
/// Sanity-check the freshly constructed binary operator: both operands must
/// have the same type, the result type must match the operands, and the
/// operand type class (integer vs. floating point) must be legal for the
/// opcode. All opcode-specific checks are compiled out in release (NDEBUG)
/// builds; only the operand-type-match assert remains gated by NDEBUG via
/// assert() itself.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  // Integer arithmetic: requires (vectors of) integers.
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  // Floating-point arithmetic: requires (vectors of) FP types.
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  // Shifts: integer-only, result type equals operand type.
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  // Bitwise logical operations: integer-only.
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
3057
3059 const Twine &Name,
3060 Instruction *InsertBefore) {
3061 assert(S1->getType() == S2->getType() &&
3062 "Cannot create binary operator with two operands of differing type!");
3063 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
3064}
3065
3067 const Twine &Name,
3068 BasicBlock *InsertAtEnd) {
3069 BinaryOperator *Res = Create(Op, S1, S2, Name);
3070 Res->insertInto(InsertAtEnd, InsertAtEnd->end());
3071 return Res;
3072}
3073
3075 Instruction *InsertBefore) {
3076 Value *Zero = ConstantInt::get(Op->getType(), 0);
3077 return new BinaryOperator(Instruction::Sub,
3078 Zero, Op,
3079 Op->getType(), Name, InsertBefore);
3080}
3081
3083 BasicBlock *InsertAtEnd) {
3084 Value *Zero = ConstantInt::get(Op->getType(), 0);
3085 return new BinaryOperator(Instruction::Sub,
3086 Zero, Op,
3087 Op->getType(), Name, InsertAtEnd);
3088}
3089
3091 Instruction *InsertBefore) {
3092 Value *Zero = ConstantInt::get(Op->getType(), 0);
3093 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
3094}
3095
3097 BasicBlock *InsertAtEnd) {
3098 Value *Zero = ConstantInt::get(Op->getType(), 0);
3099 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
3100}
3101
3103 Instruction *InsertBefore) {
3104 Value *Zero = ConstantInt::get(Op->getType(), 0);
3105 return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertBefore);
3106}
3107
3109 BasicBlock *InsertAtEnd) {
3110 Value *Zero = ConstantInt::get(Op->getType(), 0);
3111 return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertAtEnd);
3112}
3113
3115 Instruction *InsertBefore) {
3117 return new BinaryOperator(Instruction::Xor, Op, C,
3118 Op->getType(), Name, InsertBefore);
3119}
3120
3122 BasicBlock *InsertAtEnd) {
3124 return new BinaryOperator(Instruction::Xor, Op, AllOnes,
3125 Op->getType(), Name, InsertAtEnd);
3126}
3127
3128// Exchange the two operands to this instruction. This instruction is safe to
3129// use on any binary instruction and does not modify the semantics of the
3130// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
3131// is changed.
3133 if (!isCommutative())
3134 return true; // Can't commute operands
3135 Op<0>().swap(Op<1>());
3136 return false;
3137}
3138
3139//===----------------------------------------------------------------------===//
3140// FPMathOperator Class
3141//===----------------------------------------------------------------------===//
3142
3144 const MDNode *MD =
3145 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
3146 if (!MD)
3147 return 0.0;
3148 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
3149 return Accuracy->getValueAPF().convertToFloat();
3150}
3151
3152//===----------------------------------------------------------------------===//
3153// CastInst Class
3154//===----------------------------------------------------------------------===//
3155
3156// Just determine if this cast only deals with integral->integral conversion.
3158 switch (getOpcode()) {
3159 default: return false;
3160 case Instruction::ZExt:
3161 case Instruction::SExt:
3162 case Instruction::Trunc:
3163 return true;
3164 case Instruction::BitCast:
3165 return getOperand(0)->getType()->isIntegerTy() &&
3166 getType()->isIntegerTy();
3167 }
3168}
3169
3170/// This function determines if the CastInst does not require any bits to be
3171/// changed in order to effect the cast. Essentially, it identifies cases where
3172/// no code gen is necessary for the cast, hence the name no-op cast. For
3173/// example, the following are all no-op casts:
3174/// # bitcast i32* %x to i8*
3175/// # bitcast <2 x i32> %x to <4 x i16>
3176/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
3177/// Determine if the described cast is a no-op.
3179 Type *SrcTy,
3180 Type *DestTy,
3181 const DataLayout &DL) {
3182 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
3183 switch (Opcode) {
3184 default: llvm_unreachable("Invalid CastOp");
3185 case Instruction::Trunc:
3186 case Instruction::ZExt:
3187 case Instruction::SExt:
3188 case Instruction::FPTrunc:
3189 case Instruction::FPExt:
3190 case Instruction::UIToFP:
3191 case Instruction::SIToFP:
3192 case Instruction::FPToUI:
3193 case Instruction::FPToSI:
3194 case Instruction::AddrSpaceCast:
3195 // TODO: Target informations may give a more accurate answer here.
3196 return false;
3197 case Instruction::BitCast:
3198 return true; // BitCast never modifies bits.
3199 case Instruction::PtrToInt:
3200 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
3201 DestTy->getScalarSizeInBits();
3202 case Instruction::IntToPtr:
3203 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
3204 SrcTy->getScalarSizeInBits();
3205 }
3206}
3207
3209 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
3210}
3211
3212/// This function determines if a pair of casts can be eliminated and what
3213/// opcode should be used in the elimination. This assumes that there are two
3214/// instructions like this:
3215/// * %F = firstOpcode SrcTy %x to MidTy
3216/// * %S = secondOpcode MidTy %F to DstTy
3217/// The function returns a resultOpcode so these two casts can be replaced with:
3218/// * %Replacement = resultOpcode %SrcTy %x to DstTy
3219/// If no such cast is permitted, the function returns 0.
3222 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
3223 Type *DstIntPtrTy) {
3224 // Define the 144 possibilities for these two cast instructions. The values
3225 // in this matrix determine what to do in a given situation and select the
3226 // case in the switch below. The rows correspond to firstOp, the columns
3227 // correspond to secondOp. In looking at the table below, keep in mind
3228 // the following cast properties:
3229 //
3230 // Size Compare Source Destination
3231 // Operator Src ? Size Type Sign Type Sign
3232 // -------- ------------ ------------------- ---------------------
3233 // TRUNC > Integer Any Integral Any
3234 // ZEXT < Integral Unsigned Integer Any
3235 // SEXT < Integral Signed Integer Any
3236 // FPTOUI n/a FloatPt n/a Integral Unsigned
3237 // FPTOSI n/a FloatPt n/a Integral Signed
3238 // UITOFP n/a Integral Unsigned FloatPt n/a
3239 // SITOFP n/a Integral Signed FloatPt n/a
3240 // FPTRUNC > FloatPt n/a FloatPt n/a
3241 // FPEXT < FloatPt n/a FloatPt n/a
3242 // PTRTOINT n/a Pointer n/a Integral Unsigned
3243 // INTTOPTR n/a Integral Unsigned Pointer n/a
3244 // BITCAST = FirstClass n/a FirstClass n/a
3245 // ADDRSPCST n/a Pointer n/a Pointer n/a
3246 //
3247 // NOTE: some transforms are safe, but we consider them to be non-profitable.
3248 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
3249 // into "fptoui double to i64", but this loses information about the range
3250 // of the produced value (we no longer know the top-part is all zeros).
3251 // Further this conversion is often much more expensive for typical hardware,
3252 // and causes issues when building libgcc. We disallow fptosi+sext for the
3253 // same reason.
3254 const unsigned numCastOps =
3255 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
3256 static const uint8_t CastResults[numCastOps][numCastOps] = {
3257 // T F F U S F F P I B A -+
3258 // R Z S P P I I T P 2 N T S |
3259 // U E E 2 2 2 2 R E I T C C +- secondOp
3260 // N X X U S F F N X N 2 V V |
3261 // C T T I I P P C T T P T T -+
3262 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
3263 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
3264 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
3265 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
3266 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
3267 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
3268 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
3269 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
3270 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
3271 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
3272 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
3273 { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast |
3274 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
3275 };
3276
3277 // TODO: This logic could be encoded into the table above and handled in the
3278 // switch below.
3279 // If either of the casts are a bitcast from scalar to vector, disallow the
3280 // merging. However, any pair of bitcasts are allowed.
3281 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
3282 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
3283 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
3284
3285 // Check if any of the casts convert scalars <-> vectors.
3286 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
3287 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
3288 if (!AreBothBitcasts)
3289 return 0;
3290
3291 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
3292 [secondOp-Instruction::CastOpsBegin];
3293 switch (ElimCase) {
3294 case 0:
3295 // Categorically disallowed.
3296 return 0;
3297 case 1:
3298 // Allowed, use first cast's opcode.
3299 return firstOp;
3300 case 2:
3301 // Allowed, use second cast's opcode.
3302 return secondOp;
3303 case 3:
3304 // No-op cast in second op implies firstOp as long as the DestTy
3305 // is integer and we are not converting between a vector and a
3306 // non-vector type.
3307 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
3308 return firstOp;
3309 return 0;
3310 case 4:
3311 // No-op cast in second op implies firstOp as long as the DestTy
3312 // is floating point.
3313 if (DstTy->isFloatingPointTy())
3314 return firstOp;
3315 return 0;
3316 case 5:
3317 // No-op cast in first op implies secondOp as long as the SrcTy
3318 // is an integer.
3319 if (SrcTy->isIntegerTy())
3320 return secondOp;
3321 return 0;
3322 case 6:
3323 // No-op cast in first op implies secondOp as long as the SrcTy
3324 // is a floating point.
3325 if (SrcTy->isFloatingPointTy())
3326 return secondOp;
3327 return 0;
3328 case 7: {
3329 // Disable inttoptr/ptrtoint optimization if enabled.
3330 if (DisableI2pP2iOpt)
3331 return 0;
3332
3333 // Cannot simplify if address spaces are different!
3334 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3335 return 0;
3336
3337 unsigned MidSize = MidTy->getScalarSizeInBits();
3338 // We can still fold this without knowing the actual sizes as long we
3339 // know that the intermediate pointer is the largest possible
3340 // pointer size.
3341 // FIXME: Is this always true?
3342 if (MidSize == 64)
3343 return Instruction::BitCast;
3344
3345 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
3346 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
3347 return 0;
3348 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
3349 if (MidSize >= PtrSize)
3350 return Instruction::BitCast;
3351 return 0;
3352 }
3353 case 8: {
3354 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
3355 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
3356 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
3357 unsigned SrcSize = SrcTy->getScalarSizeInBits();
3358 unsigned DstSize = DstTy->getScalarSizeInBits();
3359 if (SrcTy == DstTy)
3360 return Instruction::BitCast;
3361 if (SrcSize < DstSize)
3362 return firstOp;
3363 if (SrcSize > DstSize)
3364 return secondOp;
3365 return 0;
3366 }
3367 case 9:
3368 // zext, sext -> zext, because sext can't sign extend after zext
3369 return Instruction::ZExt;
3370 case 11: {
3371 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
3372 if (!MidIntPtrTy)
3373 return 0;
3374 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
3375 unsigned SrcSize = SrcTy->getScalarSizeInBits();
3376 unsigned DstSize = DstTy->getScalarSizeInBits();
3377 if (SrcSize <= PtrSize && SrcSize == DstSize)
3378 return Instruction::BitCast;
3379 return 0;
3380 }
3381 case 12:
3382 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
3383 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
3384 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3385 return Instruction::AddrSpaceCast;
3386 return Instruction::BitCast;
3387 case 13:
3388 // FIXME: this state can be merged with (1), but the following assert
3389 // is useful to check the correcteness of the sequence due to semantic
3390 // change of bitcast.
3391 assert(
3392 SrcTy->isPtrOrPtrVectorTy() &&
3393 MidTy->isPtrOrPtrVectorTy() &&
3394 DstTy->isPtrOrPtrVectorTy() &&
3395 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3396 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3397 "Illegal addrspacecast, bitcast sequence!");
3398 // Allowed, use first cast's opcode
3399 return firstOp;
3400 case 14: {
3401 // bitcast, addrspacecast -> addrspacecast if the element type of
3402 // bitcast's source is the same as that of addrspacecast's destination.
3403 PointerType *SrcPtrTy = cast<PointerType>(SrcTy->getScalarType());
3404 PointerType *DstPtrTy = cast<PointerType>(DstTy->getScalarType());
3405 if (SrcPtrTy->hasSameElementTypeAs(DstPtrTy))
3406 return Instruction::AddrSpaceCast;
3407 return 0;
3408 }
3409 case 15:
3410 // FIXME: this state can be merged with (1), but the following assert
3411 // is useful to check the correcteness of the sequence due to semantic
3412 // change of bitcast.
3413 assert(
3414 SrcTy->isIntOrIntVectorTy() &&
3415 MidTy->isPtrOrPtrVectorTy() &&
3416 DstTy->isPtrOrPtrVectorTy() &&
3417 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3418 "Illegal inttoptr, bitcast sequence!");
3419 // Allowed, use first cast's opcode
3420 return firstOp;
3421 case 16:
3422 // FIXME: this state can be merged with (2), but the following assert
3423 // is useful to check the correcteness of the sequence due to semantic
3424 // change of bitcast.
3425 assert(
3426 SrcTy->isPtrOrPtrVectorTy() &&
3427 MidTy->isPtrOrPtrVectorTy() &&
3428 DstTy->isIntOrIntVectorTy() &&
3429 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3430 "Illegal bitcast, ptrtoint sequence!");
3431 // Allowed, use second cast's opcode
3432 return secondOp;
3433 case 17:
3434 // (sitofp (zext x)) -> (uitofp x)
3435 return Instruction::UIToFP;
3436 case 99:
3437 // Cast combination can't happen (error in input). This is for all cases
3438 // where the MidTy is not the same for the two cast instructions.
3439 llvm_unreachable("Invalid Cast Combination");
3440 default:
3441 llvm_unreachable("Error in CastResults table!!!");
3442 }
3443}
3444
3446 const Twine &Name, Instruction *InsertBefore) {
3447 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3448 // Construct and return the appropriate CastInst subclass
3449 switch (op) {
3450 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3451 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3452 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3453 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3454 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3455 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3456 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3457 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3458 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3459 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3460 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3461 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
3462 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3463 default: llvm_unreachable("Invalid opcode provided");
3464 }
3465}
3466
3468 const Twine &Name, BasicBlock *InsertAtEnd) {
3469 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3470 // Construct and return the appropriate CastInst subclass
3471 switch (op) {
3472 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
3473 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
3474 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
3475 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
3476 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
3477 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
3478 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
3479 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
3480 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
3481 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
3482 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
3483 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
3484 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
3485 default: llvm_unreachable("Invalid opcode provided");
3486 }
3487}
3488
3490 const Twine &Name,
3491 Instruction *InsertBefore) {
3492 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3493 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3494 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3495}
3496
3498 const Twine &Name,
3499 BasicBlock *InsertAtEnd) {
3500 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3501 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3502 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
3503}
3504
3506 const Twine &Name,
3507 Instruction *InsertBefore) {
3508 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3509 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3510 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3511}
3512
3514 const Twine &Name,
3515 BasicBlock *InsertAtEnd) {
3516 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3517 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3518 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
3519}
3520
3522 const Twine &Name,
3523 Instruction *InsertBefore) {
3524 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3525 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3526 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3527}
3528
3530 const Twine &Name,
3531 BasicBlock *InsertAtEnd) {
3532 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3533 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3534 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
3535}
3536
3538 const Twine &Name,
3539 BasicBlock *InsertAtEnd) {
3540 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3541 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3542 "Invalid cast");
3543 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3544 assert((!Ty->isVectorTy() ||
3545 cast<VectorType>(Ty)->getElementCount() ==
3546 cast<VectorType>(S->getType())->getElementCount()) &&
3547 "Invalid cast");
3548
3549 if (Ty->isIntOrIntVectorTy())
3550 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
3551
3552 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
3553}
3554
3555/// Create a BitCast or a PtrToInt cast instruction
3557 const Twine &Name,
3558 Instruction *InsertBefore) {
3559 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3560 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3561 "Invalid cast");
3562 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3563 assert((!Ty->isVectorTy() ||
3564 cast<VectorType>(Ty)->getElementCount() ==
3565 cast<VectorType>(S->getType())->getElementCount()) &&
3566 "Invalid cast");
3567
3568 if (Ty->isIntOrIntVectorTy())
3569 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3570
3571 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3572}
3573
3575 Value *S, Type *Ty,
3576 const Twine &Name,
3577 BasicBlock *InsertAtEnd) {
3578 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3579 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3580
3582 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
3583
3584 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3585}
3586
3588 Value *S, Type *Ty,
3589 const Twine &Name,
3590 Instruction *InsertBefore) {
3591 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3592 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3593
3595 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3596
3597 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3598}
3599
3601 const Twine &Name,
3602 Instruction *InsertBefore) {
3603 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3604 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3605 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3606 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3607
3608 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3609}
3610
3612 bool isSigned, const Twine &Name,
3613 Instruction *InsertBefore) {
3614 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3615 "Invalid integer cast");
3616 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3617 unsigned DstBits = Ty->getScalarSizeInBits();
3618 Instruction::CastOps opcode =
3619 (SrcBits == DstBits ? Instruction::BitCast :
3620 (SrcBits > DstBits ? Instruction::Trunc :
3621 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3622 return Create(opcode, C, Ty, Name, InsertBefore);
3623}
3624
3626 bool isSigned, const Twine &Name,
3627 BasicBlock *InsertAtEnd) {
3628 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3629 "Invalid cast");
3630 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3631 unsigned DstBits = Ty->getScalarSizeInBits();
3632 Instruction::CastOps opcode =
3633 (SrcBits == DstBits ? Instruction::BitCast :
3634 (SrcBits > DstBits ? Instruction::Trunc :
3635 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3636 return Create(opcode, C, Ty, Name, InsertAtEnd);
3637}
3638
3640 const Twine &Name,
3641 Instruction *InsertBefore) {
3642 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3643 "Invalid cast");
3644 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3645 unsigned DstBits = Ty->getScalarSizeInBits();
3646 Instruction::CastOps opcode =
3647 (SrcBits == DstBits ? Instruction::BitCast :
3648 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3649 return Create(opcode, C, Ty, Name, InsertBefore);
3650}
3651
3653 const Twine &Name,
3654 BasicBlock *InsertAtEnd) {
3655 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3656 "Invalid cast");
3657 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3658 unsigned DstBits = Ty->getScalarSizeInBits();
3659 Instruction::CastOps opcode =
3660 (SrcBits == DstBits ? Instruction::BitCast :
3661 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3662 return Create(opcode, C, Ty, Name, InsertAtEnd);
3663}
3664
3665bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3666 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3667 return false;
3668
3669 if (SrcTy == DestTy)
3670 return true;
3671
3672 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3673 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3674 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3675 // An element by element cast. Valid if casting the elements is valid.
3676 SrcTy = SrcVecTy->getElementType();
3677 DestTy = DestVecTy->getElementType();
3678 }
3679 }
3680 }
3681
3682 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3683 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3684 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3685 }
3686 }
3687
3688 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3689 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3690
3691 // Could still have vectors of pointers if the number of elements doesn't
3692 // match
3693 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3694 return false;
3695
3696 if (SrcBits != DestBits)
3697 return false;
3698
3699 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
3700 return false;
3701
3702 return true;
3703}
3704
3706 const DataLayout &DL) {
3707 // ptrtoint and inttoptr are not allowed on non-integral pointers
3708 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3709 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3710 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3711 !DL.isNonIntegralPointerType(PtrTy));
3712 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3713 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3714 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3715 !DL.isNonIntegralPointerType(PtrTy));
3716
3717 return isBitCastable(SrcTy, DestTy);
3718}
3719
3720// Provide a way to get a "cast" where the cast opcode is inferred from the
3721// types and size of the operand. This, basically, is a parallel of the
3722// logic in the castIsValid function below. This axiom should hold:
3723// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3724// should not assert in castIsValid. In other words, this produces a "correct"
3725// casting opcode for the arguments passed to it.
3728 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3729 Type *SrcTy = Src->getType();
3730
3731 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3732 "Only first class types are castable!");
3733
3734 if (SrcTy == DestTy)
3735 return BitCast;
3736
3737 // FIXME: Check address space sizes here
3738 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3739 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3740 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3741 // An element by element cast. Find the appropriate opcode based on the
3742 // element types.
3743 SrcTy = SrcVecTy->getElementType();
3744 DestTy = DestVecTy->getElementType();
3745 }
3746
3747 // Get the bit sizes, we'll need these
3748 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3749 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3750
3751 // Run through the possibilities ...
3752 if (DestTy->isIntegerTy()) { // Casting to integral
3753 if (SrcTy->isIntegerTy()) { // Casting from integral
3754 if (DestBits < SrcBits)
3755 return Trunc; // int -> smaller int
3756 else if (DestBits > SrcBits) { // its an extension
3757 if (SrcIsSigned)
3758 return SExt; // signed -> SEXT
3759 else
3760 return ZExt; // unsigned -> ZEXT
3761 } else {
3762 return BitCast; // Same size, No-op cast
3763 }
3764 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3765 if (DestIsSigned)
3766 return FPToSI; // FP -> sint
3767 else
3768 return FPToUI; // FP -> uint
3769 } else if (SrcTy->isVectorTy()) {
3770 assert(DestBits == SrcBits &&
3771 "Casting vector to integer of different width");
3772 return BitCast; // Same size, no-op cast
3773 } else {
3774 assert(SrcTy->isPointerTy() &&
3775 "Casting from a value that is not first-class type");
3776 return PtrToInt; // ptr -> int
3777 }
3778 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3779 if (SrcTy->isIntegerTy()) { // Casting from integral
3780 if (SrcIsSigned)
3781 return SIToFP; // sint -> FP
3782 else
3783 return UIToFP; // uint -> FP
3784 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3785 if (DestBits < SrcBits) {
3786 return FPTrunc; // FP -> smaller FP
3787 } else if (DestBits > SrcBits) {
3788 return FPExt; // FP -> larger FP
3789 } else {
3790 return BitCast; // same size, no-op cast
3791 }
3792 } else if (SrcTy->isVectorTy()) {
3793 assert(DestBits == SrcBits &&
3794 "Casting vector to floating point of different width");
3795 return BitCast; // same size, no-op cast
3796 }
3797 llvm_unreachable("Casting pointer or non-first class to float");
3798 } else if (DestTy->isVectorTy()) {
3799 assert(DestBits == SrcBits &&
3800 "Illegal cast to vector (wrong type or size)");
3801 return BitCast;
3802 } else if (DestTy->isPointerTy()) {
3803 if (SrcTy->isPointerTy()) {
3804 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3805 return AddrSpaceCast;
3806 return BitCast; // ptr -> ptr
3807 } else if (SrcTy->isIntegerTy()) {
3808 return IntToPtr; // int -> ptr
3809 }
3810 llvm_unreachable("Casting pointer to other than pointer or int");
3811 } else if (DestTy->isX86_MMXTy()) {
3812 if (SrcTy->isVectorTy()) {
3813 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
3814 return BitCast; // 64-bit vector to MMX
3815 }
3816 llvm_unreachable("Illegal cast to X86_MMX");
3817 }
3818 llvm_unreachable("Casting to type that is not first-class");
3819}
3820
//===----------------------------------------------------------------------===//
// CastInst SubClass Constructors
//===----------------------------------------------------------------------===//

/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
3829bool
3831 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3832 SrcTy->isAggregateType() || DstTy->isAggregateType())
3833 return false;
3834
3835 // Get the size of the types in bits, and whether we are dealing
3836 // with vector types, we'll need this later.
3837 bool SrcIsVec = isa<VectorType>(SrcTy);
3838 bool DstIsVec = isa<VectorType>(DstTy);
3839 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3840 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3841
3842 // If these are vector types, get the lengths of the vectors (using zero for
3843 // scalar types means that checking that vector lengths match also checks that
3844 // scalars are not being converted to vectors or vectors to scalars).
3845 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3847 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3849
3850 // Switch on the opcode provided
3851 switch (op) {
3852 default: return false; // This is an input error
3853 case Instruction::Trunc:
3854 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3855 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3856 case Instruction::ZExt:
3857 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3858 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3859 case Instruction::SExt:
3860 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3861 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3862 case Instruction::FPTrunc:
3863 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3864 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3865 case Instruction::FPExt:
3866 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3867 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3868 case Instruction::UIToFP:
3869 case Instruction::SIToFP:
3870 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3871 SrcEC == DstEC;
3872 case Instruction::FPToUI:
3873 case Instruction::FPToSI:
3874 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3875 SrcEC == DstEC;
3876 case Instruction::PtrToInt:
3877 if (SrcEC != DstEC)
3878 return false;
3879 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3880 case Instruction::IntToPtr:
3881 if (SrcEC != DstEC)
3882 return false;
3883 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3884 case Instruction::BitCast: {
3885 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3886 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3887
3888 // BitCast implies a no-op cast of type only. No bits change.
3889 // However, you can't cast pointers to anything but pointers.
3890 if (!SrcPtrTy != !DstPtrTy)
3891 return false;
3892
3893 // For non-pointer cases, the cast is okay if the source and destination bit
3894 // widths are identical.
3895 if (!SrcPtrTy)
3896 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3897
3898 // If both are pointers then the address spaces must match.
3899 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3900 return false;
3901
3902 // A vector of pointers must have the same number of elements.
3903 if (SrcIsVec && DstIsVec)
3904 return SrcEC == DstEC;
3905 if (SrcIsVec)
3906 return SrcEC == ElementCount::getFixed(1);
3907 if (DstIsVec)
3908 return DstEC == ElementCount::getFixed(1);
3909
3910 return true;
3911 }
3912 case Instruction::AddrSpaceCast: {
3913 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3914 if (!SrcPtrTy)
3915 return false;
3916
3917 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3918 if (!DstPtrTy)
3919 return false;
3920
3921 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3922 return false;
3923
3924 return SrcEC == DstEC;
3925 }
3926 }
3927}
3928
3930 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3931) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3932 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3933}
3934
3936 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3937) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
3938 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3939}
3940
3942 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3943) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3944 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3945}
3946
3948 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3949) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
3950 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3951}
3953 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3954) : CastInst(Ty, SExt, S, Name, InsertBefore) {
3955 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3956}
3957
3959 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3960) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
3961 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3962}
3963
3965 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3966) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3967 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3968}
3969
3971 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3972) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
3973 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3974}
3975
3977 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3978) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3979 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3980}
3981
3983 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3984) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
3985 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3986}
3987
3989 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3990) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3991 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3992}
3993
3995 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3996) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
3997 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3998}
3999
4001 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4002) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
4003 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4004}
4005
4007 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4008) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
4009 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4010}
4011
4013 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4014) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
4015 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4016}
4017
4019 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4020) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
4021 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4022}
4023
4025 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4026) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
4027 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4028}
4029
4031 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4032) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
4033 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4034}
4035
4037 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4038) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
4039 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4040}
4041
4043 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4044) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
4045 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4046}
4047
4049 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4050) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
4051 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4052}
4053
4055 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4056) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
4057 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4058}
4059
4061 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4062) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
4063 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4064}
4065
4067 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4068) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
4069 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4070}
4071
4073 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4074) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
4075 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4076}
4077
4079 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4080) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
4081 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4082}
4083
//===----------------------------------------------------------------------===//
// CmpInst Classes
//===----------------------------------------------------------------------===//
4087
4089 Value *RHS, const Twine &Name, Instruction *InsertBefore,
4090 Instruction *FlagsSource)
4091 : Instruction(ty, op,
4092 OperandTraits<CmpInst>::op_begin(this),
4093 OperandTraits<CmpInst>::operands(this),
4094 InsertBefore) {
4095 Op<0>() = LHS;
4096 Op<1>() = RHS;
4097 setPredicate((Predicate)predicate);
4098 setName(Name);
4099 if (FlagsSource)
4100 copyIRFlags(FlagsSource);
4101}
4102
4104 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
4105 : Instruction(ty, op,
4106 OperandTraits<CmpInst>::op_begin(this),
4107 OperandTraits<CmpInst>::operands(this),
4108 InsertAtEnd) {
4109 Op<0>() = LHS;
4110 Op<1>() = RHS;
4111 setPredicate((Predicate)predicate);
4112 setName(Name);
4113}
4114
4115CmpInst *
4117 const Twine &Name, Instruction *InsertBefore) {
4118 if (Op == Instruction::ICmp) {
4119 if (InsertBefore)
4120 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4121 S1, S2, Name);
4122 else
4123 return new ICmpInst(CmpInst::Predicate(predicate),
4124 S1, S2, Name);
4125 }
4126
4127 if (InsertBefore)
4128 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4129 S1, S2, Name);
4130 else
4131 return new FCmpInst(CmpInst::Predicate(predicate),
4132 S1, S2, Name);
4133}
4134
4135CmpInst *
4137 const Twine &Name, BasicBlock *InsertAtEnd) {
4138 if (Op == Instruction::ICmp) {
4139 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
4140 S1, S2, Name);
4141 }
4142 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
4143 S1, S2, Name);
4144}
4145
4147 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
4148 IC->swapOperands();
4149 else
4150 cast<FCmpInst>(this)->swapOperands();
4151}
4152
4154 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
4155 return IC->isCommutative();
4156 return cast<FCmpInst>(this)->isCommutative();
4157}
4158
4161 return ICmpInst::isEquality(P);
4163 return FCmpInst::isEquality(P);
4164 llvm_unreachable("Unsupported predicate kind");
4165}
4166
4168 switch (pred) {
4169 default: llvm_unreachable("Unknown cmp predicate!");
4170 case ICMP_EQ: return ICMP_NE;
4171 case ICMP_NE: return ICMP_EQ;
4172 case ICMP_UGT: return ICMP_ULE;
4173 case ICMP_ULT: return ICMP_UGE;
4174 case ICMP_UGE: return ICMP_ULT;
4175 case ICMP_ULE: return ICMP_UGT;
4176 case ICMP_SGT: return ICMP_SLE;
4177 case ICMP_SLT: return ICMP_SGE;
4178 case ICMP_SGE: return ICMP_SLT;
4179 case ICMP_SLE: return ICMP_SGT;
4180
4181 case FCMP_OEQ: return FCMP_UNE;
4182 case FCMP_ONE: return FCMP_UEQ;
4183 case FCMP_OGT: return FCMP_ULE;
4184 case FCMP_OLT: return FCMP_UGE;
4185 case FCMP_OGE: return FCMP_ULT;
4186 case FCMP_OLE: return FCMP_UGT;
4187 case FCMP_UEQ: return FCMP_ONE;
4188 case FCMP_UNE: return FCMP_OEQ;
4189 case FCMP_UGT: return FCMP_OLE;
4190 case FCMP_ULT: return FCMP_OGE;
4191 case FCMP_UGE: return FCMP_OLT;
4192 case FCMP_ULE: return FCMP_OGT;
4193 case FCMP_ORD: return FCMP_UNO;
4194 case FCMP_UNO: return FCMP_ORD;
4195 case FCMP_TRUE: return FCMP_FALSE;
4196 case FCMP_FALSE: return FCMP_TRUE;
4197 }
4198}
4199
4201 switch (Pred) {
4202 default: return "unknown";
4203 case FCmpInst::FCMP_FALSE: return "false";
4204 case FCmpInst::FCMP_OEQ: return "oeq";
4205 case FCmpInst::FCMP_OGT: return "ogt";
4206 case FCmpInst::FCMP_OGE: return "oge";
4207 case FCmpInst::FCMP_OLT: return "olt";
4208 case FCmpInst::FCMP_OLE: return "ole";
4209 case FCmpInst::FCMP_ONE: return "one";
4210 case FCmpInst::FCMP_ORD: return "ord";
4211 case FCmpInst::FCMP_UNO: return "uno";
4212 case FCmpInst::FCMP_UEQ: return "ueq";
4213 case FCmpInst::FCMP_UGT: return "ugt";
4214 case FCmpInst::FCMP_UGE: return "uge";
4215 case FCmpInst::FCMP_ULT: return "ult";
4216 case FCmpInst::FCMP_ULE: return "ule";
4217 case FCmpInst::FCMP_UNE: return "une";
4218 case FCmpInst::FCMP_TRUE: return "true";
4219 case ICmpInst::ICMP_EQ: return "eq";
4220 case ICmpInst::ICMP_NE: return "ne";
4221 case ICmpInst::ICMP_SGT: return "sgt";
4222 case ICmpInst::ICMP_SGE: return "sge";
4223 case ICmpInst::ICMP_SLT: return "slt";
4224 case ICmpInst::ICMP_SLE: return "sle";
4225 case ICmpInst::ICMP_UGT: return "ugt";
4226 case ICmpInst::ICMP_UGE: return "uge";
4227 case ICmpInst::ICMP_ULT: return "ult";
4228 case ICmpInst::ICMP_ULE: return "ule";
4229 }
4230}
4231
4234 return OS;
4235}
4236
4238 switch (pred) {
4239 default: llvm_unreachable("Unknown icmp predicate!");
4240 case ICMP_EQ: case ICMP_NE:
4241 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
4242 return pred;
4243 case ICMP_UGT: return ICMP_SGT;
4244 case ICMP_ULT: return ICMP_SLT;
4245 case ICMP_UGE: return ICMP_SGE;
4246 case ICMP_ULE: return ICMP_SLE;
4247 }
4248}
4249
4251 switch (pred) {
4252 default: llvm_unreachable("Unknown icmp predicate!");
4253 case ICMP_EQ: case ICMP_NE:
4254 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
4255 return pred;
4256 case ICMP_SGT: return ICMP_UGT;
4257 case ICMP_SLT: return ICMP_ULT;
4258 case ICMP_SGE: return ICMP_UGE;
4259 case ICMP_SLE: return ICMP_ULE;
4260 }
4261}
4262
4264 switch (pred) {
4265 default: llvm_unreachable("Unknown cmp predicate!");
4266 case ICMP_EQ: case ICMP_NE:
4267 return pred;
4268 case ICMP_SGT: return ICMP_SLT;
4269 case ICMP_SLT: return ICMP_SGT;
4270 case ICMP_SGE: return ICMP_SLE;
4271 case ICMP_SLE: return ICMP_SGE;
4272 case ICMP_UGT: return ICMP_ULT;
4273 case ICMP_ULT: return ICMP_UGT;
4274 case ICMP_UGE: return ICMP_ULE;
4275 case ICMP_ULE: return ICMP_UGE;
4276
4277 case FCMP_FALSE: case FCMP_TRUE:
4278 case FCMP_OEQ: case FCMP_ONE:
4279 case FCMP_UEQ: case FCMP_UNE:
4280 case FCMP_ORD: case FCMP_UNO:
4281 return pred;
4282 case FCMP_OGT: return FCMP_OLT;
4283 case FCMP_OLT: return FCMP_OGT;
4284 case FCMP_OGE: return FCMP_OLE;
4285 case FCMP_OLE: return FCMP_OGE;
4286 case FCMP_UGT: return FCMP_ULT;
4287 case FCMP_ULT: return FCMP_UGT;
4288 case FCMP_UGE: return FCMP_ULE;
4289 case FCMP_ULE: return FCMP_UGE;
4290 }
4291}
4292
4294 switch (pred) {
4295 case ICMP_SGE:
4296 case ICMP_SLE:
4297 case ICMP_UGE:
4298 case ICMP_ULE:
4299 case FCMP_OGE:
4300 case FCMP_OLE:
4301 case FCMP_UGE:
4302 case FCMP_ULE:
4303 return true;
4304 default:
4305 return false;
4306 }
4307}
4308
4310 switch (pred) {
4311 case ICMP_SGT:
4312 case ICMP_SLT:
4313 case ICMP_UGT:
4314 case ICMP_ULT:
4315 case FCMP_OGT:
4316 case FCMP_OLT:
4317 case FCMP_UGT:
4318 case FCMP_ULT:
4319 return true;
4320 default:
4321 return false;
4322 }
4323}
4324
4326 switch (pred) {
4327 case ICMP_SGE:
4328 return ICMP_SGT;
4329 case ICMP_SLE:
4330 return ICMP_SLT;
4331 case ICMP_UGE:
4332 return ICMP_UGT;
4333 case ICMP_ULE:
4334 return ICMP_ULT;
4335 case FCMP_OGE:
4336 return FCMP_OGT;
4337 case FCMP_OLE:
4338 return FCMP_OLT;
4339 case FCMP_UGE:
4340 return FCMP_UGT;
4341 case FCMP_ULE:
4342 return FCMP_ULT;
4343 default:
4344 return pred;
4345 }
4346}
4347
4349 switch (pred) {
4350 case ICMP_SGT:
4351 return ICMP_SGE;
4352 case ICMP_SLT:
4353 return ICMP_SLE;
4354 case ICMP_UGT:
4355 return ICMP_UGE;
4356 case ICMP_ULT:
4357 return ICMP_ULE;
4358 case FCMP_OGT:
4359 return FCMP_OGE;
4360 case FCMP_OLT:
4361 return FCMP_OLE;
4362 case FCMP_UGT:
4363 return FCMP_UGE;
4364 case FCMP_ULT:
4365 return FCMP_ULE;
4366 default:
4367 return pred;
4368 }
4369}
4370
4372 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
4373
4377 return getStrictPredicate(pred);
4378
4379 llvm_unreachable("Unknown predicate!");
4380}
4381
4383 assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
4384
4385 switch (pred) {
4386 default:
4387 llvm_unreachable("Unknown predicate!");
4388 case CmpInst::ICMP_ULT:
4389 return CmpInst::ICMP_SLT;
4390 case CmpInst::ICMP_ULE:
4391 return CmpInst::ICMP_SLE;
4392 case CmpInst::ICMP_UGT:
4393 return CmpInst::ICMP_SGT;
4394 case CmpInst::ICMP_UGE:
4395 return CmpInst::ICMP_SGE;
4396 }
4397}
4398
4400 assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
4401
4402 switch (pred) {
4403 default:
4404 llvm_unreachable("Unknown predicate!");
4405 case CmpInst::ICMP_SLT:
4406 return CmpInst::ICMP_ULT;
4407 case CmpInst::ICMP_SLE:
4408 return CmpInst::ICMP_ULE;
4409 case CmpInst::ICMP_SGT:
4410 return CmpInst::ICMP_UGT;
4411 case CmpInst::ICMP_SGE:
4412 return CmpInst::ICMP_UGE;
4413 }
4414}
4415
4417 switch (predicate) {
4418 default: return false;
4420 case ICmpInst::ICMP_UGE: return true;
4421 }
4422}
4423
4425 switch (predicate) {