//===- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;

static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

std::optional<TypeSize>
AllocaInst::getAllocationSize(const DataLayout &DL) const {
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  if (isArrayAllocation()) {
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return std::nullopt;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    Size *= C->getZExtValue();
  }
  return Size;
}

std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  std::optional<TypeSize> Size = getAllocationSize(DL);
  if (Size)
    return *Size * 8;
  return std::nullopt;
}
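
// Editorial example: how a client might use getAllocationSize(); `AI` and
// `FrameBytes` are placeholders for an AllocaInst* and an accumulator. A
// non-constant array size yields std::nullopt.
//
//   const DataLayout &DL = AI->getModule()->getDataLayout();
//   if (std::optional<TypeSize> Bytes = AI->getAllocationSize(DL))
//     if (!Bytes->isScalable())
//       FrameBytes += Bytes->getFixedValue();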

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
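
// Editorial example: validating operands before building a select; `Cond`,
// `X`, `Y`, and `InsertPt` are placeholders supplied by the caller.
//
//   if (const char *Reason = SelectInst::areInvalidOperands(Cond, X, Y))
//     report_fatal_error(Twine("cannot build select: ") + Reason);
//   Value *Sel = SelectInst::Create(Cond, X, Y, "sel", InsertPt);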

//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  copyIncomingBlocks(make_range(block_begin() + Idx + 1, block_end()), Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation, growing the number of ops by 1.5 times.
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}
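
// Editorial example: a typical simplification driven by hasConstantValue();
// `PN` is a placeholder PHINode*. The caller must ensure the returned value
// dominates every user of the PHI before rewriting.
//
//   if (Value *V = PN->hasConstantValue()) {
//     PN->replaceAllUsesWith(V);
//     PN->eraseFromParent();
//   }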

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - This grows the operand list in response to a push_back style
/// of operation, doubling the number of ops when more space is needed.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           Instruction *InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
                           Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallBase::Create(CI, OpDefs, InsertPt);
}

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getRetDereferenceableBytes() > 0 &&
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}
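
// Editorial example: because paramHasAttr() already folds in operand-bundle
// effects, clients can query it directly instead of consulting the callee's
// attribute list; `CB` and `ArgNo` are placeholders.
//
//   if (CB->paramHasAttr(ArgNo, Attribute::ReadOnly))
//     ; // Neither the callee nor any operand bundle writes through this arg.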

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

template Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;

void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Otherwise fall back to a binary search that uses the fact that bundles
  /// usually have a similar number of arguments to get faster convergence.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a decimal number below, and to prevent using floating point
  /// numbers we use an integral value multiplied by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}
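
// Editorial walk-through of the interpolation step above, with made-up
// numbers: suppose 16 bundles cover the operand range [10, 74) and
// OpIdx == 42. Then ScaledOperandPerBundle = 1024 * (74 - 10) / 16 = 4096,
// and the probe lands at Begin + (42 - 10) * 1024 / 4096 = Begin + 8,
// i.e. proportionally far into the range rather than at its midpoint; the
// loop then narrows [Begin, End) around the probe as in an ordinary binary
// search.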

CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     Instruction *InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}

CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}
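
// Editorial example of the idiomatic use of these helpers; `CB` and
// `DeoptValues` are placeholders. Both helpers return a *new* instruction
// whenever they change anything, so the old one must be replaced by the
// caller.
//
//   OperandBundleDef OB("deopt", DeoptValues);
//   CallBase *NewCB = CallBase::addOperandBundle(CB, LLVMContext::OB_deopt,
//                                                OB, /*InsertPt=*/CB);
//   if (NewCB != CB) {
//     NewCB->copyMetadata(*CB);
//     CB->replaceAllUsesWith(NewCB);
//     CB->eraseFromParent();
//   }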

bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    ME &= FnME;
  }
  return ME;
}
void CallBase::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}

/// Determine if the function does not access memory.
bool CallBase::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void CallBase::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool CallBase::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
void CallBase::setOnlyReadsMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool CallBase::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void CallBase::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}

/// Determine if the call can access memory only using pointers based
/// on its arguments.
bool CallBase::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void CallBase::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
/// inaccessible from the IR.
bool CallBase::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void CallBase::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}

//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update profile weight for call instruction by scaling it using the ratio
// of S/T. The meaning of "branch_weights" metadata for call instruction is
// transferred to represent call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(
        ConstantInt::get(Type::getInt32Ty(getContext()),
                         Val.udiv(APT).getLimitedValue(UINT32_MAX))));
  } else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      uint64_t Count =
          mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
              ->getValue()
              .getZExtValue();
      // Don't scale the magic number.
      if (Count == NOMORE_ICP_MAGICNUM) {
        Vals.push_back(ProfileData->getOperand(i + 1));
        continue;
      }
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128, Count);
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}
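
// Editorial example of the scaling above: a call annotated with
// !prof !{!"branch_weights", i32 2000}, cloned into a specialization that
// receives S = 1 of the original T = 4 units of profile, ends up with
// 2000 * 1 / 4 = 500.
//
//   NewCall->updateProfWeight(/*S=*/1, /*T=*/4);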

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize; // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      MCall->insertInto(InsertAtEnd, InsertAtEnd->end());
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, std::nullopt, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, std::nullopt, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}
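
// Editorial example: emitting `malloc(sizeof(ElemTy) * N)` with the helper
// above; `Ctx`, `DL`, `ElemTy`, `N`, and `InsertPt` are placeholders.
//
//   Type *IntPtrTy = DL.getIntPtrType(Ctx);
//   Value *ElemSz = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(ElemTy));
//   Instruction *Buf = CallInst::CreateMalloc(
//       InsertPt, IntPtrTy, ElemTy, ElemSz, /*ArraySize=*/N,
//       /*MallocF=*/nullptr, "buf");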

static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, std::nullopt, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall =
      createFree(Source, std::nullopt, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
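
// Editorial example, continuing the malloc sketch above: releasing the
// buffer at a later insertion point `FreePt` (a placeholder).
//
//   CallInst::CreateFree(Buf, /*InsertBefore=*/FreePt);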

//===----------------------------------------------------------------------===//
// InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
// CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - This grows the operand list in response to a push_back style
/// of operation, doubling the number of ops when more space is needed.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeAllocaDefaultAlign(Ty, I->getParent());
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}


bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
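
// Editorial example: counting frame slots that codegen will fold into the
// prologue; `F` is a placeholder Function reference.
//
//   unsigned NumStaticSlots = 0;
//   for (const Instruction &I : F.getEntryBlock())
//     if (auto *AI = dyn_cast<AllocaInst>(&I))
//       if (AI->isStaticAlloca())
//         ++NumStaticSlots;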

//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}

static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeLoadStoreDefaultAlign(Ty, I->getParent());
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

//===----------------------------------------------------------------------===//
// StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(cast<PointerType>(getOperand(1)->getType())
             ->isOpaqueOrPointeeTypeMatches(getOperand(0)->getType()) &&
         "Ptr must be a pointer to Val type!");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}
1668 
1669 
1670 //===----------------------------------------------------------------------===//
1671 // AtomicCmpXchgInst Implementation
1672 //===----------------------------------------------------------------------===//
1673 
1674 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1675  Align Alignment, AtomicOrdering SuccessOrdering,
1676  AtomicOrdering FailureOrdering,
1677  SyncScope::ID SSID) {
1678  Op<0>() = Ptr;
1679  Op<1>() = Cmp;
1680  Op<2>() = NewVal;
1681  setSuccessOrdering(SuccessOrdering);
1682  setFailureOrdering(FailureOrdering);
1683  setSyncScopeID(SSID);
1684  setAlignment(Alignment);
1685 
1686  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1687  "All operands must be non-null!");
1688  assert(getOperand(0)->getType()->isPointerTy() &&
1689  "Ptr must have pointer type!");
1690  assert(cast<PointerType>(getOperand(0)->getType())
1691  ->isOpaqueOrPointeeTypeMatches(getOperand(1)->getType()) &&
1692  "Ptr must be a pointer to Cmp type!");
1693  assert(cast<PointerType>(getOperand(0)->getType())
1694  ->isOpaqueOrPointeeTypeMatches(getOperand(2)->getType()) &&
1695  "Ptr must be a pointer to NewVal type!");
1696  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1697  "Cmp type and NewVal type must be same!");
1698 }
1699 
1700 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1701  Align Alignment,
1702  AtomicOrdering SuccessOrdering,
1703  AtomicOrdering FailureOrdering,
1704  SyncScope::ID SSID,
1705  Instruction *InsertBefore)
1706  : Instruction(
1707  StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1708  AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1709  OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1710  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1711 }
1712 
1713 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1714  Align Alignment,
1715  AtomicOrdering SuccessOrdering,
1716  AtomicOrdering FailureOrdering,
1717  SyncScope::ID SSID,
1718  BasicBlock *InsertAtEnd)
1719  : Instruction(
1720  StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1721  AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1722  OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
1723  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1724 }
1725 
1726 //===----------------------------------------------------------------------===//
1727 // AtomicRMWInst Implementation
1728 //===----------------------------------------------------------------------===//
1729 
1730 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1731  Align Alignment, AtomicOrdering Ordering,
1732  SyncScope::ID SSID) {
1733  assert(Ordering != AtomicOrdering::NotAtomic &&
1734  "atomicrmw instructions can only be atomic.");
1735  assert(Ordering != AtomicOrdering::Unordered &&
1736  "atomicrmw instructions cannot be unordered.");
1737  Op<0>() = Ptr;
1738  Op<1>() = Val;
1739  setOperation(Operation);
1740  setOrdering(Ordering);
1741  setSyncScopeID(SSID);
1742  setAlignment(Alignment);
1743 
1744  assert(getOperand(0) && getOperand(1) &&
1745  "All operands must be non-null!");
1746  assert(getOperand(0)->getType()->isPointerTy() &&
1747  "Ptr must have pointer type!");
1748  assert(cast<PointerType>(getOperand(0)->getType())
1749  ->isOpaqueOrPointeeTypeMatches(getOperand(1)->getType()) &&
1750  "Ptr must be a pointer to Val type!");
1751  assert(Ordering != AtomicOrdering::NotAtomic &&
1752  "AtomicRMW instructions must be atomic!");
1753 }
1754 
1755 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1756  Align Alignment, AtomicOrdering Ordering,
1757  SyncScope::ID SSID, Instruction *InsertBefore)
1758  : Instruction(Val->getType(), AtomicRMW,
1759  OperandTraits<AtomicRMWInst>::op_begin(this),
1760  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1761  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1762 }
1763 
1764 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1765  Align Alignment, AtomicOrdering Ordering,
1766  SyncScope::ID SSID, BasicBlock *InsertAtEnd)
1767  : Instruction(Val->getType(), AtomicRMW,
1768  OperandTraits<AtomicRMWInst>::op_begin(this),
1769  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
1770  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1771 }
1772 
1773 StringRef AtomicRMWInst::getOperationName(BinOp Op) {
1774  switch (Op) {
1775  case AtomicRMWInst::Xchg:
1776  return "xchg";
1777  case AtomicRMWInst::Add:
1778  return "add";
1779  case AtomicRMWInst::Sub:
1780  return "sub";
1781  case AtomicRMWInst::And:
1782  return "and";
1783  case AtomicRMWInst::Nand:
1784  return "nand";
1785  case AtomicRMWInst::Or:
1786  return "or";
1787  case AtomicRMWInst::Xor:
1788  return "xor";
1789  case AtomicRMWInst::Max:
1790  return "max";
1791  case AtomicRMWInst::Min:
1792  return "min";
1793  case AtomicRMWInst::UMax:
1794  return "umax";
1795  case AtomicRMWInst::UMin:
1796  return "umin";
1797  case AtomicRMWInst::FAdd:
1798  return "fadd";
1799  case AtomicRMWInst::FSub:
1800  return "fsub";
1801  case AtomicRMWInst::FMax:
1802  return "fmax";
1803  case AtomicRMWInst::FMin:
1804  return "fmin";
1805  case AtomicRMWInst::UIncWrap:
1806  return "uinc_wrap";
1807  case AtomicRMWInst::UDecWrap:
1808  return "udec_wrap";
1809  case AtomicRMWInst::BAD_BINOP:
1810  return "<invalid operation>";
1811  }
1812 
1813  llvm_unreachable("invalid atomicrmw operation");
1814 }
1815 
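// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// Builds an "atomicrmw add" through the constructor above and checks its
// printed mnemonic via getOperationName(). Ptr, Val, and InsertBefore are
// assumed; the alignment must match the value type for a well-formed RMW.
static AtomicRMWInst *exampleAtomicAdd(Value *Ptr, Value *Val,
                                       Instruction *InsertBefore) {
  AtomicRMWInst *RMW =
      new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val, Align(4),
                        AtomicOrdering::SequentiallyConsistent,
                        SyncScope::System, InsertBefore);
  assert(AtomicRMWInst::getOperationName(RMW->getOperation()) == "add");
  return RMW;
}
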
1816 //===----------------------------------------------------------------------===//
1817 // FenceInst Implementation
1818 //===----------------------------------------------------------------------===//
1819 
1820 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1821  SyncScope::ID SSID,
1822  Instruction *InsertBefore)
1823  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1824  setOrdering(Ordering);
1825  setSyncScopeID(SSID);
1826 }
1827 
1828 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1829  SyncScope::ID SSID,
1830  BasicBlock *InsertAtEnd)
1831  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1832  setOrdering(Ordering);
1833  setSyncScopeID(SSID);
1834 }
1835 
1836 //===----------------------------------------------------------------------===//
1837 // GetElementPtrInst Implementation
1838 //===----------------------------------------------------------------------===//
1839 
1840 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1841  const Twine &Name) {
1842  assert(getNumOperands() == 1 + IdxList.size() &&
1843  "NumOperands not initialized?");
1844  Op<0>() = Ptr;
1845  llvm::copy(IdxList, op_begin() + 1);
1846  setName(Name);
1847 }
1848 
1849 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1850  : Instruction(GEPI.getType(), GetElementPtr,
1851  OperandTraits<GetElementPtrInst>::op_end(this) -
1852  GEPI.getNumOperands(),
1853  GEPI.getNumOperands()),
1854  SourceElementType(GEPI.SourceElementType),
1855  ResultElementType(GEPI.ResultElementType) {
1856  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1857  SubclassOptionalData = GEPI.SubclassOptionalData;
1858 }
1859 
1860 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
1861  if (auto *Struct = dyn_cast<StructType>(Ty)) {
1862  if (!Struct->indexValid(Idx))
1863  return nullptr;
1864  return Struct->getTypeAtIndex(Idx);
1865  }
1866  if (!Idx->getType()->isIntOrIntVectorTy())
1867  return nullptr;
1868  if (auto *Array = dyn_cast<ArrayType>(Ty))
1869  return Array->getElementType();
1870  if (auto *Vector = dyn_cast<VectorType>(Ty))
1871  return Vector->getElementType();
1872  return nullptr;
1873 }
1874 
1875 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
1876  if (auto *Struct = dyn_cast<StructType>(Ty)) {
1877  if (Idx >= Struct->getNumElements())
1878  return nullptr;
1879  return Struct->getElementType(Idx);
1880  }
1881  if (auto *Array = dyn_cast<ArrayType>(Ty))
1882  return Array->getElementType();
1883  if (auto *Vector = dyn_cast<VectorType>(Ty))
1884  return Vector->getElementType();
1885  return nullptr;
1886 }
1887 
1888 template <typename IndexTy>
1889 static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
1890  if (IdxList.empty())
1891  return Ty;
1892  for (IndexTy V : IdxList.slice(1)) {
1893  Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
1894  if (!Ty)
1895  return Ty;
1896  }
1897  return Ty;
1898 }
1899 
1900 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
1901  return getIndexedTypeInternal(Ty, IdxList);
1902 }
1903 
1904 Type *GetElementPtrInst::getIndexedType(Type *Ty,
1905  ArrayRef<Constant *> IdxList) {
1906  return getIndexedTypeInternal(Ty, IdxList);
1907 }
1908 
1909 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
1910  return getIndexedTypeInternal(Ty, IdxList);
1911 }
1912 
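// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// Demonstrates getIndexedType() on { i32, [4 x float] }. As with a real GEP,
// the first index steps through the pointer and is skipped by the type walk,
// so the index list {0, 1, 2} resolves to the float element type.
static void exampleIndexedType(LLVMContext &Ctx) {
  Type *FloatTy = Type::getFloatTy(Ctx);
  StructType *ST = StructType::get(Type::getInt32Ty(Ctx),
                                   ArrayType::get(FloatTy, 4));
  uint64_t Idxs[] = {0, 1, 2};
  assert(GetElementPtrInst::getIndexedType(ST, Idxs) == FloatTy);
}
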
1913 /// hasAllZeroIndices - Return true if all of the indices of this GEP are
1914 /// zeros. If so, the result pointer and the first operand have the same
1915 /// value, just potentially different types.
1916 bool GetElementPtrInst::hasAllZeroIndices() const {
1917  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1918  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1919  if (!CI->isZero()) return false;
1920  } else {
1921  return false;
1922  }
1923  }
1924  return true;
1925 }
1926 
1927 /// hasAllConstantIndices - Return true if all of the indices of this GEP are
1928 /// constant integers. If so, the result pointer and the first operand have
1929 /// a constant offset between them.
1930 bool GetElementPtrInst::hasAllConstantIndices() const {
1931  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1932  if (!isa<ConstantInt>(getOperand(i)))
1933  return false;
1934  }
1935  return true;
1936 }
1937 
1938 void GetElementPtrInst::setIsInBounds(bool B) {
1939  cast<GEPOperator>(this)->setIsInBounds(B);
1940 }
1941 
1942 bool GetElementPtrInst::isInBounds() const {
1943  return cast<GEPOperator>(this)->isInBounds();
1944 }
1945 
1946 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
1947  APInt &Offset) const {
1948  // Delegate to the generic GEPOperator implementation.
1949  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1950 }
1951 
1952 bool GetElementPtrInst::collectOffset(
1953  const DataLayout &DL, unsigned BitWidth,
1954  MapVector<Value *, APInt> &VariableOffsets,
1955  APInt &ConstantOffset) const {
1956  // Delegate to the generic GEPOperator implementation.
1957  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1958  ConstantOffset);
1959 }
1960 
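// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// Folds an all-constant GEP into a byte offset via the delegation above. GEP
// and DL are assumed to exist; the APInt must be sized to the index width of
// the GEP's pointer type or accumulateConstantOffset() will assert.
static std::optional<int64_t> exampleGEPByteOffset(const GetElementPtrInst *GEP,
                                                   const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
  if (GEP->accumulateConstantOffset(DL, Offset))
    return Offset.getSExtValue();
  return std::nullopt; // Some index was not a constant integer.
}
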
1961 //===----------------------------------------------------------------------===//
1962 // ExtractElementInst Implementation
1963 //===----------------------------------------------------------------------===//
1964 
1965 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1966  const Twine &Name,
1967  Instruction *InsertBef)
1968  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1969  ExtractElement,
1970  OperandTraits<ExtractElementInst>::op_begin(this),
1971  2, InsertBef) {
1972  assert(isValidOperands(Val, Index) &&
1973  "Invalid extractelement instruction operands!");
1974  Op<0>() = Val;
1975  Op<1>() = Index;
1976  setName(Name);
1977 }
1978 
1979 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1980  const Twine &Name,
1981  BasicBlock *InsertAE)
1982  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1983  ExtractElement,
1984  OperandTraits<ExtractElementInst>::op_begin(this),
1985  2, InsertAE) {
1986  assert(isValidOperands(Val, Index) &&
1987  "Invalid extractelement instruction operands!");
1988 
1989  Op<0>() = Val;
1990  Op<1>() = Index;
1991  setName(Name);
1992 }
1993 
1994 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1995  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1996  return false;
1997  return true;
1998 }
1999 
2000 //===----------------------------------------------------------------------===//
2001 // InsertElementInst Implementation
2002 //===----------------------------------------------------------------------===//
2003 
2004 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
2005  const Twine &Name,
2006  Instruction *InsertBef)
2007  : Instruction(Vec->getType(), InsertElement,
2008  OperandTraits<InsertElementInst>::op_begin(this),
2009  3, InsertBef) {
2010  assert(isValidOperands(Vec, Elt, Index) &&
2011  "Invalid insertelement instruction operands!");
2012  Op<0>() = Vec;
2013  Op<1>() = Elt;
2014  Op<2>() = Index;
2015  setName(Name);
2016 }
2017 
2018 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
2019  const Twine &Name,
2020  BasicBlock *InsertAE)
2021  : Instruction(Vec->getType(), InsertElement,
2022  OperandTraits<InsertElementInst>::op_begin(this),
2023  3, InsertAE) {
2024  assert(isValidOperands(Vec, Elt, Index) &&
2025  "Invalid insertelement instruction operands!");
2026 
2027  Op<0>() = Vec;
2028  Op<1>() = Elt;
2029  Op<2>() = Index;
2030  setName(Name);
2031 }
2032 
2033 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
2034  const Value *Index) {
2035  if (!Vec->getType()->isVectorTy())
2036  return false; // First operand of insertelement must be vector type.
2037 
2038  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
2039  return false;// Second operand of insertelement must be vector element type.
2040 
2041  if (!Index->getType()->isIntegerTy())
2042  return false; // Third operand of insertelement must be i32.
2043  return true;
2044 }
2045 
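// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// The operand rules checked above, on concrete values. The three parameters
// are assumed to be a <4 x i32> vector, an i32 scalar, and an integer index;
// any integer width is accepted for the index, but the inserted scalar must
// match the vector's element type exactly.
static void exampleElementOperandRules(Value *Vec4xI32, Value *I32Scalar,
                                       Value *I64Idx) {
  assert(ExtractElementInst::isValidOperands(Vec4xI32, I64Idx));
  assert(InsertElementInst::isValidOperands(Vec4xI32, I32Scalar, I64Idx));
}
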
2046 //===----------------------------------------------------------------------===//
2047 // ShuffleVectorInst Implementation
2048 //===----------------------------------------------------------------------===//
2049 
2050 static Value *createPlaceholderForShuffleVector(Value *V) {
2051  assert(V && "Cannot create placeholder of nullptr V");
2052  return PoisonValue::get(V->getType());
2053 }
2054 
2055 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
2056  Instruction *InsertBefore)
2057  : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2058  InsertBefore) {}
2059 
2060 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
2061  BasicBlock *InsertAtEnd)
2062  : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2063  InsertAtEnd) {}
2064 
2065 ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
2066  const Twine &Name,
2067  Instruction *InsertBefore)
2068  : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2069  InsertBefore) {}
2070 
2071 ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
2072  const Twine &Name, BasicBlock *InsertAtEnd)
2073  : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2074  InsertAtEnd) {}
2075 
2076 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2077  const Twine &Name,
2078  Instruction *InsertBefore)
2079  : Instruction(
2080  VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2081  cast<VectorType>(Mask->getType())->getElementCount()),
2082  ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2083  OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2084  assert(isValidOperands(V1, V2, Mask) &&
2085  "Invalid shuffle vector instruction operands!");
2086 
2087  Op<0>() = V1;
2088  Op<1>() = V2;
2089  SmallVector<int, 16> MaskArr;
2090  getShuffleMask(cast<Constant>(Mask), MaskArr);
2091  setShuffleMask(MaskArr);
2092  setName(Name);
2093 }
2094 
2095 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2096  const Twine &Name, BasicBlock *InsertAtEnd)
2097  : Instruction(
2098  VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2099  cast<VectorType>(Mask->getType())->getElementCount()),
2100  ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2101  OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2102  assert(isValidOperands(V1, V2, Mask) &&
2103  "Invalid shuffle vector instruction operands!");
2104 
2105  Op<0>() = V1;
2106  Op<1>() = V2;
2107  SmallVector<int, 16> MaskArr;
2108  getShuffleMask(cast<Constant>(Mask), MaskArr);
2109  setShuffleMask(MaskArr);
2110  setName(Name);
2111 }
2112 
2113 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2114  const Twine &Name,
2115  Instruction *InsertBefore)
2116  : Instruction(
2117  VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2118  Mask.size(), isa<ScalableVectorType>(V1->getType())),
2119  ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2120  OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2121  assert(isValidOperands(V1, V2, Mask) &&
2122  "Invalid shuffle vector instruction operands!");
2123  Op<0>() = V1;
2124  Op<1>() = V2;
2125  setShuffleMask(Mask);
2126  setName(Name);
2127 }
2128 
2129 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2130  const Twine &Name, BasicBlock *InsertAtEnd)
2131  : Instruction(
2132  VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2133  Mask.size(), isa<ScalableVectorType>(V1->getType())),
2134  ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2135  OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2136  assert(isValidOperands(V1, V2, Mask) &&
2137  "Invalid shuffle vector instruction operands!");
2138 
2139  Op<0>() = V1;
2140  Op<1>() = V2;
2141  setShuffleMask(Mask);
2142  setName(Name);
2143 }
2144 
2145 void ShuffleVectorInst::commute() {
2146  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2147  int NumMaskElts = ShuffleMask.size();
2148  SmallVector<int, 16> NewMask(NumMaskElts);
2149  for (int i = 0; i != NumMaskElts; ++i) {
2150  int MaskElt = getMaskValue(i);
2151  if (MaskElt == UndefMaskElem) {
2152  NewMask[i] = UndefMaskElem;
2153  continue;
2154  }
2155  assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
2156  MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
2157  NewMask[i] = MaskElt;
2158  }
2159  setShuffleMask(NewMask);
2160  Op<0>().swap(Op<1>());
2161 }
2162 
2163 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
2164  ArrayRef<int> Mask) {
2165  // V1 and V2 must be vectors of the same type.
2166  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
2167  return false;
2168 
2169  // Make sure the mask elements make sense.
2170  int V1Size =
2171  cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
2172  for (int Elem : Mask)
2173  if (Elem != UndefMaskElem && Elem >= V1Size * 2)
2174  return false;
2175 
2176  if (isa<ScalableVectorType>(V1->getType()))
2177  if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !all_equal(Mask))
2178  return false;
2179 
2180  return true;
2181 }
2182 
2183 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
2184  const Value *Mask) {
2185  // V1 and V2 must be vectors of the same type.
2186  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
2187  return false;
2188 
2189  // Mask must be vector of i32, and must be the same kind of vector as the
2190  // input vectors
2191  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
2192  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
2193  isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
2194  return false;
2195 
2196  // Check to see if Mask is valid.
2197  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
2198  return true;
2199 
2200  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
2201  unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2202  for (Value *Op : MV->operands()) {
2203  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
2204  if (CI->uge(V1Size*2))
2205  return false;
2206  } else if (!isa<UndefValue>(Op)) {
2207  return false;
2208  }
2209  }
2210  return true;
2211  }
2212 
2213  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2214  unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2215  for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
2216  i != e; ++i)
2217  if (CDS->getElementAsInteger(i) >= V1Size*2)
2218  return false;
2219  return true;
2220  }
2221 
2222  return false;
2223 }
2224 
2225 void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
2226  SmallVectorImpl<int> &Result) {
2227  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
2228 
2229  if (isa<ConstantAggregateZero>(Mask)) {
2230  Result.resize(EC.getKnownMinValue(), 0);
2231  return;
2232  }
2233 
2234  Result.reserve(EC.getKnownMinValue());
2235 
2236  if (EC.isScalable()) {
2237  assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
2238  "Scalable vector shuffle mask must be undef or zeroinitializer");
2239  int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
2240  for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
2241  Result.emplace_back(MaskVal);
2242  return;
2243  }
2244 
2245  unsigned NumElts = EC.getKnownMinValue();
2246 
2247  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2248  for (unsigned i = 0; i != NumElts; ++i)
2249  Result.push_back(CDS->getElementAsInteger(i));
2250  return;
2251  }
2252  for (unsigned i = 0; i != NumElts; ++i) {
2253  Constant *C = Mask->getAggregateElement(i);
2254  Result.push_back(isa<UndefValue>(C) ? -1 :
2255  cast<ConstantInt>(C)->getZExtValue());
2256  }
2257 }
2258 
2259 void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
2260  ShuffleMask.assign(Mask.begin(), Mask.end());
2261  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
2262 }
2263 
2264 Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2265  Type *ResultTy) {
2266  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
2267  if (isa<ScalableVectorType>(ResultTy)) {
2268  assert(all_equal(Mask) && "Unexpected shuffle");
2269  Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
2270  if (Mask[0] == 0)
2271  return Constant::getNullValue(VecTy);
2272  return UndefValue::get(VecTy);
2273  }
2274  SmallVector<Constant *, 16> MaskConst;
2275  for (int Elem : Mask) {
2276  if (Elem == UndefMaskElem)
2277  MaskConst.push_back(UndefValue::get(Int32Ty));
2278  else
2279  MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
2280  }
2281  return ConstantVector::get(MaskConst);
2282 }
2283 
2284 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2285  assert(!Mask.empty() && "Shuffle mask must contain elements");
2286  bool UsesLHS = false;
2287  bool UsesRHS = false;
2288  for (int I : Mask) {
2289  if (I == -1)
2290  continue;
2291  assert(I >= 0 && I < (NumOpElts * 2) &&
2292  "Out-of-bounds shuffle mask element");
2293  UsesLHS |= (I < NumOpElts);
2294  UsesRHS |= (I >= NumOpElts);
2295  if (UsesLHS && UsesRHS)
2296  return false;
2297  }
2298  // Allow for degenerate case: completely undef mask means neither source is used.
2299  return UsesLHS || UsesRHS;
2300 }
2301 
2302 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
2303  // We don't have vector operand size information, so assume operands are the
2304  // same size as the mask.
2305  return isSingleSourceMaskImpl(Mask, Mask.size());
2306 }
2307 
2308 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2309  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2310  return false;
2311  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2312  if (Mask[i] == -1)
2313  continue;
2314  if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2315  return false;
2316  }
2317  return true;
2318 }
2319 
2320 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
2321  // We don't have vector operand size information, so assume operands are the
2322  // same size as the mask.
2323  return isIdentityMaskImpl(Mask, Mask.size());
2324 }
2325 
2326 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
2327  if (!isSingleSourceMask(Mask))
2328  return false;
2329 
2330  // The number of elements in the mask must be at least 2.
2331  int NumElts = Mask.size();
2332  if (NumElts < 2)
2333  return false;
2334 
2335  for (int i = 0; i < NumElts; ++i) {
2336  if (Mask[i] == -1)
2337  continue;
2338  if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
2339  return false;
2340  }
2341  return true;
2342 }
2343 
2344 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) {
2345  if (!isSingleSourceMask(Mask))
2346  return false;
2347  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
2348  if (Mask[i] == -1)
2349  continue;
2350  if (Mask[i] != 0 && Mask[i] != NumElts)
2351  return false;
2352  }
2353  return true;
2354 }
2355 
2356 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) {
2357  // Select is differentiated from identity. It requires using both sources.
2358  if (isSingleSourceMask(Mask))
2359  return false;
2360  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
2361  if (Mask[i] == -1)
2362  continue;
2363  if (Mask[i] != i && Mask[i] != (NumElts + i))
2364  return false;
2365  }
2366  return true;
2367 }
2368 
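// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// The mask predicates above on small concrete masks (operands are assumed to
// be 4 elements wide, the same size as the mask; -1 marks an undef lane).
static void exampleMaskPredicates() {
  assert(ShuffleVectorInst::isIdentityMask({0, 1, 2, 3}));
  assert(ShuffleVectorInst::isReverseMask({3, 2, 1, 0}));
  assert(ShuffleVectorInst::isZeroEltSplatMask({0, 0, 0, 0}));
  // Lane-wise select: lanes 0 and 3 come from the first operand, lanes 1
  // and 2 (mask values 4 + i) from the second.
  assert(ShuffleVectorInst::isSelectMask({0, 5, 6, 3}));
}
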
2369 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
2370  // Example masks that will return true:
2371  // v1 = <a, b, c, d>
2372  // v2 = <e, f, g, h>
2373  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2374  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2375 
2376  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2377  int NumElts = Mask.size();
2378  if (NumElts < 2 || !isPowerOf2_32(NumElts))
2379  return false;
2380 
2381  // 2. The first element of the mask must be either a 0 or a 1.
2382  if (Mask[0] != 0 && Mask[0] != 1)
2383  return false;
2384 
2385  // 3. The difference between the first 2 elements must be equal to the
2386  // number of elements in the mask.
2387  if ((Mask[1] - Mask[0]) != NumElts)
2388  return false;
2389 
2390  // 4. The difference between consecutive even-numbered and odd-numbered
2391  // elements must be equal to 2.
2392  for (int i = 2; i < NumElts; ++i) {
2393  int MaskEltVal = Mask[i];
2394  if (MaskEltVal == -1)
2395  return false;
2396  int MaskEltPrevVal = Mask[i - 2];
2397  if (MaskEltVal - MaskEltPrevVal != 2)
2398  return false;
2399  }
2400  return true;
2401 }
2402 
2403 bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int &Index) {
2404  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2405  int StartIndex = -1;
2406  for (int I = 0, E = Mask.size(); I != E; ++I) {
2407  int MaskEltVal = Mask[I];
2408  if (MaskEltVal == -1)
2409  continue;
2410 
2411  if (StartIndex == -1) {
2412  // Don't support a StartIndex that begins in the second input, or if the
2413  // first non-undef index would access below the StartIndex.
2414  if (MaskEltVal < I || E <= (MaskEltVal - I))
2415  return false;
2416 
2417  StartIndex = MaskEltVal - I;
2418  continue;
2419  }
2420 
2421  // Splice is sequential starting from StartIndex.
2422  if (MaskEltVal != (StartIndex + I))
2423  return false;
2424  }
2425 
2426  if (StartIndex == -1)
2427  return false;
2428 
2429  // NOTE: This accepts StartIndex == 0 (COPY).
2430  Index = StartIndex;
2431  return true;
2432 }
2433 
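// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// A splice mask reads a contiguous window out of the concatenation of the
// two operands. For 4-element operands, <1,2,3,4> is a splice starting at
// index 1, matching the example in the comment above.
static void exampleSpliceMask() {
  int Index = 0;
  assert(ShuffleVectorInst::isSpliceMask({1, 2, 3, 4}, Index) && Index == 1);
  (void)Index;
}
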
2434 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
2435  int NumSrcElts, int &Index) {
2436  // Must extract from a single source.
2437  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2438  return false;
2439 
2440  // Must be smaller (else this is an Identity shuffle).
2441  if (NumSrcElts <= (int)Mask.size())
2442  return false;
2443 
2444  // Find start of extraction, accounting that we may start with an UNDEF.
2445  int SubIndex = -1;
2446  for (int i = 0, e = Mask.size(); i != e; ++i) {
2447  int M = Mask[i];
2448  if (M < 0)
2449  continue;
2450  int Offset = (M % NumSrcElts) - i;
2451  if (0 <= SubIndex && SubIndex != Offset)
2452  return false;
2453  SubIndex = Offset;
2454  }
2455 
2456  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2457  Index = SubIndex;
2458  return true;
2459  }
2460  return false;
2461 }
2462 
2463 bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
2464  int NumSrcElts, int &NumSubElts,
2465  int &Index) {
2466  int NumMaskElts = Mask.size();
2467 
2468  // Don't try to match if we're shuffling to a smaller size.
2469  if (NumMaskElts < NumSrcElts)
2470  return false;
2471 
2472  // TODO: We don't recognize self-insertion/widening.
2473  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2474  return false;
2475 
2476  // Determine which mask elements are attributed to which source.
2477  APInt UndefElts = APInt::getZero(NumMaskElts);
2478  APInt Src0Elts = APInt::getZero(NumMaskElts);
2479  APInt Src1Elts = APInt::getZero(NumMaskElts);
2480  bool Src0Identity = true;
2481  bool Src1Identity = true;
2482 
2483  for (int i = 0; i != NumMaskElts; ++i) {
2484  int M = Mask[i];
2485  if (M < 0) {
2486  UndefElts.setBit(i);
2487  continue;
2488  }
2489  if (M < NumSrcElts) {
2490  Src0Elts.setBit(i);
2491  Src0Identity &= (M == i);
2492  continue;
2493  }
2494  Src1Elts.setBit(i);
2495  Src1Identity &= (M == (i + NumSrcElts));
2496  }
2497  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2498  "unknown shuffle elements");
2499  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2500  "2-source shuffle not found");
2501 
2502  // Determine lo/hi span ranges.
2503  // TODO: How should we handle undefs at the start of subvector insertions?
2504  int Src0Lo = Src0Elts.countTrailingZeros();
2505  int Src1Lo = Src1Elts.countTrailingZeros();
2506  int Src0Hi = NumMaskElts - Src0Elts.countLeadingZeros();
2507  int Src1Hi = NumMaskElts - Src1Elts.countLeadingZeros();
2508 
2509  // If src0 is in place, see if the src1 elements are in place within
2510  // their own span.
2511  if (Src0Identity) {
2512  int NumSub1Elts = Src1Hi - Src1Lo;
2513  ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2514  if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2515  NumSubElts = NumSub1Elts;
2516  Index = Src1Lo;
2517  return true;
2518  }
2519  }
2520 
2521  // If src1 is in place, see if the src0 elements are in place within
2522  // their own span.
2523  if (Src1Identity) {
2524  int NumSub0Elts = Src0Hi - Src0Lo;
2525  ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2526  if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2527  NumSubElts = NumSub0Elts;
2528  Index = Src0Lo;
2529  return true;
2530  }
2531  }
2532 
2533  return false;
2534 }
2535 
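// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// An insert-subvector mask keeps most lanes of the first operand and patches
// a span with the low elements of the second. With 4-element sources,
// <0,4,5,3> keeps lanes 0 and 3 and inserts a 2-wide subvector at index 1.
static void exampleInsertSubvectorMask() {
  int NumSubElts = 0, Index = 0;
  assert(ShuffleVectorInst::isInsertSubvectorMask(
      {0, 4, 5, 3}, /*NumSrcElts=*/4, NumSubElts, Index));
  assert(NumSubElts == 2 && Index == 1);
  (void)NumSubElts;
  (void)Index;
}
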
2536 bool ShuffleVectorInst::isIdentityWithPadding() const {
2537  if (isa<UndefValue>(Op<2>()))
2538  return false;
2539 
2540  // FIXME: Not currently possible to express a shuffle mask for a scalable
2541  // vector for this case.
2542  if (isa<ScalableVectorType>(getType()))
2543  return false;
2544 
2545  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2546  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2547  if (NumMaskElts <= NumOpElts)
2548  return false;
2549 
2550  // The first part of the mask must choose elements from exactly 1 source op.
2551  ArrayRef<int> Mask = getShuffleMask();
2552  if (!isIdentityMaskImpl(Mask, NumOpElts))
2553  return false;
2554 
2555  // All extending must be with undef elements.
2556  for (int i = NumOpElts; i < NumMaskElts; ++i)
2557  if (Mask[i] != -1)
2558  return false;
2559 
2560  return true;
2561 }
2562 
2563 bool ShuffleVectorInst::isIdentityWithExtract() const {
2564  if (isa<UndefValue>(Op<2>()))
2565  return false;
2566 
2567  // FIXME: Not currently possible to express a shuffle mask for a scalable
2568  // vector for this case.
2569  if (isa<ScalableVectorType>(getType()))
2570  return false;
2571 
2572  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2573  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2574  if (NumMaskElts >= NumOpElts)
2575  return false;
2576 
2577  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2578 }
2579 
2580 bool ShuffleVectorInst::isConcat() const {
2581  // Vector concatenation is differentiated from identity with padding.
2582  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
2583  isa<UndefValue>(Op<2>()))
2584  return false;
2585 
2586  // FIXME: Not currently possible to express a shuffle mask for a scalable
2587  // vector for this case.
2588  if (isa<ScalableVectorType>(getType()))
2589  return false;
2590 
2591  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2592  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2593  if (NumMaskElts != NumOpElts * 2)
2594  return false;
2595 
2596  // Use the mask length rather than the operands' vector lengths here. We
2597  // already know that the shuffle returns a vector twice as long as the inputs,
2598  // and neither of the inputs are undef vectors. If the mask picks consecutive
2599  // elements from both inputs, then this is a concatenation of the inputs.
2600  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2601 }
2602 
2603 static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
2604  int ReplicationFactor, int VF) {
2605  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2606  "Unexpected mask size.");
2607 
2608  for (int CurrElt : seq(0, VF)) {
2609  ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2610  assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2611  "Run out of mask?");
2612  Mask = Mask.drop_front(ReplicationFactor);
2613  if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2614  return MaskElt == UndefMaskElem || MaskElt == CurrElt;
2615  }))
2616  return false;
2617  }
2618  assert(Mask.empty() && "Did not consume the whole mask?");
2619 
2620  return true;
2621 }
2622 
2623 bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
2624  int &ReplicationFactor, int &VF) {
2625  // undef-less case is trivial.
2626  if (!llvm::is_contained(Mask, UndefMaskElem)) {
2627  ReplicationFactor =
2628  Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2629  if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2630  return false;
2631  VF = Mask.size() / ReplicationFactor;
2632  return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2633  }
2634 
2635  // However, if the mask contains undefs, we have to enumerate possible tuples
2636  // and pick one. There are bounds on replication factor: [1, mask size]
2637  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2638  // Additionally, mask size is a replication factor multiplied by vector size,
2639  // which further significantly reduces the search space.
2640 
2641  // Before doing that, let's perform basic correctness checking first.
2642  int Largest = -1;
2643  for (int MaskElt : Mask) {
2644  if (MaskElt == UndefMaskElem)
2645  continue;
2646  // Elements must be in non-decreasing order.
2647  if (MaskElt < Largest)
2648  return false;
2649  Largest = std::max(Largest, MaskElt);
2650  }
2651 
2652  // Prefer larger replication factor if all else equal.
2653  for (int PossibleReplicationFactor :
2654  reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2655  if (Mask.size() % PossibleReplicationFactor != 0)
2656  continue;
2657  int PossibleVF = Mask.size() / PossibleReplicationFactor;
2658  if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2659  PossibleVF))
2660  continue;
2661  ReplicationFactor = PossibleReplicationFactor;
2662  VF = PossibleVF;
2663  return true;
2664  }
2665 
2666  return false;
2667 }
2668 
2669 bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2670  int &VF) const {
2671  // Not possible to express a shuffle mask for a scalable vector for this
2672  // case.
2673  if (isa<ScalableVectorType>(getType()))
2674  return false;
2675 
2676  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2677  if (ShuffleMask.size() % VF != 0)
2678  return false;
2679  ReplicationFactor = ShuffleMask.size() / VF;
2680 
2681  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2682 }
2683 
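// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// A replication mask repeats each source lane ReplicationFactor times in
// order: <0,0,0,1,1,1> replicates every lane of a 2-element vector 3 times.
static void exampleReplicationMask() {
  int ReplicationFactor = 0, VF = 0;
  assert(ShuffleVectorInst::isReplicationMask({0, 0, 0, 1, 1, 1},
                                              ReplicationFactor, VF));
  assert(ReplicationFactor == 3 && VF == 2);
  (void)ReplicationFactor;
  (void)VF;
}
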
2684 bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
2685  if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2686  Mask.size() % VF != 0)
2687  return false;
2688  for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2689  ArrayRef<int> SubMask = Mask.slice(K, VF);
2690  if (all_of(SubMask, [](int Idx) { return Idx == UndefMaskElem; }))
2691  continue;
2692  SmallBitVector Used(VF, false);
2693  for_each(SubMask, [&Used, VF](int Idx) {
2694  if (Idx != UndefMaskElem && Idx < VF)
2695  Used.set(Idx);
2696  });
2697  if (!Used.all())
2698  return false;
2699  }
2700  return true;
2701 }
2702 
2703 /// Return true if this shuffle mask is a one-use-single-source mask.
2704 bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
2705  // Not possible to express a shuffle mask for a scalable vector for this
2706  // case.
2707  if (isa<ScalableVectorType>(getType()))
2708  return false;
2709  if (!isSingleSourceMask(ShuffleMask))
2710  return false;
2711 
2712  return isOneUseSingleSourceMask(ShuffleMask, VF);
2713 }
2714 
2715 //===----------------------------------------------------------------------===//
2716 // InsertValueInst Class
2717 //===----------------------------------------------------------------------===//
2718 
2719 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2720  const Twine &Name) {
2721  assert(getNumOperands() == 2 && "NumOperands not initialized?");
2722 
2723  // There's no fundamental reason why we require at least one index
2724  // (other than weirdness with &*IdxBegin being invalid; see
2725  // getelementptr's init routine for example). But there's no
2726  // present need to support it.
2727  assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2728 
2729  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
2730  Val->getType() && "Inserted value must match indexed type!");
2731  Op<0>() = Agg;
2732  Op<1>() = Val;
2733 
2734  Indices.append(Idxs.begin(), Idxs.end());
2735  setName(Name);
2736 }
2737 
2738 InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2739  : Instruction(IVI.getType(), InsertValue,
2740  OperandTraits<InsertValueInst>::op_begin(this), 2),
2741  Indices(IVI.Indices) {
2742  Op<0>() = IVI.getOperand(0);
2743  Op<1>() = IVI.getOperand(1);
2744  SubclassOptionalData = IVI.SubclassOptionalData;
2745 }
2746 
2747 //===----------------------------------------------------------------------===//
2748 // ExtractValueInst Class
2749 //===----------------------------------------------------------------------===//
2750 
2751 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2752  assert(getNumOperands() == 1 && "NumOperands not initialized?");
2753 
2754  // There's no fundamental reason why we require at least one index.
2755  // But there's no present need to support it.
2756  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2757 
2758  Indices.append(Idxs.begin(), Idxs.end());
2759  setName(Name);
2760 }
2761 
2762 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2763  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
2764  Indices(EVI.Indices) {
2765  SubclassOptionalData = EVI.SubclassOptionalData;
2766 }
2767 
2768 // getIndexedType - Returns the type of the element that would be extracted
2769 // with an extractvalue instruction with the specified parameters.
2770 //
2771 // A null type is returned if the indices are invalid for the specified
2772 // aggregate type.
2773 //
2774 Type *ExtractValueInst::getIndexedType(Type *Agg,
2775  ArrayRef<unsigned> Idxs) {
2776  for (unsigned Index : Idxs) {
2777  // We can't use CompositeType::indexValid(Index) here.
2778  // indexValid() always returns true for arrays because getelementptr allows
2779  // out-of-bounds indices. Since we don't allow those for extractvalue and
2780  // insertvalue we need to check array indexing manually.
2781  // Since the only other types we can index into are struct types it's just
2782  // as easy to check those manually as well.
2783  if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2784  if (Index >= AT->getNumElements())
2785  return nullptr;
2786  Agg = AT->getElementType();
2787  } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2788  if (Index >= ST->getNumElements())
2789  return nullptr;
2790  Agg = ST->getElementType(Index);
2791  } else {
2792  // Not a valid type to index into.
2793  return nullptr;
2794  }
2795  }
2796  return const_cast<Type*>(Agg);
2797 }
2798 
2799 //===----------------------------------------------------------------------===//
2800 // UnaryOperator Class
2801 //===----------------------------------------------------------------------===//
2802 
2803 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
2804  Type *Ty, const Twine &Name,
2805  Instruction *InsertBefore)
2806  : UnaryInstruction(Ty, iType, S, InsertBefore) {
2807  Op<0>() = S;
2808  setName(Name);
2809  AssertOK();
2810 }
2811 
2812 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
2813  Type *Ty, const Twine &Name,
2814  BasicBlock *InsertAtEnd)
2815  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
2816  Op<0>() = S;
2817  setName(Name);
2818  AssertOK();
2819 }
2820 
2821 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
2822  const Twine &Name,
2823  Instruction *InsertBefore) {
2824  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2825 }
2826 
2827 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
2828  const Twine &Name,
2829  BasicBlock *InsertAtEnd) {
2830  UnaryOperator *Res = Create(Op, S, Name);
2831  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
2832  return Res;
2833 }
2834 
2835 void UnaryOperator::AssertOK() {
2836  Value *LHS = getOperand(0);
2837  (void)LHS; // Silence warnings.
2838 #ifndef NDEBUG
2839  switch (getOpcode()) {
2840  case FNeg:
2841  assert(getType() == LHS->getType() &&
2842  "Unary operation should return same type as operand!");
2843  assert(getType()->isFPOrFPVectorTy() &&
2844  "Tried to create a floating-point operation on a "
2845  "non-floating-point type!");
2846  break;
2847  default: llvm_unreachable("Invalid opcode provided");
2848  }
2849 #endif
2850 }
2851 
2852 //===----------------------------------------------------------------------===//
2853 // BinaryOperator Class
2854 //===----------------------------------------------------------------------===//
2855 
2856 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
2857  Type *Ty, const Twine &Name,
2858  Instruction *InsertBefore)
2859  : Instruction(Ty, iType,
2860  OperandTraits<BinaryOperator>::op_begin(this),
2861  OperandTraits<BinaryOperator>::operands(this),
2862  InsertBefore) {
2863  Op<0>() = S1;
2864  Op<1>() = S2;
2865  setName(Name);
2866  AssertOK();
2867 }
2868 
2869 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
2870  Type *Ty, const Twine &Name,
2871  BasicBlock *InsertAtEnd)
2872  : Instruction(Ty, iType,
2873  OperandTraits<BinaryOperator>::op_begin(this),
2874  OperandTraits<BinaryOperator>::operands(this),
2875  InsertAtEnd) {
2876  Op<0>() = S1;
2877  Op<1>() = S2;
2878  setName(Name);
2879  AssertOK();
2880 }
2881 
2882 void BinaryOperator::AssertOK() {
2883  Value *LHS = getOperand(0), *RHS = getOperand(1);
2884  (void)LHS; (void)RHS; // Silence warnings.
2885  assert(LHS->getType() == RHS->getType() &&
2886  "Binary operator operand types must match!");
2887 #ifndef NDEBUG
2888  switch (getOpcode()) {
2889  case Add: case Sub:
2890  case Mul:
2891  assert(getType() == LHS->getType() &&
2892  "Arithmetic operation should return same type as operands!");
2893  assert(getType()->isIntOrIntVectorTy() &&
2894  "Tried to create an integer operation on a non-integer type!");
2895  break;
2896  case FAdd: case FSub:
2897  case FMul:
2898  assert(getType() == LHS->getType() &&
2899  "Arithmetic operation should return same type as operands!");
2900  assert(getType()->isFPOrFPVectorTy() &&
2901  "Tried to create a floating-point operation on a "
2902  "non-floating-point type!");
2903  break;
2904  case UDiv:
2905  case SDiv:
2906  assert(getType() == LHS->getType() &&
2907  "Arithmetic operation should return same type as operands!");
2908  assert(getType()->isIntOrIntVectorTy() &&
2909  "Incorrect operand type (not integer) for S/UDIV");
2910  break;
2911  case FDiv:
2912  assert(getType() == LHS->getType() &&
2913  "Arithmetic operation should return same type as operands!");
2914  assert(getType()->isFPOrFPVectorTy() &&
2915  "Incorrect operand type (not floating point) for FDIV");
2916  break;
2917  case URem:
2918  case SRem:
2919  assert(getType() == LHS->getType() &&
2920  "Arithmetic operation should return same type as operands!");
2921  assert(getType()->isIntOrIntVectorTy() &&
2922  "Incorrect operand type (not integer) for S/UREM");
2923  break;
2924  case FRem:
2925  assert(getType() == LHS->getType() &&
2926  "Arithmetic operation should return same type as operands!");
2927  assert(getType()->isFPOrFPVectorTy() &&
2928  "Incorrect operand type (not floating point) for FREM");
2929  break;
2930  case Shl:
2931  case LShr:
2932  case AShr:
2933  assert(getType() == LHS->getType() &&
2934  "Shift operation should return same type as operands!");
2935  assert(getType()->isIntOrIntVectorTy() &&
2936  "Tried to create a shift operation on a non-integral type!");
2937  break;
2938  case And: case Or:
2939  case Xor:
2940  assert(getType() == LHS->getType() &&
2941  "Logical operation should return same type as operands!");
2942  assert(getType()->isIntOrIntVectorTy() &&
2943  "Tried to create a logical operation on a non-integral type!");
2944  break;
2945  default: llvm_unreachable("Invalid opcode provided");
2946  }
2947 #endif
2948 }
2949 
2950 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
2951  const Twine &Name,
2952  Instruction *InsertBefore) {
2953  assert(S1->getType() == S2->getType() &&
2954  "Cannot create binary operator with two operands of differing type!");
2955  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2956 }
2957 
2958 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
2959  const Twine &Name,
2960  BasicBlock *InsertAtEnd) {
2961  BinaryOperator *Res = Create(Op, S1, S2, Name);
2962  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
2963  return Res;
2964 }
2965 
2966 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2967  Instruction *InsertBefore) {
2968  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2969  return new BinaryOperator(Instruction::Sub,
2970  zero, Op,
2971  Op->getType(), Name, InsertBefore);
2972 }
2973 
2974 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2975  BasicBlock *InsertAtEnd) {
2976  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2977  return new BinaryOperator(Instruction::Sub,
2978  zero, Op,
2979  Op->getType(), Name, InsertAtEnd);
2980 }
2981 
2982 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2983  Instruction *InsertBefore) {
2984  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2985  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
2986 }
2987 
2988 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2989  BasicBlock *InsertAtEnd) {
2990  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2991  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
2992 }
2993 
2994 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
2995  Instruction *InsertBefore) {
2996  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2997  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
2998 }
2999 
3000 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
3001  BasicBlock *InsertAtEnd) {
3002  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
3003  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
3004 }
3005 
3006 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3007  Instruction *InsertBefore) {
3008  Constant *C = Constant::getAllOnesValue(Op->getType());
3009  return new BinaryOperator(Instruction::Xor, Op, C,
3010  Op->getType(), Name, InsertBefore);
3011 }
3012 
3013 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3014  BasicBlock *InsertAtEnd) {
3015  Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
3016  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
3017  Op->getType(), Name, InsertAtEnd);
3018 }
3019 
3020 // Exchange the two operands of this instruction. Swapping is only performed
3021 // when the operation is commutative, in which case it does not modify the
3022 // semantics of the instruction. If the operation is not commutative, the
3023 // operands are left untouched and true is returned to signal failure.
3024 bool BinaryOperator::swapOperands() {
3025  if (!isCommutative())
3026  return true; // Can't commute operands
3027  Op<0>().swap(Op<1>());
3028  return false;
3029 }
3030 
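// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// The helpers above lower neg and not to their canonical binary forms,
// "sub 0, X" and "xor X, -1". Op and InsertBefore are assumed to exist and
// Op is assumed to have an integer (or integer vector) type.
static void exampleNegNot(Value *Op, Instruction *InsertBefore) {
  BinaryOperator *Neg = BinaryOperator::CreateNeg(Op, "neg", InsertBefore);
  BinaryOperator *Not = BinaryOperator::CreateNot(Op, "not", InsertBefore);
  assert(Neg->getOpcode() == Instruction::Sub);
  assert(Not->getOpcode() == Instruction::Xor);
  (void)Neg;
  (void)Not;
}
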
3031 //===----------------------------------------------------------------------===//
3032 // FPMathOperator Class
3033 //===----------------------------------------------------------------------===//
3034 
3035 float FPMathOperator::getFPAccuracy() const {
3036  const MDNode *MD =
3037  cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
3038  if (!MD)
3039  return 0.0;
3040  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
3041  return Accuracy->getValueAPF().convertToFloat();
3042 }
3043 
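// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// Attaches !fpmath metadata to a floating-point instruction and reads it
// back through getFPAccuracy() above. I is assumed to be an FP instruction
// such as an fdiv; 2.5 ULPs is exactly representable as a float.
static void exampleFPAccuracy(Instruction *I) {
  MDBuilder MDB(I->getContext());
  I->setMetadata(LLVMContext::MD_fpmath, MDB.createFPMath(2.5f));
  assert(cast<FPMathOperator>(I)->getFPAccuracy() == 2.5f);
}
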
3044 //===----------------------------------------------------------------------===//
3045 // CastInst Class
3046 //===----------------------------------------------------------------------===//
3047 
3048 // Just determine if this cast only deals with integral->integral conversion.
3049 bool CastInst::isIntegerCast() const {
3050  switch (getOpcode()) {
3051  default: return false;
3052  case Instruction::ZExt:
3053  case Instruction::SExt:
3054  case Instruction::Trunc:
3055  return true;
3056  case Instruction::BitCast:
3057  return getOperand(0)->getType()->isIntegerTy() &&
3058  getType()->isIntegerTy();
3059  }
3060 }
3061 
3062 bool CastInst::isLosslessCast() const {
3063  // Only BitCast can be lossless, exit fast if we're not BitCast
3064  if (getOpcode() != Instruction::BitCast)
3065  return false;
3066 
3067  // Identity cast is always lossless
3068  Type *SrcTy = getOperand(0)->getType();
3069  Type *DstTy = getType();
3070  if (SrcTy == DstTy)
3071  return true;
3072 
3073  // Pointer to pointer is always lossless.
3074  if (SrcTy->isPointerTy())
3075  return DstTy->isPointerTy();
3076  return false; // Other types have no identity values
3077 }
3078 
3079 /// This function determines if the CastInst does not require any bits to be
3080 /// changed in order to effect the cast. Essentially, it identifies cases where
3081 /// no code gen is necessary for the cast, hence the name no-op cast. For
3082 /// example, the following are all no-op casts:
3083 /// # bitcast i32* %x to i8*
3084 /// # bitcast <2 x i32> %x to <4 x i16>
3085 /// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
3086 /// Determine if the described cast is a no-op.
3087 bool CastInst::isNoopCast(Instruction::CastOps Opcode,
3088  Type *SrcTy,
3089  Type *DestTy,
3090  const DataLayout &DL) {
3091  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
3092  switch (Opcode) {
3093  default: llvm_unreachable("Invalid CastOp");
3094  case Instruction::Trunc:
3095  case Instruction::ZExt:
3096  case Instruction::SExt:
3097  case Instruction::FPTrunc:
3098  case Instruction::FPExt:
3099  case Instruction::UIToFP:
3100  case Instruction::SIToFP:
3101  case Instruction::FPToUI:
3102  case Instruction::FPToSI:
3103  case Instruction::AddrSpaceCast:
3104  // TODO: Target information may give a more accurate answer here.
3105  return false;
3106  case Instruction::BitCast:
3107  return true; // BitCast never modifies bits.
3108  case Instruction::PtrToInt:
3109  return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
3110  DestTy->getScalarSizeInBits();
3111  case Instruction::IntToPtr:
3112  return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
3113  SrcTy->getScalarSizeInBits();
3114  }
3115 }
3116 
3117 bool CastInst::isNoopCast(const DataLayout &DL) const {
3118  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
3119 }
3120 
3121 /// This function determines if a pair of casts can be eliminated and what
3122 /// opcode should be used in the elimination. This assumes that there are two
3123 /// instructions like this:
3124 /// * %F = firstOpcode SrcTy %x to MidTy
3125 /// * %S = secondOpcode MidTy %F to DstTy
3126 /// The function returns a resultOpcode so these two casts can be replaced with:
3127 /// * %Replacement = resultOpcode %SrcTy %x to DstTy
3128 /// If no such cast is permitted, the function returns 0.
3129 unsigned CastInst::isEliminableCastPair(
3130  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
3131  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
3132  Type *DstIntPtrTy) {
3133  // Define the 144 possibilities for these two cast instructions. The values
3134  // in this matrix determine what to do in a given situation and select the
3135  // case in the switch below. The rows correspond to firstOp, the columns
3136  // correspond to secondOp. In looking at the table below, keep in mind
3137  // the following cast properties:
3138  //
3139  // Size Compare Source Destination
3140  // Operator Src ? Size Type Sign Type Sign
3141  // -------- ------------ ------------------- ---------------------
3142  // TRUNC > Integer Any Integral Any
3143  // ZEXT < Integral Unsigned Integer Any
3144  // SEXT < Integral Signed Integer Any
3145  // FPTOUI n/a FloatPt n/a Integral Unsigned
3146  // FPTOSI n/a FloatPt n/a Integral Signed
3147  // UITOFP n/a Integral Unsigned FloatPt n/a
3148  // SITOFP n/a Integral Signed FloatPt n/a
3149  // FPTRUNC > FloatPt n/a FloatPt n/a
3150  // FPEXT < FloatPt n/a FloatPt n/a
3151  // PTRTOINT n/a Pointer n/a Integral Unsigned
3152  // INTTOPTR n/a Integral Unsigned Pointer n/a
3153  // BITCAST = FirstClass n/a FirstClass n/a
3154  // ADDRSPCST n/a Pointer n/a Pointer n/a
3155  //
3156  // NOTE: some transforms are safe, but we consider them to be non-profitable.
3157  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
3158  // into "fptoui double to i64", but this loses information about the range
3159  // of the produced value (we no longer know the top-part is all zeros).
3160  // Further this conversion is often much more expensive for typical hardware,
3161  // and causes issues when building libgcc. We disallow fptosi+sext for the
3162  // same reason.
3163  const unsigned numCastOps =
3164  Instruction::CastOpsEnd - Instruction::CastOpsBegin;
3165  static const uint8_t CastResults[numCastOps][numCastOps] = {
3166  // T F F U S F F P I B A -+
3167  // R Z S P P I I T P 2 N T S |
3168  // U E E 2 2 2 2 R E I T C C +- secondOp
3169  // N X X U S F F N X N 2 V V |
3170  // C T T I I P P C T T P T T -+
3171  { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
3172  { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
3173  { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
3174  { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
3175  { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
3176  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
3177  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
3178  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
3179  { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
3180  { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
3181  { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
3182  { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast |
3183  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
3184  };
3185 
3186  // TODO: This logic could be encoded into the table above and handled in the
3187  // switch below.
3188  // If either of the casts are a bitcast from scalar to vector, disallow the
3189  // merging. However, any pair of bitcasts are allowed.
3190  bool IsFirstBitcast = (firstOp == Instruction::BitCast);
3191  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
3192  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
3193 
3194  // Check if any of the casts convert scalars <-> vectors.
3195  if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
3196  (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
3197  if (!AreBothBitcasts)
3198  return 0;
3199 
3200  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
3201  [secondOp-Instruction::CastOpsBegin];
3202  switch (ElimCase) {
3203  case 0:
3204  // Categorically disallowed.
3205  return 0;
3206  case 1:
3207  // Allowed, use first cast's opcode.
3208  return firstOp;
3209  case 2:
3210  // Allowed, use second cast's opcode.
3211  return secondOp;
3212  case 3:
3213  // No-op cast in second op implies firstOp as long as the DestTy
3214  // is integer and we are not converting between a vector and a
3215  // non-vector type.
3216  if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
3217  return firstOp;
3218  return 0;
3219  case 4:
3220  // No-op cast in second op implies firstOp as long as the DestTy
3221  // is floating point.
3222  if (DstTy->isFloatingPointTy())
3223  return firstOp;
3224  return 0;
3225  case 5:
3226  // No-op cast in first op implies secondOp as long as the SrcTy
3227  // is an integer.
3228  if (SrcTy->isIntegerTy())
3229  return secondOp;
3230  return 0;
3231  case 6:
3232  // No-op cast in first op implies secondOp as long as the SrcTy
3233  // is a floating point.
3234  if (SrcTy->isFloatingPointTy())
3235  return secondOp;
3236  return 0;
3237  case 7: {
3238  // Bail out if the inttoptr/ptrtoint roundtrip optimization is disabled.
3239  if (DisableI2pP2iOpt)
3240  return 0;
3241 
3242  // Cannot simplify if address spaces are different!
3243  if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3244  return 0;
3245 
3246  unsigned MidSize = MidTy->getScalarSizeInBits();
3247  // We can still fold this without knowing the actual sizes as long as we
3248  // know that the intermediate pointer is the largest possible
3249  // pointer size.
3250  // FIXME: Is this always true?
3251  if (MidSize == 64)
3252  return Instruction::BitCast;
3253 
3254  // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
3255  if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
3256  return 0;
3257  unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
3258  if (MidSize >= PtrSize)
3259  return Instruction::BitCast;
3260  return 0;
3261  }
3262  case 8: {
3263  // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
3264  // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
3265  // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
3266  unsigned SrcSize = SrcTy->getScalarSizeInBits();
3267  unsigned DstSize = DstTy->getScalarSizeInBits();
3268  if (SrcTy == DstTy)
3269  return Instruction::BitCast;
3270  if (SrcSize < DstSize)
3271  return firstOp;
3272  if (SrcSize > DstSize)
3273  return secondOp;
3274  return 0;
3275  }
3276  case 9:
3277  // zext, sext -> zext, because sext can't sign extend after zext
3278  return Instruction::ZExt;
3279  case 11: {
3280  // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
3281  if (!MidIntPtrTy)
3282  return 0;
3283  unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
3284  unsigned SrcSize = SrcTy->getScalarSizeInBits();
3285  unsigned DstSize = DstTy->getScalarSizeInBits();
3286  if (SrcSize <= PtrSize && SrcSize == DstSize)
3287  return Instruction::BitCast;
3288  return 0;
3289  }
3290  case 12:
3291  // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
3292  // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
3293  if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3294  return Instruction::AddrSpaceCast;
3295  return Instruction::BitCast;
3296  case 13:
3297  // FIXME: this state can be merged with (1), but the following assert
3298  // is useful to check the correctness of the sequence due to semantic
3299  // change of bitcast.
3300  assert(
3301  SrcTy->isPtrOrPtrVectorTy() &&
3302  MidTy->isPtrOrPtrVectorTy() &&
3303  DstTy->isPtrOrPtrVectorTy() &&
3304  SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3305  MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3306  "Illegal addrspacecast, bitcast sequence!");
3307  // Allowed, use first cast's opcode
3308  return firstOp;
3309  case 14: {
3310  // bitcast, addrspacecast -> addrspacecast if the element type of
3311  // bitcast's source is the same as that of addrspacecast's destination.
3312  PointerType *SrcPtrTy = cast<PointerType>(SrcTy->getScalarType());
3313  PointerType *DstPtrTy = cast<PointerType>(DstTy->getScalarType());
3314  if (SrcPtrTy->hasSameElementTypeAs(DstPtrTy))
3315  return Instruction::AddrSpaceCast;
3316  return 0;
3317  }
3318  case 15:
3319  // FIXME: this state can be merged with (1), but the following assert
3320  // is useful to check the correctness of the sequence due to semantic
3321  // change of bitcast.
3322  assert(
3323  SrcTy->isIntOrIntVectorTy() &&
3324  MidTy->isPtrOrPtrVectorTy() &&
3325  DstTy->isPtrOrPtrVectorTy() &&
3326  MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3327  "Illegal inttoptr, bitcast sequence!");
3328  // Allowed, use first cast's opcode
3329  return firstOp;
3330  case 16:
3331  // FIXME: this state can be merged with (2), but the following assert
3332  // is useful to check the correctness of the sequence due to semantic
3333  // change of bitcast.
3334  assert(
3335  SrcTy->isPtrOrPtrVectorTy() &&
3336  MidTy->isPtrOrPtrVectorTy() &&
3337  DstTy->isIntOrIntVectorTy() &&
3338  SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3339  "Illegal bitcast, ptrtoint sequence!");
3340  // Allowed, use second cast's opcode
3341  return secondOp;
3342  case 17:
3343  // (sitofp (zext x)) -> (uitofp x)
3344  return Instruction::UIToFP;
3345  case 99:
3346  // Cast combination can't happen (error in input). This is for all cases
3347  // where the MidTy is not the same for the two cast instructions.
3348  llvm_unreachable("Invalid Cast Combination");
3349  default:
3350  llvm_unreachable("Error in CastResults table!!!");
3351  }
3352 }
3353 
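// ---- Editor's usage sketch (illustrative; not part of Instructions.cpp). ----
// Querying the elimination table above: per the ZExt row / SExt column
// (case 9), a zext followed by a sext folds to a single zext. The IntPtrTy
// arguments only matter for pointer casts and may be null here.
static void exampleEliminableCastPair(LLVMContext &Ctx) {
  Type *I8 = Type::getInt8Ty(Ctx), *I16 = Type::getInt16Ty(Ctx),
       *I32 = Type::getInt32Ty(Ctx);
  unsigned Folded = CastInst::isEliminableCastPair(
      Instruction::ZExt, Instruction::SExt, I8, I16, I32,
      /*SrcIntPtrTy=*/nullptr, /*MidIntPtrTy=*/nullptr,
      /*DstIntPtrTy=*/nullptr);
  assert(Folded == Instruction::ZExt);
  (void)Folded;
}
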
3354 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3355  const Twine &Name, Instruction *InsertBefore) {
3356  assert(castIsValid(op, S, Ty) && "Invalid cast!");
3357  // Construct and return the appropriate CastInst subclass
3358  switch (op) {
3359  case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3360  case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3361  case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3362  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3363  case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3364  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3365  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3366  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3367  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3368  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3369  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3370  case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
3371  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3372  default: llvm_unreachable("Invalid opcode provided");
3373  }
3374 }
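// Editorial example (sketch, not from this file): the factory above selects
// the subclass from the opcode. Narrow, Ctx and InsertPt are assumed names.
//
//   Value *Wide = CastInst::Create(Instruction::ZExt, Narrow,
//                                  Type::getInt64Ty(Ctx), "wide", InsertPt);
//   // Wide is a ZExtInst inserted immediately before InsertPt.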
3375 
3376 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3377  const Twine &Name, BasicBlock *InsertAtEnd) {
3378  assert(castIsValid(op, S, Ty) && "Invalid cast!");
3379  // Construct and return the appropriate CastInst subclass
3380  switch (op) {
3381  case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
3382  case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
3383  case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
3384  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
3385  case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
3386  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
3387  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
3388  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
3389  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
3390  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
3391  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
3392  case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
3393  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
3394  default: llvm_unreachable("Invalid opcode provided");
3395  }
3396 }
3397 
3398 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3399  const Twine &Name,
3400  Instruction *InsertBefore) {
3401  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3402  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3403  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3404 }
3405 
3406 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3407  const Twine &Name,
3408  BasicBlock *InsertAtEnd) {
3409  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3410  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3411  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
3412 }
3413 
3414 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3415  const Twine &Name,
3416  Instruction *InsertBefore) {
3417  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3418  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3419  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3420 }
3421 
3422 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3423  const Twine &Name,
3424  BasicBlock *InsertAtEnd) {
3425  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3426  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3427  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
3428 }
3429 
3430 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3431  const Twine &Name,
3432  Instruction *InsertBefore) {
3433  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3434  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3435  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3436 }
3437 
3438 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3439  const Twine &Name,
3440  BasicBlock *InsertAtEnd) {
3441  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3442  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3443  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
3444 }
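// Editorial note: the Create{ZExt,SExt,Trunc}OrBitCast helpers above share
// one pattern: equal scalar widths degrade to a no-op bitcast, so callers
// never special-case the same-width situation. For example, with a
// hypothetical i32 value V:
//
//   // Emits a bitcast rather than an (invalid, non-widening) zext:
//   Value *Same = CastInst::CreateZExtOrBitCast(V, Type::getInt32Ty(Ctx),
//                                               "same", InsertPt);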
3445 
3446 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
3447  const Twine &Name,
3448  BasicBlock *InsertAtEnd) {
3449  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3450  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3451  "Invalid cast");
3452  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3453  assert((!Ty->isVectorTy() ||
3454  cast<VectorType>(Ty)->getElementCount() ==
3455  cast<VectorType>(S->getType())->getElementCount()) &&
3456  "Invalid cast");
3457 
3458  if (Ty->isIntOrIntVectorTy())
3459  return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
3460 
3461  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
3462 }
3463 
3464 /// Create a BitCast or a PtrToInt cast instruction
3465 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
3466  const Twine &Name,
3467  Instruction *InsertBefore) {
3468  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3469  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3470  "Invalid cast");
3471  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3472  assert((!Ty->isVectorTy() ||
3473  cast<VectorType>(Ty)->getElementCount() ==
3474  cast<VectorType>(S->getType())->getElementCount()) &&
3475  "Invalid cast");
3476 
3477  if (Ty->isIntOrIntVectorTy())
3478  return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3479 
3480  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3481 }
3482 
3483 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3484  Value *S, Type *Ty,
3485  const Twine &Name,
3486  BasicBlock *InsertAtEnd) {
3487  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3488  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3489 
3490  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3491  return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
3492 
3493  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3494 }
3495 
3496 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3497  Value *S, Type *Ty,
3498  const Twine &Name,
3499  Instruction *InsertBefore) {
3500  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3501  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3502 
3503  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3504  return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3505 
3506  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3507 }
3508 
3509 CastInst *CastInst::CreateBitOrPtrCast(Value *S, Type *Ty,
3510  const Twine &Name,
3511  Instruction *InsertBefore) {
3512  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3513  return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3514  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3515  return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3516 
3517  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3518 }
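// Editorial example (sketch): CreateBitOrPtrCast dispatches purely on the
// ptr/int mix of the two types; with a pointer P and an i64 destination
// (hypothetical names) it emits a ptrtoint:
//
//   Value *Addr = CastInst::CreateBitOrPtrCast(P, Type::getInt64Ty(Ctx),
//                                              "addr", InsertPt);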
3519 
3520 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3521  bool isSigned, const Twine &Name,
3522  Instruction *InsertBefore) {
3523  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3524  "Invalid integer cast");
3525  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3526  unsigned DstBits = Ty->getScalarSizeInBits();
3527  Instruction::CastOps opcode =
3528  (SrcBits == DstBits ? Instruction::BitCast :
3529  (SrcBits > DstBits ? Instruction::Trunc :
3530  (isSigned ? Instruction::SExt : Instruction::ZExt)));
3531  return Create(opcode, C, Ty, Name, InsertBefore);
3532 }
3533 
3534 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3535  bool isSigned, const Twine &Name,
3536  BasicBlock *InsertAtEnd) {
3537  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3538  "Invalid cast");
3539  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3540  unsigned DstBits = Ty->getScalarSizeInBits();
3541  Instruction::CastOps opcode =
3542  (SrcBits == DstBits ? Instruction::BitCast :
3543  (SrcBits > DstBits ? Instruction::Trunc :
3544  (isSigned ? Instruction::SExt : Instruction::ZExt)));
3545  return Create(opcode, C, Ty, Name, InsertAtEnd);
3546 }
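// Editorial example: CreateIntegerCast picks the opcode from the relative
// scalar widths and the isSigned flag. Assuming a hypothetical i8 value V8:
//
//   // i8 -> i32 with isSigned=true: SrcBits < DstBits, so a sext is created.
//   Value *V32 = CastInst::CreateIntegerCast(V8, Type::getInt32Ty(Ctx),
//                                            /*isSigned=*/true, "w", InsertPt);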
3547 
3548 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3549  const Twine &Name,
3550  Instruction *InsertBefore) {
3551  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3552  "Invalid cast");
3553  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3554  unsigned DstBits = Ty->getScalarSizeInBits();
3555  Instruction::CastOps opcode =
3556  (SrcBits == DstBits ? Instruction::BitCast :
3557  (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3558  return Create(opcode, C, Ty, Name, InsertBefore);
3559 }
3560 
3561 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3562  const Twine &Name,
3563  BasicBlock *InsertAtEnd) {
3564  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3565  "Invalid cast");
3566  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3567  unsigned DstBits = Ty->getScalarSizeInBits();
3568  Instruction::CastOps opcode =
3569  (SrcBits == DstBits ? Instruction::BitCast :
3570  (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3571  return Create(opcode, C, Ty, Name, InsertAtEnd);
3572 }
3573 
3574 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3575  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3576  return false;
3577 
3578  if (SrcTy == DestTy)
3579  return true;
3580 
3581  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3582  if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3583  if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3584  // An element by element cast. Valid if casting the elements is valid.
3585  SrcTy = SrcVecTy->getElementType();
3586  DestTy = DestVecTy->getElementType();
3587  }
3588  }
3589  }
3590 
3591  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3592  if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3593  return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3594  }
3595  }
3596 
3597  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3598  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3599 
3600  // Could still have vectors of pointers if the number of elements doesn't
3601  // match
3602  if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3603  return false;
3604 
3605  if (SrcBits != DestBits)
3606  return false;
3607 
3608  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
3609  return false;
3610 
3611  return true;
3612 }
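// Editorial examples of the rules above, in IR notation:
//   bitcast i64 to double            ; ok: same primitive width
//   bitcast <2 x i32> to i64         ; ok: 64 bits on both sides
//   bitcast ptr to ptr addrspace(1)  ; rejected: address spaces differ
//   bitcast i32 to i64               ; rejected: widths differ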
3613 
3614 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
3615  const DataLayout &DL) {
3616  // ptrtoint and inttoptr are not allowed on non-integral pointers
3617  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3618  if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3619  return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3620  !DL.isNonIntegralPointerType(PtrTy));
3621  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3622  if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3623  return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3624  !DL.isNonIntegralPointerType(PtrTy));
3625 
3626  return isBitCastable(SrcTy, DestTy);
3627 }
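// Editorial note: on top of plain bitcasts, the DataLayout-aware overload
// above also accepts the no-op ptr<->int shapes (e.g. ptrtoint ptr to i64
// under a 64-bit pointer layout), but only when the integer width equals the
// pointer width exactly and the pointer type is integral.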
3628 
3629 // Provide a way to get a "cast" where the cast opcode is inferred from the
3630 // types and size of the operand. This is essentially a parallel of the
3631 // logic in the castIsValid function below. The following invariant should
3632 // hold: castIsValid(getCastOpcode(Val, Ty), Val, Ty)
3633 // must not assert. In other words, getCastOpcode produces a "correct"
3634 // casting opcode for the arguments passed to it.
3635 Instruction::CastOps
3636 CastInst::getCastOpcode(
3637  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3638  Type *SrcTy = Src->getType();
3639 
3640  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3641  "Only first class types are castable!");
3642 
3643  if (SrcTy == DestTy)
3644  return BitCast;
3645 
3646  // FIXME: Check address space sizes here
3647  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3648  if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3649  if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3650  // An element by element cast. Find the appropriate opcode based on the
3651  // element types.
3652  SrcTy = SrcVecTy->getElementType();
3653  DestTy = DestVecTy->getElementType();
3654  }
3655 
3656  // Get the bit sizes, we'll need these
3657  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3658  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3659 
3660  // Run through the possibilities ...
3661  if (DestTy->isIntegerTy()) { // Casting to integral
3662  if (SrcTy->isIntegerTy()) { // Casting from integral
3663  if (DestBits < SrcBits)
3664  return Trunc; // int -> smaller int
3665  else if (DestBits > SrcBits) { // it's an extension
3666  if (SrcIsSigned)
3667  return SExt; // signed -> SEXT
3668  else
3669  return ZExt; // unsigned -> ZEXT
3670  } else {
3671  return BitCast; // Same size, No-op cast
3672  }
3673  } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3674  if (DestIsSigned)
3675  return FPToSI; // FP -> sint
3676  else
3677  return FPToUI; // FP -> uint
3678  } else if (SrcTy->isVectorTy()) {
3679  assert(DestBits == SrcBits &&
3680  "Casting vector to integer of different width");
3681  return BitCast; // Same size, no-op cast
3682  } else {
3683  assert(SrcTy->isPointerTy() &&
3684  "Casting from a value that is not first-class type");
3685  return PtrToInt; // ptr -> int
3686  }
3687  } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3688  if (SrcTy->isIntegerTy()) { // Casting from integral
3689  if (SrcIsSigned)
3690  return SIToFP; // sint -> FP
3691  else
3692  return UIToFP; // uint -> FP
3693  } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3694  if (DestBits < SrcBits) {
3695  return FPTrunc; // FP -> smaller FP
3696  } else if (DestBits > SrcBits) {
3697  return FPExt; // FP -> larger FP
3698  } else {
3699  return BitCast; // same size, no-op cast
3700  }
3701  } else if (SrcTy->isVectorTy()) {
3702  assert(DestBits == SrcBits &&
3703  "Casting vector to floating point of different width");
3704  return BitCast; // same size, no-op cast
3705  }
3706  llvm_unreachable("Casting pointer or non-first class to float");
3707  } else if (DestTy->isVectorTy()) {
3708  assert(DestBits == SrcBits &&
3709  "Illegal cast to vector (wrong type or size)");
3710  return BitCast;
3711  } else if (DestTy->isPointerTy()) {
3712  if (SrcTy->isPointerTy()) {
3713  if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3714  return AddrSpaceCast;
3715  return BitCast; // ptr -> ptr
3716  } else if (SrcTy->isIntegerTy()) {
3717  return IntToPtr; // int -> ptr
3718  }
3719  llvm_unreachable("Casting pointer to other than pointer or int");
3720  } else if (DestTy->isX86_MMXTy()) {
3721  if (SrcTy->isVectorTy()) {
3722  assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
3723  return BitCast; // 64-bit vector to MMX
3724  }
3725  llvm_unreachable("Illegal cast to X86_MMX");
3726  }
3727  llvm_unreachable("Casting to type that is not first-class");
3728 }
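// Editorial example (sketch): pairing getCastOpcode with Create recovers the
// "emit whatever cast is needed" idiom; Src, DestTy and InsertPt are
// hypothetical.
//
//   Instruction::CastOps Op = CastInst::getCastOpcode(
//       Src, /*SrcIsSigned=*/false, DestTy, /*DestIsSigned=*/false);
//   Value *Conv = CastInst::Create(Op, Src, DestTy, "conv", InsertPt);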
3729 
3730 //===----------------------------------------------------------------------===//
3731 // CastInst SubClass Constructors
3732 //===----------------------------------------------------------------------===//
3733 
3734 /// Check that the construction parameters for a CastInst are correct. This
3735 /// could be broken out into the separate constructors but it is useful to have
3736 /// it in one place and to eliminate the redundant code for getting the sizes
3737 /// of the types involved.
3738 bool
3739 CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
3740  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3741  SrcTy->isAggregateType() || DstTy->isAggregateType())
3742  return false;
3743 
3744  // Get the size of the types in bits, and whether we are dealing
3745  // with vector types; we'll need this later.
3746  bool SrcIsVec = isa<VectorType>(SrcTy);
3747  bool DstIsVec = isa<VectorType>(DstTy);
3748  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3749  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3750 
3751  // If these are vector types, get the lengths of the vectors (using zero for
3752  // scalar types means that checking that vector lengths match also checks that
3753  // scalars are not being converted to vectors or vectors to scalars).
3754  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3755                                : ElementCount::getFixed(0);
3756  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3757                                : ElementCount::getFixed(0);
3758 
3759  // Switch on the opcode provided
3760  switch (op) {
3761  default: return false; // This is an input error
3762  case Instruction::Trunc:
3763  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3764  SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3765  case Instruction::ZExt:
3766  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3767  SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3768  case Instruction::SExt:
3769  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3770  SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3771  case Instruction::FPTrunc:
3772  return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3773  SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3774  case Instruction::FPExt:
3775  return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3776  SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3777  case Instruction::UIToFP:
3778  case Instruction::SIToFP:
3779  return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3780  SrcEC == DstEC;
3781  case Instruction::FPToUI:
3782  case Instruction::FPToSI:
3783  return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3784  SrcEC == DstEC;
3785  case Instruction::PtrToInt:
3786  if (SrcEC != DstEC)
3787  return false;
3788  return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3789  case Instruction::IntToPtr:
3790  if (SrcEC != DstEC)
3791  return false;
3792  return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3793  case Instruction::BitCast: {
3794  PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3795  PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3796 
3797  // BitCast implies a no-op cast of type only. No bits change.
3798  // However, you can't cast pointers to anything but pointers.
3799  if (!SrcPtrTy != !DstPtrTy)
3800  return false;
3801 
3802  // For non-pointer cases, the cast is okay if the source and destination bit
3803  // widths are identical.
3804  if (!SrcPtrTy)
3805  return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3806 
3807  // If both are pointers then the address spaces must match.
3808  if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3809  return false;
3810 
3811  // A vector of pointers must have the same number of elements.
3812  if (SrcIsVec && DstIsVec)
3813  return SrcEC == DstEC;
3814  if (SrcIsVec)
3815  return SrcEC == ElementCount::getFixed(1);
3816  if (DstIsVec)
3817  return DstEC == ElementCount::getFixed(1);
3818 
3819  return true;
3820  }
3821  case Instruction::AddrSpaceCast: {
3822  PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3823  if (!SrcPtrTy)
3824  return false;
3825 
3826  PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3827  if (!DstPtrTy)
3828  return false;
3829 
3830  if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3831  return false;
3832 
3833  return SrcEC == DstEC;
3834  }
3835  }
3836 }
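// Editorial examples of castIsValid, in IR notation:
//   trunc i32 to i16              ; valid: int->int and 32 > 16
//   zext <4 x i16> to <4 x i32>   ; valid: element counts match
//   zext <4 x i16> to <8 x i32>   ; invalid: element counts differ
//   sext i32 to i32               ; invalid: SExt requires strict widening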
3837 
3838 TruncInst::TruncInst(
3839  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3840 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3841  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3842 }
3843 
3844 TruncInst::TruncInst(
3845  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3846 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
3847  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3848 }
3849 
3850 ZExtInst::ZExtInst(
3851  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3852 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3853  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3854 }
3855 
3856 ZExtInst::ZExtInst(
3857  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3858 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
3859  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3860 }
3861 SExtInst::SExtInst(
3862  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3863 ) : CastInst(Ty, SExt, S, Name, InsertBefore) {
3864  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3865 }
3866 
3867 SExtInst::SExtInst(
3868  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3869 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
3870  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3871 }
3872 
3873 FPTruncInst::FPTruncInst(
3874  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3875 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3876  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3877 }
3878 
3879 FPTruncInst::FPTruncInst(
3880  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3881 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
3882  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3883 }
3884 
3885 FPExtInst::FPExtInst(
3886  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3887 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3888  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3889 }
3890 
3891 FPExtInst::FPExtInst(
3892  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3893 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
3894  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3895 }
3896 
3897 UIToFPInst::UIToFPInst(
3898  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3899 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3900  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3901 }
3902 
3903 UIToFPInst::UIToFPInst(
3904  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3905 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
3906  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3907 }
3908 
3909 SIToFPInst::SIToFPInst(
3910  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3911 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3912  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3913 }
3914 
3915 SIToFPInst::SIToFPInst(
3916  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3917 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
3918  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3919 }
3920 
3921 FPToUIInst::FPToUIInst(
3922  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3923 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3924  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3925 }
3926 
3927 FPToUIInst::FPToUIInst(
3928  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3929 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
3930  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3931 }
3932 
3933 FPToSIInst::FPToSIInst(
3934  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3935 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3936  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3937 }
3938 
3939 FPToSIInst::FPToSIInst(
3940  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3941 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
3942  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3943 }
3944 
3945 PtrToIntInst::PtrToIntInst(
3946  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3947 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3948  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3949 }
3950 
3951 PtrToIntInst::PtrToIntInst(
3952  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3953 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
3954  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3955 }
3956 
3957 IntToPtrInst::IntToPtrInst(
3958  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3959 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3960  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3961 }
3962 
3963 IntToPtrInst::IntToPtrInst(
3964  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3965 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
3966  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3967 }
3968 
3969 BitCastInst::BitCastInst(
3970  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3971 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3972  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3973 }
3974 
3975 BitCastInst::BitCastInst(
3976  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3977 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
3978  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3979 }
3980 
3981 AddrSpaceCastInst::AddrSpaceCastInst(
3982  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3983 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3984  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3985 }
3986 
3987 AddrSpaceCastInst::AddrSpaceCastInst(
3988  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3989 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
3990  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3991 }
3992 
3993 //===----------------------------------------------------------------------===//
3994 // CmpInst Classes
3995 //===----------------------------------------------------------------------===//
3996 
3997 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3998  Value *RHS, const Twine &Name, Instruction *InsertBefore,
3999  Instruction *FlagsSource)
4000  : Instruction(ty, op,
4001  OperandTraits<CmpInst>::op_begin(this),
4002  OperandTraits<CmpInst>::operands(this),
4003  InsertBefore) {
4004  Op<0>() = LHS;
4005  Op<1>() = RHS;
4006  setPredicate((Predicate)predicate);
4007  setName(Name);
4008  if (FlagsSource)
4009  copyIRFlags(FlagsSource);
4010 }
4011 
4012 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
4013  Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
4014  : Instruction(ty, op,
4015  OperandTraits<CmpInst>::op_begin(this),
4016  OperandTraits<CmpInst>::operands(this),
4017  InsertAtEnd) {
4018  Op<0>() = LHS;
4019  Op<1>() = RHS;
4020  setPredicate((Predicate)predicate);
4021  setName(Name);
4022 }
4023 
4024 CmpInst *
4025 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
4026  const Twine &Name, Instruction *InsertBefore) {
4027  if (Op == Instruction::ICmp) {
4028  if (InsertBefore)
4029  return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4030  S1, S2, Name);
4031  else
4032  return new ICmpInst(CmpInst::Predicate(predicate),
4033  S1, S2, Name);
4034  }
4035 
4036  if (InsertBefore)
4037  return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4038  S1, S2, Name);
4039  else
4040  return new FCmpInst(CmpInst::Predicate(predicate),
4041  S1, S2, Name);
4042 }
4043 
4044 CmpInst *
4045 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
4046  const Twine &Name, BasicBlock *InsertAtEnd) {
4047  if (Op == Instruction::ICmp) {
4048  return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
4049  S1, S2, Name);
4050  }
4051  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
4052  S1, S2, Name);
4053 }
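// Editorial example (sketch): the factory forwards to ICmpInst or FCmpInst
// based on Op; A, B and InsertPt are hypothetical.
//
//   CmpInst *Lt = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SLT,
//                                 A, B, "lt", InsertPt);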
4054 
4055 void CmpInst::swapOperands() {
4056  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
4057  IC->swapOperands();
4058  else
4059  cast<FCmpInst>(this)->swapOperands();
4060 }
4061 
4062 bool CmpInst::isCommutative() const {
4063  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
4064  return IC->isCommutative();
4065  return cast<FCmpInst>(this)->isCommutative();
4066 }
4067 
4068 bool CmpInst::isEquality(Predicate P) {
4069  if (ICmpInst::isIntPredicate(P))
4070  return ICmpInst::isEquality(P);
4071  if (FCmpInst::isFPPredicate(P))
4072  return FCmpInst::isEquality(P);
4073  llvm_unreachable("Unsupported predicate kind");
4074 }
4075 
4076 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
4077  switch (pred) {
4078  default: llvm_unreachable("Unknown cmp predicate!");
4079  case ICMP_EQ: return ICMP_NE;
4080  case ICMP_NE: return ICMP_EQ;
4081  case ICMP_UGT: return ICMP_ULE;
4082  case ICMP_ULT: return ICMP_UGE;
4083  case ICMP_UGE: return ICMP_ULT;
4084  case ICMP_ULE: return ICMP_UGT;
4085  case ICMP_SGT: return ICMP_SLE;
4086  case ICMP_SLT: return ICMP_SGE;
4087  case ICMP_SGE: return ICMP_SLT;
4088  case ICMP_SLE: return ICMP_SGT;
4089 
4090  case FCMP_OEQ: return FCMP_UNE;
4091  case FCMP_ONE: return FCMP_UEQ;
4092  case FCMP_OGT: return FCMP_ULE;
4093  case FCMP_OLT: return FCMP_UGE;
4094  case FCMP_OGE: return FCMP_ULT;
4095  case FCMP_OLE: return FCMP_UGT;
4096  case FCMP_UEQ: return FCMP_ONE;
4097  case FCMP_UNE: return FCMP_OEQ;
4098  case FCMP_UGT: return FCMP_OLE;
4099  case FCMP_ULT: return FCMP_OGE;
4100  case FCMP_UGE: return FCMP_OLT;
4101  case FCMP_ULE: return FCMP_OGT;
4102  case FCMP_ORD: return FCMP_UNO;
4103  case FCMP_UNO: return FCMP_ORD;
4104  case FCMP_TRUE: return FCMP_FALSE;
4105  case FCMP_FALSE: return FCMP_TRUE;
4106  }
4107 }
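// Editorial note: the "inverse" is the logical negation of the predicate,
// so exactly one of pred and getInversePredicate(pred) holds for any operand
// pair. That is why the FP rows swap ordered and unordered forms: when an
// operand is NaN, !(a > b) must still be true, which FCMP_ULE provides.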
4108 
4109 StringRef CmpInst::getPredicateName(Predicate Pred) {
4110  switch (Pred) {
4111  default: return "unknown";
4112  case FCmpInst::FCMP_FALSE: return "false";
4113  case FCmpInst::FCMP_OEQ: return "oeq";
4114  case FCmpInst::FCMP_OGT: return "ogt";
4115  case FCmpInst::FCMP_OGE: return "oge";
4116  case FCmpInst::FCMP_OLT: return "olt";
4117  case FCmpInst::FCMP_OLE: return "ole";
4118  case FCmpInst::FCMP_ONE: return "one";
4119  case FCmpInst::FCMP_ORD: return "ord";
4120  case FCmpInst::FCMP_UNO: return "uno";
4121  case FCmpInst::FCMP_UEQ: return "ueq";
4122  case FCmpInst::FCMP_UGT: return "ugt";
4123  case FCmpInst::FCMP_UGE: return "uge";
4124  case FCmpInst::FCMP_ULT: return "ult";
4125  case FCmpInst::FCMP_ULE: return "ule";
4126  case FCmpInst::FCMP_UNE: return "une";
4127  case FCmpInst::FCMP_TRUE: return "true";
4128  case ICmpInst::ICMP_EQ: return "eq";
4129  case ICmpInst::ICMP_NE: return "ne";
4130  case ICmpInst::ICMP_SGT: return "sgt";
4131  case ICmpInst::ICMP_SGE: return "sge";
4132  case ICmpInst::ICMP_SLT: return "slt";
4133  case ICmpInst::ICMP_SLE: return "sle";
4134  case ICmpInst::ICMP_UGT: return "ugt";
4135  case ICmpInst::ICMP_UGE: return "uge";
4136  case ICmpInst::ICMP_ULT: return "ult";
4137  case ICmpInst::ICMP_ULE: return "ule";
4138  }
4139 }
4140 
4141 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
4142  switch (pred) {
4143  default: llvm_unreachable("Unknown icmp predicate!");
4144  case ICMP_EQ: case ICMP_NE:
4145  case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
4146  return pred;
4147  case ICMP_UGT: return ICMP_SGT;
4148  case ICMP_ULT: return ICMP_SLT;
4149  case ICMP_UGE: return ICMP_SGE;
4150  case ICMP_ULE: return ICMP_SLE;
4151  }
4152 }
4153 
4154 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
4155  switch (pred) {
4156  default: llvm_unreachable("Unknown icmp predicate!");
4157  case ICMP_EQ: case ICMP_NE:
4158  case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
4159  return pred;
4160  case ICMP_SGT: return ICMP_UGT;
4161  case ICMP_SLT: return ICMP_ULT;
4162  case ICMP_SGE: return ICMP_UGE;
4163  case ICMP_SLE: return ICMP_ULE;
4164  }
4165 }
4166 
4167 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
4168  switch (pred) {
4169  default: llvm_unreachable("Unknown cmp predicate!");
4170  case ICMP_EQ: case ICMP_NE:
4171  return pred;
4172  case ICMP_SGT: return ICMP_SLT;
4173  case ICMP_SLT: return ICMP_SGT;
4174  case ICMP_SGE: return ICMP_SLE;
4175  case ICMP_SLE: return ICMP_SGE;
4176  case ICMP_UGT: return ICMP_ULT;
4177  case ICMP_ULT: return ICMP_UGT;
4178  case ICMP_UGE: return ICMP_ULE;
4179  case ICMP_ULE: return ICMP_UGE;
4180 
4181  case FCMP_FALSE: case FCMP_TRUE:
4182  case FCMP_OEQ: case FCMP_ONE:
4183  case FCMP_UEQ: case FCMP_UNE:
4184  case FCMP_ORD: case FCMP_UNO:
4185  return pred;
4186  case FCMP_OGT: return FCMP_OLT;
4187  case FCMP_OLT: return FCMP_OGT;
4188  case FCMP_OGE: return FCMP_OLE;
4189  case FCMP_OLE: return FCMP_OGE;
4190  case FCMP_UGT: return FCMP_ULT;
4191  case FCMP_ULT: return FCMP_UGT;
4192  case FCMP_UGE: return FCMP_ULE;
4193  case FCMP_ULE: return FCMP_UGE;
4194  }
4195 }
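// Editorial note: contrast with getInversePredicate above. Swapping keeps
// the truth value while exchanging the operands instead of negating it:
//   icmp sgt %a, %b  ==  icmp slt %b, %a    (swapped)
//   icmp sgt %a, %b  == !(icmp sle %a, %b)  (inverse)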
4196 
4197 bool CmpInst::isNonStrictPredicate(Predicate pred) {
4198  switch (pred) {
4199  case ICMP_SGE:
4200  case ICMP_SLE:
4201  case ICMP_UGE:
4202  case ICMP_ULE:
4203  case FCMP_OGE:
4204  case FCMP_OLE:
4205  case FCMP_UGE:
4206  case FCMP_ULE:
4207  return true;
4208  default:
4209  return false;
4210  }
4211 }
4212 
4213 bool CmpInst::isStrictPredicate(Predicate pred) {
4214  switch (pred) {
4215  case ICMP_SGT:
4216  case ICMP_SLT:
4217  case ICMP_UGT:
4218  case ICMP_ULT:
4219  case FCMP_OGT:
4220  case FCMP_OLT:
4221  case FCMP_UGT:
4222  case FCMP_ULT:
4223  return true;
4224  default:
4225  return false;
4226  }
4227 }
4228 
4229 CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
4230  switch (pred) {
4231  case ICMP_SGE:
4232  return ICMP_SGT;
4233  case ICMP_SLE:
4234  return ICMP_SLT;
4235  case ICMP_UGE:
4236  return ICMP_UGT;
4237  case ICMP_ULE:
4238  return ICMP_ULT;
4239  case FCMP_OGE:
4240  return FCMP_OGT;
4241  case FCMP_OLE:
4242  return FCMP_OLT;
4243  case FCMP_UGE:
4244  return FCMP_UGT;
4245  case FCMP_ULE:
4246  return FCMP_ULT;
4247  default:
4248  return pred;
4249  }
4250 }
4251 
4252 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
4253  switch (pred) {
4254  case ICMP_SGT:
4255  return ICMP_SGE;
4256  case ICMP_SLT:
4257  return ICMP_SLE;
4258  case ICMP_UGT:
4259  return ICMP_UGE;
4260  case ICMP_ULT:
4261  return ICMP_ULE;
4262  case FCMP_OGT:
4263  return FCMP_OGE;
4264  case FCMP_OLT:
4265  return FCMP_OLE;
4266  case FCMP_UGT:
4267  return FCMP_UGE;
4268  case FCMP_ULT:
4269  return FCMP_ULE;
4270  default:
4271  return pred;
4272  }
4273 }
4274 
4275 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
4276  assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
4277 
4278  if (isStrictPredicate(pred))
4279  return getNonStrictPredicate(pred);
4280  if (isNonStrictPredicate(pred))
4281  return getStrictPredicate(pred);
4282 
4283  llvm_unreachable("Unknown predicate!");
4284 }
4285 
4286 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
4287  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
4288 
4289  switch (pred) {
4290  default:
4291  llvm_unreachable("Unknown predicate!");
4292  case CmpInst::ICMP_ULT:
4293  return CmpInst::ICMP_SLT;
4294  case CmpInst::ICMP_ULE:
4295  return CmpInst::ICMP_SLE;
4296  case CmpInst::ICMP_UGT:
4297  return CmpInst::ICMP_SGT;
4298  case CmpInst::ICMP_UGE:
4299  return CmpInst::ICMP_SGE;
4300  }
4301 }
4302 
4303 CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
4304  assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
4305 
4306  switch (pred) {
4307  default:
4308  llvm_unreachable("Unknown predicate!");
4309  case CmpInst::ICMP_SLT:
4310  return CmpInst::ICMP_ULT;
4311  case CmpInst::ICMP_SLE:
4312  return CmpInst::ICMP_ULE;
4313  case CmpInst::ICMP_SGT:
4314  return CmpInst::ICMP_UGT;
4315  case CmpInst::ICMP_SGE:
4316  return CmpInst::ICMP_UGE;
4317  }
4318 }
4319 
4320 bool CmpInst::isUnsigned(Predicate predicate) {
4321  switch (predicate) {
4322  default: return false;
4323  case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
4324  case ICmpInst::ICMP_UGE: return true;
4325  }
4326 }
4327 
4328 bool CmpInst::isSigned(Predicate predicate) {
4329  switch (predicate) {
4330  default: return false;
4331  case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
4332  case ICmpInst::ICMP_SGE: return true;
4333  }
4334 }
4335 
4336 bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
4337  ICmpInst::Predicate Pred) {
4338  assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
4339  switch (Pred) {
4340  case ICmpInst::Predicate::ICMP_EQ:
4341  return LHS.eq(RHS);
4342  case ICmpInst::Predicate::ICMP_NE:
4343  return LHS.ne(RHS);
4344  case ICmpInst::Predicate::ICMP_UGT:
4345  return LHS.ugt(RHS);
4346  case ICmpInst::Predicate::ICMP_UGE:
4347  return LHS.uge(RHS);
4348  case ICmpInst::Predicate::ICMP_ULT:
4349  return LHS.ult(RHS);
4350  case ICmpInst::Predicate::ICMP_ULE:
4351  return LHS.ule(RHS);
4352  case ICmpInst::Predicate::ICMP_SGT:
4353  return LHS.sgt(RHS);
4354  case ICmpInst::Predicate::ICMP_SGE:
4355  return LHS.sge(RHS);
4356  case ICmpInst::Predicate::ICMP_SLT:
4357  return LHS.slt(RHS);
4358  case ICmpInst::Predicate::ICMP_SLE:
4359  return LHS.sle(RHS);
4360  default:
4361  llvm_unreachable("Unexpected non-integer predicate.");
4362  };
4363 }
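// Editorial example: constant-folding an integer comparison with APInts
// (hypothetical values):
//
//   APInt A(8, 0x80), B(8, 0x7f);
//   ICmpInst::compare(A, B, ICmpInst::ICMP_UGT); // true:  128 >u 127
//   ICmpInst::compare(A, B, ICmpInst::ICMP_SGT); // false: -128 <s 127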
4364 
4365 bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
4366  FCmpInst::Predicate Pred) {
4367  APFloat::cmpResult R = LHS.compare(RHS);
4368  switch (Pred) {
4369  default:
4370  llvm_unreachable("Invalid FCmp Predicate");
4371  case FCmpInst::FCMP_FALSE:
4372  return false;
4373  case FCmpInst::FCMP_TRUE:
4374  return true;
4375  case FCmpInst::FCMP_UNO:
4376  return R == APFloat::cmpUnordered;
4377  case FCmpInst::FCMP_ORD:
4378  return R != APFloat::cmpUnordered;
4379  case FCmpInst::FCMP_UEQ:
4380  return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
4381  case FCmpInst::FCMP_OEQ:
4382  return R == APFloat::cmpEqual;
4383  case FCmpInst::FCMP_UNE:
4384  return R != APFloat::cmpEqual;
4385  case FCmpInst::FCMP_ONE:
4386  return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
4387  case FCmpInst::FCMP_ULT:
4388  return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
4389  case FCmpInst::FCMP_OLT:
4390  return R == APFloat::cmpLessThan;
4391  case FCmpInst::FCMP_UGT:
4392  return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
4393  case FCmpInst::FCMP_OGT:
4394  return R == APFloat::cmpGreaterThan;
4395  case FCmpInst::FCMP_ULE:
4396  return R != APFloat::cmpGreaterThan;
4397  case FCmpInst::FCMP_OLE:
4398  return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
4399  case FCmpInst::FCMP_UGE:
4400  return R != APFloat::cmpLessThan;
4401  case FCmpInst::FCMP_OGE:
4402  return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
4403  }
4404 }
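// Editorial example: with a NaN operand APFloat::compare returns
// cmpUnordered, so only the unordered predicates hold:
//
//   APFloat NaN = APFloat::getNaN(APFloat::IEEEdouble());
//   APFloat One(1.0);
//   FCmpInst::compare(NaN, One, FCmpInst::FCMP_OLT); // false (ordered)
//   FCmpInst::compare(NaN, One, FCmpInst::FCMP_ULT); // true  (unordered)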
4405 
4406 CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {
4407  assert(CmpInst::isRelational(pred) &&
4408  "Call only with non-equality predicates!");
4409 
4410  if (isSigned(pred))
4411  return getUnsignedPredicate(pred);
4412  if (isUnsigned(pred))
4413  return getSignedPredicate(pred);
4414 
4415  llvm_unreachable("Unknown predicate!");
4416 }
4417 
4418 bool CmpInst::isOrdered(Predicate predicate) {
4419  switch (predicate) {
4420  default: return false;
4421  case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
4422  case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
4423  case FCmpInst::FCMP_ORD: return true;
4424  }
4425 }
4426 
4427 bool CmpInst::isUnordered(Predicate predicate) {
4428  switch (predicate) {
4429  default: return false;
4430  case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
4431  case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
4432  case FCmpInst::FCMP_UNO: return true;
4433  }
4434 }
4435 
4436 bool CmpInst::isTrueWhenEqual(Predicate predicate) {
4437  switch(predicate) {
4438  default: return false;
4439  case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
4440  case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
4441  }
4442 }
4443 
4444 bool CmpInst::isFalseWhenEqual(Predicate predicate) {
4445  switch(predicate) {
4446  case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
4447  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
4448  default: return false;
4449  }
4450 }
4451 
4452 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
4453  // If the predicates match, then we know the first condition implies the
4454  // second is true.
4455  if (Pred1 == Pred2)
4456  return true;
4457 
4458  switch (Pred1) {
4459  default:
4460  break;
4461  case ICMP_EQ:
4462  // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
4463  return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
4464  Pred2 == ICMP_SLE;
4465  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
4466  return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
4467  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
4468  return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
4469  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
4470  return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
4471  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
4472  return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
4473  }
4474  return false;
4475 }
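// Editorial examples: for one operand pair compared twice,
//   isImpliedTrueByMatchingCmp(ICMP_SLT, ICMP_SLE) -> true  (A <s B implies A <=s B)
//   isImpliedFalseByMatchingCmp(ICMP_EQ, ICMP_ULT) -> true  (A == B makes A <u B false)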
4476 
4477 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
4478  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
4479 }
4480 
4481 //===----------------------------------------------------------------------===//
4482 // SwitchInst Implementation
4483 //===----------------------------------------------------------------------===//
4484 
4485 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4486  assert(Value && Default && NumReserved);
4487  ReservedSpace = NumReserved;
4488  setNumHungOffUseOperands(2);
4489  allocHungoffUses(ReservedSpace);
4490 
4491  Op<0>() = Value;
4492  Op<1>() = Default;
4493 }
4494 
4495 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
4496 /// switch on and a default destination. The number of additional cases can
4497 /// be specified here to make memory allocation more efficient. This
4498 /// constructor can also autoinsert before another instruction.
4499 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4500  Instruction *InsertBefore)
4501  : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4502  nullptr, 0, InsertBefore) {
4503  init(Value, Default, 2+NumCases*2);
4504 }
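// Editorial note: the operands are hung-off uses laid out in pairs; slot 0
// holds the condition, slot 1 the default destination, and every case then
// occupies two further slots (case value, successor block), which is where
// the "2+NumCases*2" reservation above comes from.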
4505 
4506 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
4507 /// switch on and a default destination. The number of additional cases can
4508 /// be specified here to make memory allocation more efficient. This
4509 /// constructor also autoinserts at the end of the specified BasicBlock.
4510 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4511  BasicBlock *InsertAtEnd)
4512  : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4513  nullptr, 0, InsertAtEnd) {
4514  init(Value, Default, 2+NumCases*2);
4515 }