Instructions.cpp
1 //===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements all of the non-inline methods for the LLVM instruction
10 // classes.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/IR/Instructions.h"
15 #include "LLVMContextImpl.h"
16 #include "llvm/ADT/None.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/IR/Attributes.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/CallSite.h"
22 #include "llvm/IR/Constant.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/InstrTypes.h"
28 #include "llvm/IR/Instruction.h"
29 #include "llvm/IR/Intrinsics.h"
30 #include "llvm/IR/LLVMContext.h"
31 #include "llvm/IR/MDBuilder.h"
32 #include "llvm/IR/Metadata.h"
33 #include "llvm/IR/Module.h"
34 #include "llvm/IR/Operator.h"
35 #include "llvm/IR/Type.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/AtomicOrdering.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/TypeSize.h"
42 #include <algorithm>
43 #include <cassert>
44 #include <cstdint>
45 #include <vector>
46 
47 using namespace llvm;
48 
49 //===----------------------------------------------------------------------===//
50 // AllocaInst Class
51 //===----------------------------------------------------------------------===//
52 
53 Optional<uint64_t>
54 AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
55   uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
56   if (isArrayAllocation()) {
57     auto C = dyn_cast<ConstantInt>(getArraySize());
58     if (!C)
59       return None;
60     Size *= C->getZExtValue();
61   }
62   return Size;
63 }
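// Editor's note: an illustrative caller-side sketch, not part of the original
// file. It assumes `AI` is an AllocaInst* and `DL` is the module's DataLayout;
// getAllocationSizeInBits() returns None when the array size is not a
// compile-time constant.
//
//   if (Optional<uint64_t> Bits = AI->getAllocationSizeInBits(DL))
//     errs() << "alloca occupies " << (*Bits / 8) << " bytes\n";
//   else
//     errs() << "alloca size depends on a runtime value\n";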
64 
65 //===----------------------------------------------------------------------===//
66 // CallSite Class
67 //===----------------------------------------------------------------------===//
68 
69 User::op_iterator CallSite::getCallee() const {
70  return cast<CallBase>(getInstruction())->op_end() - 1;
71 }
72 
73 //===----------------------------------------------------------------------===//
74 // SelectInst Class
75 //===----------------------------------------------------------------------===//
76 
77 /// areInvalidOperands - Return a string if the specified operands are invalid
78 /// for a select operation, otherwise return null.
79 const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
80  if (Op1->getType() != Op2->getType())
81  return "both values to select must have same type";
82 
83  if (Op1->getType()->isTokenTy())
84  return "select values cannot have token type";
85 
86  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
87  // Vector select.
88  if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
89  return "vector select condition element type must be i1";
90  VectorType *ET = dyn_cast<VectorType>(Op1->getType());
91  if (!ET)
92  return "selected values for vector select must be vectors";
93  if (ET->getNumElements() != VT->getNumElements())
94  return "vector select requires selected vectors to have "
95  "the same vector length as select condition";
96  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
97  return "select condition must be i1 or <n x i1>";
98  }
99  return nullptr;
100 }
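// Editor's note: a hedged usage sketch, not part of the original file, showing
// how areInvalidOperands() is typically consulted before building a select.
// `Cond`, `TVal`, `FVal`, and `InsertPt` are assumed to exist in the caller.
//
//   if (const char *Reason = SelectInst::areInvalidOperands(Cond, TVal, FVal))
//     report_fatal_error(Twine("cannot build select: ") + Reason);
//   SelectInst *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);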
101 
102 //===----------------------------------------------------------------------===//
103 // PHINode Class
104 //===----------------------------------------------------------------------===//
105 
106 PHINode::PHINode(const PHINode &PN)
107     : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
108       ReservedSpace(PN.getNumOperands()) {
109   allocHungoffUses(PN.getNumOperands());
110   std::copy(PN.op_begin(), PN.op_end(), op_begin());
111   std::copy(PN.block_begin(), PN.block_end(), block_begin());
112   SubclassOptionalData = PN.SubclassOptionalData;
113 }
114 
115 // removeIncomingValue - Remove an incoming value. This is useful if a
116 // predecessor basic block is deleted.
117 Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
118  Value *Removed = getIncomingValue(Idx);
119 
120  // Move everything after this operand down.
121  //
122  // FIXME: we could just swap with the end of the list, then erase. However,
123  // clients might not expect this to happen. The code as it is thrashes the
124  // use/def lists, which is kinda lame.
125   std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
126   std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);
127 
128   // Nuke the last value.
129   Op<-1>().set(nullptr);
130   setNumHungOffUseOperands(getNumOperands() - 1);
131 
132   // If the PHI node is dead, because it has zero entries, nuke it now.
133   if (getNumOperands() == 0 && DeletePHIIfEmpty) {
134     // If anyone is using this PHI, make them use a dummy value instead...
135     replaceAllUsesWith(UndefValue::get(getType()));
136     eraseFromParent();
137   }
138  return Removed;
139 }
140 
141 /// growOperands - grow operands - This grows the operand list in response
142 /// to a push_back style of operation. This grows the number of ops by 1.5
143 /// times.
144 ///
145 void PHINode::growOperands() {
146  unsigned e = getNumOperands();
147  unsigned NumOps = e + e / 2;
148  if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
149 
150  ReservedSpace = NumOps;
151  growHungoffUses(ReservedSpace, /* IsPhi */ true);
152 }
153 
154 /// hasConstantValue - If the specified PHI node always merges together the same
155 /// value, return the value, otherwise return null.
156 Value *PHINode::hasConstantValue() const {
157   // Exploit the fact that phi nodes always have at least one entry.
158  Value *ConstantValue = getIncomingValue(0);
159  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
160  if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
161  if (ConstantValue != this)
162  return nullptr; // Incoming values not all the same.
163  // The case where the first value is this PHI.
164  ConstantValue = getIncomingValue(i);
165  }
166  if (ConstantValue == this)
167  return UndefValue::get(getType());
168  return ConstantValue;
169 }
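// Editor's note: illustrative sketch, not part of the original file, of the
// common folding pattern built on hasConstantValue(); `PN` is an assumed
// PHINode*. The returned value is incoming on every edge, so it dominates the
// PHI and can replace it directly.
//
//   if (Value *V = PN->hasConstantValue()) {
//     PN->replaceAllUsesWith(V);
//     PN->eraseFromParent();
//   }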
170 
171 /// hasConstantOrUndefValue - Whether the specified PHI node always merges
172 /// together the same value, assuming that undefs result in the same value as
173 /// non-undefs.
174 /// Unlike \ref hasConstantValue, this does not return a value because the
175 /// unique non-undef incoming value need not dominate the PHI node.
176 bool PHINode::hasConstantOrUndefValue() const {
177   Value *ConstantValue = nullptr;
178  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
179  Value *Incoming = getIncomingValue(i);
180  if (Incoming != this && !isa<UndefValue>(Incoming)) {
181  if (ConstantValue && ConstantValue != Incoming)
182  return false;
183  ConstantValue = Incoming;
184  }
185  }
186  return true;
187 }
188 
189 //===----------------------------------------------------------------------===//
190 // LandingPadInst Implementation
191 //===----------------------------------------------------------------------===//
192 
193 LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
194  const Twine &NameStr, Instruction *InsertBefore)
195  : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
196  init(NumReservedValues, NameStr);
197 }
198 
199 LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
200  const Twine &NameStr, BasicBlock *InsertAtEnd)
201  : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
202  init(NumReservedValues, NameStr);
203 }
204 
205 LandingPadInst::LandingPadInst(const LandingPadInst &LP)
206  : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
207  LP.getNumOperands()),
208  ReservedSpace(LP.getNumOperands()) {
209   allocHungoffUses(LP.getNumOperands());
210   Use *OL = getOperandList();
211  const Use *InOL = LP.getOperandList();
212  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
213  OL[I] = InOL[I];
214 
215  setCleanup(LP.isCleanup());
216 }
217 
218 LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
219  const Twine &NameStr,
220  Instruction *InsertBefore) {
221  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
222 }
223 
224 LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
225  const Twine &NameStr,
226  BasicBlock *InsertAtEnd) {
227  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
228 }
229 
230 void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
231  ReservedSpace = NumReservedValues;
232   setNumHungOffUseOperands(0);
233   allocHungoffUses(ReservedSpace);
234  setName(NameStr);
235  setCleanup(false);
236 }
237 
238 /// growOperands - grow operands - This grows the operand list in response to a
239 /// push_back style of operation. This grows the number of ops by 2 times.
240 void LandingPadInst::growOperands(unsigned Size) {
241  unsigned e = getNumOperands();
242  if (ReservedSpace >= e + Size) return;
243  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
244  growHungoffUses(ReservedSpace);
245 }
246 
247 void LandingPadInst::addClause(Constant *Val) {
248   unsigned OpNo = getNumOperands();
249   growOperands(1);
250   assert(OpNo < ReservedSpace && "Growing didn't work!");
251   setNumHungOffUseOperands(getNumOperands() + 1);
252   getOperandList()[OpNo] = Val;
253 }
254 
255 //===----------------------------------------------------------------------===//
256 // CallBase Implementation
257 //===----------------------------------------------------------------------===//
258 
260 
261 unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
262   assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
263  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
264 }
265 
266 bool CallBase::isIndirectCall() const {
267   const Value *V = getCalledValue();
268  if (isa<Function>(V) || isa<Constant>(V))
269  return false;
270  if (const CallInst *CI = dyn_cast<CallInst>(this))
271  if (CI->isInlineAsm())
272  return false;
273  return true;
274 }
275 
276 /// Tests if this call site must be tail call optimized. Only a CallInst can
277 /// be tail call optimized.
278 bool CallBase::isMustTailCall() const {
279   if (auto *CI = dyn_cast<CallInst>(this))
280  return CI->isMustTailCall();
281  return false;
282 }
283 
284 /// Tests if this call site is marked as a tail call.
285 bool CallBase::isTailCall() const {
286  if (auto *CI = dyn_cast<CallInst>(this))
287  return CI->isTailCall();
288  return false;
289 }
290 
291 Intrinsic::ID CallBase::getIntrinsicID() const {
292   if (auto *F = getCalledFunction())
293     return F->getIntrinsicID();
294   return Intrinsic::not_intrinsic;
295 }
296 
297 bool CallBase::isReturnNonNull() const {
298   if (hasRetAttr(Attribute::NonNull))
299  return true;
300 
301  if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
302  !NullPointerIsDefined(getCaller(),
303  getType()->getPointerAddressSpace()))
304  return true;
305 
306  return false;
307 }
308 
309 Value *CallBase::getReturnedArgOperand() const {
310   unsigned Index;
311 
312  if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
313  return getArgOperand(Index - AttributeList::FirstArgIndex);
314  if (const Function *F = getCalledFunction())
315  if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
316  Index)
317  return getArgOperand(Index - AttributeList::FirstArgIndex);
318 
319  return nullptr;
320 }
321 
322 bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const {
323   if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
324  return true;
325 
326  // Look at the callee, if available.
327  if (const Function *F = getCalledFunction())
328  return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
329  return false;
330 }
331 
332 /// Determine whether the argument or parameter has the given attribute.
333 bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
334  assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");
335 
336  if (Attrs.hasParamAttribute(ArgNo, Kind))
337  return true;
338  if (const Function *F = getCalledFunction())
339  return F->getAttributes().hasParamAttribute(ArgNo, Kind);
340  return false;
341 }
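// Editor's note: minimal usage sketch, not part of the original file; `CB` is
// an assumed CallBase*. paramHasAttr() merges call-site and callee attributes.
//
//   if (CB->getNumArgOperands() > 0 &&
//       CB->paramHasAttr(0, Attribute::NonNull))
//     ; // the first argument is known non-null at this call site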
342 
343 bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
344  if (const Function *F = getCalledFunction())
345  return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
346  return false;
347 }
348 
349 bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
350  if (const Function *F = getCalledFunction())
351  return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
352  return false;
353 }
354 
355 CallBase::op_iterator
356 CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
357                                      const unsigned BeginIndex) {
358  auto It = op_begin() + BeginIndex;
359  for (auto &B : Bundles)
360  It = std::copy(B.input_begin(), B.input_end(), It);
361 
362  auto *ContextImpl = getContext().pImpl;
363  auto BI = Bundles.begin();
364  unsigned CurrentIndex = BeginIndex;
365 
366  for (auto &BOI : bundle_op_infos()) {
367  assert(BI != Bundles.end() && "Incorrect allocation?");
368 
369  BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
370  BOI.Begin = CurrentIndex;
371  BOI.End = CurrentIndex + BI->input_size();
372  CurrentIndex = BOI.End;
373  BI++;
374  }
375 
376  assert(BI == Bundles.end() && "Incorrect allocation?");
377 
378  return It;
379 }
380 
381 //===----------------------------------------------------------------------===//
382 // CallInst Implementation
383 //===----------------------------------------------------------------------===//
384 
385 void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
386  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
387  this->FTy = FTy;
388  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
389  "NumOperands not set up?");
390  setCalledOperand(Func);
391 
392 #ifndef NDEBUG
393  assert((Args.size() == FTy->getNumParams() ||
394  (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
395  "Calling a function with bad signature!");
396 
397  for (unsigned i = 0; i != Args.size(); ++i)
398  assert((i >= FTy->getNumParams() ||
399  FTy->getParamType(i) == Args[i]->getType()) &&
400  "Calling a function with a bad signature!");
401 #endif
402 
403  llvm::copy(Args, op_begin());
404 
405  auto It = populateBundleOperandInfos(Bundles, Args.size());
406  (void)It;
407  assert(It + 1 == op_end() && "Should add up!");
408 
409  setName(NameStr);
410 }
411 
412 void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
413  this->FTy = FTy;
414  assert(getNumOperands() == 1 && "NumOperands not set up?");
415  setCalledOperand(Func);
416 
417  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
418 
419  setName(NameStr);
420 }
421 
422 CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
423  Instruction *InsertBefore)
424     : CallBase(Ty->getReturnType(), Instruction::Call,
425                OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
426  init(Ty, Func, Name);
427 }
428 
429 CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
430  BasicBlock *InsertAtEnd)
431     : CallBase(Ty->getReturnType(), Instruction::Call,
432                OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
433  init(Ty, Func, Name);
434 }
435 
436 CallInst::CallInst(const CallInst &CI)
437     : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
438                OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
439                CI.getNumOperands()) {
440   setTailCallKind(CI.getTailCallKind());
441   setCallingConv(CI.getCallingConv());
442 
443   std::copy(CI.op_begin(), CI.op_end(), op_begin());
444   std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
445             bundle_op_info_begin());
446   SubclassOptionalData = CI.SubclassOptionalData;
447 }
448 
449 CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
450                            Instruction *InsertPt) {
451  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
452 
453  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledValue(),
454  Args, OpB, CI->getName(), InsertPt);
455  NewCI->setTailCallKind(CI->getTailCallKind());
456  NewCI->setCallingConv(CI->getCallingConv());
457  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
458  NewCI->setAttributes(CI->getAttributes());
459  NewCI->setDebugLoc(CI->getDebugLoc());
460  return NewCI;
461 }
462 
463 // Update profile weight for call instruction by scaling it using the ratio
464 // of S/T. The meaning of "branch_weights" meta data for call instruction is
465 // transfered to represent call count.
466 void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
467  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
468  if (ProfileData == nullptr)
469  return;
470 
471  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
472  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
473  !ProfDataName->getString().equals("VP")))
474  return;
475 
476  if (T == 0) {
477  LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
478  "div by 0. Ignoring. Likely the function "
479  << getParent()->getParent()->getName()
480  << " has 0 entry count, and contains call instructions "
481  "with non-zero prof info.");
482  return;
483  }
484 
485  MDBuilder MDB(getContext());
486   SmallVector<Metadata *, 3> Vals;
487   Vals.push_back(ProfileData->getOperand(0));
488  APInt APS(128, S), APT(128, T);
489  if (ProfDataName->getString().equals("branch_weights") &&
490  ProfileData->getNumOperands() > 0) {
491  // Using APInt::div may be expensive, but most cases should fit 64 bits.
492  APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
493  ->getValue()
494  .getZExtValue());
495  Val *= APS;
496     Vals.push_back(MDB.createConstant(ConstantInt::get(
497         Type::getInt64Ty(getContext()), Val.udiv(APT).getLimitedValue())));
498  } else if (ProfDataName->getString().equals("VP"))
499  for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
500  // The first value is the key of the value profile, which will not change.
501  Vals.push_back(ProfileData->getOperand(i));
502  // Using APInt::div may be expensive, but most cases should fit 64 bits.
503  APInt Val(128,
504  mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
505  ->getValue()
506  .getZExtValue());
507  Val *= APS;
508  Vals.push_back(MDB.createConstant(
509           ConstantInt::get(Type::getInt64Ty(getContext()),
510                            Val.udiv(APT).getLimitedValue())));
511  }
512  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
513 }
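// Editor's note: hedged usage sketch, not part of the original file. If the
// enclosing function's entry count was rescaled from `OldCount` to `NewCount`
// (both assumed uint64_t), the call's profile metadata is rescaled by the same
// S/T ratio:
//
//   CI->updateProfWeight(/*S=*/NewCount, /*T=*/OldCount);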
514 
515 /// IsConstantOne - Return true only if val is constant int 1
516 static bool IsConstantOne(Value *val) {
517  assert(val && "IsConstantOne does not work with nullptr val");
518  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
519  return CVal && CVal->isOne();
520 }
521 
522 static Instruction *createMalloc(Instruction *InsertBefore,
523  BasicBlock *InsertAtEnd, Type *IntPtrTy,
524  Type *AllocTy, Value *AllocSize,
525  Value *ArraySize,
526                                  ArrayRef<OperandBundleDef> OpB,
527                                  Function *MallocF, const Twine &Name) {
528  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
529  "createMalloc needs either InsertBefore or InsertAtEnd");
530 
531  // malloc(type) becomes:
532  // bitcast (i8* malloc(typeSize)) to type*
533  // malloc(type, arraySize) becomes:
534  // bitcast (i8* malloc(typeSize*arraySize)) to type*
535  if (!ArraySize)
536  ArraySize = ConstantInt::get(IntPtrTy, 1);
537  else if (ArraySize->getType() != IntPtrTy) {
538  if (InsertBefore)
539  ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
540  "", InsertBefore);
541  else
542  ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
543  "", InsertAtEnd);
544  }
545 
546  if (!IsConstantOne(ArraySize)) {
547  if (IsConstantOne(AllocSize)) {
548  AllocSize = ArraySize; // Operand * 1 = Operand
549  } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
550  Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
551  false /*ZExt*/);
552  // Malloc arg is constant product of type size and array size
553  AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
554  } else {
555  // Multiply type size by the array size...
556  if (InsertBefore)
557  AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
558  "mallocsize", InsertBefore);
559  else
560  AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
561  "mallocsize", InsertAtEnd);
562  }
563  }
564 
565  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
566  // Create the call to Malloc.
567  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
568  Module *M = BB->getParent()->getParent();
569  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
570  FunctionCallee MallocFunc = MallocF;
571  if (!MallocFunc)
572  // prototype malloc as "void *malloc(size_t)"
573  MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
574  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
575  CallInst *MCall = nullptr;
576  Instruction *Result = nullptr;
577  if (InsertBefore) {
578  MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
579  InsertBefore);
580  Result = MCall;
581  if (Result->getType() != AllocPtrType)
582  // Create a cast instruction to convert to the right type...
583  Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
584  } else {
585  MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
586  Result = MCall;
587  if (Result->getType() != AllocPtrType) {
588  InsertAtEnd->getInstList().push_back(MCall);
589  // Create a cast instruction to convert to the right type...
590  Result = new BitCastInst(MCall, AllocPtrType, Name);
591  }
592  }
593  MCall->setTailCall();
594  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
595  MCall->setCallingConv(F->getCallingConv());
596  if (!F->returnDoesNotAlias())
597  F->setReturnDoesNotAlias();
598  }
599  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
600 
601  return Result;
602 }
603 
604 /// CreateMalloc - Generate the IR for a call to malloc:
605 /// 1. Compute the malloc call's argument as the specified type's size,
606 /// possibly multiplied by the array size if the array size is not
607 /// constant 1.
608 /// 2. Call malloc with that argument.
609 /// 3. Bitcast the result of the malloc call to the specified type.
610 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
611                                     Type *IntPtrTy, Type *AllocTy,
612  Value *AllocSize, Value *ArraySize,
613  Function *MallocF,
614  const Twine &Name) {
615  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
616  ArraySize, None, MallocF, Name);
617 }
618 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
619                                     Type *IntPtrTy, Type *AllocTy,
620                                     Value *AllocSize, Value *ArraySize,
621                                     ArrayRef<OperandBundleDef> OpB,
622                                     Function *MallocF,
623  const Twine &Name) {
624  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
625  ArraySize, OpB, MallocF, Name);
626 }
627 
628 /// CreateMalloc - Generate the IR for a call to malloc:
629 /// 1. Compute the malloc call's argument as the specified type's size,
630 /// possibly multiplied by the array size if the array size is not
631 /// constant 1.
632 /// 2. Call malloc with that argument.
633 /// 3. Bitcast the result of the malloc call to the specified type.
634 /// Note: This function does not add the bitcast to the basic block, that is the
635 /// responsibility of the caller.
636 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
637                                     Type *IntPtrTy, Type *AllocTy,
638  Value *AllocSize, Value *ArraySize,
639  Function *MallocF, const Twine &Name) {
640  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
641  ArraySize, None, MallocF, Name);
642 }
643 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
644                                     Type *IntPtrTy, Type *AllocTy,
645                                     Value *AllocSize, Value *ArraySize,
646                                     ArrayRef<OperandBundleDef> OpB,
647                                     Function *MallocF, const Twine &Name) {
648  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
649  ArraySize, OpB, MallocF, Name);
650 }
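// Editor's note: an illustrative sketch, not part of the original file, of
// emitting `malloc` for an array of i32 via CreateMalloc. `InsertPt` and
// `NumElts` (an IntPtrTy-sized Value*) are assumed to exist in the caller.
//
//   LLVMContext &Ctx = InsertPt->getContext();
//   const DataLayout &DL = InsertPt->getModule()->getDataLayout();
//   Type *IntPtrTy = DL.getIntPtrType(Ctx);
//   Type *AllocTy = Type::getInt32Ty(Ctx);
//   Value *AllocSize = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(AllocTy));
//   Instruction *Arr = CallInst::CreateMalloc(InsertPt, IntPtrTy, AllocTy,
//                                             AllocSize, /*ArraySize=*/NumElts,
//                                             /*MallocF=*/nullptr, "arr");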
651 
652 static Instruction *createFree(Value *Source,
653                                ArrayRef<OperandBundleDef> Bundles,
654                                Instruction *InsertBefore,
655  BasicBlock *InsertAtEnd) {
656  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
657  "createFree needs either InsertBefore or InsertAtEnd");
658  assert(Source->getType()->isPointerTy() &&
659  "Can not free something of nonpointer type!");
660 
661  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
662  Module *M = BB->getParent()->getParent();
663 
664  Type *VoidTy = Type::getVoidTy(M->getContext());
665  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
666  // prototype free as "void free(void*)"
667  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
668  CallInst *Result = nullptr;
669  Value *PtrCast = Source;
670  if (InsertBefore) {
671  if (Source->getType() != IntPtrTy)
672  PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
673  Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
674  } else {
675  if (Source->getType() != IntPtrTy)
676  PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
677  Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
678  }
679  Result->setTailCall();
680  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
681  Result->setCallingConv(F->getCallingConv());
682 
683  return Result;
684 }
685 
686 /// CreateFree - Generate the IR for a call to the builtin free function.
687 Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
688   return createFree(Source, None, InsertBefore, nullptr);
689 }
690 Instruction *CallInst::CreateFree(Value *Source,
691                                   ArrayRef<OperandBundleDef> Bundles,
692                                   Instruction *InsertBefore) {
693  return createFree(Source, Bundles, InsertBefore, nullptr);
694 }
695 
696 /// CreateFree - Generate the IR for a call to the builtin free function.
697 /// Note: This function does not add the call to the basic block, that is the
698 /// responsibility of the caller.
699 Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
700   Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
701  assert(FreeCall && "CreateFree did not create a CallInst");
702  return FreeCall;
703 }
704 Instruction *CallInst::CreateFree(Value *Source,
705                                   ArrayRef<OperandBundleDef> Bundles,
706                                   BasicBlock *InsertAtEnd) {
707  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
708  assert(FreeCall && "CreateFree did not create a CallInst");
709  return FreeCall;
710 }
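// Editor's note: companion sketch to the CreateMalloc example above, not part
// of the original file; `Arr` and `InsertPt` are assumed from that example.
//
//   Instruction *FreeCall = CallInst::CreateFree(Arr, InsertPt);
//   (void)FreeCall; // the call is already inserted before InsertPt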
711 
712 //===----------------------------------------------------------------------===//
713 // InvokeInst Implementation
714 //===----------------------------------------------------------------------===//
715 
716 void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
717  BasicBlock *IfException, ArrayRef<Value *> Args,
718                       ArrayRef<OperandBundleDef> Bundles,
719                       const Twine &NameStr) {
720  this->FTy = FTy;
721 
722  assert((int)getNumOperands() ==
723  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
724  "NumOperands not set up?");
725  setNormalDest(IfNormal);
726  setUnwindDest(IfException);
727  setCalledOperand(Fn);
728 
729 #ifndef NDEBUG
730  assert(((Args.size() == FTy->getNumParams()) ||
731  (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
732  "Invoking a function with bad signature");
733 
734  for (unsigned i = 0, e = Args.size(); i != e; i++)
735  assert((i >= FTy->getNumParams() ||
736  FTy->getParamType(i) == Args[i]->getType()) &&
737  "Invoking a function with a bad signature!");
738 #endif
739 
740  llvm::copy(Args, op_begin());
741 
742  auto It = populateBundleOperandInfos(Bundles, Args.size());
743  (void)It;
744  assert(It + 3 == op_end() && "Should add up!");
745 
746  setName(NameStr);
747 }
748 
749 InvokeInst::InvokeInst(const InvokeInst &II)
750     : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
751                OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
752                II.getNumOperands()) {
753   setCallingConv(II.getCallingConv());
754   std::copy(II.op_begin(), II.op_end(), op_begin());
755   std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
756             bundle_op_info_begin());
757   SubclassOptionalData = II.SubclassOptionalData;
758 }
759 
760 InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
761                                Instruction *InsertPt) {
762  std::vector<Value *> Args(II->arg_begin(), II->arg_end());
763 
764  auto *NewII = InvokeInst::Create(II->getFunctionType(), II->getCalledValue(),
765  II->getNormalDest(), II->getUnwindDest(),
766  Args, OpB, II->getName(), InsertPt);
767  NewII->setCallingConv(II->getCallingConv());
768  NewII->SubclassOptionalData = II->SubclassOptionalData;
769  NewII->setAttributes(II->getAttributes());
770  NewII->setDebugLoc(II->getDebugLoc());
771  return NewII;
772 }
773 
774 
775 LandingPadInst *InvokeInst::getLandingPadInst() const {
776   return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
777 }
778 
779 //===----------------------------------------------------------------------===//
780 // CallBrInst Implementation
781 //===----------------------------------------------------------------------===//
782 
783 void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
784  ArrayRef<BasicBlock *> IndirectDests,
785  ArrayRef<Value *> Args,
786                       ArrayRef<OperandBundleDef> Bundles,
787                       const Twine &NameStr) {
788  this->FTy = FTy;
789 
790  assert((int)getNumOperands() ==
791  ComputeNumOperands(Args.size(), IndirectDests.size(),
792  CountBundleInputs(Bundles)) &&
793  "NumOperands not set up?");
794  NumIndirectDests = IndirectDests.size();
795  setDefaultDest(Fallthrough);
796  for (unsigned i = 0; i != NumIndirectDests; ++i)
797  setIndirectDest(i, IndirectDests[i]);
798  setCalledOperand(Fn);
799 
800 #ifndef NDEBUG
801  assert(((Args.size() == FTy->getNumParams()) ||
802  (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
803  "Calling a function with bad signature");
804 
805  for (unsigned i = 0, e = Args.size(); i != e; i++)
806  assert((i >= FTy->getNumParams() ||
807  FTy->getParamType(i) == Args[i]->getType()) &&
808  "Calling a function with a bad signature!");
809 #endif
810 
811  std::copy(Args.begin(), Args.end(), op_begin());
812 
813  auto It = populateBundleOperandInfos(Bundles, Args.size());
814  (void)It;
815  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
816 
817  setName(NameStr);
818 }
819 
820 void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
821  assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
822  if (BasicBlock *OldBB = getIndirectDest(i)) {
823  BlockAddress *Old = BlockAddress::get(OldBB);
824     BlockAddress *New = BlockAddress::get(B);
825     for (unsigned ArgNo = 0, e = getNumArgOperands(); ArgNo != e; ++ArgNo)
826  if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
827  setArgOperand(ArgNo, New);
828  }
829 }
830 
831 CallBrInst::CallBrInst(const CallBrInst &CBI)
832     : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
833                OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
834                CBI.getNumOperands()) {
835   setCallingConv(CBI.getCallingConv());
836   std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
837   std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
838             bundle_op_info_begin());
839   SubclassOptionalData = CBI.SubclassOptionalData;
840   NumIndirectDests = CBI.NumIndirectDests;
841 }
842 
843 CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
844                                Instruction *InsertPt) {
845  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
846 
847  auto *NewCBI = CallBrInst::Create(CBI->getFunctionType(),
848  CBI->getCalledValue(),
849  CBI->getDefaultDest(),
850  CBI->getIndirectDests(),
851  Args, OpB, CBI->getName(), InsertPt);
852  NewCBI->setCallingConv(CBI->getCallingConv());
853  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
854  NewCBI->setAttributes(CBI->getAttributes());
855  NewCBI->setDebugLoc(CBI->getDebugLoc());
856  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
857  return NewCBI;
858 }
859 
860 //===----------------------------------------------------------------------===//
861 // ReturnInst Implementation
862 //===----------------------------------------------------------------------===//
863 
864 ReturnInst::ReturnInst(const ReturnInst &RI)
865     : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
866                   OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
867                   RI.getNumOperands()) {
868   if (RI.getNumOperands())
869     Op<0>() = RI.Op<0>();
870   SubclassOptionalData = RI.SubclassOptionalData;
871 }
872 
873 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
874     : Instruction(Type::getVoidTy(C), Instruction::Ret,
875                   OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
876  InsertBefore) {
877  if (retVal)
878  Op<0>() = retVal;
879 }
880 
881 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
882     : Instruction(Type::getVoidTy(C), Instruction::Ret,
883                   OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
884  InsertAtEnd) {
885  if (retVal)
886  Op<0>() = retVal;
887 }
888 
889 ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
890     : Instruction(Type::getVoidTy(Context), Instruction::Ret,
891                   OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
892 
893 //===----------------------------------------------------------------------===//
894 // ResumeInst Implementation
895 //===----------------------------------------------------------------------===//
896 
897 ResumeInst::ResumeInst(const ResumeInst &RI)
898  : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
899                   OperandTraits<ResumeInst>::op_begin(this), 1) {
900   Op<0>() = RI.Op<0>();
901 }
902 
903 ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
904  : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
905  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
906  Op<0>() = Exn;
907 }
908 
909 ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
910  : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
911  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
912  Op<0>() = Exn;
913 }
914 
915 //===----------------------------------------------------------------------===//
916 // CleanupReturnInst Implementation
917 //===----------------------------------------------------------------------===//
918 
919 CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
920  : Instruction(CRI.getType(), Instruction::CleanupRet,
921                   OperandTraits<CleanupReturnInst>::op_end(this) -
922                       CRI.getNumOperands(),
923  CRI.getNumOperands()) {
924  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
925  Op<0>() = CRI.Op<0>();
926  if (CRI.hasUnwindDest())
927  Op<1>() = CRI.Op<1>();
928 }
929 
930 void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
931  if (UnwindBB)
932  setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
933 
934  Op<0>() = CleanupPad;
935  if (UnwindBB)
936  Op<1>() = UnwindBB;
937 }
938 
939 CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
940  unsigned Values, Instruction *InsertBefore)
941  : Instruction(Type::getVoidTy(CleanupPad->getContext()),
942  Instruction::CleanupRet,
943                   OperandTraits<CleanupReturnInst>::op_end(this) - Values,
944                   Values, InsertBefore) {
945  init(CleanupPad, UnwindBB);
946 }
947 
948 CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
949  unsigned Values, BasicBlock *InsertAtEnd)
950  : Instruction(Type::getVoidTy(CleanupPad->getContext()),
951  Instruction::CleanupRet,
952                   OperandTraits<CleanupReturnInst>::op_end(this) - Values,
953                   Values, InsertAtEnd) {
954  init(CleanupPad, UnwindBB);
955 }
956 
957 //===----------------------------------------------------------------------===//
958 // CatchReturnInst Implementation
959 //===----------------------------------------------------------------------===//
960 void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
961  Op<0>() = CatchPad;
962  Op<1>() = BB;
963 }
964 
965 CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
966  : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
967                   OperandTraits<CatchReturnInst>::op_begin(this), 2) {
968   Op<0>() = CRI.Op<0>();
969  Op<1>() = CRI.Op<1>();
970 }
971 
972 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
973  Instruction *InsertBefore)
974  : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
975                   OperandTraits<CatchReturnInst>::op_begin(this), 2,
976                   InsertBefore) {
977  init(CatchPad, BB);
978 }
979 
980 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
981  BasicBlock *InsertAtEnd)
982  : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
983                   OperandTraits<CatchReturnInst>::op_begin(this), 2,
984                   InsertAtEnd) {
985  init(CatchPad, BB);
986 }
987 
988 //===----------------------------------------------------------------------===//
989 // CatchSwitchInst Implementation
990 //===----------------------------------------------------------------------===//
991 
992 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
993  unsigned NumReservedValues,
994  const Twine &NameStr,
995  Instruction *InsertBefore)
996  : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
997  InsertBefore) {
998  if (UnwindDest)
999  ++NumReservedValues;
1000  init(ParentPad, UnwindDest, NumReservedValues + 1);
1001  setName(NameStr);
1002 }
1003 
1004 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1005  unsigned NumReservedValues,
1006  const Twine &NameStr, BasicBlock *InsertAtEnd)
1007  : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1008  InsertAtEnd) {
1009  if (UnwindDest)
1010  ++NumReservedValues;
1011  init(ParentPad, UnwindDest, NumReservedValues + 1);
1012  setName(NameStr);
1013 }
1014 
1015 CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1016  : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
1017  CSI.getNumOperands()) {
1018  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1019  setNumHungOffUseOperands(ReservedSpace);
1020  Use *OL = getOperandList();
1021  const Use *InOL = CSI.getOperandList();
1022  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1023  OL[I] = InOL[I];
1024 }
1025 
1026 void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1027  unsigned NumReservedValues) {
1028  assert(ParentPad && NumReservedValues);
1029 
1030  ReservedSpace = NumReservedValues;
1031  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1032  allocHungoffUses(ReservedSpace);
1033 
1034  Op<0>() = ParentPad;
1035  if (UnwindDest) {
1036  setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
1037  setUnwindDest(UnwindDest);
1038  }
1039 }
1040 
1041 /// growOperands - grow operands - This grows the operand list in response to a
1042 /// push_back style of operation. This grows the number of ops by 2 times.
1043 void CatchSwitchInst::growOperands(unsigned Size) {
1044  unsigned NumOperands = getNumOperands();
1045  assert(NumOperands >= 1);
1046  if (ReservedSpace >= NumOperands + Size)
1047  return;
1048  ReservedSpace = (NumOperands + Size / 2) * 2;
1049  growHungoffUses(ReservedSpace);
1050 }
1051 
1052 void CatchSwitchInst::addHandler(BasicBlock *Handler) {
1053   unsigned OpNo = getNumOperands();
1054  growOperands(1);
1055  assert(OpNo < ReservedSpace && "Growing didn't work!");
1056   setNumHungOffUseOperands(getNumOperands() + 1);
1057   getOperandList()[OpNo] = Handler;
1058 }
1059 
1060 void CatchSwitchInst::removeHandler(handler_iterator HI) {
1061   // Move all subsequent handlers up one.
1062  Use *EndDst = op_end() - 1;
1063  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1064  *CurDst = *(CurDst + 1);
1065  // Null out the last handler use.
1066  *EndDst = nullptr;
1067 
1068   setNumHungOffUseOperands(getNumOperands() - 1);
1069 }
1070 
1071 //===----------------------------------------------------------------------===//
1072 // FuncletPadInst Implementation
1073 //===----------------------------------------------------------------------===//
1074 void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1075  const Twine &NameStr) {
1076  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1077  llvm::copy(Args, op_begin());
1078  setParentPad(ParentPad);
1079  setName(NameStr);
1080 }
1081 
1082 FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
1083  : Instruction(FPI.getType(), FPI.getOpcode(),
1084                   OperandTraits<FuncletPadInst>::op_end(this) -
1085                       FPI.getNumOperands(),
1086  FPI.getNumOperands()) {
1087  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1088  setParentPad(FPI.getParentPad());
1089 }
1090 
1091 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1092  ArrayRef<Value *> Args, unsigned Values,
1093  const Twine &NameStr, Instruction *InsertBefore)
1094  : Instruction(ParentPad->getType(), Op,
1095  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1096  InsertBefore) {
1097  init(ParentPad, Args, NameStr);
1098 }
1099 
1100 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1101  ArrayRef<Value *> Args, unsigned Values,
1102  const Twine &NameStr, BasicBlock *InsertAtEnd)
1103  : Instruction(ParentPad->getType(), Op,
1104  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1105  InsertAtEnd) {
1106  init(ParentPad, Args, NameStr);
1107 }
1108 
1109 //===----------------------------------------------------------------------===//
1110 // UnreachableInst Implementation
1111 //===----------------------------------------------------------------------===//
1112 
1113 UnreachableInst::UnreachableInst(LLVMContext &Context,
1114                                  Instruction *InsertBefore)
1115  : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1116  0, InsertBefore) {}
1117 UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
1118     : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1119  0, InsertAtEnd) {}
1120 
1121 //===----------------------------------------------------------------------===//
1122 // BranchInst Implementation
1123 //===----------------------------------------------------------------------===//
1124 
1125 void BranchInst::AssertOK() {
1126  if (isConditional())
1127  assert(getCondition()->getType()->isIntegerTy(1) &&
1128  "May only branch on boolean predicates!");
1129 }
1130 
1131 BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
1132  : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1133  OperandTraits<BranchInst>::op_end(this) - 1, 1,
1134  InsertBefore) {
1135  assert(IfTrue && "Branch destination may not be null!");
1136  Op<-1>() = IfTrue;
1137 }
1138 
1139 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1140  Instruction *InsertBefore)
1141  : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1142  OperandTraits<BranchInst>::op_end(this) - 3, 3,
1143  InsertBefore) {
1144  Op<-1>() = IfTrue;
1145  Op<-2>() = IfFalse;
1146  Op<-3>() = Cond;
1147 #ifndef NDEBUG
1148  AssertOK();
1149 #endif
1150 }
1151 
1152 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
1153  : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1154  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
1155  assert(IfTrue && "Branch destination may not be null!");
1156  Op<-1>() = IfTrue;
1157 }
1158 
1159 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1160  BasicBlock *InsertAtEnd)
1161  : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1162  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
1163  Op<-1>() = IfTrue;
1164  Op<-2>() = IfFalse;
1165  Op<-3>() = Cond;
1166 #ifndef NDEBUG
1167  AssertOK();
1168 #endif
1169 }
1170 
1171 BranchInst::BranchInst(const BranchInst &BI)
1172  : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1173                   OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
1174                   BI.getNumOperands()) {
1175  Op<-1>() = BI.Op<-1>();
1176  if (BI.getNumOperands() != 1) {
1177  assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1178  Op<-3>() = BI.Op<-3>();
1179  Op<-2>() = BI.Op<-2>();
1180  }
1181   SubclassOptionalData = BI.SubclassOptionalData;
1182 }
1183 
1184 void BranchInst::swapSuccessors() {
1185   assert(isConditional() &&
1186  "Cannot swap successors of an unconditional branch");
1187  Op<-1>().swap(Op<-2>());
1188 
1189  // Update profile metadata if present and it matches our structural
1190  // expectations.
1191  swapProfMetadata();
1192 }
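// Editor's note: hedged sketch, not part of the original file, of inverting a
// conditional branch; `Br` is an assumed conditional BranchInst*.
//
//   Value *NotCond = BinaryOperator::CreateNot(Br->getCondition(), "", Br);
//   Br->setCondition(NotCond);
//   Br->swapSuccessors(); // also swaps branch_weights via swapProfMetadata()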
1193 
1194 //===----------------------------------------------------------------------===//
1195 // AllocaInst Implementation
1196 //===----------------------------------------------------------------------===//
1197 
1198 static Value *getAISize(LLVMContext &Context, Value *Amt) {
1199  if (!Amt)
1200  Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1201  else {
1202  assert(!isa<BasicBlock>(Amt) &&
1203  "Passed basic block into allocation size parameter! Use other ctor");
1204  assert(Amt->getType()->isIntegerTy() &&
1205  "Allocation array size is not an integer!");
1206  }
1207  return Amt;
1208 }
1209 
1210 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1211  Instruction *InsertBefore)
1212  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1213 
1214 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1215  BasicBlock *InsertAtEnd)
1216  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
1217 
1218 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1219  const Twine &Name, Instruction *InsertBefore)
1220  : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertBefore) {}
1221 
1222 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1223  const Twine &Name, BasicBlock *InsertAtEnd)
1224  : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}
1225 
1226 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1227  unsigned Align, const Twine &Name,
1228  Instruction *InsertBefore)
1229  : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1230  getAISize(Ty->getContext(), ArraySize), InsertBefore),
1231  AllocatedType(Ty) {
1232  setAlignment(MaybeAlign(Align));
1233  assert(!Ty->isVoidTy() && "Cannot allocate void!");
1234  setName(Name);
1235 }
1236 
1237 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1238  unsigned Align, const Twine &Name,
1239  BasicBlock *InsertAtEnd)
1240  : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1241  getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
1242  AllocatedType(Ty) {
1243  setAlignment(MaybeAlign(Align));
1244  assert(!Ty->isVoidTy() && "Cannot allocate void!");
1245  setName(Name);
1246 }
1247 
1248 void AllocaInst::setAlignment(MaybeAlign Align) {
1249   assert((!Align || *Align <= MaximumAlignment) &&
1250  "Alignment is greater than MaximumAlignment!");
1251  setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
1252  encode(Align));
1253  if (Align)
1254  assert(getAlignment() == Align->value() &&
1255  "Alignment representation error!");
1256  else
1257  assert(getAlignment() == 0 && "Alignment representation error!");
1258 }
1259 
1260 bool AllocaInst::isArrayAllocation() const {
1261   if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1262  return !CI->isOne();
1263  return true;
1264 }
1265 
1266 /// isStaticAlloca - Return true if this alloca is in the entry block of the
1267 /// function and is a constant size. If so, the code generator will fold it
1268 /// into the prolog/epilog code, so it is basically free.
1269 bool AllocaInst::isStaticAlloca() const {
1270   // Must be constant size.
1271  if (!isa<ConstantInt>(getArraySize())) return false;
1272 
1273  // Must be in the entry block.
1274  const BasicBlock *Parent = getParent();
1275  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
1276 }
1277 
1278 //===----------------------------------------------------------------------===//
1279 // LoadInst Implementation
1280 //===----------------------------------------------------------------------===//
1281 
1282 void LoadInst::AssertOK() {
1283  assert(getOperand(0)->getType()->isPointerTy() &&
1284  "Ptr must have pointer type.");
1285  assert(!(isAtomic() && getAlignment() == 0) &&
1286  "Alignment required for atomic load");
1287 }
1288 
1289 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1290  Instruction *InsertBef)
1291  : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1292 
1293 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1294  BasicBlock *InsertAE)
1295  : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}
1296 
1297 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1298  Instruction *InsertBef)
1299  : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}
1300 
1301 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1302  BasicBlock *InsertAE)
1303  : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}
1304 
1305 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1306  unsigned Align, Instruction *InsertBef)
1307  : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1308  SyncScope::System, InsertBef) {}
1309 
1310 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1311  unsigned Align, BasicBlock *InsertAE)
1312  : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1313  SyncScope::System, InsertAE) {}
1314 
1315 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1316  unsigned Align, AtomicOrdering Order,
1317  SyncScope::ID SSID, Instruction *InsertBef)
1318  : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1319  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
1320  setVolatile(isVolatile);
1321  setAlignment(MaybeAlign(Align));
1322  setAtomic(Order, SSID);
1323  AssertOK();
1324  setName(Name);
1325 }
1326 
1327 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1328  unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
1329  BasicBlock *InsertAE)
1330  : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
1331  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
1332  setVolatile(isVolatile);
1333  setAlignment(MaybeAlign(Align));
1334  setAtomic(Order, SSID);
1335  AssertOK();
1336  setName(Name);
1337 }
1338 
1339 void LoadInst::setAlignment(MaybeAlign Align) {
1340   assert((!Align || *Align <= MaximumAlignment) &&
1341  "Alignment is greater than MaximumAlignment!");
1342  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
1343  (encode(Align) << 1));
1344  if (Align)
1345  assert(getAlignment() == Align->value() &&
1346  "Alignment representation error!");
1347  else
1348  assert(getAlignment() == 0 && "Alignment representation error!");
1349 }
1350 
1351 //===----------------------------------------------------------------------===//
1352 // StoreInst Implementation
1353 //===----------------------------------------------------------------------===//
1354 
1355 void StoreInst::AssertOK() {
1356  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1357  assert(getOperand(1)->getType()->isPointerTy() &&
1358  "Ptr must have pointer type!");
1359  assert(getOperand(0)->getType() ==
1360  cast<PointerType>(getOperand(1)->getType())->getElementType()
1361  && "Ptr must be a pointer to Val type!");
1362  assert(!(isAtomic() && getAlignment() == 0) &&
1363  "Alignment required for atomic store");
1364 }
1365 
1366 StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
1367  : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1368 
1369 StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
1370  : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
1371 
1372 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1373                      Instruction *InsertBefore)
1374  : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}
1375 
1376 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1377                      BasicBlock *InsertAtEnd)
1378  : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}
1379 
1380 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
1381  Instruction *InsertBefore)
1382  : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1383  SyncScope::System, InsertBefore) {}
1384 
1385 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
1386  BasicBlock *InsertAtEnd)
1387  : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1388  SyncScope::System, InsertAtEnd) {}
1389 
1390 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1391                      unsigned Align, AtomicOrdering Order,
1392  SyncScope::ID SSID,
1393  Instruction *InsertBefore)
1394  : Instruction(Type::getVoidTy(val->getContext()), Store,
1395                   OperandTraits<StoreInst>::op_begin(this),
1396                   OperandTraits<StoreInst>::operands(this),
1397                   InsertBefore) {
1398  Op<0>() = val;
1399  Op<1>() = addr;
1400  setVolatile(isVolatile);
1401  setAlignment(MaybeAlign(Align));
1402  setAtomic(Order, SSID);
1403  AssertOK();
1404 }
1405 
1406 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1407                      unsigned Align, AtomicOrdering Order,
1408  SyncScope::ID SSID,
1409  BasicBlock *InsertAtEnd)
1410  : Instruction(Type::getVoidTy(val->getContext()), Store,
1411                   OperandTraits<StoreInst>::op_begin(this),
1412                   OperandTraits<StoreInst>::operands(this),
1413                   InsertAtEnd) {
1414  Op<0>() = val;
1415  Op<1>() = addr;
1416  setVolatile(isVolatile);
1417  setAlignment(MaybeAlign(Align));
1418  setAtomic(Order, SSID);
1419  AssertOK();
1420 }
1421 
1422 void StoreInst::setAlignment(MaybeAlign Align) {
1423   assert((!Align || *Align <= MaximumAlignment) &&
1424  "Alignment is greater than MaximumAlignment!");
1425  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
1426  (encode(Align) << 1));
1427  if (Align)
1428  assert(getAlignment() == Align->value() &&
1429  "Alignment representation error!");
1430  else
1431  assert(getAlignment() == 0 && "Alignment representation error!");
1432 }
1433 
1434 //===----------------------------------------------------------------------===//
1435 // AtomicCmpXchgInst Implementation
1436 //===----------------------------------------------------------------------===//
1437 
1438 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1439  AtomicOrdering SuccessOrdering,
1440  AtomicOrdering FailureOrdering,
1441  SyncScope::ID SSID) {
1442  Op<0>() = Ptr;
1443  Op<1>() = Cmp;
1444  Op<2>() = NewVal;
1445  setSuccessOrdering(SuccessOrdering);
1446  setFailureOrdering(FailureOrdering);
1447  setSyncScopeID(SSID);
1448 
1449  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1450  "All operands must be non-null!");
1451  assert(getOperand(0)->getType()->isPointerTy() &&
1452  "Ptr must have pointer type!");
1453  assert(getOperand(1)->getType() ==
1454  cast<PointerType>(getOperand(0)->getType())->getElementType()
1455  && "Ptr must be a pointer to Cmp type!");
1456  assert(getOperand(2)->getType() ==
1457  cast<PointerType>(getOperand(0)->getType())->getElementType()
1458  && "Ptr must be a pointer to NewVal type!");
1459  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
1460  "AtomicCmpXchg instructions must be atomic!");
1461  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
1462  "AtomicCmpXchg instructions must be atomic!");
1463  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
1464  "AtomicCmpXchg failure argument shall be no stronger than the success "
1465  "argument");
1466  assert(FailureOrdering != AtomicOrdering::Release &&
1467  FailureOrdering != AtomicOrdering::AcquireRelease &&
1468  "AtomicCmpXchg failure ordering cannot include release semantics");
1469 }
1470 
1471 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1472                                      AtomicOrdering SuccessOrdering,
1473  AtomicOrdering FailureOrdering,
1474  SyncScope::ID SSID,
1475  Instruction *InsertBefore)
1476  : Instruction(
1477  StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1478  AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1479  OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1480  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
1481 }
1482 
1483 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1484                                      AtomicOrdering SuccessOrdering,
1485  AtomicOrdering FailureOrdering,
1486  SyncScope::ID SSID,
1487  BasicBlock *InsertAtEnd)
1488  : Instruction(
1489  StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1490  AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1491  OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
1492  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
1493 }
1494 
1495 //===----------------------------------------------------------------------===//
1496 // AtomicRMWInst Implementation
1497 //===----------------------------------------------------------------------===//
1498 
1499 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1500  AtomicOrdering Ordering,
1501  SyncScope::ID SSID) {
1502  Op<0>() = Ptr;
1503  Op<1>() = Val;
1504  setOperation(Operation);
1505  setOrdering(Ordering);
1506  setSyncScopeID(SSID);
1507 
1508  assert(getOperand(0) && getOperand(1) &&
1509  "All operands must be non-null!");
1510  assert(getOperand(0)->getType()->isPointerTy() &&
1511  "Ptr must have pointer type!");
1512  assert(getOperand(1)->getType() ==
1513  cast<PointerType>(getOperand(0)->getType())->getElementType()
1514  && "Ptr must be a pointer to Val type!");
1515  assert(Ordering != AtomicOrdering::NotAtomic &&
1516  "AtomicRMW instructions must be atomic!");
1517 }
1518 
1519 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1520                              AtomicOrdering Ordering,
1521  SyncScope::ID SSID,
1522  Instruction *InsertBefore)
1523  : Instruction(Val->getType(), AtomicRMW,
1524                   OperandTraits<AtomicRMWInst>::op_begin(this),
1525                   OperandTraits<AtomicRMWInst>::operands(this),
1526                   InsertBefore) {
1527  Init(Operation, Ptr, Val, Ordering, SSID);
1528 }
1529 
1530 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1531                              AtomicOrdering Ordering,
1532  SyncScope::ID SSID,
1533  BasicBlock *InsertAtEnd)
1534  : Instruction(Val->getType(), AtomicRMW,
1535                   OperandTraits<AtomicRMWInst>::op_begin(this),
1536                   OperandTraits<AtomicRMWInst>::operands(this),
1537                   InsertAtEnd) {
1538  Init(Operation, Ptr, Val, Ordering, SSID);
1539 }
1540 
1541 StringRef AtomicRMWInst::getOperationName(BinOp Op) {
1542   switch (Op) {
1543  case AtomicRMWInst::Xchg:
1544  return "xchg";
1545  case AtomicRMWInst::Add:
1546  return "add";
1547  case AtomicRMWInst::Sub:
1548  return "sub";
1549  case AtomicRMWInst::And:
1550  return "and";
1551  case AtomicRMWInst::Nand:
1552  return "nand";
1553  case AtomicRMWInst::Or:
1554  return "or";
1555  case AtomicRMWInst::Xor:
1556  return "xor";
1557  case AtomicRMWInst::Max:
1558  return "max";
1559  case AtomicRMWInst::Min:
1560  return "min";
1561  case AtomicRMWInst::UMax:
1562  return "umax";
1563  case AtomicRMWInst::UMin:
1564  return "umin";
1565  case AtomicRMWInst::FAdd:
1566  return "fadd";
1567  case AtomicRMWInst::FSub:
1568  return "fsub";
1569   case AtomicRMWInst::BAD_BINOP:
1570     return "<invalid operation>";
1571  }
1572 
1573  llvm_unreachable("invalid atomicrmw operation");
1574 }
1575 
1576 //===----------------------------------------------------------------------===//
1577 // FenceInst Implementation
1578 //===----------------------------------------------------------------------===//
1579 
1580 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1581                      SyncScope::ID SSID,
1582  Instruction *InsertBefore)
1583  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1584  setOrdering(Ordering);
1585  setSyncScopeID(SSID);
1586 }
1587 
1588 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1589                      SyncScope::ID SSID,
1590  BasicBlock *InsertAtEnd)
1591  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1592  setOrdering(Ordering);
1593  setSyncScopeID(SSID);
1594 }
1595 
1596 //===----------------------------------------------------------------------===//
1597 // GetElementPtrInst Implementation
1598 //===----------------------------------------------------------------------===//
1599 
1600 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1601  const Twine &Name) {
1602  assert(getNumOperands() == 1 + IdxList.size() &&
1603  "NumOperands not initialized?");
1604  Op<0>() = Ptr;
1605  llvm::copy(IdxList, op_begin() + 1);
1606  setName(Name);
1607 }
1608 
1609 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1610  : Instruction(GEPI.getType(), GetElementPtr,
1611  OperandTraits<GetElementPtrInst>::op_end(this) -
1612  GEPI.getNumOperands(),
1613  GEPI.getNumOperands()),
1614  SourceElementType(GEPI.SourceElementType),
1615  ResultElementType(GEPI.ResultElementType) {
1616  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1617  SubclassOptionalData = GEPI.SubclassOptionalData;
1618 }
1619 
1620 /// getIndexedType - Returns the type of the element that would be accessed with
1621 /// a gep instruction with the specified parameters.
1622 ///
1623 /// The Idxs pointer should point to a contiguous piece of memory containing the
1624 /// indices, either as Value* or uint64_t.
1625 ///
1626 /// A null type is returned if the indices are invalid for the specified
1627 /// pointer type.
1628 ///
1629 template <typename IndexTy>
1630 static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) {
1631  // Handle the special case of an empty index set, which is always valid.
1632  if (IdxList.empty())
1633  return Agg;
1634 
1635  // If there is at least one index, the top level type must be sized, otherwise
1636  // it cannot be 'stepped over'.
1637  if (!Agg->isSized())
1638  return nullptr;
1639 
1640  unsigned CurIdx = 1;
1641  for (; CurIdx != IdxList.size(); ++CurIdx) {
1642  CompositeType *CT = dyn_cast<CompositeType>(Agg);
1643  if (!CT || CT->isPointerTy()) return nullptr;
1644  IndexTy Index = IdxList[CurIdx];
1645  if (!CT->indexValid(Index)) return nullptr;
1646  Agg = CT->getTypeAtIndex(Index);
1647  }
1648  return CurIdx == IdxList.size() ? Agg : nullptr;
1649 }
1650 
1651 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
1652  return getIndexedTypeInternal(Ty, IdxList);
1653 }
1654 
1655 Type *GetElementPtrInst::getIndexedType(Type *Ty,
1656  ArrayRef<Constant *> IdxList) {
1657  return getIndexedTypeInternal(Ty, IdxList);
1658 }
1659 
1660 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
1661  return getIndexedTypeInternal(Ty, IdxList);
1662 }
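// ---- Editor's note: illustrative sketch, not part of Instructions.cpp. ----
// It shows what the getIndexedType overloads above compute: the first index
// only "steps over" the outermost type, and every following index drills into
// the aggregate. For { i32, [4 x float] } with indices 0, 1, 2 the walk is
// struct -> [4 x float] -> float. The helper name demoIndexedType is
// hypothetical.
static Type *demoIndexedType(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *F32 = Type::getFloatTy(Ctx);
  StructType *ST = StructType::get(Ctx, {I32, ArrayType::get(F32, 4)});
  uint64_t Idxs[] = {0, 1, 2}; // 0 steps over ST; 1 picks the array; 2 one element
  return GetElementPtrInst::getIndexedType(ST, Idxs); // yields 'float'
}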
1663 
1664 /// hasAllZeroIndices - Return true if all of the indices of this GEP are
1665 /// zeros. If so, the result pointer and the first operand have the same
1666 /// value, just potentially different types.
1667 bool GetElementPtrInst::hasAllZeroIndices() const {
1668  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1669  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1670  if (!CI->isZero()) return false;
1671  } else {
1672  return false;
1673  }
1674  }
1675  return true;
1676 }
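// ---- Editor's note: illustrative sketch, not part of Instructions.cpp. ----
// A gep whose indices are all zero re-points at the very same address, e.g.
//   %q = getelementptr i32, i32* %p, i64 0
// The sketch assumes IntPtr really has type i32*; demoAllZeroGEP is a
// hypothetical name.
static bool demoAllZeroGEP(Value *IntPtr, LLVMContext &Ctx) {
  Value *Zero = ConstantInt::get(Type::getInt64Ty(Ctx), 0);
  GetElementPtrInst *GEP = GetElementPtrInst::Create(
      Type::getInt32Ty(Ctx), IntPtr, Zero, "same.addr");
  bool AllZero = GEP->hasAllZeroIndices(); // true: same address, same value
  GEP->deleteValue(); // the sketch never inserts the instruction into a block
  return AllZero;
}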
1677 
1678 /// hasAllConstantIndices - Return true if all of the indices of this GEP are
1679 /// constant integers. If so, the result pointer and the first operand have
1680 /// a constant offset between them.
1681 bool GetElementPtrInst::hasAllConstantIndices() const {
1682  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1683  if (!isa<ConstantInt>(getOperand(i)))
1684  return false;
1685  }
1686  return true;
1687 }
1688 
1689 void GetElementPtrInst::setIsInBounds(bool B) {
1690  cast<GEPOperator>(this)->setIsInBounds(B);
1691 }
1692 
1693 bool GetElementPtrInst::isInBounds() const {
1694  return cast<GEPOperator>(this)->isInBounds();
1695 }
1696 
1697 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
1698  APInt &Offset) const {
1699  // Delegate to the generic GEPOperator implementation.
1700  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1701 }
1702 
1703 //===----------------------------------------------------------------------===//
1704 // ExtractElementInst Implementation
1705 //===----------------------------------------------------------------------===//
1706 
1707 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1708  const Twine &Name,
1709  Instruction *InsertBef)
1710  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1711  ExtractElement,
1712  OperandTraits<ExtractElementInst>::op_begin(this),
1713  2, InsertBef) {
1714  assert(isValidOperands(Val, Index) &&
1715  "Invalid extractelement instruction operands!");
1716  Op<0>() = Val;
1717  Op<1>() = Index;
1718  setName(Name);
1719 }
1720 
1721 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1722  const Twine &Name,
1723  BasicBlock *InsertAE)
1724  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1725  ExtractElement,
1726  OperandTraits<ExtractElementInst>::op_begin(this),
1727  2, InsertAE) {
1728  assert(isValidOperands(Val, Index) &&
1729  "Invalid extractelement instruction operands!");
1730 
1731  Op<0>() = Val;
1732  Op<1>() = Index;
1733  setName(Name);
1734 }
1735 
1736 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1737  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1738  return false;
1739  return true;
1740 }
1741 
1742 //===----------------------------------------------------------------------===//
1743 // InsertElementInst Implementation
1744 //===----------------------------------------------------------------------===//
1745 
1746 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1747  const Twine &Name,
1748  Instruction *InsertBef)
1749  : Instruction(Vec->getType(), InsertElement,
1750  OperandTraits<InsertElementInst>::op_begin(this),
1751  3, InsertBef) {
1752  assert(isValidOperands(Vec, Elt, Index) &&
1753  "Invalid insertelement instruction operands!");
1754  Op<0>() = Vec;
1755  Op<1>() = Elt;
1756  Op<2>() = Index;
1757  setName(Name);
1758 }
1759 
1760 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1761  const Twine &Name,
1762  BasicBlock *InsertAE)
1763  : Instruction(Vec->getType(), InsertElement,
1764  OperandTraits<InsertElementInst>::op_begin(this),
1765  3, InsertAE) {
1766  assert(isValidOperands(Vec, Elt, Index) &&
1767  "Invalid insertelement instruction operands!");
1768 
1769  Op<0>() = Vec;
1770  Op<1>() = Elt;
1771  Op<2>() = Index;
1772  setName(Name);
1773 }
1774 
1775 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
1776  const Value *Index) {
1777  if (!Vec->getType()->isVectorTy())
1778  return false; // First operand of insertelement must be vector type.
1779 
1780  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1781  return false;// Second operand of insertelement must be vector element type.
1782 
1783  if (!Index->getType()->isIntegerTy())
1784  return false; // Third operand of insertelement must be i32.
1785  return true;
1786 }
1787 
1788 //===----------------------------------------------------------------------===//
1789 // ShuffleVectorInst Implementation
1790 //===----------------------------------------------------------------------===//
1791 
1792 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1793  const Twine &Name,
1794  Instruction *InsertBefore)
1795 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1796  cast<VectorType>(Mask->getType())->getElementCount()),
1797  ShuffleVector,
1798  OperandTraits<ShuffleVectorInst>::op_begin(this),
1799  OperandTraits<ShuffleVectorInst>::operands(this),
1800  InsertBefore) {
1801  assert(isValidOperands(V1, V2, Mask) &&
1802  "Invalid shuffle vector instruction operands!");
1803  Op<0>() = V1;
1804  Op<1>() = V2;
1805  Op<2>() = Mask;
1806  setName(Name);
1807 }
1808 
1809 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1810  const Twine &Name,
1811  BasicBlock *InsertAtEnd)
1812 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1813  cast<VectorType>(Mask->getType())->getElementCount()),
1814  ShuffleVector,
1815  OperandTraits<ShuffleVectorInst>::op_begin(this),
1816  OperandTraits<ShuffleVectorInst>::operands(this),
1817  InsertAtEnd) {
1818  assert(isValidOperands(V1, V2, Mask) &&
1819  "Invalid shuffle vector instruction operands!");
1820 
1821  Op<0>() = V1;
1822  Op<1>() = V2;
1823  Op<2>() = Mask;
1824  setName(Name);
1825 }
1826 
1827 void ShuffleVectorInst::commute() {
1828  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
1829  int NumMaskElts = getMask()->getType()->getVectorNumElements();
1830  SmallVector<Constant*, 16> NewMask(NumMaskElts);
1831  Type *Int32Ty = Type::getInt32Ty(getContext());
1832  for (int i = 0; i != NumMaskElts; ++i) {
1833  int MaskElt = getMaskValue(i);
1834  if (MaskElt == -1) {
1835  NewMask[i] = UndefValue::get(Int32Ty);
1836  continue;
1837  }
1838  assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1839  MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1840  NewMask[i] = ConstantInt::get(Int32Ty, MaskElt);
1841  }
1842  Op<2>() = ConstantVector::get(NewMask);
1843  Op<0>().swap(Op<1>());
1844 }
1845 
1846 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
1847  const Value *Mask) {
1848  // V1 and V2 must be vectors of the same type.
1849  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1850  return false;
1851 
1852  // Mask must be vector of i32.
1853  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1854  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
1855  return false;
1856 
1857  // Check to see if Mask is valid.
1858  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
1859  return true;
1860 
1861  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1862  unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
1863  for (Value *Op : MV->operands()) {
1864  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1865  if (CI->uge(V1Size*2))
1866  return false;
1867  } else if (!isa<UndefValue>(Op)) {
1868  return false;
1869  }
1870  }
1871  return true;
1872  }
1873 
1874  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1875  unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
1876  for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
1877  if (CDS->getElementAsInteger(i) >= V1Size*2)
1878  return false;
1879  return true;
1880  }
1881 
1882  // The bitcode reader can create a placeholder for a forward reference
1883  // used as the shuffle mask. When this occurs, the shuffle mask will
1884  // fall into this case and fail. To avoid this error, do this bit of
1885  // ugliness to allow such a mask to pass.
1886  if (const auto *CE = dyn_cast<ConstantExpr>(Mask))
1887  if (CE->getOpcode() == Instruction::UserOp1)
1888  return true;
1889 
1890  return false;
1891 }
1892 
1893 int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) {
1894  assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
1895  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask))
1896  return CDS->getElementAsInteger(i);
1897  Constant *C = Mask->getAggregateElement(i);
1898  if (isa<UndefValue>(C))
1899  return -1;
1900  return cast<ConstantInt>(C)->getZExtValue();
1901 }
1902 
1903 void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
1904  SmallVectorImpl<int> &Result) {
1905  unsigned NumElts = Mask->getType()->getVectorNumElements();
1906 
1907  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1908  for (unsigned i = 0; i != NumElts; ++i)
1909  Result.push_back(CDS->getElementAsInteger(i));
1910  return;
1911  }
1912  for (unsigned i = 0; i != NumElts; ++i) {
1913  Constant *C = Mask->getAggregateElement(i);
1914  Result.push_back(isa<UndefValue>(C) ? -1 :
1915  cast<ConstantInt>(C)->getZExtValue());
1916  }
1917 }
1918 
1919 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1920  assert(!Mask.empty() && "Shuffle mask must contain elements");
1921  bool UsesLHS = false;
1922  bool UsesRHS = false;
1923  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1924  if (Mask[i] == -1)
1925  continue;
1926  assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) &&
1927  "Out-of-bounds shuffle mask element");
1928  UsesLHS |= (Mask[i] < NumOpElts);
1929  UsesRHS |= (Mask[i] >= NumOpElts);
1930  if (UsesLHS && UsesRHS)
1931  return false;
1932  }
1933  assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source");
1934  return true;
1935 }
1936 
1937 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
1938  // We don't have vector operand size information, so assume operands are the
1939  // same size as the mask.
1940  return isSingleSourceMaskImpl(Mask, Mask.size());
1941 }
1942 
1943 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1944  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1945  return false;
1946  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1947  if (Mask[i] == -1)
1948  continue;
1949  if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1950  return false;
1951  }
1952  return true;
1953 }
1954 
1955 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
1956  // We don't have vector operand size information, so assume operands are the
1957  // same size as the mask.
1958  return isIdentityMaskImpl(Mask, Mask.size());
1959 }
1960 
1961 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
1962  if (!isSingleSourceMask(Mask))
1963  return false;
1964  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
1965  if (Mask[i] == -1)
1966  continue;
1967  if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
1968  return false;
1969  }
1970  return true;
1971 }
1972 
1973 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) {
1974  if (!isSingleSourceMask(Mask))
1975  return false;
1976  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
1977  if (Mask[i] == -1)
1978  continue;
1979  if (Mask[i] != 0 && Mask[i] != NumElts)
1980  return false;
1981  }
1982  return true;
1983 }
1984 
1985 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) {
1986  // Select is differentiated from identity. It requires using both sources.
1987  if (isSingleSourceMask(Mask))
1988  return false;
1989  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
1990  if (Mask[i] == -1)
1991  continue;
1992  if (Mask[i] != i && Mask[i] != (NumElts + i))
1993  return false;
1994  }
1995  return true;
1996 }
1997 
1998 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
1999  // Example masks that will return true:
2000  // v1 = <a, b, c, d>
2001  // v2 = <e, f, g, h>
2002  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2003  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2004 
2005  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2006  int NumElts = Mask.size();
2007  if (NumElts < 2 || !isPowerOf2_32(NumElts))
2008  return false;
2009 
2010  // 2. The first element of the mask must be either a 0 or a 1.
2011  if (Mask[0] != 0 && Mask[0] != 1)
2012  return false;
2013 
2014  // 3. The difference between the first 2 elements must be equal to the
2015  // number of elements in the mask.
2016  if ((Mask[1] - Mask[0]) != NumElts)
2017  return false;
2018 
2019  // 4. The difference between consecutive even-numbered and odd-numbered
2020  // elements must be equal to 2.
2021  for (int i = 2; i < NumElts; ++i) {
2022  int MaskEltVal = Mask[i];
2023  if (MaskEltVal == -1)
2024  return false;
2025  int MaskEltPrevVal = Mask[i - 2];
2026  if (MaskEltVal - MaskEltPrevVal != 2)
2027  return false;
2028  }
2029  return true;
2030 }
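// ---- Editor's note: illustrative sketch, not part of Instructions.cpp. ----
// The trn1 mask from the comment above, fed through the static helper.
// demoTransposeMask is a hypothetical name.
static bool demoTransposeMask() {
  const int Trn1[] = {0, 4, 2, 6}; // <a, e, c, g> from v1=<a,b,c,d>, v2=<e,f,g,h>
  return ShuffleVectorInst::isTransposeMask(Trn1); // true
}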
2031 
2032 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
2033  int NumSrcElts, int &Index) {
2034  // Must extract from a single source.
2035  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2036  return false;
2037 
2038  // Must be smaller (else this is an Identity shuffle).
2039  if (NumSrcElts <= (int)Mask.size())
2040  return false;
2041 
2042  // Find start of extraction, accounting that we may start with an UNDEF.
2043  int SubIndex = -1;
2044  for (int i = 0, e = Mask.size(); i != e; ++i) {
2045  int M = Mask[i];
2046  if (M < 0)
2047  continue;
2048  int Offset = (M % NumSrcElts) - i;
2049  if (0 <= SubIndex && SubIndex != Offset)
2050  return false;
2051  SubIndex = Offset;
2052  }
2053 
2054  if (0 <= SubIndex) {
2055  Index = SubIndex;
2056  return true;
2057  }
2058  return false;
2059 }
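// ---- Editor's note: illustrative sketch, not part of Instructions.cpp. ----
// A mask that picks elements 2 and 3 of a 4-element source is recognized as
// extracting the 2-element subvector starting at index 2. demoExtractSubvector
// is a hypothetical name.
static bool demoExtractSubvector() {
  const int Mask[] = {2, 3};
  int Index = -1;
  return ShuffleVectorInst::isExtractSubvectorMask(Mask, /*NumSrcElts=*/4,
                                                   Index) &&
         Index == 2;
}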
2060 
2061 bool ShuffleVectorInst::isIdentityWithPadding() const {
2062  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
2063  int NumMaskElts = getType()->getVectorNumElements();
2064  if (NumMaskElts <= NumOpElts)
2065  return false;
2066 
2067  // The first part of the mask must choose elements from exactly 1 source op.
2068  SmallVector<int, 16> Mask = getShuffleMask();
2069  if (!isIdentityMaskImpl(Mask, NumOpElts))
2070  return false;
2071 
2072  // All extending must be with undef elements.
2073  for (int i = NumOpElts; i < NumMaskElts; ++i)
2074  if (Mask[i] != -1)
2075  return false;
2076 
2077  return true;
2078 }
2079 
2080 bool ShuffleVectorInst::isIdentityWithExtract() const {
2081  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
2082  int NumMaskElts = getType()->getVectorNumElements();
2083  if (NumMaskElts >= NumOpElts)
2084  return false;
2085 
2086  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2087 }
2088 
2089 bool ShuffleVectorInst::isConcat() const {
2090  // Vector concatenation is differentiated from identity with padding.
2091  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
2092  return false;
2093 
2094  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
2095  int NumMaskElts = getType()->getVectorNumElements();
2096  if (NumMaskElts != NumOpElts * 2)
2097  return false;
2098 
2099  // Use the mask length rather than the operands' vector lengths here. We
2100  // already know that the shuffle returns a vector twice as long as the inputs,
2101  // and neither of the inputs are undef vectors. If the mask picks consecutive
2102  // elements from both inputs, then this is a concatenation of the inputs.
2103  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2104 }
2105 
2106 //===----------------------------------------------------------------------===//
2107 // InsertValueInst Class
2108 //===----------------------------------------------------------------------===//
2109 
2110 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2111  const Twine &Name) {
2112  assert(getNumOperands() == 2 && "NumOperands not initialized?");
2113 
2114  // There's no fundamental reason why we require at least one index
2115  // (other than weirdness with &*IdxBegin being invalid; see
2116  // getelementptr's init routine for example). But there's no
2117  // present need to support it.
2118  assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2119 
2120  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
2121  Val->getType() && "Inserted value must match indexed type!");
2122  Op<0>() = Agg;
2123  Op<1>() = Val;
2124 
2125  Indices.append(Idxs.begin(), Idxs.end());
2126  setName(Name);
2127 }
2128 
2129 InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2130  : Instruction(IVI.getType(), InsertValue,
2131  OperandTraits<InsertValueInst>::op_begin(this), 2),
2132  Indices(IVI.Indices) {
2133  Op<0>() = IVI.getOperand(0);
2134  Op<1>() = IVI.getOperand(1);
2135  SubclassOptionalData = IVI.SubclassOptionalData;
2136 }
2137 
2138 //===----------------------------------------------------------------------===//
2139 // ExtractValueInst Class
2140 //===----------------------------------------------------------------------===//
2141 
2142 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2143  assert(getNumOperands() == 1 && "NumOperands not initialized?");
2144 
2145  // There's no fundamental reason why we require at least one index.
2146  // But there's no present need to support it.
2147  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2148 
2149  Indices.append(Idxs.begin(), Idxs.end());
2150  setName(Name);
2151 }
2152 
2153 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2154  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
2155  Indices(EVI.Indices) {
2156  SubclassOptionalData = EVI.SubclassOptionalData;
2157 }
2158 
2159 // getIndexedType - Returns the type of the element that would be extracted
2160 // with an extractvalue instruction with the specified parameters.
2161 //
2162 // A null type is returned if the indices are invalid for the specified
2163 // aggregate type.
2164 //
2165 Type *ExtractValueInst::getIndexedType(Type *Agg,
2166  ArrayRef<unsigned> Idxs) {
2167  for (unsigned Index : Idxs) {
2168  // We can't use CompositeType::indexValid(Index) here.
2169  // indexValid() always returns true for arrays because getelementptr allows
2170  // out-of-bounds indices. Since we don't allow those for extractvalue and
2171  // insertvalue we need to check array indexing manually.
2172  // Since the only other types we can index into are struct types it's just
2173  // as easy to check those manually as well.
2174  if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2175  if (Index >= AT->getNumElements())
2176  return nullptr;
2177  } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2178  if (Index >= ST->getNumElements())
2179  return nullptr;
2180  } else {
2181  // Not a valid type to index into.
2182  return nullptr;
2183  }
2184 
2185  Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
2186  }
2187  return const_cast<Type*>(Agg);
2188 }
2189 
2190 //===----------------------------------------------------------------------===//
2191 // UnaryOperator Class
2192 //===----------------------------------------------------------------------===//
2193 
2194 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
2195  Type *Ty, const Twine &Name,
2196  Instruction *InsertBefore)
2197  : UnaryInstruction(Ty, iType, S, InsertBefore) {
2198  Op<0>() = S;
2199  setName(Name);
2200  AssertOK();
2201 }
2202 
2203 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
2204  Type *Ty, const Twine &Name,
2205  BasicBlock *InsertAtEnd)
2206  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
2207  Op<0>() = S;
2208  setName(Name);
2209  AssertOK();
2210 }
2211 
2212 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
2213  const Twine &Name,
2214  Instruction *InsertBefore) {
2215  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2216 }
2217 
2218 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
2219  const Twine &Name,
2220  BasicBlock *InsertAtEnd) {
2221  UnaryOperator *Res = Create(Op, S, Name);
2222  InsertAtEnd->getInstList().push_back(Res);
2223  return Res;
2224 }
2225 
2226 void UnaryOperator::AssertOK() {
2227  Value *LHS = getOperand(0);
2228  (void)LHS; // Silence warnings.
2229 #ifndef NDEBUG
2230  switch (getOpcode()) {
2231  case FNeg:
2232  assert(getType() == LHS->getType() &&
2233  "Unary operation should return same type as operand!");
2234  assert(getType()->isFPOrFPVectorTy() &&
2235  "Tried to create a floating-point operation on a "
2236  "non-floating-point type!");
2237  break;
2238  default: llvm_unreachable("Invalid opcode provided");
2239  }
2240 #endif
2241 }
2242 
2243 //===----------------------------------------------------------------------===//
2244 // BinaryOperator Class
2245 //===----------------------------------------------------------------------===//
2246 
2247 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
2248  Type *Ty, const Twine &Name,
2249  Instruction *InsertBefore)
2250  : Instruction(Ty, iType,
2251  OperandTraits<BinaryOperator>::op_begin(this),
2252  OperandTraits<BinaryOperator>::operands(this),
2253  InsertBefore) {
2254  Op<0>() = S1;
2255  Op<1>() = S2;
2256  setName(Name);
2257  AssertOK();
2258 }
2259 
2260 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
2261  Type *Ty, const Twine &Name,
2262  BasicBlock *InsertAtEnd)
2263  : Instruction(Ty, iType,
2264  OperandTraits<BinaryOperator>::op_begin(this),
2265  OperandTraits<BinaryOperator>::operands(this),
2266  InsertAtEnd) {
2267  Op<0>() = S1;
2268  Op<1>() = S2;
2269  setName(Name);
2270  AssertOK();
2271 }
2272 
2273 void BinaryOperator::AssertOK() {
2274  Value *LHS = getOperand(0), *RHS = getOperand(1);
2275  (void)LHS; (void)RHS; // Silence warnings.
2276  assert(LHS->getType() == RHS->getType() &&
2277  "Binary operator operand types must match!");
2278 #ifndef NDEBUG
2279  switch (getOpcode()) {
2280  case Add: case Sub:
2281  case Mul:
2282  assert(getType() == LHS->getType() &&
2283  "Arithmetic operation should return same type as operands!");
2284  assert(getType()->isIntOrIntVectorTy() &&
2285  "Tried to create an integer operation on a non-integer type!");
2286  break;
2287  case FAdd: case FSub:
2288  case FMul:
2289  assert(getType() == LHS->getType() &&
2290  "Arithmetic operation should return same type as operands!");
2291  assert(getType()->isFPOrFPVectorTy() &&
2292  "Tried to create a floating-point operation on a "
2293  "non-floating-point type!");
2294  break;
2295  case UDiv:
2296  case SDiv:
2297  assert(getType() == LHS->getType() &&
2298  "Arithmetic operation should return same type as operands!");
2299  assert(getType()->isIntOrIntVectorTy() &&
2300  "Incorrect operand type (not integer) for S/UDIV");
2301  break;
2302  case FDiv:
2303  assert(getType() == LHS->getType() &&
2304  "Arithmetic operation should return same type as operands!");
2305  assert(getType()->isFPOrFPVectorTy() &&
2306  "Incorrect operand type (not floating point) for FDIV");
2307  break;
2308  case URem:
2309  case SRem:
2310  assert(getType() == LHS->getType() &&
2311  "Arithmetic operation should return same type as operands!");
2312  assert(getType()->isIntOrIntVectorTy() &&
2313  "Incorrect operand type (not integer) for S/UREM");
2314  break;
2315  case FRem:
2316  assert(getType() == LHS->getType() &&
2317  "Arithmetic operation should return same type as operands!");
2318  assert(getType()->isFPOrFPVectorTy() &&
2319  "Incorrect operand type (not floating point) for FREM");
2320  break;
2321  case Shl:
2322  case LShr:
2323  case AShr:
2324  assert(getType() == LHS->getType() &&
2325  "Shift operation should return same type as operands!");
2326  assert(getType()->isIntOrIntVectorTy() &&
2327  "Tried to create a shift operation on a non-integral type!");
2328  break;
2329  case And: case Or:
2330  case Xor:
2331  assert(getType() == LHS->getType() &&
2332  "Logical operation should return same type as operands!");
2333  assert(getType()->isIntOrIntVectorTy() &&
2334  "Tried to create a logical operation on a non-integral type!");
2335  break;
2336  default: llvm_unreachable("Invalid opcode provided");
2337  }
2338 #endif
2339 }
2340 
2341 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
2342  const Twine &Name,
2343  Instruction *InsertBefore) {
2344  assert(S1->getType() == S2->getType() &&
2345  "Cannot create binary operator with two operands of differing type!");
2346  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2347 }
2348 
2349 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
2350  const Twine &Name,
2351  BasicBlock *InsertAtEnd) {
2352  BinaryOperator *Res = Create(Op, S1, S2, Name);
2353  InsertAtEnd->getInstList().push_back(Res);
2354  return Res;
2355 }
2356 
2357 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2358  Instruction *InsertBefore) {
2359  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2360  return new BinaryOperator(Instruction::Sub,
2361  zero, Op,
2362  Op->getType(), Name, InsertBefore);
2363 }
2364 
2365 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2366  BasicBlock *InsertAtEnd) {
2367  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2368  return new BinaryOperator(Instruction::Sub,
2369  zero, Op,
2370  Op->getType(), Name, InsertAtEnd);
2371 }
2372 
2373 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2374  Instruction *InsertBefore) {
2375  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2376  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
2377 }
2378 
2379 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2380  BasicBlock *InsertAtEnd) {
2381  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2382  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
2383 }
2384 
2385 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
2386  Instruction *InsertBefore) {
2387  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2388  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
2389 }
2390 
2391 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
2392  BasicBlock *InsertAtEnd) {
2393  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2394  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
2395 }
2396 
2397 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
2398  Instruction *InsertBefore) {
2399  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2400  return new BinaryOperator(Instruction::FSub, zero, Op,
2401  Op->getType(), Name, InsertBefore);
2402 }
2403 
2404 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
2405  BasicBlock *InsertAtEnd) {
2406  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2407  return new BinaryOperator(Instruction::FSub, zero, Op,
2408  Op->getType(), Name, InsertAtEnd);
2409 }
2410 
2411 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
2412  Instruction *InsertBefore) {
2413  Constant *C = Constant::getAllOnesValue(Op->getType());
2414  return new BinaryOperator(Instruction::Xor, Op, C,
2415  Op->getType(), Name, InsertBefore);
2416 }
2417 
2418 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
2419  BasicBlock *InsertAtEnd) {
2420  Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
2421  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
2422  Op->getType(), Name, InsertAtEnd);
2423 }
2424 
2425 // Exchange the two operands of this instruction. This is safe to do for any
2426 // binary instruction and does not modify the semantics of the instruction.
2427 // Returns true (and leaves the operands untouched) if the operation is not
2428 // commutative and therefore cannot be swapped.
2429 bool BinaryOperator::swapOperands() {
2430  if (!isCommutative())
2431  return true; // Can't commute operands
2432  Op<0>().swap(Op<1>());
2433  return false;
2434 }
2435 
2436 //===----------------------------------------------------------------------===//
2437 // FPMathOperator Class
2438 //===----------------------------------------------------------------------===//
2439 
2440 float FPMathOperator::getFPAccuracy() const {
2441  const MDNode *MD =
2442  cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2443  if (!MD)
2444  return 0.0;
2445  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
2446  return Accuracy->getValueAPF().convertToFloat();
2447 }
2448 
2449 //===----------------------------------------------------------------------===//
2450 // CastInst Class
2451 //===----------------------------------------------------------------------===//
2452 
2453 // Just determine if this cast only deals with integral->integral conversion.
2454 bool CastInst::isIntegerCast() const {
2455  switch (getOpcode()) {
2456  default: return false;
2457  case Instruction::ZExt:
2458  case Instruction::SExt:
2459  case Instruction::Trunc:
2460  return true;
2461  case Instruction::BitCast:
2462  return getOperand(0)->getType()->isIntegerTy() &&
2463  getType()->isIntegerTy();
2464  }
2465 }
2466 
2467 bool CastInst::isLosslessCast() const {
2468  // Only BitCast can be lossless, exit fast if we're not BitCast
2469  if (getOpcode() != Instruction::BitCast)
2470  return false;
2471 
2472  // Identity cast is always lossless
2473  Type *SrcTy = getOperand(0)->getType();
2474  Type *DstTy = getType();
2475  if (SrcTy == DstTy)
2476  return true;
2477 
2478  // Pointer to pointer is always lossless.
2479  if (SrcTy->isPointerTy())
2480  return DstTy->isPointerTy();
2481  return false; // Other types have no identity values
2482 }
2483 
2484 /// This function determines if the CastInst does not require any bits to be
2485 /// changed in order to effect the cast. Essentially, it identifies cases where
2486 /// no code gen is necessary for the cast, hence the name no-op cast. For
2487 /// example, the following are all no-op casts:
2488 /// # bitcast i32* %x to i8*
2489 /// # bitcast <2 x i32> %x to <4 x i16>
2490 /// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
2491 /// Determine if the described cast is a no-op.
2492 bool CastInst::isNoopCast(Instruction::CastOps Opcode,
2493  Type *SrcTy,
2494  Type *DestTy,
2495  const DataLayout &DL) {
2496  switch (Opcode) {
2497  default: llvm_unreachable("Invalid CastOp");
2498  case Instruction::Trunc:
2499  case Instruction::ZExt:
2500  case Instruction::SExt:
2501  case Instruction::FPTrunc:
2502  case Instruction::FPExt:
2503  case Instruction::UIToFP:
2504  case Instruction::SIToFP:
2505  case Instruction::FPToUI:
2506  case Instruction::FPToSI:
2507  case Instruction::AddrSpaceCast:
2508  // TODO: Target information may give a more accurate answer here.
2509  return false;
2510  case Instruction::BitCast:
2511  return true; // BitCast never modifies bits.
2512  case Instruction::PtrToInt:
2513  return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2514  DestTy->getScalarSizeInBits();
2515  case Instruction::IntToPtr:
2516  return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2517  SrcTy->getScalarSizeInBits();
2518  }
2519 }
2520 
2521 bool CastInst::isNoopCast(const DataLayout &DL) const {
2522  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2523 }
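// ---- Editor's note: illustrative sketch, not part of Instructions.cpp. ----
// Per the rules above, ptrtoint to an integer exactly as wide as the pointer
// changes no bits, so it is classified as a no-op cast. demoPtrToIntIsNoop is
// a hypothetical name; DL is any DataLayout supplied by the caller.
static bool demoPtrToIntIsNoop(const DataLayout &DL, LLVMContext &Ctx) {
  Type *PtrTy = Type::getInt8PtrTy(Ctx);
  Type *IntTy = DL.getIntPtrType(PtrTy); // integer type as wide as a pointer
  return CastInst::isNoopCast(Instruction::PtrToInt, PtrTy, IntTy, DL); // true
}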
2524 
2525 /// This function determines if a pair of casts can be eliminated and what
2526 /// opcode should be used in the elimination. This assumes that there are two
2527 /// instructions like this:
2528 /// * %F = firstOpcode SrcTy %x to MidTy
2529 /// * %S = secondOpcode MidTy %F to DstTy
2530 /// The function returns a resultOpcode so these two casts can be replaced with:
2531 /// * %Replacement = resultOpcode %SrcTy %x to DstTy
2532 /// If no such cast is permitted, the function returns 0.
2533 unsigned CastInst::isEliminableCastPair(
2534  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
2535  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
2536  Type *DstIntPtrTy) {
2537  // Define the 144 possibilities for these two cast instructions. The values
2538  // in this matrix determine what to do in a given situation and select the
2539  // case in the switch below. The rows correspond to firstOp, the columns
2540  // correspond to secondOp. In looking at the table below, keep in mind
2541  // the following cast properties:
2542  //
2543  // Size Compare Source Destination
2544  // Operator Src ? Size Type Sign Type Sign
2545  // -------- ------------ ------------------- ---------------------
2546  // TRUNC > Integer Any Integral Any
2547  // ZEXT < Integral Unsigned Integer Any
2548  // SEXT < Integral Signed Integer Any
2549  // FPTOUI n/a FloatPt n/a Integral Unsigned
2550  // FPTOSI n/a FloatPt n/a Integral Signed
2551  // UITOFP n/a Integral Unsigned FloatPt n/a
2552  // SITOFP n/a Integral Signed FloatPt n/a
2553  // FPTRUNC > FloatPt n/a FloatPt n/a
2554  // FPEXT < FloatPt n/a FloatPt n/a
2555  // PTRTOINT n/a Pointer n/a Integral Unsigned
2556  // INTTOPTR n/a Integral Unsigned Pointer n/a
2557  // BITCAST = FirstClass n/a FirstClass n/a
2558  // ADDRSPCST n/a Pointer n/a Pointer n/a
2559  //
2560  // NOTE: some transforms are safe, but we consider them to be non-profitable.
2561  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2562  // into "fptoui double to i64", but this loses information about the range
2563  // of the produced value (we no longer know the top-part is all zeros).
2564  // Further this conversion is often much more expensive for typical hardware,
2565  // and causes issues when building libgcc. We disallow fptosi+sext for the
2566  // same reason.
2567  const unsigned numCastOps =
2568  Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2569  static const uint8_t CastResults[numCastOps][numCastOps] = {
2570  // T F F U S F F P I B A -+
2571  // R Z S P P I I T P 2 N T S |
2572  // U E E 2 2 2 2 R E I T C C +- secondOp
2573  // N X X U S F F N X N 2 V V |
2574  // C T T I I P P C T T P T T -+
2575  { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
2576  { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
2577  { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
2578  { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
2579  { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
2580  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
2581  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
2582  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
2583  { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
2584  { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
2585  { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
2586  { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast |
2587  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2588  };
2589 
2590  // TODO: This logic could be encoded into the table above and handled in the
2591  // switch below.
2592  // If either of the casts are a bitcast from scalar to vector, disallow the
2593  // merging. However, any pair of bitcasts are allowed.
2594  bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2595  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2596  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2597 
2598  // Check if any of the casts convert scalars <-> vectors.
2599  if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2600  (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2601  if (!AreBothBitcasts)
2602  return 0;
2603 
2604  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2605  [secondOp-Instruction::CastOpsBegin];
2606  switch (ElimCase) {
2607  case 0:
2608  // Categorically disallowed.
2609  return 0;
2610  case 1:
2611  // Allowed, use first cast's opcode.
2612  return firstOp;
2613  case 2:
2614  // Allowed, use second cast's opcode.
2615  return secondOp;
2616  case 3:
2617  // No-op cast in second op implies firstOp as long as the DestTy
2618  // is integer and we are not converting between a vector and a
2619  // non-vector type.
2620  if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2621  return firstOp;
2622  return 0;
2623  case 4:
2624  // No-op cast in second op implies firstOp as long as the DestTy
2625  // is floating point.
2626  if (DstTy->isFloatingPointTy())
2627  return firstOp;
2628  return 0;
2629  case 5:
2630  // No-op cast in first op implies secondOp as long as the SrcTy
2631  // is an integer.
2632  if (SrcTy->isIntegerTy())
2633  return secondOp;
2634  return 0;
2635  case 6:
2636  // No-op cast in first op implies secondOp as long as the SrcTy
2637  // is a floating point.
2638  if (SrcTy->isFloatingPointTy())
2639  return secondOp;
2640  return 0;
2641  case 7: {
2642  // Cannot simplify if address spaces are different!
2643  if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2644  return 0;
2645 
2646  unsigned MidSize = MidTy->getScalarSizeInBits();
2647  // We can still fold this without knowing the actual sizes as long we
2648  // know that the intermediate pointer is the largest possible
2649  // pointer size.
2650  // FIXME: Is this always true?
2651  if (MidSize == 64)
2652  return Instruction::BitCast;
2653 
2654  // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
2655  if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2656  return 0;
2657  unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
2658  if (MidSize >= PtrSize)
2659  return Instruction::BitCast;
2660  return 0;
2661  }
2662  case 8: {
2663  // ext, trunc -> bitcast, if the SrcTy and DstTy are same size
2664  // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2665  // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2666  unsigned SrcSize = SrcTy->getScalarSizeInBits();
2667  unsigned DstSize = DstTy->getScalarSizeInBits();
2668  if (SrcSize == DstSize)
2669  return Instruction::BitCast;
2670  else if (SrcSize < DstSize)
2671  return firstOp;
2672  return secondOp;
2673  }
2674  case 9:
2675  // zext, sext -> zext, because sext can't sign extend after zext
2676  return Instruction::ZExt;
2677  case 11: {
2678  // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
2679  if (!MidIntPtrTy)
2680  return 0;
2681  unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
2682  unsigned SrcSize = SrcTy->getScalarSizeInBits();
2683  unsigned DstSize = DstTy->getScalarSizeInBits();
2684  if (SrcSize <= PtrSize && SrcSize == DstSize)
2685  return Instruction::BitCast;
2686  return 0;
2687  }
2688  case 12:
2689  // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2690  // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2691  if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2692  return Instruction::AddrSpaceCast;
2693  return Instruction::BitCast;
2694  case 13:
2695  // FIXME: this state can be merged with (1), but the following assert
2696  // is useful to check the correctness of the sequence due to semantic
2697  // change of bitcast.
2698  assert(
2699  SrcTy->isPtrOrPtrVectorTy() &&
2700  MidTy->isPtrOrPtrVectorTy() &&
2701  DstTy->isPtrOrPtrVectorTy() &&
2702  SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
2703  MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2704  "Illegal addrspacecast, bitcast sequence!");
2705  // Allowed, use first cast's opcode
2706  return firstOp;
2707  case 14:
2708  // bitcast, addrspacecast -> addrspacecast if the element type of
2709  // bitcast's source is the same as that of addrspacecast's destination.
2710  if (SrcTy->getScalarType()->getPointerElementType() ==
2711  DstTy->getScalarType()->getPointerElementType())
2712  return Instruction::AddrSpaceCast;
2713  return 0;
2714  case 15:
2715  // FIXME: this state can be merged with (1), but the following assert
2716  // is useful to check the correctness of the sequence due to semantic
2717  // change of bitcast.
2718  assert(
2719  SrcTy->isIntOrIntVectorTy() &&
2720  MidTy->isPtrOrPtrVectorTy() &&
2721  DstTy->isPtrOrPtrVectorTy() &&
2722  MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2723  "Illegal inttoptr, bitcast sequence!");
2724  // Allowed, use first cast's opcode
2725  return firstOp;
2726  case 16:
2727  // FIXME: this state can be merged with (2), but the following assert
2728  // is useful to check the correctness of the sequence due to semantic
2729  // change of bitcast.
2730  assert(
2731  SrcTy->isPtrOrPtrVectorTy() &&
2732  MidTy->isPtrOrPtrVectorTy() &&
2733  DstTy->isIntOrIntVectorTy() &&
2734  SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
2735  "Illegal bitcast, ptrtoint sequence!");
2736  // Allowed, use second cast's opcode
2737  return secondOp;
2738  case 17:
2739  // (sitofp (zext x)) -> (uitofp x)
2740  return Instruction::UIToFP;
2741  case 99:
2742  // Cast combination can't happen (error in input). This is for all cases
2743  // where the MidTy is not the same for the two cast instructions.
2744  llvm_unreachable("Invalid Cast Combination");
2745  default:
2746  llvm_unreachable("Error in CastResults table!!!");
2747  }
2748 }
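// ---- Editor's note: illustrative sketch, not part of Instructions.cpp. ----
// Two widening zexts, i8 -> i16 -> i32, hit the (ZExt, ZExt) entry of the
// table above (case 1, "use the first cast's opcode"), so the pair folds to a
// single zext i8 -> i32. demoFoldZExtPair is a hypothetical name; the IntPtrTy
// arguments matter only for pointer casts and may be null here.
static unsigned demoFoldZExtPair(LLVMContext &Ctx) {
  Type *I8 = Type::getInt8Ty(Ctx);
  Type *I16 = Type::getInt16Ty(Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  return CastInst::isEliminableCastPair(Instruction::ZExt, Instruction::ZExt,
                                        I8, I16, I32, nullptr, nullptr,
                                        nullptr); // == Instruction::ZExt
}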
2749 
2750 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
2751  const Twine &Name, Instruction *InsertBefore) {
2752  assert(castIsValid(op, S, Ty) && "Invalid cast!");
2753  // Construct and return the appropriate CastInst subclass
2754  switch (op) {
2755  case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
2756  case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
2757  case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
2758  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
2759  case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
2760  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
2761  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
2762  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
2763  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
2764  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
2765  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
2766  case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
2767  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
2768  default: llvm_unreachable("Invalid opcode provided");
2769  }
2770 }
2771 
2772 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
2773  const Twine &Name, BasicBlock *InsertAtEnd) {
2774  assert(castIsValid(op, S, Ty) && "Invalid cast!");
2775  // Construct and return the appropriate CastInst subclass
2776  switch (op) {
2777  case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
2778  case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
2779  case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
2780  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
2781  case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
2782  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
2783  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
2784  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
2785  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
2786  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
2787  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
2788  case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
2789  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
2790  default: llvm_unreachable("Invalid opcode provided");
2791  }
2792 }
2793 
2794 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
2795  const Twine &Name,
2796  Instruction *InsertBefore) {
2797  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2798  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2799  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
2800 }
2801 
2802 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
2803  const Twine &Name,
2804  BasicBlock *InsertAtEnd) {
2805  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2806  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2807  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
2808 }
2809 
2810 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
2811  const Twine &Name,
2812  Instruction *InsertBefore) {
2813  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2814  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2815  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
2816 }
2817 
2818 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
2819  const Twine &Name,
2820  BasicBlock *InsertAtEnd) {
2821  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2822  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2823  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
2824 }
2825 
2826 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
2827  const Twine &Name,
2828  Instruction *InsertBefore) {
2829  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2830  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2831  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
2832 }
2833 
2834 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
2835  const Twine &Name,
2836  BasicBlock *InsertAtEnd) {
2837  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2838  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2839  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
2840 }
2841 
2842 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
2843  const Twine &Name,
2844  BasicBlock *InsertAtEnd) {
2845  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
2846  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
2847  "Invalid cast");
2848  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
2849  assert((!Ty->isVectorTy() ||
2850  Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
2851  "Invalid cast");
2852 
2853  if (Ty->isIntOrIntVectorTy())
2854  return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
2855 
2856  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
2857 }
2858 
2859 /// Create a BitCast or a PtrToInt cast instruction
2860 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
2861  const Twine &Name,
2862  Instruction *InsertBefore) {
2863  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
2864  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
2865  "Invalid cast");
2866  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
2867  assert((!Ty->isVectorTy() ||
2868  Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
2869  "Invalid cast");
2870 
2871  if (Ty->isIntOrIntVectorTy())
2872  return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
2873 
2874  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
2875 }
2876 
2877 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
2878  Value *S, Type *Ty,
2879  const Twine &Name,
2880  BasicBlock *InsertAtEnd) {
2881  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
2882  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
2883 
2884  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
2885  return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
2886 
2887  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2888 }
2889 
2890 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
2891  Value *S, Type *Ty,
2892  const Twine &Name,
2893  Instruction *InsertBefore) {
2894  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
2895  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
2896 
2897  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
2898  return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
2899 
2900  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2901 }
2902 
2903 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
2904  const Twine &Name,
2905  Instruction *InsertBefore) {
2906  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
2907  return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
2908  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
2909  return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
2910 
2911  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2912 }
2913 
2914 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
2915  bool isSigned, const Twine &Name,
2916  Instruction *InsertBefore) {
2917  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
2918  "Invalid integer cast");
2919  unsigned SrcBits = C->getType()->getScalarSizeInBits();
2920  unsigned DstBits = Ty->getScalarSizeInBits();
2921  Instruction::CastOps opcode =
2922  (SrcBits == DstBits ? Instruction::BitCast :
2923  (SrcBits > DstBits ? Instruction::Trunc :
2924  (isSigned ? Instruction::SExt : Instruction::ZExt)));
2925  return Create(opcode, C, Ty, Name, InsertBefore);
2926 }
2927 
2928 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
2929  bool isSigned, const Twine &Name,
2930  BasicBlock *InsertAtEnd) {
2931  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
2932  "Invalid cast");
2933  unsigned SrcBits = C->getType()->getScalarSizeInBits();
2934  unsigned DstBits = Ty->getScalarSizeInBits();
2935  Instruction::CastOps opcode =
2936  (SrcBits == DstBits ? Instruction::BitCast :
2937  (SrcBits > DstBits ? Instruction::Trunc :
2938  (isSigned ? Instruction::SExt : Instruction::ZExt)));
2939  return Create(opcode, C, Ty, Name, InsertAtEnd);
2940 }
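// ---- Editor's note: illustrative sketch, not part of Instructions.cpp. ----
// The factories above pick the opcode from the size comparison: widening a
// signed i16 value to i64 selects SExt, an unsigned one ZExt, and a narrowing
// request Trunc. The sketch assumes I16Val really has type i16; demoWiden is
// a hypothetical name.
static CastInst *demoWiden(Value *I16Val, LLVMContext &Ctx) {
  return CastInst::CreateIntegerCast(I16Val, Type::getInt64Ty(Ctx),
                                     /*isSigned=*/true, "widened"); // a SExt
}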
2941 
2942 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
2943  const Twine &Name,
2944  Instruction *InsertBefore) {
2945  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
2946  "Invalid cast");
2947  unsigned SrcBits = C->getType()->getScalarSizeInBits();
2948  unsigned DstBits = Ty->getScalarSizeInBits();
2949  Instruction::CastOps opcode =
2950  (SrcBits == DstBits ? Instruction::BitCast :
2951  (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
2952  return Create(opcode, C, Ty, Name, InsertBefore);
2953 }
2954 
2955 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
2956  const Twine &Name,
2957  BasicBlock *InsertAtEnd) {
2958  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
2959  "Invalid cast");
2960  unsigned SrcBits = C->getType()->getScalarSizeInBits();
2961  unsigned DstBits = Ty->getScalarSizeInBits();
2962  Instruction::CastOps opcode =
2963  (SrcBits == DstBits ? Instruction::BitCast :
2964  (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
2965  return Create(opcode, C, Ty, Name, InsertAtEnd);
2966 }
2967 
2968 // Check whether it is valid to call getCastOpcode for these types.
2969 // This routine must be kept in sync with getCastOpcode.
2970 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
2971  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
2972  return false;
2973 
2974  if (SrcTy == DestTy)
2975  return true;
2976 
2977  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
2978  if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
2979  if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
2980  // An element by element cast. Valid if casting the elements is valid.
2981  SrcTy = SrcVecTy->getElementType();
2982  DestTy = DestVecTy->getElementType();
2983  }
2984 
2985  // Get the bit sizes, we'll need these
2986  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
2987  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
2988 
2989  // Run through the possibilities ...
2990  if (DestTy->isIntegerTy()) { // Casting to integral
2991  if (SrcTy->isIntegerTy()) // Casting from integral
2992  return true;
2993  if (SrcTy->isFloatingPointTy()) // Casting from floating pt
2994  return true;
2995  if (SrcTy->isVectorTy()) // Casting from vector
2996  return DestBits == SrcBits;
2997  // Casting from something else
2998  return SrcTy->isPointerTy();
2999  }
3000  if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3001  if (SrcTy->isIntegerTy()) // Casting from integral
3002  return true;
3003  if (SrcTy->isFloatingPointTy()) // Casting from floating pt
3004  return true;
3005  if (SrcTy->isVectorTy()) // Casting from vector
3006  return DestBits == SrcBits;
3007  // Casting from something else
3008  return false;
3009  }
3010  if (DestTy->isVectorTy()) // Casting to vector
3011  return DestBits == SrcBits;
3012  if (DestTy->isPointerTy()) { // Casting to pointer
3013  if (SrcTy->isPointerTy()) // Casting from pointer
3014  return true;
3015  return SrcTy->isIntegerTy(); // Casting from integral
3016  }
3017  if (DestTy->isX86_MMXTy()) {
3018  if (SrcTy->isVectorTy())
3019  return DestBits == SrcBits; // 64-bit vector to MMX
3020  return false;
3021  } // Casting to something else
3022  return false;
3023 }
3024 
3025 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3026  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3027  return false;
3028 
3029  if (SrcTy == DestTy)
3030  return true;
3031 
3032  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3033  if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3034  if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3035  // An element by element cast. Valid if casting the elements is valid.
3036  SrcTy = SrcVecTy->getElementType();
3037  DestTy = DestVecTy->getElementType();
3038  }
3039  }
3040  }
3041 
3042  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3043  if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3044  return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3045  }
3046  }
3047 
3048  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3049  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3050 
3051  // Could still have vectors of pointers if the number of elements doesn't
3052  // match
3053  if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
3054  return false;
3055 
3056  if (SrcBits != DestBits)
3057  return false;
3058 
3059  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
3060  return false;
3061 
3062  return true;
3063 }
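// ---- Editor's note: illustrative sketch, not part of Instructions.cpp. ----
// <2 x i32> and i64 occupy the same 64 bits, so they are bitcastable under the
// rules above; pointers, by contrast, are only bitcastable within one address
// space. demoBitCastable is a hypothetical name.
static bool demoBitCastable(LLVMContext &Ctx) {
  Type *V2I32 = VectorType::get(Type::getInt32Ty(Ctx), 2);
  Type *I64 = Type::getInt64Ty(Ctx);
  return CastInst::isBitCastable(V2I32, I64); // true: both are 64 bits wide
}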
3064 
3065 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
3066  const DataLayout &DL) {
3067  // ptrtoint and inttoptr are not allowed on non-integral pointers
3068  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3069  if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3070  return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3071  !DL.isNonIntegralPointerType(PtrTy));
3072  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3073  if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3074  return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3075  !DL.isNonIntegralPointerType(PtrTy));
3076 
3077  return isBitCastable(SrcTy, DestTy);
3078 }
3079 
3080 // Provide a way to get a "cast" where the cast opcode is inferred from the
3081 // types and size of the operand. This, basically, is a parallel of the
3082 // logic in the castIsValid function below. This axiom should hold:
3083 // castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3084 // should not assert in castIsValid. In other words, this produces a "correct"
3085 // casting opcode for the arguments passed to it.
3086 // This routine must be kept in sync with isCastable.
3087 Instruction::CastOps
3088 CastInst::getCastOpcode(
3089  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3090  Type *SrcTy = Src->getType();
3091 
3092  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3093  "Only first class types are castable!");
3094 
3095  if (SrcTy == DestTy)
3096  return BitCast;
3097 
3098  // FIXME: Check address space sizes here
3099  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3100  if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3101  if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
3102  // An element by element cast. Find the appropriate opcode based on the
3103  // element types.
3104  SrcTy = SrcVecTy->getElementType();
3105  DestTy = DestVecTy->getElementType();
3106  }
3107 
3108  // Get the bit sizes, we'll need these
3109  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3110  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3111 
3112  // Run through the possibilities ...
3113  if (DestTy->isIntegerTy()) { // Casting to integral
3114  if (SrcTy->isIntegerTy()) { // Casting from integral
3115  if (DestBits < SrcBits)
3116  return Trunc; // int -> smaller int
3117  else if (DestBits > SrcBits) { // its an extension
3118  if (SrcIsSigned)
3119  return SExt; // signed -> SEXT
3120  else
3121  return ZExt; // unsigned -> ZEXT
3122  } else {
3123  return BitCast; // Same size, No-op cast
3124  }
3125  } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3126  if (DestIsSigned)
3127  return FPToSI; // FP -> sint
3128  else
3129  return FPToUI; // FP -> uint
3130  } else if (SrcTy->isVectorTy()) {
3131  assert(DestBits == SrcBits &&
3132  "Casting vector to integer of different width");
3133  return BitCast; // Same size, no-op cast
3134  } else {
3135  assert(SrcTy->isPointerTy() &&
3136  "Casting from a value that is not first-class type");
3137  return PtrToInt; // ptr -> int
3138  }
3139  } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3140  if (SrcTy->isIntegerTy()) { // Casting from integral
3141  if (SrcIsSigned)
3142  return SIToFP; // sint -> FP
3143  else
3144  return UIToFP; // uint -> FP
3145  } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3146  if (DestBits < SrcBits) {
3147  return FPTrunc; // FP -> smaller FP
3148  } else if (DestBits > SrcBits) {
3149  return FPExt; // FP -> larger FP
3150  } else {
3151  return BitCast; // same size, no-op cast
3152  }
3153  } else if (SrcTy->isVectorTy()) {
3154  assert(DestBits == SrcBits &&
3155  "Casting vector to floating point of different width");
3156  return BitCast; // same size, no-op cast
3157  }
3158  llvm_unreachable("Casting pointer or non-first class to float");
3159  } else if (DestTy->isVectorTy()) {
3160  assert(DestBits == SrcBits &&
3161  "Illegal cast to vector (wrong type or size)");
3162  return BitCast;
3163  } else if (DestTy->isPointerTy()) {
3164  if (SrcTy->isPointerTy()) {
3165  if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3166  return AddrSpaceCast;
3167  return BitCast; // ptr -> ptr
3168  } else if (SrcTy->isIntegerTy()) {
3169  return IntToPtr; // int -> ptr
3170  }
3171  llvm_unreachable("Casting pointer to other than pointer or int");
3172  } else if (DestTy->isX86_MMXTy()) {
3173  if (SrcTy->isVectorTy()) {
3174  assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
3175  return BitCast; // 64-bit vector to MMX
3176  }
3177  llvm_unreachable("Illegal cast to X86_MMX");
3178  }
3179  llvm_unreachable("Casting to type that is not first-class");
3180 }
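// ---- Editor's note: illustrative sketch, not part of Instructions.cpp. ----
// The axiom stated before getCastOpcode: whatever opcode it picks must satisfy
// castIsValid for the same operands. demoOpcodeFor is a hypothetical name and
// assumes Src and DestTy form a castable pair in the sense of isCastable above.
static Instruction::CastOps demoOpcodeFor(Value *Src, Type *DestTy) {
  Instruction::CastOps OpC = CastInst::getCastOpcode(
      Src, /*SrcIsSigned=*/false, DestTy, /*DestIsSigned=*/false);
  assert(CastInst::castIsValid(OpC, Src, DestTy) && "axiom violated");
  return OpC;
}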
3181 
3182 //===----------------------------------------------------------------------===//
3183 // CastInst SubClass Constructors
3184 //===----------------------------------------------------------------------===//
3185 
3186 /// Check that the construction parameters for a CastInst are correct. This
3187 /// could be broken out into the separate constructors but it is useful to have
3188 /// it in one place and to eliminate the redundant code for getting the sizes
3189 /// of the types involved.
3190 bool
3191 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
3192  // Check for type sanity on the arguments
3193  Type *SrcTy = S->getType();
3194 
3195  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3196  SrcTy->isAggregateType() || DstTy->isAggregateType())
3197  return false;
3198 
3199  // Get the size of the types in bits, we'll need this later
3200  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3201  unsigned DstBitSize = DstTy->getScalarSizeInBits();
3202 
3203  // If these are vector types, get the lengths of the vectors (using zero for
3204  // scalar types means that checking that vector lengths match also checks that
3205  // scalars are not being converted to vectors or vectors to scalars).
3206  unsigned SrcLength = SrcTy->isVectorTy() ?
3207  cast<VectorType>(SrcTy)->getNumElements() : 0;
3208  unsigned DstLength = DstTy->isVectorTy() ?
3209  cast<VectorType>(DstTy)->getNumElements() : 0;
3210 
3211  // Switch on the opcode provided
3212  switch (op) {
3213  default: return false; // This is an input error
3214  case Instruction::Trunc:
3215  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3216  SrcLength == DstLength && SrcBitSize > DstBitSize;
3217  case Instruction::ZExt:
3218  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3219  SrcLength == DstLength && SrcBitSize < DstBitSize;
3220  case Instruction::SExt:
3221  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3222  SrcLength == DstLength && SrcBitSize < DstBitSize;
3223  case Instruction::FPTrunc:
3224  return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3225  SrcLength == DstLength && SrcBitSize > DstBitSize;
3226  case Instruction::FPExt:
3227  return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3228  SrcLength == DstLength && SrcBitSize < DstBitSize;
3229  case Instruction::UIToFP:
3230  case Instruction::SIToFP:
3231  return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3232  SrcLength == DstLength;
3233  case Instruction::FPToUI:
3234  case Instruction::FPToSI:
3235  return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3236  SrcLength == DstLength;
3237  case Instruction::PtrToInt:
3238  if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
3239  return false;
3240  if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
3241  if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
3242  return false;
3243  return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3244  case Instruction::IntToPtr:
3245  if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
3246  return false;
3247  if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
3248  if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
3249  return false;
3250  return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3251  case Instruction::BitCast: {
3252  PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3253  PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3254 
3255  // BitCast implies a no-op cast of type only. No bits change.
3256  // However, you can't cast pointers to anything but pointers.
3257  if (!SrcPtrTy != !DstPtrTy)
3258  return false;
3259 
3260  // For non-pointer cases, the cast is okay if the source and destination bit
3261  // widths are identical.
3262  if (!SrcPtrTy)
3263  return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3264 
3265  // If both are pointers then the address spaces must match.
3266  if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3267  return false;
3268 
3269  // A vector of pointers must have the same number of elements.
3270  VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy);
3271  VectorType *DstVecTy = dyn_cast<VectorType>(DstTy);
3272  if (SrcVecTy && DstVecTy)
3273  return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
3274  if (SrcVecTy)
3275  return SrcVecTy->getNumElements() == 1;
3276  if (DstVecTy)
3277  return DstVecTy->getNumElements() == 1;
3278 
3279  return true;
3280  }
3281  case Instruction::AddrSpaceCast: {
3282  PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3283  if (!SrcPtrTy)
3284  return false;
3285 
3286  PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3287  if (!DstPtrTy)
3288  return false;
3289 
3290  if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3291  return false;
3292 
3293  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3294  if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
3295  return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
3296 
3297  return false;
3298  }
3299 
3300  return true;
3301  }
3302  }
3303 }
3304 
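// --- Editorial example (not part of the original file) ---
// A minimal sketch of using castIsValid() as a guard: reject an ill-formed
// trunc before creating any IR. The helper name and the parameters V, DestTy
// and InsertPt are assumed.
static Instruction *tryCreateTrunc(Value *V, Type *DestTy, Instruction *InsertPt) {
  if (!CastInst::castIsValid(Instruction::Trunc, V, DestTy))
    return nullptr; // e.g. the bit width does not shrink, or vector lengths differ
  return new TruncInst(V, DestTy, "narrowed", InsertPt);
}
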
3305 TruncInst::TruncInst(
3306  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3307 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3308  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3309 }
3310 
3311 TruncInst::TruncInst(
3312  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3313 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
3314  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3315 }
3316 
3317 ZExtInst::ZExtInst(
3318  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3319 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3320  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3321 }
3322 
3323 ZExtInst::ZExtInst(
3324  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3325 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
3326  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3327 }
3328 SExtInst::SExtInst(
3329  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3330 ) : CastInst(Ty, SExt, S, Name, InsertBefore) {
3331  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3332 }
3333 
3334 SExtInst::SExtInst(
3335  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3336 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
3337  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3338 }
3339 
3340 FPTruncInst::FPTruncInst(
3341  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3342 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3343  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3344 }
3345 
3346 FPTruncInst::FPTruncInst(
3347  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3348 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
3349  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3350 }
3351 
3352 FPExtInst::FPExtInst(
3353  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3354 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3355  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3356 }
3357 
3358 FPExtInst::FPExtInst(
3359  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3360 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
3361  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3362 }
3363 
3364 UIToFPInst::UIToFPInst(
3365  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3366 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3367  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3368 }
3369 
3370 UIToFPInst::UIToFPInst(
3371  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3372 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
3373  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3374 }
3375 
3376 SIToFPInst::SIToFPInst(
3377  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3378 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3379  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3380 }
3381 
3382 SIToFPInst::SIToFPInst(
3383  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3384 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
3385  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3386 }
3387 
3388 FPToUIInst::FPToUIInst(
3389  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3390 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3391  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3392 }
3393 
3394 FPToUIInst::FPToUIInst(
3395  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3396 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
3397  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3398 }
3399 
3400 FPToSIInst::FPToSIInst(
3401  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3402 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3403  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3404 }
3405 
3406 FPToSIInst::FPToSIInst(
3407  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3408 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
3409  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3410 }
3411 
3412 PtrToIntInst::PtrToIntInst(
3413  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3414 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3415  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3416 }
3417 
3418 PtrToIntInst::PtrToIntInst(
3419  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3420 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
3421  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3422 }
3423 
3424 IntToPtrInst::IntToPtrInst(
3425  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3426 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3427  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3428 }
3429 
3430 IntToPtrInst::IntToPtrInst(
3431  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3432 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
3433  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3434 }
3435 
3436 BitCastInst::BitCastInst(
3437  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3438 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3439  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3440 }
3441 
3442 BitCastInst::BitCastInst(
3443  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3444 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
3445  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3446 }
3447 
3448 AddrSpaceCastInst::AddrSpaceCastInst(
3449  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3450 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3451  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3452 }
3453 
3454 AddrSpaceCastInst::AddrSpaceCastInst(
3455  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3456 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
3457  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3458 }
3459 
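// --- Editorial example (not part of the original file) ---
// A minimal sketch showing the two insertion flavors each subclass
// constructor above provides. The parameters Src, DestTy, InsertPt and BB are
// assumed.
static void createZExtBothWays(Value *Src, Type *DestTy, Instruction *InsertPt,
                               BasicBlock *BB) {
  ZExtInst *Before = new ZExtInst(Src, DestTy, "zext.before", InsertPt); // before InsertPt
  ZExtInst *AtEnd = new ZExtInst(Src, DestTy, "zext.atend", BB);         // at end of BB
  (void)Before;
  (void)AtEnd;
}
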
3460 //===----------------------------------------------------------------------===//
3461 // CmpInst Classes
3462 //===----------------------------------------------------------------------===//
3463 
3464 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3465  Value *RHS, const Twine &Name, Instruction *InsertBefore,
3466  Instruction *FlagsSource)
3467  : Instruction(ty, op,
3468  OperandTraits<CmpInst>::op_begin(this),
3469  OperandTraits<CmpInst>::operands(this),
3470  InsertBefore) {
3471  Op<0>() = LHS;
3472  Op<1>() = RHS;
3473  setPredicate((Predicate)predicate);
3474  setName(Name);
3475  if (FlagsSource)
3476  copyIRFlags(FlagsSource);
3477 }
3478 
3479 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3480  Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
3481  : Instruction(ty, op,
3482  OperandTraits<CmpInst>::op_begin(this),
3483  OperandTraits<CmpInst>::operands(this),
3484  InsertAtEnd) {
3485  Op<0>() = LHS;
3486  Op<1>() = RHS;
3487  setPredicate((Predicate)predicate);
3488  setName(Name);
3489 }
3490 
3491 CmpInst *
3492 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3493  const Twine &Name, Instruction *InsertBefore) {
3494  if (Op == Instruction::ICmp) {
3495  if (InsertBefore)
3496  return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3497  S1, S2, Name);
3498  else
3499  return new ICmpInst(CmpInst::Predicate(predicate),
3500  S1, S2, Name);
3501  }
3502 
3503  if (InsertBefore)
3504  return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3505  S1, S2, Name);
3506  else
3507  return new FCmpInst(CmpInst::Predicate(predicate),
3508  S1, S2, Name);
3509 }
3510 
3511 CmpInst *
3512 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3513  const Twine &Name, BasicBlock *InsertAtEnd) {
3514  if (Op == Instruction::ICmp) {
3515  return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
3516  S1, S2, Name);
3517  }
3518  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
3519  S1, S2, Name);
3520 }
3521 
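// --- Editorial example (not part of the original file) ---
// A minimal sketch of CmpInst::Create(): the opcode selects whether an
// ICmpInst or an FCmpInst is built. The helper name and the parameters A, B
// and InsertPt are assumed.
static CmpInst *createSignedLessThan(Value *A, Value *B, Instruction *InsertPt) {
  return CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SLT, A, B, "slt",
                         InsertPt);
}
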
3522 void CmpInst::swapOperands() {
3523  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3524  IC->swapOperands();
3525  else
3526  cast<FCmpInst>(this)->swapOperands();
3527 }
3528 
3529 bool CmpInst::isCommutative() const {
3530  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3531  return IC->isCommutative();
3532  return cast<FCmpInst>(this)->isCommutative();
3533 }
3534 
3535 bool CmpInst::isEquality() const {
3536  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3537  return IC->isEquality();
3538  return cast<FCmpInst>(this)->isEquality();
3539 }
3540 
3541 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
3542  switch (pred) {
3543  default: llvm_unreachable("Unknown cmp predicate!");
3544  case ICMP_EQ: return ICMP_NE;
3545  case ICMP_NE: return ICMP_EQ;
3546  case ICMP_UGT: return ICMP_ULE;
3547  case ICMP_ULT: return ICMP_UGE;
3548  case ICMP_UGE: return ICMP_ULT;
3549  case ICMP_ULE: return ICMP_UGT;
3550  case ICMP_SGT: return ICMP_SLE;
3551  case ICMP_SLT: return ICMP_SGE;
3552  case ICMP_SGE: return ICMP_SLT;
3553  case ICMP_SLE: return ICMP_SGT;
3554 
3555  case FCMP_OEQ: return FCMP_UNE;
3556  case FCMP_ONE: return FCMP_UEQ;
3557  case FCMP_OGT: return FCMP_ULE;
3558  case FCMP_OLT: return FCMP_UGE;
3559  case FCMP_OGE: return FCMP_ULT;
3560  case FCMP_OLE: return FCMP_UGT;
3561  case FCMP_UEQ: return FCMP_ONE;
3562  case FCMP_UNE: return FCMP_OEQ;
3563  case FCMP_UGT: return FCMP_OLE;
3564  case FCMP_ULT: return FCMP_OGE;
3565  case FCMP_UGE: return FCMP_OLT;
3566  case FCMP_ULE: return FCMP_OGT;
3567  case FCMP_ORD: return FCMP_UNO;
3568  case FCMP_UNO: return FCMP_ORD;
3569  case FCMP_TRUE: return FCMP_FALSE;
3570  case FCMP_FALSE: return FCMP_TRUE;
3571  }
3572 }
3573 
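// --- Editorial example (not part of the original file) ---
// A minimal sketch: with matching operands, two compares negate each other
// exactly when their predicates are inverses. The helper name and the
// parameters A and B are assumed.
static bool areInverseCompares(const CmpInst *A, const CmpInst *B) {
  return A->getOperand(0) == B->getOperand(0) &&
         A->getOperand(1) == B->getOperand(1) &&
         A->getPredicate() == CmpInst::getInversePredicate(B->getPredicate());
}
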
3574 StringRef CmpInst::getPredicateName(Predicate Pred) {
3575  switch (Pred) {
3576  default: return "unknown";
3577  case FCmpInst::FCMP_FALSE: return "false";
3578  case FCmpInst::FCMP_OEQ: return "oeq";
3579  case FCmpInst::FCMP_OGT: return "ogt";
3580  case FCmpInst::FCMP_OGE: return "oge";
3581  case FCmpInst::FCMP_OLT: return "olt";
3582  case FCmpInst::FCMP_OLE: return "ole";
3583  case FCmpInst::FCMP_ONE: return "one";
3584  case FCmpInst::FCMP_ORD: return "ord";
3585  case FCmpInst::FCMP_UNO: return "uno";
3586  case FCmpInst::FCMP_UEQ: return "ueq";
3587  case FCmpInst::FCMP_UGT: return "ugt";
3588  case FCmpInst::FCMP_UGE: return "uge";
3589  case FCmpInst::FCMP_ULT: return "ult";
3590  case FCmpInst::FCMP_ULE: return "ule";
3591  case FCmpInst::FCMP_UNE: return "une";
3592  case FCmpInst::FCMP_TRUE: return "true";
3593  case ICmpInst::ICMP_EQ: return "eq";
3594  case ICmpInst::ICMP_NE: return "ne";
3595  case ICmpInst::ICMP_SGT: return "sgt";
3596  case ICmpInst::ICMP_SGE: return "sge";
3597  case ICmpInst::ICMP_SLT: return "slt";
3598  case ICmpInst::ICMP_SLE: return "sle";
3599  case ICmpInst::ICMP_UGT: return "ugt";
3600  case ICmpInst::ICMP_UGE: return "uge";
3601  case ICmpInst::ICMP_ULT: return "ult";
3602  case ICmpInst::ICMP_ULE: return "ule";
3603  }
3604 }
3605 
3606 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
3607  switch (pred) {
3608  default: llvm_unreachable("Unknown icmp predicate!");
3609  case ICMP_EQ: case ICMP_NE:
3610  case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3611  return pred;
3612  case ICMP_UGT: return ICMP_SGT;
3613  case ICMP_ULT: return ICMP_SLT;
3614  case ICMP_UGE: return ICMP_SGE;
3615  case ICMP_ULE: return ICMP_SLE;
3616  }
3617 }
3618 
3619 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
3620  switch (pred) {
3621  default: llvm_unreachable("Unknown icmp predicate!");
3622  case ICMP_EQ: case ICMP_NE:
3623  case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3624  return pred;
3625  case ICMP_SGT: return ICMP_UGT;
3626  case ICMP_SLT: return ICMP_ULT;
3627  case ICMP_SGE: return ICMP_UGE;
3628  case ICMP_SLE: return ICMP_ULE;
3629  }
3630 }
3631 
3632 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
3633  switch (pred) {
3634  default: llvm_unreachable("Unknown or unsupported cmp predicate!");
3635  case ICMP_SGT: return ICMP_SGE;
3636  case ICMP_SLT: return ICMP_SLE;
3637  case ICMP_SGE: return ICMP_SGT;
3638  case ICMP_SLE: return ICMP_SLT;
3639  case ICMP_UGT: return ICMP_UGE;
3640  case ICMP_ULT: return ICMP_ULE;
3641  case ICMP_UGE: return ICMP_UGT;
3642  case ICMP_ULE: return ICMP_ULT;
3643 
3644  case FCMP_OGT: return FCMP_OGE;
3645  case FCMP_OLT: return FCMP_OLE;
3646  case FCMP_OGE: return FCMP_OGT;
3647  case FCMP_OLE: return FCMP_OLT;
3648  case FCMP_UGT: return FCMP_UGE;
3649  case FCMP_ULT: return FCMP_ULE;
3650  case FCMP_UGE: return FCMP_UGT;
3651  case FCMP_ULE: return FCMP_ULT;
3652  }
3653 }
3654 
3655 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
3656  switch (pred) {
3657  default: llvm_unreachable("Unknown cmp predicate!");
3658  case ICMP_EQ: case ICMP_NE:
3659  return pred;
3660  case ICMP_SGT: return ICMP_SLT;
3661  case ICMP_SLT: return ICMP_SGT;
3662  case ICMP_SGE: return ICMP_SLE;
3663  case ICMP_SLE: return ICMP_SGE;
3664  case ICMP_UGT: return ICMP_ULT;
3665  case ICMP_ULT: return ICMP_UGT;
3666  case ICMP_UGE: return ICMP_ULE;
3667  case ICMP_ULE: return ICMP_UGE;
3668 
3669  case FCMP_FALSE: case FCMP_TRUE:
3670  case FCMP_OEQ: case FCMP_ONE:
3671  case FCMP_UEQ: case FCMP_UNE:
3672  case FCMP_ORD: case FCMP_UNO:
3673  return pred;
3674  case FCMP_OGT: return FCMP_OLT;
3675  case FCMP_OLT: return FCMP_OGT;
3676  case FCMP_OGE: return FCMP_OLE;
3677  case FCMP_OLE: return FCMP_OGE;
3678  case FCMP_UGT: return FCMP_ULT;
3679  case FCMP_ULT: return FCMP_UGT;
3680  case FCMP_UGE: return FCMP_ULE;
3681  case FCMP_ULE: return FCMP_UGE;
3682  }
3683 }
3684 
3685 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
3686  switch (pred) {
3687  case ICMP_SGT: return ICMP_SGE;
3688  case ICMP_SLT: return ICMP_SLE;
3689  case ICMP_UGT: return ICMP_UGE;
3690  case ICMP_ULT: return ICMP_ULE;
3691  case FCMP_OGT: return FCMP_OGE;
3692  case FCMP_OLT: return FCMP_OLE;
3693  case FCMP_UGT: return FCMP_UGE;
3694  case FCMP_ULT: return FCMP_ULE;
3695  default: return pred;
3696  }
3697 }
3698 
3699 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
3700  assert(CmpInst::isUnsigned(pred) && "Call only with signed predicates!");
3701 
3702  switch (pred) {
3703  default:
3704  llvm_unreachable("Unknown predicate!");
3705  case CmpInst::ICMP_ULT:
3706  return CmpInst::ICMP_SLT;
3707  case CmpInst::ICMP_ULE:
3708  return CmpInst::ICMP_SLE;
3709  case CmpInst::ICMP_UGT:
3710  return CmpInst::ICMP_SGT;
3711  case CmpInst::ICMP_UGE:
3712  return CmpInst::ICMP_SGE;
3713  }
3714 }
3715 
3716 bool CmpInst::isUnsigned(Predicate predicate) {
3717  switch (predicate) {
3718  default: return false;
3719  case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
3720  case ICmpInst::ICMP_UGE: return true;
3721  }
3722 }
3723 
3724 bool CmpInst::isSigned(Predicate predicate) {
3725  switch (predicate) {
3726  default: return false;
3727  case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
3728  case ICmpInst::ICMP_SGE: return true;
3729  }
3730 }
3731 
3732 bool CmpInst::isOrdered(Predicate predicate) {
3733  switch (predicate) {
3734  default: return false;
3735  case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
3736  case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
3737  case FCmpInst::FCMP_ORD: return true;
3738  }
3739 }
3740 
3741 bool CmpInst::isUnordered(Predicate predicate) {
3742  switch (predicate) {
3743  default: return false;
3744  case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
3745  case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
3746  case FCmpInst::FCMP_UNO: return true;
3747  }
3748 }
3749 
3750 bool CmpInst::isTrueWhenEqual(Predicate predicate) {
3751  switch(predicate) {
3752  default: return false;
3753  case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3754  case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3755  }
3756 }
3757 
3758 bool CmpInst::isFalseWhenEqual(Predicate predicate) {
3759  switch(predicate) {
3760  case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3761  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3762  default: return false;
3763  }
3764 }
3765 
3766 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
3767  // If the predicates match, then we know the first condition implies the
3768  // second is true.
3769  if (Pred1 == Pred2)
3770  return true;
3771 
3772  switch (Pred1) {
3773  default:
3774  break;
3775  case ICMP_EQ:
3776  // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3777  return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
3778  Pred2 == ICMP_SLE;
3779  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3780  return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
3781  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3782  return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
3783  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3784  return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
3785  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3786  return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
3787  }
3788  return false;
3789 }
3790 
3791 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
3792  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
3793 }
3794 
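// --- Editorial example (not part of the original file) ---
// A minimal sketch of the implication helpers above: given a compare known to
// be true and a second compare over the same operands, fold the second to a
// constant when an implication exists. The helper name and parameters are
// assumed.
static Value *foldImpliedCmp(const CmpInst *KnownTrue, const CmpInst *Other) {
  if (KnownTrue->getOperand(0) != Other->getOperand(0) ||
      KnownTrue->getOperand(1) != Other->getOperand(1))
    return nullptr; // the helpers require matching operands
  LLVMContext &Ctx = Other->getContext();
  if (CmpInst::isImpliedTrueByMatchingCmp(KnownTrue->getPredicate(),
                                          Other->getPredicate()))
    return ConstantInt::getTrue(Ctx);
  if (CmpInst::isImpliedFalseByMatchingCmp(KnownTrue->getPredicate(),
                                           Other->getPredicate()))
    return ConstantInt::getFalse(Ctx);
  return nullptr; // no implication either way
}
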
3795 //===----------------------------------------------------------------------===//
3796 // SwitchInst Implementation
3797 //===----------------------------------------------------------------------===//
3798 
3799 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
3800  assert(Value && Default && NumReserved);
3801  ReservedSpace = NumReserved;
3802  setNumHungOffUseOperands(2);
3803  allocHungoffUses(ReservedSpace);
3804 
3805  Op<0>() = Value;
3806  Op<1>() = Default;
3807 }
3808 
3809 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
3810 /// switch on and a default destination. The number of additional cases can
3811 /// be specified here to make memory allocation more efficient. This
3812 /// constructor can also autoinsert before another instruction.
3813 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3814  Instruction *InsertBefore)
3815  : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
3816  nullptr, 0, InsertBefore) {
3817  init(Value, Default, 2+NumCases*2);
3818 }
3819 
3820 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
3821 /// switch on and a default destination. The number of additional cases can
3822 /// be specified here to make memory allocation more efficient. This
3823 /// constructor also autoinserts at the end of the specified BasicBlock.
3824 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3825  BasicBlock *InsertAtEnd)
3826  : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
3827  nullptr, 0, InsertAtEnd) {
3828  init(Value, Default, 2+NumCases*2);
3829 }
3830 
3831 SwitchInst::SwitchInst(const SwitchInst &SI)
3832  : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
3833  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
3834  setNumHungOffUseOperands(SI.getNumOperands());
3835  Use *OL = getOperandList();
3836  const Use *InOL = SI.getOperandList();
3837  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
3838  OL[i] = InOL[i];
3839  OL[i+1] = InOL[i+1];
3840  }
3841  SubclassOptionalData = SI.SubclassOptionalData;
3842 }
3843 
3844 /// addCase - Add an entry to the switch instruction...
3845 ///
3846 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
3847  unsigned NewCaseIdx = getNumCases();
3848  unsigned OpNo = getNumOperands();
3849  if (OpNo+2 > ReservedSpace)
3850  growOperands(); // Get more space!
3851  // Initialize some new operands.
3852  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
3853  setNumHungOffUseOperands(OpNo+2);
3854  CaseHandle Case(this, NewCaseIdx);
3855  Case.setValue(OnVal);
3856  Case.setSuccessor(Dest);
3857 }
3858 
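// --- Editorial example (not part of the original file) ---
// A minimal sketch of the usual pattern: reserve the expected number of cases
// in the constructor, then add them with addCase(). The helper name and
// parameters are assumed.
static SwitchInst *createDenseSwitch(Value *Cond, BasicBlock *DefaultBB,
                                     ArrayRef<BasicBlock *> CaseBBs,
                                     BasicBlock *InsertAtEnd) {
  SwitchInst *SI =
      SwitchInst::Create(Cond, DefaultBB, CaseBBs.size(), InsertAtEnd);
  IntegerType *CondTy = cast<IntegerType>(Cond->getType());
  for (unsigned i = 0, e = (unsigned)CaseBBs.size(); i != e; ++i)
    SI->addCase(ConstantInt::get(CondTy, i), CaseBBs[i]);
  return SI;
}
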
3859 /// removeCase - This method removes the specified case and its successor
3860 /// from the switch instruction.
3861 SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
3862  unsigned idx = I->getCaseIndex();
3863 
3864  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
3865 
3866  unsigned NumOps = getNumOperands();
3867  Use *OL = getOperandList();
3868 
3869  // Overwrite this case with the end of the list.
3870  if (2 + (idx + 1) * 2 != NumOps) {
3871  OL[2 + idx * 2] = OL[NumOps - 2];
3872  OL[2 + idx * 2 + 1] = OL[NumOps - 1];
3873  }
3874 
3875  // Nuke the last value.
3876  OL[NumOps-2].set(nullptr);
3877  OL[NumOps-2+1].set(nullptr);
3878  setNumHungOffUseOperands(NumOps-2);
3879 
3880  return CaseIt(this, idx);
3881 }
3882 
3883 /// growOperands - grow operands - This grows the operand list in response
3884 /// to a push_back style of operation. This grows the number of ops by 3 times.
3885 ///
3886 void SwitchInst::growOperands() {
3887  unsigned e = getNumOperands();
3888  unsigned NumOps = e*3;
3889 
3890  ReservedSpace = NumOps;
3891  growHungoffUses(ReservedSpace);
3892 }
3893 
3894 MDNode *
3895 SwitchInstProfUpdateWrapper::getProfBranchWeightsMD(const SwitchInst &SI) {
3896  if (MDNode *ProfileData = SI.getMetadata(LLVMContext::MD_prof))
3897  if (auto *MDName = dyn_cast<MDString>(ProfileData->getOperand(0)))
3898  if (MDName->getString() == "branch_weights")
3899  return ProfileData;
3900  return nullptr;
3901 }
3902 
3903 MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
3904  assert(Changed && "called only if metadata has changed");
3905 
3906  if (!Weights)
3907  return nullptr;
3908 
3909  assert(SI.getNumSuccessors() == Weights->size() &&
3910  "num of prof branch_weights must accord with num of successors");
3911 
3912  bool AllZeroes =
3913  all_of(Weights.getValue(), [](uint32_t W) { return W == 0; });
3914 
3915  if (AllZeroes || Weights.getValue().size() < 2)
3916  return nullptr;
3917 
3918  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
3919 }
3920 
3921 void SwitchInstProfUpdateWrapper::init() {
3922  MDNode *ProfileData = getProfBranchWeightsMD(SI);
3923  if (!ProfileData)
3924  return;
3925 
3926  if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
3927  llvm_unreachable("number of prof branch_weights metadata operands does "
3928  "not correspond to number of successors");
3929  }
3930 
3931  SmallVector<uint32_t, 8> Weights;
3932  for (unsigned CI = 1, CE = SI.getNumSuccessors(); CI <= CE; ++CI) {
3933  ConstantInt *C = mdconst::extract<ConstantInt>(ProfileData->getOperand(CI));
3934  uint32_t CW = C->getValue().getZExtValue();
3935  Weights.push_back(CW);
3936  }
3937  this->Weights = std::move(Weights);
3938 }
3939 
3940 SwitchInst::CaseIt
3941 SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
3942  if (Weights) {
3943  assert(SI.getNumSuccessors() == Weights->size() &&
3944  "num of prof branch_weights must accord with num of successors");
3945  Changed = true;
3946  // Copy the last case to the place of the removed one and shrink.
3947  // This is tightly coupled with the way SwitchInst::removeCase() removes
3948  // the cases in SwitchInst::removeCase(CaseIt).
3949  Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back();
3950  Weights.getValue().pop_back();
3951  }
3952  return SI.removeCase(I);
3953 }
3954 
3955 void SwitchInstProfUpdateWrapper::addCase(
3956  ConstantInt *OnVal, BasicBlock *Dest,
3957  SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
3958  SI.addCase(OnVal, Dest);
3959 
3960  if (!Weights && W && *W) {
3961  Changed = true;
3962  Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
3963  Weights.getValue()[SI.getNumSuccessors() - 1] = *W;
3964  } else if (Weights) {
3965  Changed = true;
3966  Weights.getValue().push_back(W ? *W : 0);
3967  }
3968  if (Weights)
3969  assert(SI.getNumSuccessors() == Weights->size() &&
3970  "num of prof branch_weights must accord with num of successors");
3971 }
3972 
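// --- Editorial example (not part of the original file) ---
// A minimal sketch: adding a case through the wrapper keeps any !prof
// branch_weights metadata in sync with the new successor. The helper name and
// parameters are assumed.
static void addWeightedCase(SwitchInst &SI, ConstantInt *OnVal,
                            BasicBlock *Dest, uint32_t Weight) {
  SwitchInstProfUpdateWrapper Wrapper(SI);
  Wrapper.addCase(OnVal, Dest, Weight);
} // updated weights are written back when Wrapper is destroyed
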
3973 SymbolTableList<Instruction>::iterator
3974 SwitchInstProfUpdateWrapper::eraseFromParent() {
3975  // Instruction is erased. Mark as unchanged to not touch it in the destructor.
3976  Changed = false;
3977  if (Weights)
3978  Weights->resize(0);
3979  return SI.eraseFromParent();
3980 }
3981 
3982 SwitchInstProfUpdateWrapper::CaseWeightOpt
3983 SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
3984  if (!Weights)
3985  return None;
3986  return Weights.getValue()[idx];
3987 }
3988 
3989 void SwitchInstProfUpdateWrapper::setSuccessorWeight(
3990  unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
3991  if (!W)
3992  return;
3993 
3994  if (!Weights && *W)
3995  Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
3996 
3997  if (Weights) {
3998  auto &OldW = Weights.getValue()[idx];
3999  if (*W != OldW) {
4000  Changed = true;
4001  OldW = *W;
4002  }
4003  }
4004 }
4005 
4006 SwitchInstProfUpdateWrapper::CaseWeightOpt
4007 SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
4008  unsigned idx) {
4009  if (MDNode *ProfileData = getProfBranchWeightsMD(SI))
4010  if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4011  return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4012  ->getValue()
4013  .getZExtValue();
4014 
4015  return None;
4016 }
4017 
4018 //===----------------------------------------------------------------------===//
4019 // IndirectBrInst Implementation
4020 //===----------------------------------------------------------------------===//
4021 
4022 void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4023  assert(Address && Address->getType()->isPointerTy() &&
4024  "Address of indirectbr must be a pointer");
4025  ReservedSpace = 1+NumDests;
4026  setNumHungOffUseOperands(1);
4027  allocHungoffUses(ReservedSpace);
4028 
4029  Op<0>() = Address;
4030 }
4031 
4032 
4033 /// growOperands - grow operands - This grows the operand list in response
4034 /// to a push_back style of operation. This grows the number of ops by 2 times.
4035 ///
4036 void IndirectBrInst::growOperands() {
4037  unsigned e = getNumOperands();
4038  unsigned NumOps = e*2;
4039 
4040  ReservedSpace = NumOps;
4041  growHungoffUses(ReservedSpace);
4042 }
4043 
4044 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4045  Instruction *InsertBefore)
4046  : Instruction(Type::getVoidTy(Address->getContext()),
4047  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
4048  init(Address, NumCases);
4049 }
4050 
4051 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4052  BasicBlock *InsertAtEnd)
4053  : Instruction(Type::getVoidTy(Address->getContext()),
4054  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
4055  init(Address, NumCases);
4056 }
4057 
4058 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4059  : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4060  nullptr, IBI.getNumOperands()) {
4061  allocHungoffUses(IBI.getNumOperands());
4062  Use *OL = getOperandList();
4063  const Use *InOL = IBI.getOperandList();
4064  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4065  OL[i] = InOL[i];
4066  SubclassOptionalData = IBI.SubclassOptionalData;
4067 }
4068 
4069 /// addDestination - Add a destination.
4070 ///
4071 void IndirectBrInst::addDestination(BasicBlock *DestBB) {
4072  unsigned OpNo = getNumOperands();
4073  if (OpNo+1 > ReservedSpace)
4074  growOperands(); // Get more space!
4075  // Initialize some new operands.
4076  assert(OpNo < ReservedSpace && "Growing didn't work!");
4077  setNumHungOffUseOperands(OpNo+1);
4078  getOperandList()[OpNo] = DestBB;
4079 }
4080 
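// --- Editorial example (not part of the original file) ---
// A minimal sketch: an indirectbr is typically fed a blockaddress value and
// then told about every block it may transfer control to. The helper name and
// parameters are assumed.
static IndirectBrInst *createIndirectBr(ArrayRef<BasicBlock *> Targets,
                                        BasicBlock *InsertAtEnd) {
  assert(!Targets.empty() && "need at least one possible destination");
  Value *Addr = BlockAddress::get(Targets.front());
  IndirectBrInst *IBI =
      IndirectBrInst::Create(Addr, Targets.size(), InsertAtEnd);
  for (BasicBlock *BB : Targets)
    IBI->addDestination(BB);
  return IBI;
}
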
4081 /// removeDestination - This method removes the specified successor from the
4082 /// indirectbr instruction.
4083 void IndirectBrInst::removeDestination(unsigned idx) {
4084  assert(idx < getNumOperands()-1 && "Successor index out of range!");
4085 
4086  unsigned NumOps = getNumOperands();
4087  Use *OL = getOperandList();
4088 
4089  // Replace this value with the last one.
4090  OL[idx+1] = OL[NumOps-1];
4091 
4092  // Nuke the last value.
4093  OL[NumOps-1].set(nullptr);
4094  setNumHungOffUseOperands(NumOps-1);
4095 }
4096 
4097 //===----------------------------------------------------------------------===//
4098 // cloneImpl() implementations
4099 //===----------------------------------------------------------------------===//
4100 
4101 // Define these methods here so vtables don't get emitted into every translation
4102 // unit that uses these classes.
4103 
4104 GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4105  return new (getNumOperands()) GetElementPtrInst(*this);
4106 }
4107 
4108 UnaryOperator *UnaryOperator::cloneImpl() const {
4109  return Create(getOpcode(), Op<0>());
4110 }
4111 
4112 BinaryOperator *BinaryOperator::cloneImpl() const {
4113  return Create(getOpcode(), Op<0>(), Op<1>());
4114 }
4115 
4116 FCmpInst *FCmpInst::cloneImpl() const {
4117  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4118 }
4119 
4120 ICmpInst *ICmpInst::cloneImpl() const {
4121  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4122 }
4123 
4124 ExtractValueInst *ExtractValueInst::cloneImpl() const {
4125  return new ExtractValueInst(*this);
4126 }
4127 
4128 InsertValueInst *InsertValueInst::cloneImpl() const {
4129  return new InsertValueInst(*this);
4130 }
4131 
4132 AllocaInst *AllocaInst::cloneImpl() const {
4133  AllocaInst *Result = new AllocaInst(getAllocatedType(),
4134  getType()->getAddressSpace(),
4135  (Value *)getOperand(0), getAlignment());
4136  Result->setUsedWithInAlloca(isUsedWithInAlloca());
4137  Result->setSwiftError(isSwiftError());
4138  return Result;
4139 }
4140 
4141 LoadInst *LoadInst::cloneImpl() const {
4142  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4143  getAlignment(), getOrdering(), getSyncScopeID());
4144 }
4145 
4146 StoreInst *StoreInst::cloneImpl() const {
4147  return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
4148  getAlignment(), getOrdering(), getSyncScopeID());
4149 
4150 }
4151 
4152 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
4153  AtomicCmpXchgInst *Result =
4154  new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
4155  getSuccessOrdering(), getFailureOrdering(),
4156  getSyncScopeID());
4157  Result->setVolatile(isVolatile());
4158  Result->setWeak(isWeak());
4159  return Result;
4160 }
4161 
4162 AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
4163  AtomicRMWInst *Result =
4164  new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
4165  getOrdering(), getSyncScopeID());
4166  Result->setVolatile(isVolatile());
4167  return Result;
4168 }
4169 
4170 FenceInst *FenceInst::cloneImpl() const {
4171  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
4172 }
4173 
4174 TruncInst *TruncInst::cloneImpl() const {
4175  return new TruncInst(getOperand(0), getType());
4176 }
4177 
4178 ZExtInst *ZExtInst::cloneImpl() const {
4179  return new ZExtInst(getOperand(0), getType());
4180 }
4181 
4182 SExtInst *SExtInst::cloneImpl() const {
4183  return new SExtInst(getOperand(0), getType());
4184 }
4185 
4186 FPTruncInst *FPTruncInst::cloneImpl() const {
4187  return new FPTruncInst(getOperand(0), getType());
4188 }
4189 
4190 FPExtInst *FPExtInst::cloneImpl() const {
4191  return new FPExtInst(getOperand(0), getType());
4192 }
4193 
4194 UIToFPInst *UIToFPInst::cloneImpl() const {
4195  return new UIToFPInst(getOperand(0), getType());
4196 }
4197 
4198 SIToFPInst *SIToFPInst::cloneImpl() const {
4199  return new SIToFPInst(getOperand(0), getType());
4200 }
4201 
4202 FPToUIInst *FPToUIInst::cloneImpl() const {
4203  return new FPToUIInst(getOperand(0), getType());
4204 }
4205 
4206 FPToSIInst *FPToSIInst::cloneImpl() const {
4207  return new FPToSIInst(getOperand(0), getType());
4208 }
4209 
4210 PtrToIntInst *PtrToIntInst::cloneImpl() const {
4211  return new PtrToIntInst(getOperand(0), getType());
4212 }
4213 
4214 IntToPtrInst *IntToPtrInst::cloneImpl() const {
4215  return new IntToPtrInst(getOperand(0), getType());
4216 }
4217 
4218 BitCastInst *BitCastInst::cloneImpl() const {
4219  return new BitCastInst(getOperand(0), getType());
4220 }
4221 
4222 AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
4223  return new AddrSpaceCastInst(getOperand(0), getType());
4224 }
4225 
4226 CallInst *CallInst::cloneImpl() const {
4227  if (hasOperandBundles()) {
4228  unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4229  return new(getNumOperands(), DescriptorBytes) CallInst(*this);
4230  }
4231  return new(getNumOperands()) CallInst(*this);
4232 }
4233 
4234 SelectInst *SelectInst::cloneImpl() const {
4235  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
4236 }
4237 
4238 VAArgInst *VAArgInst::cloneImpl() const {
4239  return new VAArgInst(getOperand(0), getType());
4240 }
4241 
4242 ExtractElementInst *ExtractElementInst::cloneImpl() const {
4243  return ExtractElementInst::Create(getOperand(0), getOperand(1));
4244 }
4245 
4246 InsertElementInst *InsertElementInst::cloneImpl() const {
4247  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
4248 }
4249 
4250 ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
4251  return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
4252 }
4253 
4254 PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }
4255 
4256 LandingPadInst *LandingPadInst::cloneImpl() const {
4257  return new LandingPadInst(*this);
4258 }
4259 
4260 ReturnInst *ReturnInst::cloneImpl() const {
4261  return new(getNumOperands()) ReturnInst(*this);
4262 }
4263 
4264 BranchInst *BranchInst::cloneImpl() const {
4265  return new(getNumOperands()) BranchInst(*this);
4266 }
4267 
4268 SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4269 
4270 IndirectBrInst *IndirectBrInst::cloneImpl() const {
4271  return new IndirectBrInst(*this);
4272 }
4273 
4274 InvokeInst *InvokeInst::cloneImpl() const {
4275  if (hasOperandBundles()) {
4276  unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4277  return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
4278  }
4279  return new(getNumOperands()) InvokeInst(*this);
4280 }
4281 
4282 CallBrInst *CallBrInst::cloneImpl() const {
4283  if (hasOperandBundles()) {
4284  unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4285  return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
4286  }
4287  return new (getNumOperands()) CallBrInst(*this);
4288 }
4289 
4290 ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
4291 
4292 CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4293  return new (getNumOperands()) CleanupReturnInst(*this);
4294 }
4295 
4296 CatchReturnInst *CatchReturnInst::cloneImpl() const {
4297  return new (getNumOperands()) CatchReturnInst(*this);
4298 }
4299 
4300 CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4301  return new CatchSwitchInst(*this);
4302 }
4303 
4304 FuncletPadInst *FuncletPadInst::cloneImpl() const {
4305  return new (getNumOperands()) FuncletPadInst(*this);
4306 }
4307 
4308 UnreachableInst *UnreachableInst::cloneImpl() const {
4309  LLVMContext &Context = getContext();
4310  return new UnreachableInst(Context);
4311 }