InstCombineCalls.cpp (LLVM 10.0.0svn)
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
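/// For example (illustrative): i8 and i16 promote to i32, while i32 and wider
/// integer types (and all non-integer types) are returned unchanged.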
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// Return a constant boolean vector that has true elements in all positions
/// where the input constant data vector has an element with the sign bit set.
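/// For example (illustrative, not from the original source):
/// <2 x i32> <i32 -1, i32 1> --> <2 x i1> <i1 true, i1 false>.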
static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
  SmallVector<Constant *, 32> BoolVec;
  IntegerType *BoolTy = Type::getInt1Ty(V->getContext());
  for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) {
    Constant *Elt = V->getElementAsConstant(I);
    assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
           "Unexpected constant data vector element type");
    bool Sign = V->getElementType()->isIntegerTy()
                    ? cast<ConstantInt>(Elt)->isNegative()
                    : cast<ConstantFP>(Elt)->isNegative();
    BoolVec.push_back(ConstantInt::get(BoolTy, Sign));
  }
  return ConstantVector::get(BoolVec);
}

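// Illustrative example (assumed IR, not from the original source): a 4-byte
// memcpy such as
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 4, i1 false)
// is rewritten by the routine below into a single load/store pair:
//   %1 = bitcast i8* %s to i32*
//   %2 = load i32, i32* %1, align 1
//   %3 = bitcast i8* %d to i32*
//   store i32 %2, i32* %3, align 1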
Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  unsigned CopyDstAlign = MI->getDestAlignment();
  if (CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  unsigned SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  unsigned CopySrcAlign = MI->getSourceAlignment();
  if (CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(MI->getDest())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If the transfer length is 1/2/4/8 bytes, replace the memcpy/memmove with a
  // load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr; // If not 1/2/4/8 bytes, exit.

  // If this is an atomic transfer and the alignment is less than the size, we
  // would introduce an unaligned memory access, which CodeGen would later turn
  // into a libcall. That is no evident performance gain, so disable the
  // transform for now.
  if (isa<AtomicMemTransferInst>(MI))
    if (CopyDstAlign < Size || CopySrcAlign < Size)
      return nullptr;

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
      cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
      cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType *IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA tag describing our copy.
  MDNode *CopyMD = nullptr;
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
    CopyMD = M;
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
            Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));
  }

  Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder.CreateLoad(IntType, Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(CopySrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(CopyDstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

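// Illustrative example (assumed IR, not from the original source): a 4-byte
// memset such as
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 1, i64 4, i1 false)
// becomes a single store of the fill byte splatted across the store width:
//   %1 = bitcast i8* %p to i32*
//   store i32 16843009, i32* %1, align 1   ; 16843009 == 0x01010101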
Instruction *InstCombiner::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  if (MI->getDestAlignment() < Alignment) {
    MI->setDestAlignment(Alignment);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(MI->getDest())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Extract the length and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  uint64_t Len = LenC->getLimitedValue();
  Alignment = MI->getDestAlignment();
  assert(Len && "0-sized memory setting should be removed already.");

  // Alignment 0 is identity for alignment 1 for memset, but not store.
  if (Alignment == 0)
    Alignment = 1;

  // If this is an atomic memset and the alignment is less than the size, we
  // would introduce an unaligned memory access, which CodeGen would later turn
  // into a libcall. That is no evident performance gain, so disable the
  // transform for now.
  if (isa<AtomicMemSetInst>(MI))
    if (Alignment < Len)
      return nullptr;

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                       MI->isVolatile());
    S->setAlignment(Alignment);
    if (isa<AtomicMemSetInst>(MI))
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

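// Attempt to simplify SSE2/AVX2/AVX512 packed shift intrinsics with a
// constant (immediate or splatted vector) shift amount to a generic IR shift.
// Illustrative example (assumed, not from the original source):
//   %r = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %v, i32 3)
// becomes
//   %r = lshr <4 x i32> %v, <i32 3, i32 3, i32 3, i32 3>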
static Value *simplifyX86immShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_avx2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx512_psra_q_128:
  case Intrinsic::x86_avx512_psrai_q_128:
  case Intrinsic::x86_avx512_psra_q_256:
  case Intrinsic::x86_avx512_psrai_q_256:
  case Intrinsic::x86_avx512_psra_d_512:
  case Intrinsic::x86_avx512_psra_q_512:
  case Intrinsic::x86_avx512_psra_w_512:
  case Intrinsic::x86_avx512_psrai_d_512:
  case Intrinsic::x86_avx512_psrai_q_512:
  case Intrinsic::x86_avx512_psrai_w_512:
    LogicalShift = false; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w:
  case Intrinsic::x86_avx512_psrl_d_512:
  case Intrinsic::x86_avx512_psrl_q_512:
  case Intrinsic::x86_avx512_psrl_w_512:
  case Intrinsic::x86_avx512_psrli_d_512:
  case Intrinsic::x86_avx512_psrli_q_512:
  case Intrinsic::x86_avx512_psrli_w_512:
    LogicalShift = true; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_avx512_psll_d_512:
  case Intrinsic::x86_avx512_psll_q_512:
  case Intrinsic::x86_avx512_psll_w_512:
  case Intrinsic::x86_avx512_pslli_d_512:
  case Intrinsic::x86_avx512_pslli_q_512:
  case Intrinsic::x86_avx512_pslli_w_512:
    LogicalShift = true; ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

  // Simplify if the shift amount is constant.
  auto Arg1 = II.getArgOperand(1);
  auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
  auto CDV = dyn_cast<ConstantDataVector>(Arg1);
  auto CInt = dyn_cast<ConstantInt>(Arg1);
  if (!CAZ && !CDV && !CInt)
    return nullptr;

  APInt Count(64, 0);
  if (CDV) {
    // SSE2/AVX2 uses all the first 64-bits of the 128-bit vector
    // operand to compute the shift amount.
    auto VT = cast<VectorType>(CDV->getType());
    unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
    assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
    unsigned NumSubElts = 64 / BitWidth;

    // Concatenate the sub-elements to create the 64-bit value.
    for (unsigned i = 0; i != NumSubElts; ++i) {
      unsigned SubEltIdx = (NumSubElts - 1) - i;
      auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
      Count <<= BitWidth;
      Count |= SubElt->getValue().zextOrTrunc(64);
    }
  } else if (CInt)
    Count = CInt->getValue();

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(Vec->getType());
  auto SVT = VT->getElementType();
  unsigned VWidth = VT->getNumElements();
  unsigned BitWidth = SVT->getPrimitiveSizeInBits();

  // If shift-by-zero then just return the original value.
  if (Count.isNullValue())
    return Vec;

  // Handle cases when Shift >= BitWidth.
  if (Count.uge(BitWidth)) {
    // If LogicalShift - just return zero.
    if (LogicalShift)
      return ConstantAggregateZero::get(VT);

    // If ArithmeticShift - clamp Shift to (BitWidth - 1).
    Count = APInt(64, BitWidth - 1);
  }

  // Get a constant vector of the same type as the first operand.
  auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
  auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

// Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
// Unlike the generic IR shifts, the intrinsics have defined behaviour for out
// of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
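// Illustrative example (assumed, not from the original source): with constant
// in-range shift amounts,
//   %r = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %v,
//                                              <4 x i32> <i32 0, i32 1, i32 2, i32 3>)
// becomes
//   %r = lshr <4 x i32> %v, <i32 0, i32 1, i32 2, i32 3>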
static Value *simplifyX86varShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_avx2_psrav_d:
  case Intrinsic::x86_avx2_psrav_d_256:
  case Intrinsic::x86_avx512_psrav_q_128:
  case Intrinsic::x86_avx512_psrav_q_256:
  case Intrinsic::x86_avx512_psrav_d_512:
  case Intrinsic::x86_avx512_psrav_q_512:
  case Intrinsic::x86_avx512_psrav_w_128:
  case Intrinsic::x86_avx512_psrav_w_256:
  case Intrinsic::x86_avx512_psrav_w_512:
    LogicalShift = false;
    ShiftLeft = false;
    break;
  case Intrinsic::x86_avx2_psrlv_d:
  case Intrinsic::x86_avx2_psrlv_d_256:
  case Intrinsic::x86_avx2_psrlv_q:
  case Intrinsic::x86_avx2_psrlv_q_256:
  case Intrinsic::x86_avx512_psrlv_d_512:
  case Intrinsic::x86_avx512_psrlv_q_512:
  case Intrinsic::x86_avx512_psrlv_w_128:
  case Intrinsic::x86_avx512_psrlv_w_256:
  case Intrinsic::x86_avx512_psrlv_w_512:
    LogicalShift = true;
    ShiftLeft = false;
    break;
  case Intrinsic::x86_avx2_psllv_d:
  case Intrinsic::x86_avx2_psllv_d_256:
  case Intrinsic::x86_avx2_psllv_q:
  case Intrinsic::x86_avx2_psllv_q_256:
  case Intrinsic::x86_avx512_psllv_d_512:
  case Intrinsic::x86_avx512_psllv_q_512:
  case Intrinsic::x86_avx512_psllv_w_128:
  case Intrinsic::x86_avx512_psllv_w_256:
  case Intrinsic::x86_avx512_psllv_w_512:
    LogicalShift = true;
    ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

  // Simplify if all shift amounts are constant/undef.
  auto *CShift = dyn_cast<Constant>(II.getArgOperand(1));
  if (!CShift)
    return nullptr;

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(II.getType());
  auto SVT = VT->getVectorElementType();
  int NumElts = VT->getNumElements();
  int BitWidth = SVT->getIntegerBitWidth();

  // Collect each element's shift amount.
  // We also collect special cases: UNDEF = -1, OUT-OF-RANGE = BitWidth.
  bool AnyOutOfRange = false;
  SmallVector<int, 8> ShiftAmts;
  for (int I = 0; I < NumElts; ++I) {
    auto *CElt = CShift->getAggregateElement(I);
    if (CElt && isa<UndefValue>(CElt)) {
      ShiftAmts.push_back(-1);
      continue;
    }

    auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
    if (!COp)
      return nullptr;

    // Handle out of range shifts.
    // If LogicalShift - set to BitWidth (special case).
    // If ArithmeticShift - set to (BitWidth - 1) (sign splat).
    APInt ShiftVal = COp->getValue();
    if (ShiftVal.uge(BitWidth)) {
      AnyOutOfRange = LogicalShift;
      ShiftAmts.push_back(LogicalShift ? BitWidth : BitWidth - 1);
      continue;
    }

    ShiftAmts.push_back((int)ShiftVal.getZExtValue());
  }

  // If all elements are out of range or UNDEF, return a vector of zeros/undefs.
  // ArithmeticShift should only hit this if they are all UNDEF.
  auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
  if (llvm::all_of(ShiftAmts, OutOfRange)) {
    SmallVector<Constant *, 8> ConstantVec;
    for (int Idx : ShiftAmts) {
      if (Idx < 0) {
        ConstantVec.push_back(UndefValue::get(SVT));
      } else {
        assert(LogicalShift && "Logical shift expected");
        ConstantVec.push_back(ConstantInt::getNullValue(SVT));
      }
    }
    return ConstantVector::get(ConstantVec);
  }

  // We can't handle only some out of range values with generic logical shifts.
  if (AnyOutOfRange)
    return nullptr;

  // Build the shift amount constant vector.
  SmallVector<Constant *, 8> ShiftVecAmts;
  for (int Idx : ShiftAmts) {
    if (Idx < 0)
      ShiftVecAmts.push_back(UndefValue::get(SVT));
    else
      ShiftVecAmts.push_back(ConstantInt::get(SVT, Idx));
  }
  auto ShiftVec = ConstantVector::get(ShiftVecAmts);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

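// Constant-fold SSE2/AVX2/AVX512 PACKSS/PACKUS intrinsics by materializing
// an explicit clamp (icmp/select pairs), a lane-interleaving shuffle, and a
// truncate. Illustrative semantics (assumed, not from the original source):
// packsswb saturates each i16 source lane to [-128, 127] and truncates it to
// i8, with each 128-bit result lane taking 64 bits from each source operand.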
static Value *simplifyX86pack(IntrinsicInst &II,
                              InstCombiner::BuilderTy &Builder, bool IsSigned) {
  Value *Arg0 = II.getArgOperand(0);
  Value *Arg1 = II.getArgOperand(1);
  Type *ResTy = II.getType();

  // Fast all-undef handling.
  if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
    return UndefValue::get(ResTy);

  Type *ArgTy = Arg0->getType();
  unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128;
  unsigned NumSrcElts = ArgTy->getVectorNumElements();
  assert(ResTy->getVectorNumElements() == (2 * NumSrcElts) &&
         "Unexpected packing types");

  unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
  unsigned DstScalarSizeInBits = ResTy->getScalarSizeInBits();
  unsigned SrcScalarSizeInBits = ArgTy->getScalarSizeInBits();
  assert(SrcScalarSizeInBits == (2 * DstScalarSizeInBits) &&
         "Unexpected packing types");

  // Constant folding.
  if (!isa<Constant>(Arg0) || !isa<Constant>(Arg1))
    return nullptr;

  // Clamp values - signed/unsigned both use signed clamp values, but they
  // differ on the min/max values.
  APInt MinValue, MaxValue;
  if (IsSigned) {
    // PACKSS: Truncate signed value with signed saturation.
    // Source values less than dst minint are saturated to minint.
    // Source values greater than dst maxint are saturated to maxint.
    MinValue =
        APInt::getSignedMinValue(DstScalarSizeInBits).sext(SrcScalarSizeInBits);
    MaxValue =
        APInt::getSignedMaxValue(DstScalarSizeInBits).sext(SrcScalarSizeInBits);
  } else {
    // PACKUS: Truncate signed value with unsigned saturation.
    // Source values less than zero are saturated to zero.
    // Source values greater than dst maxuint are saturated to maxuint.
    MinValue = APInt::getNullValue(SrcScalarSizeInBits);
    MaxValue = APInt::getLowBitsSet(SrcScalarSizeInBits, DstScalarSizeInBits);
  }

  auto *MinC = Constant::getIntegerValue(ArgTy, MinValue);
  auto *MaxC = Constant::getIntegerValue(ArgTy, MaxValue);
  Arg0 = Builder.CreateSelect(Builder.CreateICmpSLT(Arg0, MinC), MinC, Arg0);
  Arg1 = Builder.CreateSelect(Builder.CreateICmpSLT(Arg1, MinC), MinC, Arg1);
  Arg0 = Builder.CreateSelect(Builder.CreateICmpSGT(Arg0, MaxC), MaxC, Arg0);
  Arg1 = Builder.CreateSelect(Builder.CreateICmpSGT(Arg1, MaxC), MaxC, Arg1);

  // Shuffle clamped args together at the lane level.
  SmallVector<unsigned, 32> PackMask;
  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    for (unsigned Elt = 0; Elt != NumSrcEltsPerLane; ++Elt)
      PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane));
    for (unsigned Elt = 0; Elt != NumSrcEltsPerLane; ++Elt)
      PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane) + NumSrcElts);
  }
  auto *Shuffle = Builder.CreateShuffleVector(Arg0, Arg1, PackMask);

  // Truncate to dst size.
  return Builder.CreateTrunc(Shuffle, ResTy);
}

static Value *simplifyX86movmsk(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  Value *Arg = II.getArgOperand(0);
  Type *ResTy = II.getType();
  Type *ArgTy = Arg->getType();

  // movmsk(undef) -> zero as we must ensure the upper bits are zero.
  if (isa<UndefValue>(Arg))
    return Constant::getNullValue(ResTy);

  // We can't easily peek through x86_mmx types.
  if (!ArgTy->isVectorTy())
    return nullptr;

  // Expand MOVMSK to compare/bitcast/zext:
  // e.g. PMOVMSKB(v16i8 x):
  //   %cmp = icmp slt <16 x i8> %x, zeroinitializer
  //   %int = bitcast <16 x i1> %cmp to i16
  //   %res = zext i16 %int to i32
  unsigned NumElts = ArgTy->getVectorNumElements();
  Type *IntegerVecTy = VectorType::getInteger(cast<VectorType>(ArgTy));
  Type *IntegerTy = Builder.getIntNTy(NumElts);

  Value *Res = Builder.CreateBitCast(Arg, IntegerVecTy);
  Res = Builder.CreateICmpSLT(Res, Constant::getNullValue(IntegerVecTy));
  Res = Builder.CreateBitCast(Res, IntegerTy);
  Res = Builder.CreateZExtOrTrunc(Res, ResTy);
  return Res;
}

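// Illustrative example (assumed IR, not from the original source): when the
// carry-in is known zero,
//   %r = call { i8, i64 } @llvm.x86.addcarry.64(i8 0, i64 %a, i64 %b)
// is rewritten below in terms of the generic
//   @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
// with the result pair re-packed to match the x86 return struct.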
static Value *simplifyX86addcarry(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  Value *CarryIn = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);
  Value *Op2 = II.getArgOperand(2);
  Type *RetTy = II.getType();
  Type *OpTy = Op1->getType();
  assert(RetTy->getStructElementType(0)->isIntegerTy(8) &&
         RetTy->getStructElementType(1) == OpTy && OpTy == Op2->getType() &&
         "Unexpected types for x86 addcarry");

  // If carry-in is zero, this is just an unsigned add with overflow.
  if (match(CarryIn, m_ZeroInt())) {
    Value *UAdd = Builder.CreateIntrinsic(Intrinsic::uadd_with_overflow, OpTy,
                                          { Op1, Op2 });
    // The types have to be adjusted to match the x86 call types.
    Value *UAddResult = Builder.CreateExtractValue(UAdd, 0);
    Value *UAddOV = Builder.CreateZExt(Builder.CreateExtractValue(UAdd, 1),
                                       Builder.getInt8Ty());
    Value *Res = UndefValue::get(RetTy);
    Res = Builder.CreateInsertValue(Res, UAddOV, 0);
    return Builder.CreateInsertValue(Res, UAddResult, 1);
  }

  return nullptr;
}

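// Illustrative example (assumed, not from the original source): insertps with
// immediate 0xC0 (source lane 3, destination lane 0, empty zero mask) becomes
//   shufflevector <4 x float> %a, <4 x float> %b,
//                 <4 x i32> <i32 7, i32 1, i32 2, i32 3>
// i.e. lane 0 of the result is lane 3 of the second operand.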
static Value *simplifyX86insertps(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
  if (!CInt)
    return nullptr;

  VectorType *VecTy = cast<VectorType>(II.getType());
  assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");

  // The immediate permute control byte looks like this:
  //  [3:0] - zero mask for each 32-bit lane
  //  [5:4] - select one 32-bit destination lane
  //  [7:6] - select one 32-bit source lane

  uint8_t Imm = CInt->getZExtValue();
  uint8_t ZMask = Imm & 0xf;
  uint8_t DestLane = (Imm >> 4) & 0x3;
  uint8_t SourceLane = (Imm >> 6) & 0x3;

  ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

  // If all zero mask bits are set, this was just a weird way to
  // generate a zero vector.
  if (ZMask == 0xf)
    return ZeroVector;

  // Initialize by passing all of the first source bits through.
  uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };

  // We may replace the second operand with the zero vector.
  Value *V1 = II.getArgOperand(1);

  if (ZMask) {
    // If the zero mask is being used with a single input or the zero mask
    // overrides the destination lane, this is a shuffle with the zero vector.
    if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
        (ZMask & (1 << DestLane))) {
      V1 = ZeroVector;
      // We may still move 32-bits of the first source vector from one lane
      // to another.
      ShuffleMask[DestLane] = SourceLane;
      // The zero mask may override the previous insert operation.
      for (unsigned i = 0; i < 4; ++i)
        if ((ZMask >> i) & 0x1)
          ShuffleMask[i] = i + 4;
    } else {
      // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
      return nullptr;
    }
  } else {
    // Replace the selected destination lane with the selected source lane.
    ShuffleMask[DestLane] = SourceLane + 4;
  }

  return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
}

/// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
/// or conversion to a shuffle vector.
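/// Illustrative example (assumed, not from the original source): an EXTRQI
/// that extracts 16 bits starting at bit index 8 is byte-aligned, so it can
/// be lowered to a byte shuffle that moves bytes 1-2 of the source to the
/// bottom and zero-fills the rest of the low 64 bits.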
static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
                               ConstantInt *CILength, ConstantInt *CIIndex,
                               InstCombiner::BuilderTy &Builder) {
  auto LowConstantHighUndef = [&](uint64_t Val) {
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  };

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  ConstantInt *CI0 =
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;

  // Attempt to constant fold.
  if (CILength && CIIndex) {
    // From AMD documentation: "The bit index and field length are each six
    // bits in length; other bits of the field are ignored."
    APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
    APInt APLength = CILength->getValue().zextOrTrunc(6);

    unsigned Index = APIndex.getZExtValue();

    // From AMD documentation: "a value of zero in the field length is
    // defined as length of 64".
    unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

    // From AMD documentation: "If the sum of the bit index + length field
    // is greater than 64, the results are undefined".
    unsigned End = Index + Length;

    // Note that both field index and field length are 8-bit quantities.
    // Since variables 'Index' and 'Length' are unsigned values
    // obtained from zero-extending field index and field length
    // respectively, their sum should never wrap around.
    if (End > 64)
      return UndefValue::get(II.getType());

    // If we are extracting whole bytes, we can convert this to a shuffle.
    // Lowering can recognize EXTRQI shuffle masks.
    if ((Length % 8) == 0 && (Index % 8) == 0) {
      // Convert bit indices to byte indices.
      Length /= 8;
      Index /= 8;

      Type *IntTy8 = Type::getInt8Ty(II.getContext());
      Type *IntTy32 = Type::getInt32Ty(II.getContext());
      VectorType *ShufTy = VectorType::get(IntTy8, 16);

      SmallVector<Constant *, 16> ShuffleMask;
      for (int i = 0; i != (int)Length; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
      for (int i = Length; i != 8; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
      for (int i = 8; i != 16; ++i)
        ShuffleMask.push_back(UndefValue::get(IntTy32));

      Value *SV = Builder.CreateShuffleVector(
          Builder.CreateBitCast(Op0, ShufTy),
          ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
      return Builder.CreateBitCast(SV, II.getType());
    }

    // Constant Fold - shift Index'th bit to lowest position and mask off
    // Length bits.
    if (CI0) {
      APInt Elt = CI0->getValue();
      Elt.lshrInPlace(Index);
      Elt = Elt.zextOrTrunc(Length);
      return LowConstantHighUndef(Elt.getZExtValue());
    }

    // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
    if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
      Value *Args[] = {Op0, CILength, CIIndex};
      Module *M = II.getModule();
      Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
      return Builder.CreateCall(F, Args);
    }
  }

  // Constant Fold - extraction from zero is always {zero, undef}.
  if (CI0 && CI0->isZero())
    return LowConstantHighUndef(0);

  return nullptr;
}

/// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
/// folding or conversion to a shuffle vector.
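/// Illustrative example (assumed, not from the original source): an INSERTQI
/// of 8 bits at bit index 16 is byte-aligned, so it becomes a byte shuffle
/// that keeps bytes 0-1 of the first operand, takes byte 2 from the second
/// operand, and keeps the remaining bytes of the low 64 bits from the first
/// operand.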
static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
                                 APInt APLength, APInt APIndex,
                                 InstCombiner::BuilderTy &Builder) {
  // From AMD documentation: "The bit index and field length are each six bits
  // in length; other bits of the field are ignored."
  APIndex = APIndex.zextOrTrunc(6);
  APLength = APLength.zextOrTrunc(6);

  // Attempt to constant fold.
  unsigned Index = APIndex.getZExtValue();

  // From AMD documentation: "a value of zero in the field length is
  // defined as length of 64".
  unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

  // From AMD documentation: "If the sum of the bit index + length field
  // is greater than 64, the results are undefined".
  unsigned End = Index + Length;

  // Note that both field index and field length are 8-bit quantities.
  // Since variables 'Index' and 'Length' are unsigned values
  // obtained from zero-extending field index and field length
  // respectively, their sum should never wrap around.
  if (End > 64)
    return UndefValue::get(II.getType());

  // If we are inserting whole bytes, we can convert this to a shuffle.
  // Lowering can recognize INSERTQI shuffle masks.
  if ((Length % 8) == 0 && (Index % 8) == 0) {
    // Convert bit indices to byte indices.
    Length /= 8;
    Index /= 8;

    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Type *IntTy32 = Type::getInt32Ty(II.getContext());
    VectorType *ShufTy = VectorType::get(IntTy8, 16);

    SmallVector<Constant *, 16> ShuffleMask;
    for (int i = 0; i != (int)Index; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 0; i != (int)Length; ++i)
      ShuffleMask.push_back(
          Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
    for (int i = Index + Length; i != 8; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 8; i != 16; ++i)
      ShuffleMask.push_back(UndefValue::get(IntTy32));

    Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
                                            Builder.CreateBitCast(Op1, ShufTy),
                                            ConstantVector::get(ShuffleMask));
    return Builder.CreateBitCast(SV, II.getType());
  }

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  Constant *C1 = dyn_cast<Constant>(Op1);
  ConstantInt *CI00 =
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;
  ConstantInt *CI10 =
      C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
         : nullptr;

  // Constant Fold - insert bottom Length bits starting at the Index'th bit.
  if (CI00 && CI10) {
    APInt V00 = CI00->getValue();
    APInt V10 = CI10->getValue();
    APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
    V00 = V00 & ~Mask;
    V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
    APInt Val = V00 | V10;
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  }

  // If we were an INSERTQ call, we'll save demanded elements if we convert to
  // INSERTQI.
  if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Constant *CILength = ConstantInt::get(IntTy8, Length, false);
    Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);

    Value *Args[] = {Op0, Op1, CILength, CIIndex};
    Module *M = II.getModule();
    Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
    return Builder.CreateCall(F, Args);
  }

  return nullptr;
}

/// Attempt to convert pshufb* to shufflevector if the mask is constant.
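/// Illustrative example (assumed, not from the original source): a control
/// byte with bit 7 set selects zero for that result byte, so a constant mask
/// of <16 x i8> <i8 -128, i8 0, ...> maps result byte 0 to the zero vector
/// (second shuffle operand) and result byte 1 to source byte 0.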
static Value *simplifyX86pshufb(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned NumElts = VecTy->getNumElements();
  assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
         "Unexpected number of elements in shuffle mask!");

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[64] = {nullptr};

  // Each byte in the shuffle control mask forms an index to permute the
  // corresponding byte in the destination operand.
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    int8_t Index = cast<ConstantInt>(COp)->getValue().getZExtValue();

    // If the most significant bit (bit[7]) of each byte of the shuffle
    // control mask is set, then zero is written in the result byte.
    // The zero vector is in the right-hand side of the resulting
    // shufflevector.

    // The value of each index for the high 128-bit lane is the least
    // significant 4 bits of the respective shuffle control byte.
    Index = ((Index < 0) ? NumElts : Index & 0x0F) + (I & 0xF0);
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
  auto V1 = II.getArgOperand(0);
  auto V2 = Constant::getNullValue(VecTy);
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
                                    InstCombiner::BuilderTy &Builder) {
  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned NumElts = VecTy->getVectorNumElements();
  bool IsPD = VecTy->getScalarType()->isDoubleTy();
  unsigned NumLaneElts = IsPD ? 2 : 4;
  assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2);

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[16] = {nullptr};

  // The intrinsics only read one or two bits, clear the rest.
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    APInt Index = cast<ConstantInt>(COp)->getValue();
    Index = Index.zextOrTrunc(32).getLoBits(2);

    // The PD variants use bit 1 to select the per-lane element index, so
    // shift down to convert to a generic shuffle mask index.
    if (IsPD)
      Index.lshrInPlace(1);

    // The _256 variants are a bit trickier since the mask bits always index
    // into the corresponding 128-bit half. In order to convert to a generic
    // shuffle, we have to make that explicit.
    Index += APInt(32, (I / NumLaneElts) * NumLaneElts);

    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
  auto V1 = II.getArgOperand(0);
  auto V2 = UndefValue::get(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
static Value *simplifyX86vpermv(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  auto *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned Size = VecTy->getNumElements();
  assert((Size == 4 || Size == 8 || Size == 16 || Size == 32 || Size == 64) &&
         "Unexpected shuffle mask size");

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[64] = {nullptr};

  for (unsigned I = 0; I < Size; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    uint32_t Index = cast<ConstantInt>(COp)->getZExtValue();
    Index &= Size - 1;
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, Size));
  auto V1 = II.getArgOperand(0);
  auto V2 = UndefValue::get(VecTy);
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

// TODO, Obvious Missing Transforms:
// * Narrow width by halves excluding zero/undef lanes
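// Illustrative example (assumed IR, not from the original source): with an
// all-ones mask,
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %p, i32 4,
//            <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
// becomes a plain 'load <4 x i32>, <4 x i32>* %p, align 4'; a provably
// dereferenceable pointer instead yields an unconditional load followed by a
// select against the passthru operand.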
Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();

  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(2)))
    return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                     "unmaskedload");

  // If we can unconditionally load from this address, replace with a
  // load/select idiom. TODO: use DT for context sensitive query
  if (isDereferenceableAndAlignedPointer(LoadPtr, II.getType(), Alignment,
                                         II.getModule()->getDataLayout(),
                                         &II, nullptr)) {
    Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                          "unmaskedload");
    return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Narrow width by halves excluding zero/undef lanes
Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    unsigned Alignment = cast<ConstantInt>(II.getArgOperand(2))->getZExtValue();
    return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
  }

  // Use masked-off lanes to simplify operands via SimplifyDemandedVectorElts.
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
                                            DemandedElts, UndefElts)) {
    II.setOperand(0, V);
    return &II;
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane load -> load
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
// * Adjacent vector addresses -> masked.load
// * Narrow width by halves excluding zero/undef lanes
// * Vector splat address w/known mask -> scalar load
// * Vector incrementing address -> vector masked load
Instruction *InstCombiner::simplifyMaskedGather(IntrinsicInst &II) {
  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Adjacent vector addresses -> masked.store
// * Narrow store width by halves excluding zero/undef lanes
// * Vector splat address w/known mask -> scalar store
// * Vector incrementing address -> vector masked store
Instruction *InstCombiner::simplifyMaskedScatter(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // Use masked-off lanes to simplify operands via SimplifyDemandedVectorElts.
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
                                            DemandedElts, UndefElts)) {
    II.setOperand(0, V);
    return &II;
  }
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1),
                                            DemandedElts, UndefElts)) {
    II.setOperand(1, V);
    return &II;
  }

  return nullptr;
}

/// This function transforms launder.invariant.group and strip.invariant.group
/// calls like:
///   launder(launder(%x)) -> launder(%x) (the result is not the argument)
///   launder(strip(%x))   -> launder(%x)
///   strip(strip(%x))     -> strip(%x)   (the result is not the argument)
///   strip(launder(%x))   -> strip(%x)
/// This is legal because it preserves the most recent information about
/// the presence or absence of invariant.group.
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombiner &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = Arg->stripPointerCastsAndInvariantGroups();
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.

  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
  if (Result->getType() != II.getType())
    Result = IC.Builder.CreateBitCast(Result, II.getType());

  return cast<Instruction>(Result);
}

static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *X;
  // ctlz(bitreverse(x)) -> cttz(x)
  // cttz(bitreverse(x)) -> ctlz(x)
  if (match(Op0, m_BitReverse(m_Value(X)))) {
    Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
    Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
    return CallInst::Create(F, {X, II.getArgOperand(1)});
  }

  if (IsTZ) {
    // cttz(-x) -> cttz(x)
    if (match(Op0, m_Neg(m_Value(X)))) {
      II.setOperand(0, X);
      return &II;
    }

    // cttz(abs(x))  -> cttz(x)
    // cttz(nabs(x)) -> cttz(x)
    Value *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS) {
      II.setOperand(0, X);
      return &II;
    }
  }

  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
                                : Known.countMaxLeadingZeros();
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
                                : Known.countMinLeadingZeros();

  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsUndef' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (!Known.One.isNullValue() ||
      isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
                     &IC.getDominatorTree())) {
    if (!match(II.getArgOperand(1), m_One())) {
      II.setOperand(1, IC.Builder.getTrue());
      return &II;
    }
  }

  // Add range metadata since known bits can't completely reflect what we know.
  // TODO: Handle splat vectors.
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
        ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

static Instruction *foldCtpop(IntrinsicInst &II, InstCombiner &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Value *Op0 = II.getArgOperand(0);
  Value *X;
  // ctpop(bitreverse(x)) -> ctpop(x)
  // ctpop(bswap(x))      -> ctpop(x)
  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X)))) {
    II.setOperand(0, X);
    return &II;
  }

  // FIXME: Try to simplify vectors of integers.
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (!IT)
    return nullptr;

  unsigned BitWidth = IT->getBitWidth();
  KnownBits Known(BitWidth);
  IC.computeKnownBits(Op0, Known, 0, &II);

  unsigned MinCount = Known.countMinPopulation();
  unsigned MaxCount = Known.countMaxPopulation();

  // Add range metadata since known bits can't completely reflect what we know.
  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
        ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

// TODO: If the x86 backend knew how to convert a bool vector mask back to an
// XMM register mask efficiently, we could transform all x86 masked intrinsics
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
  Value *Ptr = II.getOperand(0);
  Value *Mask = II.getOperand(1);
  Constant *ZeroVec = Constant::getNullValue(II.getType());

  // Special case a zero mask since that's not a ConstantDataVector.
  // This masked load instruction creates a zero vector.
  if (isa<ConstantAggregateZero>(Mask))
    return IC.replaceInstUsesWith(II, ZeroVec);

  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
  if (!ConstMask)
    return nullptr;

  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
  // to allow target-independent optimizations.

  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
  // the LLVM intrinsic definition for the pointer argument.
  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
  PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
  Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");

  // Second, convert the x86 XMM integer vector mask to a vector of bools based
  // on each element's most significant bit (the sign bit).
  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);

  // The pass-through vector for an x86 masked load is a zero vector.
  CallInst *NewMaskedLoad =
      IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
  return IC.replaceInstUsesWith(II, NewMaskedLoad);
}

// TODO: If the x86 backend knew how to convert a bool vector mask back to an
// XMM register mask efficiently, we could transform all x86 masked intrinsics
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
  Value *Ptr = II.getOperand(0);
  Value *Mask = II.getOperand(1);
  Value *Vec = II.getOperand(2);

  // Special case a zero mask since that's not a ConstantDataVector:
  // this masked store instruction does nothing.
  if (isa<ConstantAggregateZero>(Mask)) {
    IC.eraseInstFromFunction(II);
    return true;
  }

  // The SSE2 version is too weird (eg, unaligned but non-temporal) to do
  // anything else at this level.
  if (II.getIntrinsicID() == Intrinsic::x86_sse2_maskmov_dqu)
    return false;

  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
  if (!ConstMask)
    return false;

  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
  // to allow target-independent optimizations.

  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
  // the LLVM intrinsic definition for the pointer argument.
  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
  PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
  Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");

  // Second, convert the x86 XMM integer vector mask to a vector of bools based
  // on each element's most significant bit (the sign bit).
  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);

  IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);

  // 'Replace uses' doesn't work for stores. Erase the original masked store.
  IC.eraseInstFromFunction(II);
  return true;
}

// Constant fold llvm.amdgcn.fmed3 intrinsics for standard inputs.
//
// A single NaN input is folded to minnum, so we rely on that folding for
// handling NaNs.
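// Illustrative example (assumed, not from the original source):
// fmed3AMDGCN(1.0, 3.0, 2.0) computes Max3 = 3.0 (equal to Src1), so it
// returns maxnum(Src0, Src2) = maxnum(1.0, 2.0) = 2.0, the median.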
static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
                           const APFloat &Src2) {
  APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);

  APFloat::cmpResult Cmp0 = Max3.compare(Src0);
  assert(Cmp0 != APFloat::cmpUnordered && "nans handled separately");
  if (Cmp0 == APFloat::cmpEqual)
    return maxnum(Src1, Src2);

  APFloat::cmpResult Cmp1 = Max3.compare(Src1);
  assert(Cmp1 != APFloat::cmpUnordered && "nans handled separately");
  if (Cmp1 == APFloat::cmpEqual)
    return maxnum(Src0, Src2);

  return maxnum(Src0, Src1);
}

/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  uint32_t Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if (Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *ShuffleMask = ConstantDataVector::get(II.getContext(),
                                              makeArrayRef(Indexes));
  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Convert a vector load intrinsic into a simple llvm load instruction.
/// This is beneficial when the underlying object being addressed comes
/// from a constant, since we get constant-folding for free.
static Value *simplifyNeonVld1(const IntrinsicInst &II,
                               unsigned MemAlign,
                               InstCombiner::BuilderTy &Builder) {
  auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));

  if (!IntrAlign)
    return nullptr;

  unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
                           ? MemAlign
                           : IntrAlign->getLimitedValue();

  if (!isPowerOf2_32(Alignment))
    return nullptr;

  auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
                                          PointerType::get(II.getType(), 0));
  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
  assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
//   call @llvm.foo.start(i1 0)  ; &I
//   call @llvm.foo.start(i1 0)
//   call @llvm.foo.end(i1 0)    ; This one will not be skipped: it will be
//                               ; removed
//   call @llvm.foo.end(i1 0)
static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID,
                                      unsigned EndID, InstCombiner &IC) {
  assert(I.getIntrinsicID() == StartID &&
         "Start intrinsic does not have expected ID");
  BasicBlock::iterator BI(I), BE(I.getParent()->end());
  for (++BI; BI != BE; ++BI) {
    if (auto *E = dyn_cast<IntrinsicInst>(BI)) {
      if (isa<DbgInfoIntrinsic>(E) || E->getIntrinsicID() == StartID)
        continue;
      if (E->getIntrinsicID() == EndID &&
          haveSameOperands(I, *E, E->getNumArgOperands())) {
        IC.eraseInstFromFunction(*E);
        IC.eraseInstFromFunction(I);
        return true;
      }
    }
    break;
  }

  return false;
}
1500 
1501 // Convert NVVM intrinsics to target-generic LLVM code where possible.
1502 static Instruction *SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
1503  // Each NVVM intrinsic we can simplify can be replaced with one of:
1504  //
1505  // * an LLVM intrinsic,
1506  // * an LLVM cast operation,
1507  // * an LLVM binary operation, or
1508  // * ad-hoc LLVM IR for the particular operation.
1509 
1510  // Some transformations are only valid when the module's
1511  // flush-denormals-to-zero (ftz) setting is true/false, whereas other
1512  // transformations are valid regardless of the module's ftz setting.
1513  enum FtzRequirementTy {
1514  FTZ_Any, // Any ftz setting is ok.
1515  FTZ_MustBeOn, // Transformation is valid only if ftz is on.
1516  FTZ_MustBeOff, // Transformation is valid only if ftz is off.
1517  };
1518  // Classes of NVVM intrinsics that can't be replaced one-to-one with a
1519  // target-generic intrinsic, cast op, or binary op but that we can nonetheless
1520  // simplify.
1521  enum SpecialCase {
1522  SPC_Reciprocal,
1523  };
1524 
1525  // SimplifyAction is a poor-man's variant (plus an additional flag) that
1526  // represents how to replace an NVVM intrinsic with target-generic LLVM IR.
1527  struct SimplifyAction {
1528  // Invariant: At most one of these Optionals has a value.
1529  Optional<Intrinsic::ID> IID;
1530  Optional<Instruction::CastOps> CastOp;
1531  Optional<Instruction::BinaryOps> BinaryOp;
1532  Optional<SpecialCase> Special;
1533 
1534  FtzRequirementTy FtzRequirement = FTZ_Any;
1535 
1536  SimplifyAction() = default;
1537 
1538  SimplifyAction(Intrinsic::ID IID, FtzRequirementTy FtzReq)
1539  : IID(IID), FtzRequirement(FtzReq) {}
1540 
1541  // Cast operations don't have anything to do with FTZ, so we skip that
1542  // argument.
1543  SimplifyAction(Instruction::CastOps CastOp) : CastOp(CastOp) {}
1544 
1545  SimplifyAction(Instruction::BinaryOps BinaryOp, FtzRequirementTy FtzReq)
1546  : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {}
1547 
1548  SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq)
1549  : Special(Special), FtzRequirement(FtzReq) {}
1550  };
1551 
1552  // Try to generate a SimplifyAction describing how to replace our
1553  // IntrinsicInst with target-generic LLVM IR.
1554  const SimplifyAction Action = [II]() -> SimplifyAction {
1555  switch (II->getIntrinsicID()) {
1556  // NVVM intrinsics that map directly to LLVM intrinsics.
1557  case Intrinsic::nvvm_ceil_d:
1558  return {Intrinsic::ceil, FTZ_Any};
1559  case Intrinsic::nvvm_ceil_f:
1560  return {Intrinsic::ceil, FTZ_MustBeOff};
1561  case Intrinsic::nvvm_ceil_ftz_f:
1562  return {Intrinsic::ceil, FTZ_MustBeOn};
1563  case Intrinsic::nvvm_fabs_d:
1564  return {Intrinsic::fabs, FTZ_Any};
1565  case Intrinsic::nvvm_fabs_f:
1566  return {Intrinsic::fabs, FTZ_MustBeOff};
1567  case Intrinsic::nvvm_fabs_ftz_f:
1568  return {Intrinsic::fabs, FTZ_MustBeOn};
1569  case Intrinsic::nvvm_floor_d:
1570  return {Intrinsic::floor, FTZ_Any};
1571  case Intrinsic::nvvm_floor_f:
1572  return {Intrinsic::floor, FTZ_MustBeOff};
1573  case Intrinsic::nvvm_floor_ftz_f:
1574  return {Intrinsic::floor, FTZ_MustBeOn};
1575  case Intrinsic::nvvm_fma_rn_d:
1576  return {Intrinsic::fma, FTZ_Any};
1577  case Intrinsic::nvvm_fma_rn_f:
1578  return {Intrinsic::fma, FTZ_MustBeOff};
1579  case Intrinsic::nvvm_fma_rn_ftz_f:
1580  return {Intrinsic::fma, FTZ_MustBeOn};
1581  case Intrinsic::nvvm_fmax_d:
1582  return {Intrinsic::maxnum, FTZ_Any};
1583  case Intrinsic::nvvm_fmax_f:
1584  return {Intrinsic::maxnum, FTZ_MustBeOff};
1585  case Intrinsic::nvvm_fmax_ftz_f:
1586  return {Intrinsic::maxnum, FTZ_MustBeOn};
1587  case Intrinsic::nvvm_fmin_d:
1588  return {Intrinsic::minnum, FTZ_Any};
1589  case Intrinsic::nvvm_fmin_f:
1590  return {Intrinsic::minnum, FTZ_MustBeOff};
1591  case Intrinsic::nvvm_fmin_ftz_f:
1592  return {Intrinsic::minnum, FTZ_MustBeOn};
1593  case Intrinsic::nvvm_round_d:
1594  return {Intrinsic::round, FTZ_Any};
1595  case Intrinsic::nvvm_round_f:
1596  return {Intrinsic::round, FTZ_MustBeOff};
1597  case Intrinsic::nvvm_round_ftz_f:
1598  return {Intrinsic::round, FTZ_MustBeOn};
1599  case Intrinsic::nvvm_sqrt_rn_d:
1600  return {Intrinsic::sqrt, FTZ_Any};
1601  case Intrinsic::nvvm_sqrt_f:
1602  // nvvm_sqrt_f is a special case. For most intrinsics, foo_ftz_f is the
1603  // ftz version, and foo_f is the non-ftz version. But nvvm_sqrt_f adopts
1604  // the ftz-ness of the surrounding code. sqrt_rn_f and sqrt_rn_ftz_f are
1605  // the versions with explicit ftz-ness.
1606  return {Intrinsic::sqrt, FTZ_Any};
1607  case Intrinsic::nvvm_sqrt_rn_f:
1608  return {Intrinsic::sqrt, FTZ_MustBeOff};
1609  case Intrinsic::nvvm_sqrt_rn_ftz_f:
1610  return {Intrinsic::sqrt, FTZ_MustBeOn};
1611  case Intrinsic::nvvm_trunc_d:
1612  return {Intrinsic::trunc, FTZ_Any};
1613  case Intrinsic::nvvm_trunc_f:
1614  return {Intrinsic::trunc, FTZ_MustBeOff};
1615  case Intrinsic::nvvm_trunc_ftz_f:
1616  return {Intrinsic::trunc, FTZ_MustBeOn};
1617 
1618  // NVVM intrinsics that map to LLVM cast operations.
1619  //
1620  // Note that llvm's target-generic conversion operators correspond to the rz
1621  // (round to zero) versions of the nvvm conversion intrinsics, even though
1622  // almost everything else here uses the rn (round to nearest even) nvvm ops.
1623  case Intrinsic::nvvm_d2i_rz:
1624  case Intrinsic::nvvm_f2i_rz:
1625  case Intrinsic::nvvm_d2ll_rz:
1626  case Intrinsic::nvvm_f2ll_rz:
1627  return {Instruction::FPToSI};
1628  case Intrinsic::nvvm_d2ui_rz:
1629  case Intrinsic::nvvm_f2ui_rz:
1630  case Intrinsic::nvvm_d2ull_rz:
1631  case Intrinsic::nvvm_f2ull_rz:
1632  return {Instruction::FPToUI};
1633  case Intrinsic::nvvm_i2d_rz:
1634  case Intrinsic::nvvm_i2f_rz:
1635  case Intrinsic::nvvm_ll2d_rz:
1636  case Intrinsic::nvvm_ll2f_rz:
1637  return {Instruction::SIToFP};
1638  case Intrinsic::nvvm_ui2d_rz:
1639  case Intrinsic::nvvm_ui2f_rz:
1640  case Intrinsic::nvvm_ull2d_rz:
1641  case Intrinsic::nvvm_ull2f_rz:
1642  return {Instruction::UIToFP};
1643 
1644  // NVVM intrinsics that map to LLVM binary ops.
1645  case Intrinsic::nvvm_add_rn_d:
1646  return {Instruction::FAdd, FTZ_Any};
1647  case Intrinsic::nvvm_add_rn_f:
1648  return {Instruction::FAdd, FTZ_MustBeOff};
1649  case Intrinsic::nvvm_add_rn_ftz_f:
1650  return {Instruction::FAdd, FTZ_MustBeOn};
1651  case Intrinsic::nvvm_mul_rn_d:
1652  return {Instruction::FMul, FTZ_Any};
1653  case Intrinsic::nvvm_mul_rn_f:
1654  return {Instruction::FMul, FTZ_MustBeOff};
1655  case Intrinsic::nvvm_mul_rn_ftz_f:
1656  return {Instruction::FMul, FTZ_MustBeOn};
1657  case Intrinsic::nvvm_div_rn_d:
1658  return {Instruction::FDiv, FTZ_Any};
1659  case Intrinsic::nvvm_div_rn_f:
1660  return {Instruction::FDiv, FTZ_MustBeOff};
1661  case Intrinsic::nvvm_div_rn_ftz_f:
1662  return {Instruction::FDiv, FTZ_MustBeOn};
1663 
1664  // The remaining cases are NVVM intrinsics that map to LLVM idioms but
1665  // need special handling.
1666  //
1667  // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just
1668  // as well.
1669  case Intrinsic::nvvm_rcp_rn_d:
1670  return {SPC_Reciprocal, FTZ_Any};
1671  case Intrinsic::nvvm_rcp_rn_f:
1672  return {SPC_Reciprocal, FTZ_MustBeOff};
1673  case Intrinsic::nvvm_rcp_rn_ftz_f:
1674  return {SPC_Reciprocal, FTZ_MustBeOn};
1675 
1676  // We do not currently simplify intrinsics that give an approximate answer.
1677  // These include:
1678  //
1679  // - nvvm_cos_approx_{f,ftz_f}
1680  // - nvvm_ex2_approx_{d,f,ftz_f}
1681  // - nvvm_lg2_approx_{d,f,ftz_f}
1682  // - nvvm_sin_approx_{f,ftz_f}
1683  // - nvvm_sqrt_approx_{f,ftz_f}
1684  // - nvvm_rsqrt_approx_{d,f,ftz_f}
1685  // - nvvm_div_approx_{ftz_d,ftz_f,f}
1686  // - nvvm_rcp_approx_ftz_d
1687  //
1688  // Ideally we'd encode them as e.g. "fast call @llvm.cos", where "fast"
1689  // means that fastmath is enabled in the intrinsic. Unfortunately only
1690  // binary operators (currently) have a fastmath bit in SelectionDAG, so this
1691  // information gets lost and we can't select on it.
1692  //
1693  // TODO: div and rcp are lowered to binary ops, so in theory we could
1694  // lower them to "fast fdiv".
1695 
1696  default:
1697  return {};
1698  }
1699  }();
1700 
1701  // If Action.FtzRequirement is not satisfied by the module's ftz state, we
1702  // can bail out now. (Notice that in the case that IID is not an NVVM
1703  // intrinsic, we don't have to look up any module metadata, as
1704  // FtzRequirementTy will be FTZ_Any.)
1705  if (Action.FtzRequirement != FTZ_Any) {
1706  bool FtzEnabled =
1707  II->getFunction()->getFnAttribute("nvptx-f32ftz").getValueAsString() ==
1708  "true";
1709 
1710  if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn))
1711  return nullptr;
1712  }
1713 
1714  // Simplify to target-generic intrinsic.
1715  if (Action.IID) {
1716  SmallVector<Value *, 4> Args(II->arg_operands());
1717  // All the target-generic intrinsics currently of interest to us have one
1718  // type argument, equal to that of the nvvm intrinsic's argument.
1719  Type *Tys[] = {II->getArgOperand(0)->getType()};
1720  return CallInst::Create(
1721  Intrinsic::getDeclaration(II->getModule(), *Action.IID, Tys), Args);
1722  }
1723 
1724  // Simplify to target-generic binary op.
1725  if (Action.BinaryOp)
1726  return BinaryOperator::Create(*Action.BinaryOp, II->getArgOperand(0),
1727  II->getArgOperand(1), II->getName());
1728 
1729  // Simplify to target-generic cast op.
1730  if (Action.CastOp)
1731  return CastInst::Create(*Action.CastOp, II->getArgOperand(0), II->getType(),
1732  II->getName());
1733 
1734  // All that's left are the special cases.
1735  if (!Action.Special)
1736  return nullptr;
1737 
1738  switch (*Action.Special) {
1739  case SPC_Reciprocal:
1740  // Simplify reciprocal.
1741  return BinaryOperator::Create(
1742  Instruction::FDiv, ConstantFP::get(II->getArgOperand(0)->getType(), 1),
1743  II->getArgOperand(0), II->getName());
1744  }
1745  llvm_unreachable("All SpecialCase enumerators should be handled in switch.");
1746 }
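 // Illustrative example of the mapping above (editor's example): the FTZ_Any
 // entry for nvvm_fmax_d means
 //   %r = call double @llvm.nvvm.fmax.d(double %a, double %b)
 // is rebuilt unconditionally as the target-generic
 //   %r = call double @llvm.maxnum.f64(double %a, double %b)
 // with no "nvptx-f32ftz" attribute check needed.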
1747 
1748 Instruction *InstCombiner::visitVAStartInst(VAStartInst &I) {
1749  removeTriviallyEmptyRange(I, Intrinsic::vastart, Intrinsic::vaend, *this);
1750  return nullptr;
1751 }
1752 
1753 Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
1754  removeTriviallyEmptyRange(I, Intrinsic::vacopy, Intrinsic::vaend, *this);
1755  return nullptr;
1756 }
1757 
1758 static Instruction *canonicalizeConstantArg0ToArg1(CallInst &Call) {
1759  assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
1760  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
1761  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
1762  Call.setArgOperand(0, Arg1);
1763  Call.setArgOperand(1, Arg0);
1764  return &Call;
1765  }
1766  return nullptr;
1767 }
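 // Illustrative example (editor's example): a call like
 //   call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 7, i32 %x)
 // has its operands swapped so the constant sits on the RHS, the canonical
 // form the overflow and saturation folds below expect.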
1768 
1769 Instruction *InstCombiner::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
1770  WithOverflowInst *WO = cast<WithOverflowInst>(II);
1771  Value *OperationResult = nullptr;
1772  Constant *OverflowResult = nullptr;
1773  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
1774  WO->getRHS(), *WO, OperationResult, OverflowResult))
1775  return CreateOverflowTuple(WO, OperationResult, OverflowResult);
1776  return nullptr;
1777 }
1778 
1779 /// CallInst simplification. This mostly only handles folding of intrinsic
1780 /// instructions. For normal calls, it allows visitCallBase to do the heavy
1781 /// lifting.
1782 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
1783  if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
1784  return replaceInstUsesWith(CI, V);
1785 
1786  if (isFreeCall(&CI, &TLI))
1787  return visitFree(CI);
1788 
1789  // If the caller function is nounwind, mark the call as nounwind, even if the
1790  // callee isn't.
1791  if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
1792  CI.setDoesNotThrow();
1793  return &CI;
1794  }
1795 
1796  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1797  if (!II) return visitCallBase(CI);
1798 
1799  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
1800  // instead of in visitCallBase.
1801  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1802  bool Changed = false;
1803 
1804  // memmove/cpy/set of zero bytes is a noop.
1805  if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1806  if (NumBytes->isNullValue())
1807  return eraseInstFromFunction(CI);
1808 
1809  if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
1810  if (CI->getZExtValue() == 1) {
1811  // Replace the instruction with just byte operations. We would
1812  // transform other cases to loads/stores, but we don't know if
1813  // alignment is sufficient.
1814  }
1815  }
1816 
1817  // No other transformations apply to volatile transfers.
1818  if (auto *M = dyn_cast<MemIntrinsic>(MI))
1819  if (M->isVolatile())
1820  return nullptr;
1821 
1822  // If we have a memmove and the source operation is a constant global,
1823  // then the source and dest pointers can't alias, so we can change this
1824  // into a call to memcpy.
1825  if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1826  if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1827  if (GVSrc->isConstant()) {
1828  Module *M = CI.getModule();
1829  Intrinsic::ID MemCpyID =
1830  isa<AtomicMemMoveInst>(MMI)
1831  ? Intrinsic::memcpy_element_unordered_atomic
1832  : Intrinsic::memcpy;
1833  Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1834  CI.getArgOperand(1)->getType(),
1835  CI.getArgOperand(2)->getType() };
1836  CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
1837  Changed = true;
1838  }
1839  }
1840 
1841  if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1842  // memmove(x,x,size) -> noop.
1843  if (MTI->getSource() == MTI->getDest())
1844  return eraseInstFromFunction(CI);
1845  }
1846 
1847  // If we can determine a pointer alignment that is bigger than currently
1848  // set, update the alignment.
1849  if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1850  if (Instruction *I = SimplifyAnyMemTransfer(MTI))
1851  return I;
1852  } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1853  if (Instruction *I = SimplifyAnyMemSet(MSI))
1854  return I;
1855  }
1856 
1857  if (Changed) return II;
1858  }
1859 
1860  // For vector result intrinsics, use the generic demanded vector support.
1861  if (II->getType()->isVectorTy()) {
1862  auto VWidth = II->getType()->getVectorNumElements();
1863  APInt UndefElts(VWidth, 0);
1864  APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
1865  if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
1866  if (V != II)
1867  return replaceInstUsesWith(*II, V);
1868  return II;
1869  }
1870  }
1871 
1872  if (Instruction *I = SimplifyNVVMIntrinsic(II, *this))
1873  return I;
1874 
1875  auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
1876  unsigned DemandedWidth) {
1877  APInt UndefElts(Width, 0);
1878  APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
1879  return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
1880  };
1881 
1882  Intrinsic::ID IID = II->getIntrinsicID();
1883  switch (IID) {
1884  default: break;
1885  case Intrinsic::objectsize:
1886  if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
1887  return replaceInstUsesWith(CI, V);
1888  return nullptr;
1889  case Intrinsic::bswap: {
1890  Value *IIOperand = II->getArgOperand(0);
1891  Value *X = nullptr;
1892 
1893  // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
1894  if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1895  unsigned C = X->getType()->getPrimitiveSizeInBits() -
1896  IIOperand->getType()->getPrimitiveSizeInBits();
1897  Value *CV = ConstantInt::get(X->getType(), C);
1898  Value *V = Builder.CreateLShr(X, CV);
1899  return new TruncInst(V, IIOperand->getType());
1900  }
1901  break;
1902  }
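 // Worked example of the bswap fold above (editor's example): for i32 %x,
 //   %b = call i32 @llvm.bswap.i32(i32 %x)
 //   %t = trunc i32 %b to i16
 //   %r = call i16 @llvm.bswap.i16(i16 %t)
 // gives C = 32 - 16 = 16, so %r becomes trunc(lshr(%x, 16)), i.e. the high
 // half of %x.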
1903  case Intrinsic::masked_load:
1904  if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
1905  return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1906  break;
1907  case Intrinsic::masked_store:
1908  return simplifyMaskedStore(*II);
1909  case Intrinsic::masked_gather:
1910  return simplifyMaskedGather(*II);
1911  case Intrinsic::masked_scatter:
1912  return simplifyMaskedScatter(*II);
1913  case Intrinsic::launder_invariant_group:
1914  case Intrinsic::strip_invariant_group:
1915  if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
1916  return replaceInstUsesWith(*II, SkippedBarrier);
1917  break;
1918  case Intrinsic::powi:
1919  if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
1920  // 0 and 1 are handled in instsimplify
1921 
1922  // powi(x, -1) -> 1/x
1923  if (Power->isMinusOne())
1924  return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
1925  II->getArgOperand(0));
1926  // powi(x, 2) -> x*x
1927  if (Power->equalsInt(2))
1928  return BinaryOperator::CreateFMul(II->getArgOperand(0),
1929  II->getArgOperand(0));
1930  }
1931  break;
1932 
1933  case Intrinsic::cttz:
1934  case Intrinsic::ctlz:
1935  if (auto *I = foldCttzCtlz(*II, *this))
1936  return I;
1937  break;
1938 
1939  case Intrinsic::ctpop:
1940  if (auto *I = foldCtpop(*II, *this))
1941  return I;
1942  break;
1943 
1944  case Intrinsic::fshl:
1945  case Intrinsic::fshr: {
1946  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
1947  Type *Ty = II->getType();
1948  unsigned BitWidth = Ty->getScalarSizeInBits();
1949  Constant *ShAmtC;
1950  if (match(II->getArgOperand(2), m_Constant(ShAmtC)) &&
1951  !isa<ConstantExpr>(ShAmtC) && !ShAmtC->containsConstantExpression()) {
1952  // Canonicalize a shift amount constant operand to modulo the bit-width.
1953  Constant *WidthC = ConstantInt::get(Ty, BitWidth);
1954  Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
1955  if (ModuloC != ShAmtC) {
1956  II->setArgOperand(2, ModuloC);
1957  return II;
1958  }
1959  assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
1960  ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
1961  "Shift amount expected to be modulo bitwidth");
1962 
1963  // Canonicalize funnel shift right by constant to funnel shift left. This
1964  // is not entirely arbitrary. For historical reasons, the backend may
1965  // recognize rotate left patterns but miss rotate right patterns.
1966  if (IID == Intrinsic::fshr) {
1967  // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
1968  Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
1969  Module *Mod = II->getModule();
1970  Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
1971  return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
1972  }
1973  assert(IID == Intrinsic::fshl &&
1974  "All funnel shifts by simple constants should go left");
1975 
1976  // fshl(X, 0, C) --> shl X, C
1977  // fshl(X, undef, C) --> shl X, C
1978  if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
1979  return BinaryOperator::CreateShl(Op0, ShAmtC);
1980 
1981  // fshl(0, X, C) --> lshr X, (BW-C)
1982  // fshl(undef, X, C) --> lshr X, (BW-C)
1983  if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
1984  return BinaryOperator::CreateLShr(Op1,
1985  ConstantExpr::getSub(WidthC, ShAmtC));
1986 
1987  // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
1988  if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
1989  Module *Mod = II->getModule();
1990  Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
1991  return CallInst::Create(Bswap, { Op0 });
1992  }
1993  }
1994 
1995  // Left or right might be masked.
1996  if (SimplifyDemandedInstructionBits(*II))
1997  return &CI;
1998 
1999  // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
2000  // so only the low bits of the shift amount are demanded if the bitwidth is
2001  // a power-of-2.
2002  if (!isPowerOf2_32(BitWidth))
2003  break;
2004  APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
2005  KnownBits Op2Known(BitWidth);
2006  if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
2007  return &CI;
2008  break;
2009  }
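 // Worked example (editor's example): fshr i32 %x, %y, 33 is first reduced
 // modulo the bit width to a shift amount of 1, and on a later visit the
 // fshr-by-1 is canonicalized to fshl i32 %x, %y, 31.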
2010  case Intrinsic::uadd_with_overflow:
2011  case Intrinsic::sadd_with_overflow: {
2012  if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2013  return I;
2014  if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2015  return I;
2016 
2017  // Given 2 constant operands whose sum does not overflow:
2018  // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
2019  // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
2020  Value *X;
2021  const APInt *C0, *C1;
2022  Value *Arg0 = II->getArgOperand(0);
2023  Value *Arg1 = II->getArgOperand(1);
2024  bool IsSigned = IID == Intrinsic::sadd_with_overflow;
2025  bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
2026  : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
2027  if (HasNWAdd && match(Arg1, m_APInt(C1))) {
2028  bool Overflow;
2029  APInt NewC =
2030  IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
2031  if (!Overflow)
2032  return replaceInstUsesWith(
2033  *II, Builder.CreateBinaryIntrinsic(
2034  IID, X, ConstantInt::get(Arg1->getType(), NewC)));
2035  }
2036  break;
2037  }
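 // Worked example of the constant merge above (editor's example): given
 //   %a = add nuw i32 %x, 5
 //   %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 7)
 // 5 + 7 = 12 does not overflow, so %r becomes
 //   call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 12)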
2038 
2039  case Intrinsic::umul_with_overflow:
2040  case Intrinsic::smul_with_overflow:
2041  if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2042  return I;
2043  LLVM_FALLTHROUGH;
2044 
2045  case Intrinsic::usub_with_overflow:
2046  if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2047  return I;
2048  break;
2049 
2050  case Intrinsic::ssub_with_overflow: {
2051  if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2052  return I;
2053 
2054  Constant *C;
2055  Value *Arg0 = II->getArgOperand(0);
2056  Value *Arg1 = II->getArgOperand(1);
2057  // Given a constant C that is not the minimum signed value
2058  // for an integer of a given bit width:
2059  //
2060  // ssubo X, C -> saddo X, -C
2061  if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
2062  Value *NegVal = ConstantExpr::getNeg(C);
2063  // Build a saddo call that is equivalent to the discovered
2064  // ssubo call.
2065  return replaceInstUsesWith(
2066  *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
2067  Arg0, NegVal));
2068  }
2069 
2070  break;
2071  }
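 // Worked example (editor's example):
 //   call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %x, i32 5)
 // becomes
 //   call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 -5)
 // The INT_MIN exclusion matters because negating INT_MIN would overflow.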
2072 
2073  case Intrinsic::uadd_sat:
2074  case Intrinsic::sadd_sat:
2075  if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2076  return I;
2077  LLVM_FALLTHROUGH;
2078  case Intrinsic::usub_sat:
2079  case Intrinsic::ssub_sat: {
2080  SaturatingInst *SI = cast<SaturatingInst>(II);
2081  Type *Ty = SI->getType();
2082  Value *Arg0 = SI->getLHS();
2083  Value *Arg1 = SI->getRHS();
2084 
2085  // Make use of known overflow information.
2086  OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
2087  Arg0, Arg1, SI);
2088  switch (OR) {
2089  case OverflowResult::MayOverflow:
2090  break;
2091  case OverflowResult::NeverOverflows:
2092  if (SI->isSigned())
2093  return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
2094  else
2095  return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
2096  case OverflowResult::AlwaysOverflowsLow: {
2097  unsigned BitWidth = Ty->getScalarSizeInBits();
2098  APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
2099  return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
2100  }
2101  case OverflowResult::AlwaysOverflowsHigh: {
2102  unsigned BitWidth = Ty->getScalarSizeInBits();
2103  APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
2104  return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
2105  }
2106  }
2107 
2108  // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
2109  Constant *C;
2110  if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
2111  C->isNotMinSignedValue()) {
2112  Value *NegVal = ConstantExpr::getNeg(C);
2113  return replaceInstUsesWith(
2114  *II, Builder.CreateBinaryIntrinsic(
2115  Intrinsic::sadd_sat, Arg0, NegVal));
2116  }
2117 
2118  // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
2119  // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
2120  // if Val and Val2 have the same sign
2121  if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
2122  Value *X;
2123  const APInt *Val, *Val2;
2124  APInt NewVal;
2125  bool IsUnsigned =
2126  IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
2127  if (Other->getIntrinsicID() == IID &&
2128  match(Arg1, m_APInt(Val)) &&
2129  match(Other->getArgOperand(0), m_Value(X)) &&
2130  match(Other->getArgOperand(1), m_APInt(Val2))) {
2131  if (IsUnsigned)
2132  NewVal = Val->uadd_sat(*Val2);
2133  else if (Val->isNonNegative() == Val2->isNonNegative()) {
2134  bool Overflow;
2135  NewVal = Val->sadd_ov(*Val2, Overflow);
2136  if (Overflow) {
2137  // Both adds together may add more than SignedMaxValue
2138  // without saturating the final result.
2139  break;
2140  }
2141  } else {
2142  // Cannot fold saturated addition with different signs.
2143  break;
2144  }
2145 
2146  return replaceInstUsesWith(
2147  *II, Builder.CreateBinaryIntrinsic(
2148  IID, X, ConstantInt::get(II->getType(), NewVal)));
2149  }
2150  }
2151  break;
2152  }
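 // Worked example of the nested-saturation fold above (editor's example):
 //   %t = call i8 @llvm.uadd.sat.i8(i8 %x, i8 20)
 //   %r = call i8 @llvm.uadd.sat.i8(i8 %t, i8 30)
 // folds to call i8 @llvm.uadd.sat.i8(i8 %x, i8 50): saturating at the
 // inner add or only at the outer one yields the same final value.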
2153 
2154  case Intrinsic::minnum:
2155  case Intrinsic::maxnum:
2156  case Intrinsic::minimum:
2157  case Intrinsic::maximum: {
2158  if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2159  return I;
2160  Value *Arg0 = II->getArgOperand(0);
2161  Value *Arg1 = II->getArgOperand(1);
2162  Value *X, *Y;
2163  if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
2164  (Arg0->hasOneUse() || Arg1->hasOneUse())) {
2165  // If both operands are negated, invert the call and negate the result:
2166  // min(-X, -Y) --> -(max(X, Y))
2167  // max(-X, -Y) --> -(min(X, Y))
2168  Intrinsic::ID NewIID;
2169  switch (IID) {
2170  case Intrinsic::maxnum:
2171  NewIID = Intrinsic::minnum;
2172  break;
2173  case Intrinsic::minnum:
2174  NewIID = Intrinsic::maxnum;
2175  break;
2176  case Intrinsic::maximum:
2177  NewIID = Intrinsic::minimum;
2178  break;
2179  case Intrinsic::minimum:
2180  NewIID = Intrinsic::maximum;
2181  break;
2182  default:
2183  llvm_unreachable("unexpected intrinsic ID");
2184  }
2185  Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
2186  Instruction *FNeg = BinaryOperator::CreateFNeg(NewCall);
2187  FNeg->copyIRFlags(II);
2188  return FNeg;
2189  }
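 // Illustrative example of the negation fold above (editor's example):
 //   %nx = fneg float %x
 //   %ny = fneg float %y
 //   %r = call float @llvm.maxnum.f32(float %nx, float %ny)
 // becomes
 //   %m = call float @llvm.minnum.f32(float %x, float %y)
 //   %r = fneg float %m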
2190 
2191  // m(m(X, C2), C1) -> m(X, C)
2192  const APFloat *C1, *C2;
2193  if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2194  if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
2195  ((match(M->getArgOperand(0), m_Value(X)) &&
2196  match(M->getArgOperand(1), m_APFloat(C2))) ||
2197  (match(M->getArgOperand(1), m_Value(X)) &&
2198  match(M->getArgOperand(0), m_APFloat(C2))))) {
2199  APFloat Res(0.0);
2200  switch (IID) {
2201  case Intrinsic::maxnum:
2202  Res = maxnum(*C1, *C2);
2203  break;
2204  case Intrinsic::minnum:
2205  Res = minnum(*C1, *C2);
2206  break;
2207  case Intrinsic::maximum:
2208  Res = maximum(*C1, *C2);
2209  break;
2210  case Intrinsic::minimum:
2211  Res = minimum(*C1, *C2);
2212  break;
2213  default:
2214  llvm_unreachable("unexpected intrinsic ID");
2215  }
2216  Instruction *NewCall = Builder.CreateBinaryIntrinsic(
2217  IID, X, ConstantFP::get(Arg0->getType(), Res));
2218  NewCall->copyIRFlags(II);
2219  return replaceInstUsesWith(*II, NewCall);
2220  }
2221  }
2222 
2223  break;
2224  }
2225  case Intrinsic::fmuladd: {
2226  // Canonicalize fast fmuladd to the separate fmul + fadd.
2227  if (II->isFast()) {
2228  BuilderTy::FastMathFlagGuard Guard(Builder);
2229  Builder.setFastMathFlags(II->getFastMathFlags());
2230  Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
2231  II->getArgOperand(1));
2232  Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
2233  Add->takeName(II);
2234  return replaceInstUsesWith(*II, Add);
2235  }
2236 
2237  LLVM_FALLTHROUGH;
2238  }
2239  case Intrinsic::fma: {
2240  if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2241  return I;
2242 
2243  // fma fneg(x), fneg(y), z -> fma x, y, z
2244  Value *Src0 = II->getArgOperand(0);
2245  Value *Src1 = II->getArgOperand(1);
2246  Value *X, *Y;
2247  if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
2248  II->setArgOperand(0, X);
2249  II->setArgOperand(1, Y);
2250  return II;
2251  }
2252 
2253  // fma fabs(x), fabs(x), z -> fma x, x, z
2254  if (match(Src0, m_FAbs(m_Value(X))) &&
2255  match(Src1, m_FAbs(m_Specific(X)))) {
2256  II->setArgOperand(0, X);
2257  II->setArgOperand(1, X);
2258  return II;
2259  }
2260 
2261  // fma x, 1, z -> fadd x, z
2262  if (match(Src1, m_FPOne())) {
2263  auto *FAdd = BinaryOperator::CreateFAdd(Src0, II->getArgOperand(2));
2264  FAdd->copyFastMathFlags(II);
2265  return FAdd;
2266  }
2267 
2268  break;
2269  }
2270  case Intrinsic::fabs: {
2271  Value *Cond;
2272  Constant *LHS, *RHS;
2273  if (match(II->getArgOperand(0),
2274  m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) {
2275  CallInst *Call0 = Builder.CreateCall(II->getCalledFunction(), {LHS});
2276  CallInst *Call1 = Builder.CreateCall(II->getCalledFunction(), {RHS});
2277  return SelectInst::Create(Cond, Call0, Call1);
2278  }
2279 
2280  LLVM_FALLTHROUGH;
2281  }
2282  case Intrinsic::ceil:
2283  case Intrinsic::floor:
2284  case Intrinsic::round:
2285  case Intrinsic::nearbyint:
2286  case Intrinsic::rint:
2287  case Intrinsic::trunc: {
2288  Value *ExtSrc;
2289  if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
2290  // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
2291  Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
2292  return new FPExtInst(NarrowII, II->getType());
2293  }
2294  break;
2295  }
2296  case Intrinsic::cos:
2297  case Intrinsic::amdgcn_cos: {
2298  Value *X;
2299  Value *Src = II->getArgOperand(0);
2300  if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
2301  // cos(-x) -> cos(x)
2302  // cos(fabs(x)) -> cos(x)
2303  II->setArgOperand(0, X);
2304  return II;
2305  }
2306  break;
2307  }
2308  case Intrinsic::sin: {
2309  Value *X;
2310  if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
2311  // sin(-x) --> -sin(x)
2312  Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
2313  Instruction *FNeg = BinaryOperator::CreateFNeg(NewSin);
2314  FNeg->copyFastMathFlags(II);
2315  return FNeg;
2316  }
2317  break;
2318  }
2319  case Intrinsic::ppc_altivec_lvx:
2320  case Intrinsic::ppc_altivec_lvxl:
2321  // Turn PPC lvx -> load if the pointer is known aligned.
2322  if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2323  &DT) >= 16) {
2324  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2325  PointerType::getUnqual(II->getType()));
2326  return new LoadInst(II->getType(), Ptr);
2327  }
2328  break;
2329  case Intrinsic::ppc_vsx_lxvw4x:
2330  case Intrinsic::ppc_vsx_lxvd2x: {
2331  // Turn PPC VSX loads into normal loads.
2332  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2333  PointerType::getUnqual(II->getType()));
2334  return new LoadInst(II->getType(), Ptr, Twine(""), false, 1);
2335  }
2336  case Intrinsic::ppc_altivec_stvx:
2337  case Intrinsic::ppc_altivec_stvxl:
2338  // Turn stvx -> store if the pointer is known aligned.
2339  if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2340  &DT) >= 16) {
2341  Type *OpPtrTy =
2342  PointerType::getUnqual(II->getArgOperand(0)->getType());
2343  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2344  return new StoreInst(II->getArgOperand(0), Ptr);
2345  }
2346  break;
2347  case Intrinsic::ppc_vsx_stxvw4x:
2348  case Intrinsic::ppc_vsx_stxvd2x: {
2349  // Turn PPC VSX stores into normal stores.
2350  Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
2351  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2352  return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
2353  }
2354  case Intrinsic::ppc_qpx_qvlfs:
2355  // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
2356  if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2357  &DT) >= 16) {
2358  Type *VTy = VectorType::get(Builder.getFloatTy(),
2359  II->getType()->getVectorNumElements());
2360  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2361  PointerType::getUnqual(VTy));
2362  Value *Load = Builder.CreateLoad(VTy, Ptr);
2363  return new FPExtInst(Load, II->getType());
2364  }
2365  break;
2366  case Intrinsic::ppc_qpx_qvlfd:
2367  // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
2368  if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
2369  &DT) >= 32) {
2370  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2371  PointerType::getUnqual(II->getType()));
2372  return new LoadInst(II->getType(), Ptr);
2373  }
2374  break;
2375  case Intrinsic::ppc_qpx_qvstfs:
2376  // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
2377  if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2378  &DT) >= 16) {
2379  Type *VTy = VectorType::get(Builder.getFloatTy(),
2380  II->getArgOperand(0)->getType()->getVectorNumElements());
2381  Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
2382  Type *OpPtrTy = PointerType::getUnqual(VTy);
2383  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2384  return new StoreInst(TOp, Ptr);
2385  }
2386  break;
2387  case Intrinsic::ppc_qpx_qvstfd:
2388  // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
2389  if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
2390  &DT) >= 32) {
2391  Type *OpPtrTy =
2392  PointerType::getUnqual(II->getArgOperand(0)->getType());
2393  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2394  return new StoreInst(II->getArgOperand(0), Ptr);
2395  }
2396  break;
2397 
2398  case Intrinsic::x86_bmi_bextr_32:
2399  case Intrinsic::x86_bmi_bextr_64:
2400  case Intrinsic::x86_tbm_bextri_u32:
2401  case Intrinsic::x86_tbm_bextri_u64:
2402  // If the RHS is a constant we can try some simplifications.
2403  if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2404  uint64_t Shift = C->getZExtValue();
2405  uint64_t Length = (Shift >> 8) & 0xff;
2406  Shift &= 0xff;
2407  unsigned BitWidth = II->getType()->getIntegerBitWidth();
2408  // If the length is 0 or the shift is out of range, replace with zero.
2409  if (Length == 0 || Shift >= BitWidth)
2410  return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2411  // If the LHS is also a constant, we can completely constant fold this.
2412  if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2413  uint64_t Result = InC->getZExtValue() >> Shift;
2414  if (Length > BitWidth)
2415  Length = BitWidth;
2416  Result &= maskTrailingOnes<uint64_t>(Length);
2417  return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2418  }
2419  // TODO should we turn this into 'and' if shift is 0? Or 'shl' if we
2420  // are only masking bits that a shift already cleared?
2421  }
2422  break;
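 // Worked example (editor's example): bextr(0x12345678, 0x0804) encodes
 // Shift = 4 and Length = 8 in its control operand, so it constant-folds to
 // (0x12345678 >> 4) & 0xff = 0x67.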
2423 
2424  case Intrinsic::x86_bmi_bzhi_32:
2425  case Intrinsic::x86_bmi_bzhi_64:
2426  // If the RHS is a constant we can try some simplifications.
2427  if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2428  uint64_t Index = C->getZExtValue() & 0xff;
2429  unsigned BitWidth = II->getType()->getIntegerBitWidth();
2430  if (Index >= BitWidth)
2431  return replaceInstUsesWith(CI, II->getArgOperand(0));
2432  if (Index == 0)
2433  return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2434  // If the LHS is also a constant, we can completely constant fold this.
2435  if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2436  uint64_t Result = InC->getZExtValue();
2437  Result &= maskTrailingOnes<uint64_t>(Index);
2438  return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2439  }
2440  // TODO should we convert this to an AND if the RHS is constant?
2441  }
2442  break;
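 // Worked example (editor's example): bzhi(0x12345678, 8) keeps only the
 // low 8 bits and constant-folds to 0x78; an index >= the bit width simply
 // returns the first operand.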
2443 
2444  case Intrinsic::x86_vcvtph2ps_128:
2445  case Intrinsic::x86_vcvtph2ps_256: {
2446  auto Arg = II->getArgOperand(0);
2447  auto ArgType = cast<VectorType>(Arg->getType());
2448  auto RetType = cast<VectorType>(II->getType());
2449  unsigned ArgWidth = ArgType->getNumElements();
2450  unsigned RetWidth = RetType->getNumElements();
2451  assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
2452  assert(ArgType->isIntOrIntVectorTy() &&
2453  ArgType->getScalarSizeInBits() == 16 &&
2454  "CVTPH2PS input type should be 16-bit integer vector");
2455  assert(RetType->getScalarType()->isFloatTy() &&
2456  "CVTPH2PS output type should be 32-bit float vector");
2457 
2458  // Constant folding: convert to a generic half-to-float conversion.
2459  if (isa<ConstantAggregateZero>(Arg))
2460  return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
2461 
2462  if (isa<ConstantDataVector>(Arg)) {
2463  auto VectorHalfAsShorts = Arg;
2464  if (RetWidth < ArgWidth) {
2465  SmallVector<uint32_t, 8> SubVecMask;
2466  for (unsigned i = 0; i != RetWidth; ++i)
2467  SubVecMask.push_back((int)i);
2468  VectorHalfAsShorts = Builder.CreateShuffleVector(
2469  Arg, UndefValue::get(ArgType), SubVecMask);
2470  }
2471 
2472  auto VectorHalfType =
2473  VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
2474  auto VectorHalfs =
2475  Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
2476  auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
2477  return replaceInstUsesWith(*II, VectorFloats);
2478  }
2479 
2480  // We only use the lowest lanes of the argument.
2481  if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
2482  II->setArgOperand(0, V);
2483  return II;
2484  }
2485  break;
2486  }
2487 
2488  case Intrinsic::x86_sse_cvtss2si:
2489  case Intrinsic::x86_sse_cvtss2si64:
2490  case Intrinsic::x86_sse_cvttss2si:
2491  case Intrinsic::x86_sse_cvttss2si64:
2492  case Intrinsic::x86_sse2_cvtsd2si:
2493  case Intrinsic::x86_sse2_cvtsd2si64:
2494  case Intrinsic::x86_sse2_cvttsd2si:
2495  case Intrinsic::x86_sse2_cvttsd2si64:
2496  case Intrinsic::x86_avx512_vcvtss2si32:
2497  case Intrinsic::x86_avx512_vcvtss2si64:
2498  case Intrinsic::x86_avx512_vcvtss2usi32:
2499  case Intrinsic::x86_avx512_vcvtss2usi64:
2500  case Intrinsic::x86_avx512_vcvtsd2si32:
2501  case Intrinsic::x86_avx512_vcvtsd2si64:
2502  case Intrinsic::x86_avx512_vcvtsd2usi32:
2503  case Intrinsic::x86_avx512_vcvtsd2usi64:
2504  case Intrinsic::x86_avx512_cvttss2si:
2505  case Intrinsic::x86_avx512_cvttss2si64:
2506  case Intrinsic::x86_avx512_cvttss2usi:
2507  case Intrinsic::x86_avx512_cvttss2usi64:
2508  case Intrinsic::x86_avx512_cvttsd2si:
2509  case Intrinsic::x86_avx512_cvttsd2si64:
2510  case Intrinsic::x86_avx512_cvttsd2usi:
2511  case Intrinsic::x86_avx512_cvttsd2usi64: {
2512  // These intrinsics only demand the 0th element of their input vectors. If
2513  // we can simplify the input based on that, do so now.
2514  Value *Arg = II->getArgOperand(0);
2515  unsigned VWidth = Arg->getType()->getVectorNumElements();
2516  if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
2517  II->setArgOperand(0, V);
2518  return II;
2519  }
2520  break;
2521  }
2522 
2523  case Intrinsic::x86_mmx_pmovmskb:
2524  case Intrinsic::x86_sse_movmsk_ps:
2525  case Intrinsic::x86_sse2_movmsk_pd:
2526  case Intrinsic::x86_sse2_pmovmskb_128:
2527  case Intrinsic::x86_avx_movmsk_pd_256:
2528  case Intrinsic::x86_avx_movmsk_ps_256:
2529  case Intrinsic::x86_avx2_pmovmskb:
2530  if (Value *V = simplifyX86movmsk(*II, Builder))
2531  return replaceInstUsesWith(*II, V);
2532  break;
2533 
2534  case Intrinsic::x86_sse_comieq_ss:
2535  case Intrinsic::x86_sse_comige_ss:
2536  case Intrinsic::x86_sse_comigt_ss:
2537  case Intrinsic::x86_sse_comile_ss:
2538  case Intrinsic::x86_sse_comilt_ss:
2539  case Intrinsic::x86_sse_comineq_ss:
2540  case Intrinsic::x86_sse_ucomieq_ss:
2541  case Intrinsic::x86_sse_ucomige_ss:
2542  case Intrinsic::x86_sse_ucomigt_ss:
2543  case Intrinsic::x86_sse_ucomile_ss:
2544  case Intrinsic::x86_sse_ucomilt_ss:
2545  case Intrinsic::x86_sse_ucomineq_ss:
2546  case Intrinsic::x86_sse2_comieq_sd:
2547  case Intrinsic::x86_sse2_comige_sd:
2548  case Intrinsic::x86_sse2_comigt_sd:
2549  case Intrinsic::x86_sse2_comile_sd:
2550  case Intrinsic::x86_sse2_comilt_sd:
2551  case Intrinsic::x86_sse2_comineq_sd:
2552  case Intrinsic::x86_sse2_ucomieq_sd:
2553  case Intrinsic::x86_sse2_ucomige_sd:
2554  case Intrinsic::x86_sse2_ucomigt_sd:
2555  case Intrinsic::x86_sse2_ucomile_sd:
2556  case Intrinsic::x86_sse2_ucomilt_sd:
2557  case Intrinsic::x86_sse2_ucomineq_sd:
2558  case Intrinsic::x86_avx512_vcomi_ss:
2559  case Intrinsic::x86_avx512_vcomi_sd:
2560  case Intrinsic::x86_avx512_mask_cmp_ss:
2561  case Intrinsic::x86_avx512_mask_cmp_sd: {
2562  // These intrinsics only demand the 0th element of their input vectors. If
2563  // we can simplify the input based on that, do so now.
2564  bool MadeChange = false;
2565  Value *Arg0 = II->getArgOperand(0);
2566  Value *Arg1 = II->getArgOperand(1);
2567  unsigned VWidth = Arg0->getType()->getVectorNumElements();
2568  if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
2569  II->setArgOperand(0, V);
2570  MadeChange = true;
2571  }
2572  if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
2573  II->setArgOperand(1, V);
2574  MadeChange = true;
2575  }
2576  if (MadeChange)
2577  return II;
2578  break;
2579  }
2580  case Intrinsic::x86_avx512_cmp_pd_128:
2581  case Intrinsic::x86_avx512_cmp_pd_256:
2582  case Intrinsic::x86_avx512_cmp_pd_512:
2583  case Intrinsic::x86_avx512_cmp_ps_128:
2584  case Intrinsic::x86_avx512_cmp_ps_256:
2585  case Intrinsic::x86_avx512_cmp_ps_512: {
2586  // Folding cmp(sub(a,b),0) -> cmp(a,b) and cmp(0,sub(a,b)) -> cmp(b,a)
2587  Value *Arg0 = II->getArgOperand(0);
2588  Value *Arg1 = II->getArgOperand(1);
2589  bool Arg0IsZero = match(Arg0, m_PosZeroFP());
2590  if (Arg0IsZero)
2591  std::swap(Arg0, Arg1);
2592  Value *A, *B;
2593  // This fold requires only the NINF (no infinities) flag, since inf minus
2594  // inf is nan.
2595  // NSZ(No Signed Zeros) is not needed because zeros of any sign are
2596  // equal for both compares.
2597  // NNAN is not needed because nans compare the same for both compares.
2598  // The compare intrinsic uses the above assumptions and therefore
2599  // doesn't require additional flags.
2600  if ((match(Arg0, m_OneUse(m_FSub(m_Value(A), m_Value(B)))) &&
2601  match(Arg1, m_PosZeroFP()) && isa<Instruction>(Arg0) &&
2602  cast<Instruction>(Arg0)->getFastMathFlags().noInfs())) {
2603  if (Arg0IsZero)
2604  std::swap(A, B);
2605  II->setArgOperand(0, A);
2606  II->setArgOperand(1, B);
2607  return II;
2608  }
2609  break;
2610  }
2611 
2612  case Intrinsic::x86_avx512_add_ps_512:
2613  case Intrinsic::x86_avx512_div_ps_512:
2614  case Intrinsic::x86_avx512_mul_ps_512:
2615  case Intrinsic::x86_avx512_sub_ps_512:
2616  case Intrinsic::x86_avx512_add_pd_512:
2617  case Intrinsic::x86_avx512_div_pd_512:
2618  case Intrinsic::x86_avx512_mul_pd_512:
2619  case Intrinsic::x86_avx512_sub_pd_512:
2620  // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2621  // IR operations.
2622  if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2623  if (R->getValue() == 4) {
2624  Value *Arg0 = II->getArgOperand(0);
2625  Value *Arg1 = II->getArgOperand(1);
2626 
2627  Value *V;
2628  switch (IID) {
2629  default: llvm_unreachable("Case stmts out of sync!");
2630  case Intrinsic::x86_avx512_add_ps_512:
2631  case Intrinsic::x86_avx512_add_pd_512:
2632  V = Builder.CreateFAdd(Arg0, Arg1);
2633  break;
2634  case Intrinsic::x86_avx512_sub_ps_512:
2635  case Intrinsic::x86_avx512_sub_pd_512:
2636  V = Builder.CreateFSub(Arg0, Arg1);
2637  break;
2638  case Intrinsic::x86_avx512_mul_ps_512:
2639  case Intrinsic::x86_avx512_mul_pd_512:
2640  V = Builder.CreateFMul(Arg0, Arg1);
2641  break;
2642  case Intrinsic::x86_avx512_div_ps_512:
2643  case Intrinsic::x86_avx512_div_pd_512:
2644  V = Builder.CreateFDiv(Arg0, Arg1);
2645  break;
2646  }
2647 
2648  return replaceInstUsesWith(*II, V);
2649  }
2650  }
2651  break;
2652 
2653  case Intrinsic::x86_avx512_mask_add_ss_round:
2654  case Intrinsic::x86_avx512_mask_div_ss_round:
2655  case Intrinsic::x86_avx512_mask_mul_ss_round:
2656  case Intrinsic::x86_avx512_mask_sub_ss_round:
2657  case Intrinsic::x86_avx512_mask_add_sd_round:
2658  case Intrinsic::x86_avx512_mask_div_sd_round:
2659  case Intrinsic::x86_avx512_mask_mul_sd_round:
2660  case Intrinsic::x86_avx512_mask_sub_sd_round:
2661  // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2662  // IR operations.
2663  if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(4))) {
2664  if (R->getValue() == 4) {
2665  // Extract the element as scalars.
2666  Value *Arg0 = II->getArgOperand(0);
2667  Value *Arg1 = II->getArgOperand(1);
2668  Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
2669  Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);
2670 
2671  Value *V;
2672  switch (IID) {
2673  default: llvm_unreachable("Case stmts out of sync!");
2674  case Intrinsic::x86_avx512_mask_add_ss_round:
2675  case Intrinsic::x86_avx512_mask_add_sd_round:
2676  V = Builder.CreateFAdd(LHS, RHS);
2677  break;
2678  case Intrinsic::x86_avx512_mask_sub_ss_round:
2679  case Intrinsic::x86_avx512_mask_sub_sd_round:
2680  V = Builder.CreateFSub(LHS, RHS);
2681  break;
2682  case Intrinsic::x86_avx512_mask_mul_ss_round:
2683  case Intrinsic::x86_avx512_mask_mul_sd_round:
2684  V = Builder.CreateFMul(LHS, RHS);
2685  break;
2686  case Intrinsic::x86_avx512_mask_div_ss_round:
2687  case Intrinsic::x86_avx512_mask_div_sd_round:
2688  V = Builder.CreateFDiv(LHS, RHS);
2689  break;
2690  }
2691 
2692  // Handle the masking aspect of the intrinsic.
2693  Value *Mask = II->getArgOperand(3);
2694  auto *C = dyn_cast<ConstantInt>(Mask);
2695  // We don't need a select if we know the mask bit is a 1.
2696  if (!C || !C->getValue()[0]) {
2697  // Cast the mask to an i1 vector and then extract the lowest element.
2698  auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
2699  cast<IntegerType>(Mask->getType())->getBitWidth());
2700  Mask = Builder.CreateBitCast(Mask, MaskTy);
2701  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
2702  // Extract the lowest element from the passthru operand.
2703  Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
2704  (uint64_t)0);
2705  V = Builder.CreateSelect(Mask, V, Passthru);
2706  }
2707 
2708  // Insert the result back into the original argument 0.
2709  V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);
2710 
2711  return replaceInstUsesWith(*II, V);
2712  }
2713  }
2714  break;
2715 
2716  // Constant fold ashr( <A x Bi>, Ci ).
2717  // Constant fold lshr( <A x Bi>, Ci ).
2718  // Constant fold shl( <A x Bi>, Ci ).
2719  case Intrinsic::x86_sse2_psrai_d:
2720  case Intrinsic::x86_sse2_psrai_w:
2721  case Intrinsic::x86_avx2_psrai_d:
2722  case Intrinsic::x86_avx2_psrai_w:
2723  case Intrinsic::x86_avx512_psrai_q_128:
2724  case Intrinsic::x86_avx512_psrai_q_256:
2725  case Intrinsic::x86_avx512_psrai_d_512:
2726  case Intrinsic::x86_avx512_psrai_q_512:
2727  case Intrinsic::x86_avx512_psrai_w_512:
2728  case Intrinsic::x86_sse2_psrli_d:
2729  case Intrinsic::x86_sse2_psrli_q:
2730  case Intrinsic::x86_sse2_psrli_w:
2731  case Intrinsic::x86_avx2_psrli_d:
2732  case Intrinsic::x86_avx2_psrli_q:
2733  case Intrinsic::x86_avx2_psrli_w:
2734  case Intrinsic::x86_avx512_psrli_d_512:
2735  case Intrinsic::x86_avx512_psrli_q_512:
2736  case Intrinsic::x86_avx512_psrli_w_512:
2737  case Intrinsic::x86_sse2_pslli_d:
2738  case Intrinsic::x86_sse2_pslli_q:
2739  case Intrinsic::x86_sse2_pslli_w:
2740  case Intrinsic::x86_avx2_pslli_d:
2741  case Intrinsic::x86_avx2_pslli_q:
2742  case Intrinsic::x86_avx2_pslli_w:
2743  case Intrinsic::x86_avx512_pslli_d_512:
2744  case Intrinsic::x86_avx512_pslli_q_512:
2745  case Intrinsic::x86_avx512_pslli_w_512:
2746  if (Value *V = simplifyX86immShift(*II, Builder))
2747  return replaceInstUsesWith(*II, V);
2748  break;
2749 
2750  case Intrinsic::x86_sse2_psra_d:
2751  case Intrinsic::x86_sse2_psra_w:
2752  case Intrinsic::x86_avx2_psra_d:
2753  case Intrinsic::x86_avx2_psra_w:
2754  case Intrinsic::x86_avx512_psra_q_128:
2755  case Intrinsic::x86_avx512_psra_q_256:
2756  case Intrinsic::x86_avx512_psra_d_512:
2757  case Intrinsic::x86_avx512_psra_q_512:
2758  case Intrinsic::x86_avx512_psra_w_512:
2759  case Intrinsic::x86_sse2_psrl_d:
2760  case Intrinsic::x86_sse2_psrl_q:
2761  case Intrinsic::x86_sse2_psrl_w:
2762  case Intrinsic::x86_avx2_psrl_d:
2763  case Intrinsic::x86_avx2_psrl_q:
2764  case Intrinsic::x86_avx2_psrl_w:
2765  case Intrinsic::x86_avx512_psrl_d_512:
2766  case Intrinsic::x86_avx512_psrl_q_512:
2767  case Intrinsic::x86_avx512_psrl_w_512:
2768  case Intrinsic::x86_sse2_psll_d:
2769  case Intrinsic::x86_sse2_psll_q:
2770  case Intrinsic::x86_sse2_psll_w:
2771  case Intrinsic::x86_avx2_psll_d:
2772  case Intrinsic::x86_avx2_psll_q:
2773  case Intrinsic::x86_avx2_psll_w:
2774  case Intrinsic::x86_avx512_psll_d_512:
2775  case Intrinsic::x86_avx512_psll_q_512:
2776  case Intrinsic::x86_avx512_psll_w_512: {
2777  if (Value *V = simplifyX86immShift(*II, Builder))
2778  return replaceInstUsesWith(*II, V);
2779 
2780  // SSE2/AVX2 use only the first 64 bits of the 128-bit vector
2781  // operand to compute the shift amount.
2782  Value *Arg1 = II->getArgOperand(1);
2783  assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
2784  "Unexpected packed shift size");
2785  unsigned VWidth = Arg1->getType()->getVectorNumElements();
2786 
2787  if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
2788  II->setArgOperand(1, V);
2789  return II;
2790  }
2791  break;
2792  }
2793 
2794  case Intrinsic::x86_avx2_psllv_d:
2795  case Intrinsic::x86_avx2_psllv_d_256:
2796  case Intrinsic::x86_avx2_psllv_q:
2797  case Intrinsic::x86_avx2_psllv_q_256:
2798  case Intrinsic::x86_avx512_psllv_d_512:
2799  case Intrinsic::x86_avx512_psllv_q_512:
2800  case Intrinsic::x86_avx512_psllv_w_128:
2801  case Intrinsic::x86_avx512_psllv_w_256:
2802  case Intrinsic::x86_avx512_psllv_w_512:
2803  case Intrinsic::x86_avx2_psrav_d:
2804  case Intrinsic::x86_avx2_psrav_d_256:
2805  case Intrinsic::x86_avx512_psrav_q_128:
2806  case Intrinsic::x86_avx512_psrav_q_256:
2807  case Intrinsic::x86_avx512_psrav_d_512:
2808  case Intrinsic::x86_avx512_psrav_q_512:
2809  case Intrinsic::x86_avx512_psrav_w_128:
2810  case Intrinsic::x86_avx512_psrav_w_256:
2811  case Intrinsic::x86_avx512_psrav_w_512:
2812  case Intrinsic::x86_avx2_psrlv_d:
2813  case Intrinsic::x86_avx2_psrlv_d_256:
2814  case Intrinsic::x86_avx2_psrlv_q:
2815  case Intrinsic::x86_avx2_psrlv_q_256:
2816  case Intrinsic::x86_avx512_psrlv_d_512:
2817  case Intrinsic::x86_avx512_psrlv_q_512:
2818  case Intrinsic::x86_avx512_psrlv_w_128:
2819  case Intrinsic::x86_avx512_psrlv_w_256:
2820  case Intrinsic::x86_avx512_psrlv_w_512:
2821  if (Value *V = simplifyX86varShift(*II, Builder))
2822  return replaceInstUsesWith(*II, V);
2823  break;
2824 
2825  case Intrinsic::x86_sse2_packssdw_128:
2826  case Intrinsic::x86_sse2_packsswb_128:
2827  case Intrinsic::x86_avx2_packssdw:
2828  case Intrinsic::x86_avx2_packsswb:
2829  case Intrinsic::x86_avx512_packssdw_512:
2830  case Intrinsic::x86_avx512_packsswb_512:
2831  if (Value *V = simplifyX86pack(*II, Builder, true))
2832  return replaceInstUsesWith(*II, V);
2833  break;
2834 
2835  case Intrinsic::x86_sse2_packuswb_128:
2836  case Intrinsic::x86_sse41_packusdw:
2837  case Intrinsic::x86_avx2_packusdw:
2838  case Intrinsic::x86_avx2_packuswb:
2839  case Intrinsic::x86_avx512_packusdw_512:
2840  case Intrinsic::x86_avx512_packuswb_512:
2841  if (Value *V = simplifyX86pack(*II, Builder, false))
2842  return replaceInstUsesWith(*II, V);
2843  break;
2844 
2845  case Intrinsic::x86_pclmulqdq:
2846  case Intrinsic::x86_pclmulqdq_256:
2847  case Intrinsic::x86_pclmulqdq_512: {
2848  if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2849  unsigned Imm = C->getZExtValue();
2850 
2851  bool MadeChange = false;
2852  Value *Arg0 = II->getArgOperand(0);
2853  Value *Arg1 = II->getArgOperand(1);
2854  unsigned VWidth = Arg0->getType()->getVectorNumElements();
2855 
2856  APInt UndefElts1(VWidth, 0);
2857  APInt DemandedElts1 = APInt::getSplat(VWidth,
2858  APInt(2, (Imm & 0x01) ? 2 : 1));
2859  if (Value *V = SimplifyDemandedVectorElts(Arg0, DemandedElts1,
2860  UndefElts1)) {
2861  II->setArgOperand(0, V);
2862  MadeChange = true;
2863  }
2864 
2865  APInt UndefElts2(VWidth, 0);
2866  APInt DemandedElts2 = APInt::getSplat(VWidth,
2867  APInt(2, (Imm & 0x10) ? 2 : 1));
2868  if (Value *V = SimplifyDemandedVectorElts(Arg1, DemandedElts2,
2869  UndefElts2)) {
2870  II->setArgOperand(1, V);
2871  MadeChange = true;
2872  }
2873 
2874  // If all demanded elements of either input are undef, the result is zero.
2875  if (DemandedElts1.isSubsetOf(UndefElts1) ||
2876  DemandedElts2.isSubsetOf(UndefElts2))
2877  return replaceInstUsesWith(*II,
2878  ConstantAggregateZero::get(II->getType()));
2879 
2880  if (MadeChange)
2881  return II;
2882  }
2883  break;
2884  }
2885 
2886  case Intrinsic::x86_sse41_insertps:
2887  if (Value *V = simplifyX86insertps(*II, Builder))
2888  return replaceInstUsesWith(*II, V);
2889  break;
2890 
2891  case Intrinsic::x86_sse4a_extrq: {
2892  Value *Op0 = II->getArgOperand(0);
2893  Value *Op1 = II->getArgOperand(1);
2894  unsigned VWidth0 = Op0->getType()->getVectorNumElements();
2895  unsigned VWidth1 = Op1->getType()->getVectorNumElements();
2896  assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2897  Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
2898  VWidth1 == 16 && "Unexpected operand sizes");
2899 
2900  // See if we're dealing with constant values.
2901  Constant *C1 = dyn_cast<Constant>(Op1);
2902  ConstantInt *CILength =
2903  C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
2904  : nullptr;
2905  ConstantInt *CIIndex =
2906  C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
2907  : nullptr;
2908 
2909  // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
2910  if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
2911  return replaceInstUsesWith(*II, V);
2912 
2913  // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
2914  // operands and the lowest 16-bits of the second.
2915  bool MadeChange = false;
2916  if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
2917  II->setArgOperand(0, V);
2918  MadeChange = true;
2919  }
2920  if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
2921  II->setArgOperand(1, V);
2922  MadeChange = true;
2923  }
2924  if (MadeChange)
2925  return II;
2926  break;
2927  }
2928 
2929  case Intrinsic::x86_sse4a_extrqi: {
2930  // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
2931  // bits of the lower 64-bits. The upper 64-bits are undefined.
2932  Value *Op0 = II->getArgOperand(0);
2933  unsigned VWidth = Op0->getType()->getVectorNumElements();
2934  assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
2935  "Unexpected operand size");
2936 
2937  // See if we're dealing with constant values.
2938  ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
2939  ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
2940 
2941  // Attempt to simplify to a constant or shuffle vector.
2942  if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
2943  return replaceInstUsesWith(*II, V);
2944 
2945  // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
2946  // operand.
2947  if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
2948  II->setArgOperand(0, V);
2949  return II;
2950  }
2951  break;
2952  }
2953 
2954  case Intrinsic::x86_sse4a_insertq: {
2955  Value *Op0 = II->getArgOperand(0);
2956  Value *Op1 = II->getArgOperand(1);
2957  unsigned VWidth = Op0->getType()->getVectorNumElements();
2958  assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2959  Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
2960  Op1->getType()->getVectorNumElements() == 2 &&
2961  "Unexpected operand size");
2962 
2963  // See if we're dealing with constant values.
2964  Constant *C1 = dyn_cast<Constant>(Op1);
2965  ConstantInt *CI11 =
2966  C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
2967  : nullptr;
2968 
2969  // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
2970  if (CI11) {
2971  const APInt &V11 = CI11->getValue();
2972  APInt Len = V11.zextOrTrunc(6);
2973  APInt Idx = V11.lshr(8).zextOrTrunc(6);
2974  if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
2975  return replaceInstUsesWith(*II, V);
2976  }
2977 
2978  // INSERTQ only uses the lowest 64-bits of the first 128-bit vector
2979  // operand.
2980  if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
2981  II->setArgOperand(0, V);
2982  return II;
2983  }
2984  break;
2985  }
2986 
2987  case Intrinsic::x86_sse4a_insertqi: {
2988  // INSERTQI: Extract lowest Length bits from lower half of second source and
2989  // insert over first source starting at Index bit. The upper 64-bits are
2990  // undefined.
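 // Illustrative example (hedged): insertqi(%v, %w, 8, 16) would copy
 // bits [7:0] of %w's low element into bits [23:16] of %v's low element,
 // leaving the remaining low bits of %v intact.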
2991  Value *Op0 = II->getArgOperand(0);
2992  Value *Op1 = II->getArgOperand(1);
2993  unsigned VWidth0 = Op0->getType()->getVectorNumElements();
2994  unsigned VWidth1 = Op1->getType()->getVectorNumElements();
2995  assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2996  Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
2997  VWidth1 == 2 && "Unexpected operand sizes");
2998 
2999  // See if we're dealing with constant values.
3000  ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
3001  ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));
3002 
3003  // Attempt to simplify to a constant or shuffle vector.
3004  if (CILength && CIIndex) {
3005  APInt Len = CILength->getValue().zextOrTrunc(6);
3006  APInt Idx = CIIndex->getValue().zextOrTrunc(6);
3007  if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
3008  return replaceInstUsesWith(*II, V);
3009  }
3010 
3011  // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
3012  // operands.
3013  bool MadeChange = false;
3014  if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
3015  II->setArgOperand(0, V);
3016  MadeChange = true;
3017  }
3018  if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
3019  II->setArgOperand(1, V);
3020  MadeChange = true;
3021  }
3022  if (MadeChange)
3023  return II;
3024  break;
3025  }
3026 
3027  case Intrinsic::x86_sse41_pblendvb:
3028  case Intrinsic::x86_sse41_blendvps:
3029  case Intrinsic::x86_sse41_blendvpd:
3030  case Intrinsic::x86_avx_blendv_ps_256:
3031  case Intrinsic::x86_avx_blendv_pd_256:
3032  case Intrinsic::x86_avx2_pblendvb: {
3033  // fold (blend A, A, Mask) -> A
3034  Value *Op0 = II->getArgOperand(0);
3035  Value *Op1 = II->getArgOperand(1);
3036  Value *Mask = II->getArgOperand(2);
3037  if (Op0 == Op1)
3038  return replaceInstUsesWith(CI, Op0);
3039 
3040  // Zero Mask - select 1st argument.
3041  if (isa<ConstantAggregateZero>(Mask))
3042  return replaceInstUsesWith(CI, Op0);
3043 
3044  // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
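 // Illustrative example (hedged): blendvps(%a, %b, <-0.0, 0.0, 0.0, 0.0>)
 // has the sign bit set only in lane 0, so it becomes
 // select <4 x i1> <true, false, false, false>, %b, %a.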
3045  if (auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
3046  Constant *NewSelector = getNegativeIsTrueBoolVec(ConstantMask);
3047  return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
3048  }
3049 
3050  // Convert to a vector select if we can bypass casts and find a boolean
3051  // vector condition value.
3052  Value *BoolVec;
3053  Mask = peekThroughBitcast(Mask);
3054  if (match(Mask, m_SExt(m_Value(BoolVec))) &&
3055  BoolVec->getType()->isVectorTy() &&
3056  BoolVec->getType()->getScalarSizeInBits() == 1) {
3057  assert(Mask->getType()->getPrimitiveSizeInBits() ==
3058  II->getType()->getPrimitiveSizeInBits() &&
3059  "Not expecting mask and operands with different sizes");
3060 
3061  unsigned NumMaskElts = Mask->getType()->getVectorNumElements();
3062  unsigned NumOperandElts = II->getType()->getVectorNumElements();
3063  if (NumMaskElts == NumOperandElts)
3064  return SelectInst::Create(BoolVec, Op1, Op0);
3065 
3066  // If the mask has fewer elements than the operands, each mask bit maps to
3067  // multiple elements of the operands. Bitcast back and forth.
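 // Illustrative example (hedged): a <2 x i1> condition with <4 x i32>
 // operands selects 64-bit halves, so the operands are bitcast to
 // <2 x i64>, selected, and the result bitcast back to <4 x i32>.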
3068  if (NumMaskElts < NumOperandElts) {
3069  Value *CastOp0 = Builder.CreateBitCast(Op0, Mask->getType());
3070  Value *CastOp1 = Builder.CreateBitCast(Op1, Mask->getType());
3071  Value *Sel = Builder.CreateSelect(BoolVec, CastOp1, CastOp0);
3072  return new BitCastInst(Sel, II->getType());
3073  }
3074  }
3075 
3076  break;
3077  }
3078 
3079  case Intrinsic::x86_ssse3_pshuf_b_128:
3080  case Intrinsic::x86_avx2_pshuf_b:
3081  case Intrinsic::x86_avx512_pshuf_b_512:
3082  if (Value *V = simplifyX86pshufb(*II, Builder))
3083  return replaceInstUsesWith(*II, V);
3084  break;
3085 
3086  case Intrinsic::x86_avx_vpermilvar_ps:
3087  case Intrinsic::x86_avx_vpermilvar_ps_256:
3088  case Intrinsic::x86_avx512_vpermilvar_ps_512:
3089  case Intrinsic::x86_avx_vpermilvar_pd:
3090  case Intrinsic::x86_avx_vpermilvar_pd_256:
3091  case Intrinsic::x86_avx512_vpermilvar_pd_512:
3092  if (Value *V = simplifyX86vpermilvar(*II, Builder))
3093  return replaceInstUsesWith(*II, V);
3094  break;
3095 
3096  case Intrinsic::x86_avx2_permd:
3097  case Intrinsic::x86_avx2_permps:
3098  case Intrinsic::x86_avx512_permvar_df_256:
3099  case Intrinsic::x86_avx512_permvar_df_512:
3100  case Intrinsic::x86_avx512_permvar_di_256:
3101  case Intrinsic::x86_avx512_permvar_di_512:
3102  case Intrinsic::x86_avx512_permvar_hi_128:
3103  case Intrinsic::x86_avx512_permvar_hi_256:
3104  case Intrinsic::x86_avx512_permvar_hi_512:
3105  case Intrinsic::x86_avx512_permvar_qi_128:
3106  case Intrinsic::x86_avx512_permvar_qi_256:
3107  case Intrinsic::x86_avx512_permvar_qi_512:
3108  case Intrinsic::x86_avx512_permvar_sf_512:
3109  case Intrinsic::x86_avx512_permvar_si_512:
3110  if (Value *V = simplifyX86vpermv(*II, Builder))
3111  return replaceInstUsesWith(*II, V);
3112  break;
3113 
3114  case Intrinsic::x86_avx_maskload_ps:
3115  case Intrinsic::x86_avx_maskload_pd:
3116  case Intrinsic::x86_avx_maskload_ps_256:
3117  case Intrinsic::x86_avx_maskload_pd_256:
3118  case Intrinsic::x86_avx2_maskload_d:
3119  case Intrinsic::x86_avx2_maskload_q:
3120  case Intrinsic::x86_avx2_maskload_d_256:
3121  case Intrinsic::x86_avx2_maskload_q_256:
3122  if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
3123  return I;
3124  break;
3125 
3126  case Intrinsic::x86_sse2_maskmov_dqu:
3127  case Intrinsic::x86_avx_maskstore_ps:
3128  case Intrinsic::x86_avx_maskstore_pd:
3129  case Intrinsic::x86_avx_maskstore_ps_256:
3130  case Intrinsic::x86_avx_maskstore_pd_256:
3131  case Intrinsic::x86_avx2_maskstore_d:
3132  case Intrinsic::x86_avx2_maskstore_q:
3133  case Intrinsic::x86_avx2_maskstore_d_256:
3134  case Intrinsic::x86_avx2_maskstore_q_256:
3135  if (simplifyX86MaskedStore(*II, *this))
3136  return nullptr;
3137  break;
3138 
3139  case Intrinsic::x86_addcarry_32:
3140  case Intrinsic::x86_addcarry_64:
3141  if (Value *V = simplifyX86addcarry(*II, Builder))
3142  return replaceInstUsesWith(*II, V);
3143  break;
3144 
3145  case Intrinsic::ppc_altivec_vperm:
3146  // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
3147  // Note that ppc_altivec_vperm has a big-endian bias, so when creating
3148  // a vector shuffle for little endian, we must undo the transformation
3149  // performed on vec_perm in altivec.h. That is, we must complement
3150  // the permutation mask with respect to 31 and reverse the order of
3151  // V1 and V2.
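 // Illustrative example (hedged): with a constant mask, each of the 16
 // result bytes becomes one extractelement/insertelement pair below,
 // which later passes can fold into a single shufflevector.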
3152  if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
3153  assert(Mask->getType()->getVectorNumElements() == 16 &&
3154  "Bad type for intrinsic!");
3155 
3156  // Check that all of the elements are integer constants or undefs.
3157  bool AllEltsOk = true;
3158  for (unsigned i = 0; i != 16; ++i) {
3159  Constant *Elt = Mask->getAggregateElement(i);
3160  if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
3161  AllEltsOk = false;
3162  break;
3163  }
3164  }
3165 
3166  if (AllEltsOk) {
3167  // Cast the input vectors to byte vectors.
3168  Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
3169  Mask->getType());
3170  Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
3171  Mask->getType());
3172  Value *Result = UndefValue::get(Op0->getType());
3173 
3174  // Only extract each element once.
3175  Value *ExtractedElts[32];
3176  memset(ExtractedElts, 0, sizeof(ExtractedElts));
3177 
3178  for (unsigned i = 0; i != 16; ++i) {
3179  if (isa<UndefValue>(Mask->getAggregateElement(i)))
3180  continue;
3181  unsigned Idx =
3182  cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
3183  Idx &= 31; // Match the hardware behavior.
3184  if (DL.isLittleEndian())
3185  Idx = 31 - Idx;
3186 
3187  if (!ExtractedElts[Idx]) {
3188  Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
3189  Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
3190  ExtractedElts[Idx] =
3191  Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
3192  Builder.getInt32(Idx&15));
3193  }
3194 
3195  // Insert this value into the result vector.
3196  Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
3197  Builder.getInt32(i));
3198  }
3199  return CastInst::Create(Instruction::BitCast, Result, CI.getType());
3200  }
3201  }
3202  break;
3203 
3204  case Intrinsic::arm_neon_vld1: {
3205  unsigned MemAlign = getKnownAlignment(II->getArgOperand(0),
3206  DL, II, &AC, &DT);
3207  if (Value *V = simplifyNeonVld1(*II, MemAlign, Builder))
3208  return replaceInstUsesWith(*II, V);
3209  break;
3210  }
3211 
3212  case Intrinsic::arm_neon_vld2:
3213  case Intrinsic::arm_neon_vld3:
3214  case Intrinsic::arm_neon_vld4:
3215  case Intrinsic::arm_neon_vld2lane:
3216  case Intrinsic::arm_neon_vld3lane:
3217  case Intrinsic::arm_neon_vld4lane:
3218  case Intrinsic::arm_neon_vst1:
3219  case Intrinsic::arm_neon_vst2:
3220  case Intrinsic::arm_neon_vst3:
3221  case Intrinsic::arm_neon_vst4:
3222  case Intrinsic::arm_neon_vst2lane:
3223  case Intrinsic::arm_neon_vst3lane:
3224  case Intrinsic::arm_neon_vst4lane: {
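 // The last argument of these NEON load/store intrinsics is an explicit
 // alignment. If analysis proves a larger alignment than the intrinsic
 // encodes, write the larger value back into the argument.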
3225  unsigned MemAlign =
3226  getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
3227  unsigned AlignArg = II->getNumArgOperands() - 1;
3228  ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
3229  if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
3230  II->setArgOperand(AlignArg,
3231  ConstantInt::get(Type::getInt32Ty(II->getContext()),
3232  MemAlign, false));
3233  return II;
3234  }
3235  break;
3236  }
3237 
3238  case Intrinsic::arm_neon_vtbl1:
3239  case Intrinsic::aarch64_neon_tbl1:
3240  if (Value *V = simplifyNeonTbl1(*II, Builder))
3241  return replaceInstUsesWith(*II, V);
3242  break;
3243 
3244  case Intrinsic::arm_neon_vmulls:
3245  case Intrinsic::arm_neon_vmullu:
3246  case Intrinsic::aarch64_neon_smull:
3247  case Intrinsic::aarch64_neon_umull: {
3248  Value *Arg0 = II->getArgOperand(0);
3249  Value *Arg1 = II->getArgOperand(1);
3250 
3251  // Handle mul by zero first:
3252  if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
3253  return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
3254  }
3255 
3256  // Check for constant LHS & RHS - in this case we just simplify.
3257  bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
3258  IID == Intrinsic::aarch64_neon_umull);
3259  VectorType *NewVT = cast<VectorType>(II->getType());
3260  if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
3261  if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
3262  CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
3263  CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
3264 
3265  return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
3266  }
3267 
3268  // Couldn't simplify - canonicalize constant to the RHS.
3269  std::swap(Arg0, Arg1);
3270  }
3271 
3272  // Handle mul by one:
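 // Illustrative example (hedged): vmull_u(<4 x i16> %x, splat(1)) widens
 // each lane, so it reduces to a plain zext of %x to <4 x i32>.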
3273  if (Constant *CV1 = dyn_cast<Constant>(Arg1))
3274  if (ConstantInt *Splat =
3275  dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
3276  if (Splat->isOne())
3277  return CastInst::CreateIntegerCast(Arg0, II->getType(),
3278  /*isSigned=*/!Zext);
3279 
3280  break;
3281  }
3282  case Intrinsic::arm_neon_aesd:
3283  case Intrinsic::arm_neon_aese:
3284  case Intrinsic::aarch64_crypto_aesd:
3285  case Intrinsic::aarch64_crypto_aese: {
3286  Value *DataArg = II->getArgOperand(0);
3287  Value *KeyArg = II->getArgOperand(1);
3288 
3289  // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
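 // Illustrative example (hedged): aese(xor(%a, %b), zeroinitializer)
 // becomes aese(%a, %b), since the instruction XORs data and key first.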
3290  Value *Data, *Key;
3291  if (match(KeyArg, m_ZeroInt()) &&
3292  match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
3293  II->setArgOperand(0, Data);
3294  II->setArgOperand(1, Key);
3295  return II;
3296  }
3297  break;
3298  }
3299  case Intrinsic::amdgcn_rcp: {
3300  Value *Src = II->getArgOperand(0);
3301 
3302  // TODO: Move to ConstantFolding/InstSimplify?
3303  if (isa<UndefValue>(Src))
3304  return replaceInstUsesWith(CI, Src);
3305 
3306  if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3307  const APFloat &ArgVal = C->getValueAPF();
3308  APFloat Val(ArgVal.getSemantics(), 1.0);
3309  APFloat::opStatus Status = Val.divide(ArgVal,
3310  APFloat::rmNearestTiesToEven);
3311  // Only do this if it was exact and therefore not dependent on the
3312  // rounding mode.
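 // Illustrative example (hedged): rcp(2.0) divides exactly and folds to
 // 0.5, while rcp(3.0) is inexact in binary FP and is left untouched.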
3313  if (Status == APFloat::opOK)
3314  return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
3315  }
3316 
3317  break;
3318  }
3319  case Intrinsic::amdgcn_rsq: {
3320  Value *Src = II->getArgOperand(0);
3321 
3322  // TODO: Move to ConstantFolding/InstSimplify?
3323  if (isa<UndefValue>(Src))
3324  return replaceInstUsesWith(CI, Src);
3325  break;
3326  }
3327  case Intrinsic::amdgcn_frexp_mant:
3328  case Intrinsic::amdgcn_frexp_exp: {
3329  Value *Src = II->getArgOperand(0);
3330  if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3331  int Exp;
3332  APFloat Significand = frexp(C->getValueAPF(), Exp,
3333  APFloat::rmNearestTiesToEven);
3334 
3335  if (IID == Intrinsic::amdgcn_frexp_mant) {
3336  return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
3337  Significand));
3338  }
3339 
3340  // Match instruction special case behavior.
3341  if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
3342  Exp = 0;
3343 
3344  return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Exp));
3345  }
3346 
3347  if (isa<UndefValue>(Src))
3348  return replaceInstUsesWith(CI, UndefValue::get(II->getType()));
3349 
3350  break;
3351  }
3352  case Intrinsic::amdgcn_class: {
3353  enum {
3354  S_NAN = 1 << 0, // Signaling NaN
3355  Q_NAN = 1 << 1, // Quiet NaN
3356  N_INFINITY = 1 << 2, // Negative infinity
3357  N_NORMAL = 1 << 3, // Negative normal
3358  N_SUBNORMAL = 1 << 4, // Negative subnormal
3359  N_ZERO = 1 << 5, // Negative zero
3360  P_ZERO = 1 << 6, // Positive zero
3361  P_SUBNORMAL = 1 << 7, // Positive subnormal
3362  P_NORMAL = 1 << 8, // Positive normal
3363  P_INFINITY = 1 << 9 // Positive infinity
3364  };
3365 
3366  const uint32_t FullMask = S_NAN | Q_NAN | N_INFINITY | N_NORMAL |
3367  N_SUBNORMAL | N_ZERO | P_ZERO | P_SUBNORMAL | P_NORMAL | P_INFINITY;
3368 
3369  Value *Src0 = II->getArgOperand(0);
3370  Value *Src1 = II->getArgOperand(1);
3371  const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
3372  if (!CMask) {
3373  if (isa<UndefValue>(Src0))
3374  return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3375 
3376  if (isa<UndefValue>(Src1))
3377  return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3378  break;
3379  }
3380 
3381  uint32_t Mask = CMask->getZExtValue();
3382 
3383  // If all tests are made, it doesn't matter what the value is.
3384  if ((Mask & FullMask) == FullMask)
3385  return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), true));
3386 
3387  if ((Mask & FullMask) == 0)
3388  return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3389 
3390  if (Mask == (S_NAN | Q_NAN)) {
3391  // Equivalent of isnan. Replace with standard fcmp.
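 // Illustrative example (hedged):
 //   llvm.amdgcn.class(float %x, i32 3) -> fcmp uno float %x, %x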
3392  Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
3393  FCmp->takeName(II);
3394  return replaceInstUsesWith(*II, FCmp);
3395  }
3396 
3397  if (Mask == (N_ZERO | P_ZERO)) {
3398  // Equivalent of == 0.
3399  Value *FCmp = Builder.CreateFCmpOEQ(
3400  Src0, ConstantFP::get(Src0->getType(), 0.0));
3401 
3402  FCmp->takeName(II);
3403  return replaceInstUsesWith(*II, FCmp);
3404  }
3405 
3406  // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
3407  if (((Mask & S_NAN) || (Mask & Q_NAN)) && isKnownNeverNaN(Src0, &TLI)) {
3408  II->setArgOperand(1, ConstantInt::get(Src1->getType(),
3409  Mask & ~(S_NAN | Q_NAN)));
3410  return II;
3411  }
3412 
3413  const ConstantFP *CVal = dyn_cast<ConstantFP>(Src0);
3414  if (!CVal) {
3415  if (isa<UndefValue>(Src0))
3416  return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3417 
3418  // Clamp mask to used bits
3419  if ((Mask & FullMask) != Mask) {
3420  CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
3421  { Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) }
3422  );
3423 
3424  NewCall->takeName(II);
3425  return replaceInstUsesWith(*II, NewCall);
3426  }
3427 
3428  break;
3429  }
3430 
3431  const APFloat &Val = CVal->getValueAPF();
3432 
3433  bool Result =
3434  ((Mask & S_NAN) && Val.isNaN() && Val.isSignaling()) ||
3435  ((Mask & Q_NAN) && Val.isNaN() && !Val.isSignaling()) ||
3436  ((Mask & N_INFINITY) && Val.isInfinity() && Val.isNegative()) ||
3437  ((Mask & N_NORMAL) && Val.isNormal() && Val.isNegative()) ||
3438  ((Mask & N_SUBNORMAL) && Val.isDenormal() && Val.isNegative()) ||
3439  ((Mask & N_ZERO) && Val.isZero() && Val.isNegative()) ||
3440  ((Mask & P_ZERO) && Val.isZero() && !Val.isNegative()) ||
3441  ((Mask & P_SUBNORMAL) && Val.isDenormal() && !Val.isNegative()) ||
3442  ((Mask & P_NORMAL) && Val.isNormal() && !Val.isNegative()) ||
3443  ((Mask & P_INFINITY) && Val.isInfinity() && !Val.isNegative());
3444 
3445  return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), Result));
3446  }
3447  case Intrinsic::amdgcn_cvt_pkrtz: {
3448  Value *Src0 = II->getArgOperand(0);
3449  Value *Src1 = II->getArgOperand(1);
3450  if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3451  if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3452  const fltSemantics &HalfSem
3453  = II->getType()->getScalarType()->getFltSemantics();
3454  bool LosesInfo;
3455  APFloat Val0 = C0->getValueAPF();
3456  APFloat Val1 = C1->getValueAPF();
3457  Val0.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3458  Val1.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3459 
3460  Constant *Folded = ConstantVector::get({
3461  ConstantFP::get(II->getContext(), Val0),
3462  ConstantFP::get(II->getContext(), Val1) });
3463  return replaceInstUsesWith(*II, Folded);
3464  }
3465  }
3466 
3467  if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3468  return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3469 
3470  break;
3471  }
3472  case Intrinsic::amdgcn_cvt_pknorm_i16:
3473  case Intrinsic::amdgcn_cvt_pknorm_u16:
3474  case Intrinsic::amdgcn_cvt_pk_i16:
3475  case Intrinsic::amdgcn_cvt_pk_u16: {
3476  Value *Src0 = II->getArgOperand(0);
3477  Value *Src1 = II->getArgOperand(1);
3478 
3479  if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3480  return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3481 
3482  break;
3483  }
3484  case Intrinsic::amdgcn_ubfe:
3485  case Intrinsic::amdgcn_sbfe: {
3486  // Decompose simple cases into standard shifts.
3487  Value *Src = II->getArgOperand(0);
3488  if (isa<UndefValue>(Src))
3489  return replaceInstUsesWith(*II, Src);
3490 
3491  unsigned Width;
3492  Type *Ty = II->getType();
3493  unsigned IntSize = Ty->getIntegerBitWidth();
3494 
3495  ConstantInt *CWidth = dyn_cast<ConstantInt>(II->getArgOperand(2));
3496  if (CWidth) {
3497  Width = CWidth->getZExtValue();
3498  if ((Width & (IntSize - 1)) == 0)
3499  return replaceInstUsesWith(*II, ConstantInt::getNullValue(Ty));
3500 
3501  if (Width >= IntSize) {
3502  // Hardware ignores high bits, so remove those.
3503  II->setArgOperand(2, ConstantInt::get(CWidth->getType(),
3504  Width & (IntSize - 1)));
3505  return II;
3506  }
3507  }
3508 
3509  unsigned Offset;
3510  ConstantInt *COffset = dyn_cast<ConstantInt>(II->getArgOperand(1));
3511  if (COffset) {
3512  Offset = COffset->getZExtValue();
3513  if (Offset >= IntSize) {
3514  II->setArgOperand(1, ConstantInt::get(COffset->getType(),
3515  Offset & (IntSize - 1)));
3516  return II;
3517  }
3518  }
3519 
3520  bool Signed = IID == Intrinsic::amdgcn_sbfe;
3521 
3522  if (!CWidth || !COffset)
3523  break;
3524 
3525  // The case of Width == 0 is handled above, which makes this transformation
3526  // safe. If Width == 0, then the ashr and lshr instructions become poison
3527  // values since the shift amount would be equal to the bit size.
3528  assert(Width != 0);
3529 
3530  // TODO: This allows folding to undef when the hardware has specific
3531  // behavior?
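 // Illustrative example (hedged): for i32, ubfe(%x, 8, 8) becomes
 // lshr(shl(%x, 16), 24), leaving bits [15:8] of %x zero-extended.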
3532  if (Offset + Width < IntSize) {
3533  Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
3534  Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
3535  : Builder.CreateLShr(Shl, IntSize - Width);
3536  RightShift->takeName(II);
3537  return replaceInstUsesWith(*II, RightShift);
3538  }
3539 
3540  Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
3541  : Builder.CreateLShr(Src, Offset);
3542 
3543  RightShift->takeName(II);
3544  return replaceInstUsesWith(*II, RightShift);
3545  }
3546  case Intrinsic::amdgcn_exp:
3547  case Intrinsic::amdgcn_exp_compr: {
3548  ConstantInt *En = cast<ConstantInt>(II->getArgOperand(1));
3549  unsigned EnBits = En->getZExtValue();
3550  if (EnBits == 0xf)
3551  break; // All inputs enabled.
3552 
3553  bool IsCompr = IID == Intrinsic::amdgcn_exp_compr;
3554  bool Changed = false;
3555  for (int I = 0; I < (IsCompr ? 2 : 4); ++I) {
3556  if ((!IsCompr && (EnBits & (1 << I)) == 0) ||
3557  (IsCompr && ((EnBits & (0x3 << (2 * I))) == 0))) {
3558  Value *Src = II->getArgOperand(I + 2);
3559  if (!isa<UndefValue>(Src)) {
3560  II->setArgOperand(I + 2, UndefValue::get(Src->getType()));
3561  Changed = true;
3562  }
3563  }
3564  }
3565 
3566  if (Changed)
3567  return II;
3568 
3569  break;
3570  }
3571  case Intrinsic::amdgcn_fmed3: {
3572  // Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
3573  // for the shader.
3574 
3575  Value *Src0 = II->getArgOperand(0);
3576  Value *Src1 = II->getArgOperand(1);
3577  Value *Src2 = II->getArgOperand(2);
3578 
3579  // Checking for NaN before canonicalization provides better fidelity when
3580  // mapping other operations onto fmed3 since the order of operands is
3581  // unchanged.
3582  CallInst *NewCall = nullptr;
3583  if (match(Src0, m_NaN()) || isa<UndefValue>(Src0)) {
3584  NewCall = Builder.CreateMinNum(Src1, Src2);
3585  } else if (match(Src1, m_NaN()) || isa<UndefValue>(Src1)) {
3586  NewCall = Builder.CreateMinNum(Src0, Src2);
3587  } else if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) {
3588  NewCall = Builder.CreateMaxNum(Src0, Src1);
3589  }
3590 
3591  if (NewCall) {
3592  NewCall->copyFastMathFlags(II);
3593  NewCall->takeName(II);
3594  return replaceInstUsesWith(*II, NewCall);
3595  }
3596 
3597  bool Swap = false;
3598  // Canonicalize constants to RHS operands.
3599  //
3600  // fmed3(c0, x, c1) -> fmed3(x, c0, c1)
3601  if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3602  std::swap(Src0, Src1);
3603  Swap = true;
3604  }
3605 
3606  if (isa<Constant>(Src1) && !isa<Constant>(Src2)) {
3607  std::swap(Src1, Src2);
3608  Swap = true;
3609  }
3610 
3611  if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3612  std::swap(Src0, Src1);
3613  Swap = true;
3614  }
3615 
3616  if (Swap) {
3617  II->setArgOperand(0, Src0);
3618  II->setArgOperand(1, Src1);
3619  II->setArgOperand(2, Src2);
3620  return II;
3621  }
3622 
3623  if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3624  if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3625  if (const ConstantFP *C2 = dyn_cast<ConstantFP>(Src2)) {
3626  APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
3627  C2->getValueAPF());
3628  return replaceInstUsesWith(*II,
3629  ConstantFP::get(Builder.getContext(), Result));
3630  }
3631  }
3632  }
3633 
3634  break;
3635  }
3636  case Intrinsic::amdgcn_icmp:
3637  case Intrinsic::amdgcn_fcmp: {
3638  const ConstantInt *CC = cast<ConstantInt>(II->getArgOperand(2));
3639  // Guard against invalid arguments.
3640  int64_t CCVal = CC->getZExtValue();
3641  bool IsInteger = IID == Intrinsic::amdgcn_icmp;
3642  if ((IsInteger && (CCVal < CmpInst::FIRST_ICMP_PREDICATE ||
3643  CCVal > CmpInst::LAST_ICMP_PREDICATE)) ||
3644  (!IsInteger && (CCVal < CmpInst::FIRST_FCMP_PREDICATE ||
3645  CCVal > CmpInst::LAST_FCMP_PREDICATE)))
3646  break;
3647 
3648  Value *Src0 = II->getArgOperand(0);
3649  Value *Src1 = II->getArgOperand(1);
3650 
3651  if (auto *CSrc0 = dyn_cast<Constant>(Src0)) {
3652  if (auto *CSrc1 = dyn_cast<Constant>(Src1)) {
3653  Constant *CCmp = ConstantExpr::getCompare(CCVal, CSrc0, CSrc1);
3654  if (CCmp->isNullValue()) {
3655  return replaceInstUsesWith(
3656  *II, ConstantExpr::getSExt(CCmp, II->getType()));
3657  }
3658 
3659  // The result of V_ICMP/V_FCMP assembly instructions (which this
3660  // intrinsic exposes) is one bit per thread, masked with the EXEC
3661  // register (which contains the bitmask of live threads). So a
3662  // comparison that always returns true is the same as a read of the
3663  // EXEC register.
3664  Function *NewF = Intrinsic::getDeclaration(
3665  II->getModule(), Intrinsic::read_register, II->getType());
3666  Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
3667  MDNode *MD = MDNode::get(II->getContext(), MDArgs);
3668  Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
3669  CallInst *NewCall = Builder.CreateCall(NewF, Args);
3670  NewCall->addAttribute(AttributeList::FunctionIndex,
3671  Attribute::Convergent);
3672  NewCall->takeName(II);
3673  return replaceInstUsesWith(*II, NewCall);
3674  }
3675 
3676  // Canonicalize constants to RHS.
3677  CmpInst::Predicate SwapPred
3678  = CmpInst::getSwappedPredicate(static_cast<CmpInst::Predicate>(CCVal));
3679  II->setArgOperand(0, Src1);
3680  II->setArgOperand(1, Src0);
3681  II->setArgOperand(2, ConstantInt::get(CC->getType(),
3682  static_cast<int>(SwapPred)));
3683  return II;
3684  }
3685 
3686  if (CCVal != CmpInst::ICMP_EQ && CCVal != CmpInst::ICMP_NE)
3687  break;
3688 
3689  // Canonicalize compare eq with true value to compare != 0
3690  // llvm.amdgcn.icmp(zext (i1 x), 1, eq)
3691  // -> llvm.amdgcn.icmp(zext (i1 x), 0, ne)
3692  // llvm.amdgcn.icmp(sext (i1 x), -1, eq)
3693  // -> llvm.amdgcn.icmp(sext (i1 x), 0, ne)
3694  Value *ExtSrc;
3695  if (CCVal == CmpInst::ICMP_EQ &&
3696  ((match(Src1, m_One()) && match(Src0, m_ZExt(m_Value(ExtSrc)))) ||
3697  (match(Src1, m_AllOnes()) && match(Src0, m_SExt(m_Value(ExtSrc))))) &&
3698  ExtSrc->getType()->isIntegerTy(1)) {
3699  II->setArgOperand(1, ConstantInt::getNullValue(Src1->getType()));
3700  II->setArgOperand(2, ConstantInt::get(CC->getType(), CmpInst::ICMP_NE));
3701  return II;
3702  }
3703 
3704  CmpInst::Predicate SrcPred;
3705  Value *SrcLHS;
3706  Value *SrcRHS;
3707 
3708  // Fold compare eq/ne with 0 from a compare result as the predicate to the
3709  // intrinsic. The typical use is a wave vote function in the library, which
3710  // will be fed from a user code condition compared with 0. Fold in the
3711  // redundant compare.
3712 
3713  // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, ne)
3714  // -> llvm.amdgcn.[if]cmp(a, b, pred)
3715  //
3716  // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, eq)
3717  // -> llvm.amdgcn.[if]cmp(a, b, inv pred)
3718  if (match(Src1, m_Zero()) &&
3719  match(Src0,
3720  m_ZExtOrSExt(m_Cmp(SrcPred, m_Value(SrcLHS), m_Value(SrcRHS))))) {
3721  if (CCVal == CmpInst::ICMP_EQ)
3722  SrcPred = CmpInst::getInversePredicate(SrcPred);
3723 
3724  Intrinsic::ID NewIID = CmpInst::isFPPredicate(SrcPred) ?
3725  Intrinsic::amdgcn_fcmp : Intrinsic::amdgcn_icmp;
3726 
3727  Type *Ty = SrcLHS->getType();
3728  if (auto *CmpType = dyn_cast<IntegerType>(Ty)) {
3729  // Promote to next legal integer type.
3730  unsigned Width = CmpType->getBitWidth();
3731  unsigned NewWidth = Width;
3732 
3733  // Don't do anything for i1 comparisons.
3734  if (Width == 1)
3735  break;
3736 
3737  if (Width <= 16)
3738  NewWidth = 16;
3739  else if (Width <= 32)
3740  NewWidth = 32;
3741  else if (Width <= 64)
3742  NewWidth = 64;
3743  else if (Width > 64)
3744  break; // Can't handle this.
3745 
3746  if (Width != NewWidth) {
3747  IntegerType *CmpTy = Builder.getIntNTy(NewWidth);
3748  if (CmpInst::isSigned(SrcPred)) {
3749  SrcLHS = Builder.CreateSExt(SrcLHS, CmpTy);
3750  SrcRHS = Builder.CreateSExt(SrcRHS, CmpTy);
3751  } else {
3752  SrcLHS = Builder.CreateZExt(SrcLHS, CmpTy);
3753  SrcRHS = Builder.CreateZExt(SrcRHS, CmpTy);
3754  }
3755  }
3756  } else if (!Ty->isFloatTy() && !Ty->isDoubleTy() && !Ty->isHalfTy())
3757  break;
3758 
3759  Function *NewF =
3760  Intrinsic::getDeclaration(II->getModule(), NewIID,
3761  { II->getType(),
3762  SrcLHS->getType() });
3763  Value *Args[] = { SrcLHS, SrcRHS,
3764  ConstantInt::get(CC->getType(), SrcPred) };
3765  CallInst *NewCall = Builder.CreateCall(NewF, Args);
3766  NewCall->takeName(II);
3767  return replaceInstUsesWith(*II, NewCall);
3768  }
3769 
3770  break;
3771  }
3772  case Intrinsic::amdgcn_wqm_vote: {
3773  // wqm_vote is identity when the argument is constant.
3774  if (!isa<Constant>(II->getArgOperand(0)))
3775  break;
3776 
3777  return replaceInstUsesWith(*II, II->getArgOperand(0));
3778  }
3779  case Intrinsic::amdgcn_kill: {
3780  const ConstantInt *C = dyn_cast<ConstantInt>(II->getArgOperand(0));
3781  if (!C || !C->getZExtValue())
3782  break;
3783 
3784  // amdgcn.kill(i1 1) is a no-op
3785  return eraseInstFromFunction(CI);
3786  }
3787  case Intrinsic::amdgcn_update_dpp: {
3788  Value *Old = II->getArgOperand(0);
3789 
3790  auto BC = cast<ConstantInt>(II->getArgOperand(5));
3791  auto RM = cast<ConstantInt>(II->getArgOperand(3));
3792  auto BM = cast<ConstantInt>(II->getArgOperand(4));
3793  if (BC->isZeroValue() ||
3794  RM->getZExtValue() != 0xF ||
3795  BM->getZExtValue() != 0xF ||
3796  isa<UndefValue>(Old))
3797  break;
3798 
3799  // If bound_ctrl = 1, row mask = bank mask = 0xf we can omit old value.
3800  II->setOperand(0, UndefValue::get(Old->getType()));
3801  return II;
3802  }
3803  case Intrinsic::amdgcn_readfirstlane:
3804  case Intrinsic::amdgcn_readlane: {
3805  // A constant value is trivially uniform.
3806  if (Constant *C = dyn_cast<Constant>(II->getArgOperand(0)))
3807  return replaceInstUsesWith(*II, C);
3808 
3809  // The rest of these may not be safe if the exec may not be the same between
3810  // the def and use.
3811  Value *Src = II->getArgOperand(0);
3812  Instruction *SrcInst = dyn_cast<Instruction>(Src);
3813  if (SrcInst && SrcInst->getParent() != II->getParent())
3814  break;
3815 
3816  // readfirstlane (readfirstlane x) -> readfirstlane x
3817  // readlane (readfirstlane x), y -> readfirstlane x
3818  if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readfirstlane>()))
3819  return replaceInstUsesWith(*II, Src);
3820 
3821  if (IID == Intrinsic::amdgcn_readfirstlane) {
3822  // readfirstlane (readlane x, y) -> readlane x, y
3823  if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readlane>()))
3824  return replaceInstUsesWith(*II, Src);
3825  } else {
3826  // readlane (readlane x, y), y -> readlane x, y
3827  if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readlane>(
3828  m_Value(), m_Specific(II->getArgOperand(1)))))
3829  return replaceInstUsesWith(*II, Src);
3830  }
3831 
3832  break;
3833  }
3834  case Intrinsic::stackrestore: {
3835  // If the save is right next to the restore, remove the restore. This can
3836  // happen when variable allocas are DCE'd.
3837  if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
3838  if (SS->getIntrinsicID() == Intrinsic::stacksave) {
3839  // Skip over debug info.
3840  if (SS->getNextNonDebugInstruction() == II) {
3841  return eraseInstFromFunction(CI);
3842  }
3843  }
3844  }
3845 
3846  // Scan down this block to see if there is another stack restore in the
3847  // same block without an intervening call/alloca.
3848  BasicBlock::iterator BI(II);
3849  Instruction *TI = II->getParent()->getTerminator();
3850  bool CannotRemove = false;
3851  for (++BI; &*BI != TI; ++BI) {
3852  if (isa<AllocaInst>(BI)) {
3853  CannotRemove = true;
3854  break;
3855  }
3856  if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
3857  if (auto *II2 = dyn_cast<IntrinsicInst>(BCI)) {
3858  // If there is a stackrestore below this one, remove this one.
3859  if (II2->getIntrinsicID() == Intrinsic::stackrestore)
3860  return eraseInstFromFunction(CI);
3861 
3862  // Bail if we cross over an intrinsic with side effects, such as
3863  // llvm.stacksave, llvm.read_register, or llvm.setjmp.
3864  if (II2->mayHaveSideEffects()) {
3865  CannotRemove = true;
3866  break;
3867  }
3868  } else {
3869  // If we found a non-intrinsic call, we can't remove the stack
3870  // restore.
3871  CannotRemove = true;
3872  break;
3873  }
3874  }
3875  }
3876 
3877  // If the stack restore is in a return, resume, or unwind block and if there
3878  // are no allocas or calls between the restore and the return, nuke the
3879  // restore.
3880  if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
3881  return eraseInstFromFunction(CI);
3882  break;
3883  }
3884  case Intrinsic::lifetime_start:
3885  // Asan needs to poison memory to detect invalid access which is possible
3886  // even for empty lifetime range.
3887  if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3888  II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
3889  II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3890  break;
3891 
3892  if (removeTriviallyEmptyRange(*II, Intrinsic::lifetime_start,
3893  Intrinsic::lifetime_end, *this))
3894  return nullptr;
3895  break;
3896  case Intrinsic::assume: {
3897  Value *IIOperand = II->getArgOperand(0);
3898  // Remove an assume if it is followed by an identical assume.
3899  // TODO: Do we need this? Unless there are conflicting assumptions, the
3900  // computeKnownBits(IIOperand) below here eliminates redundant assumes.
3901  Instruction *Next = II->getNextNonDebugInstruction();
3902  if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
3903  return eraseInstFromFunction(CI);
3904 
3905  // Canonicalize assume(a && b) -> assume(a); assume(b);
3906  // Note: New assumption intrinsics created here are registered by
3907  // the InstCombineIRInserter object.
3908  FunctionType *AssumeIntrinsicTy = II->getFunctionType();
3909  Value *AssumeIntrinsic = II->getCalledValue();
3910  Value *A, *B;
3911  if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
3912  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName());
3913  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
3914  return eraseInstFromFunction(*II);
3915  }
3916  // assume(!(a || b)) -> assume(!a); assume(!b);
3917  if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
3918  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3919  Builder.CreateNot(A), II->getName());
3920  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3921  Builder.CreateNot(B), II->getName());
3922  return eraseInstFromFunction(*II);
3923  }
3924 
3925  // assume( (load addr) != null ) -> add 'nonnull' metadata to load
3926  // (if assume is valid at the load)
3927  CmpInst::Predicate Pred;
3928  Instruction *LHS;
3929  if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
3930  Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
3931  LHS->getType()->isPointerTy() &&
3932  isValidAssumeForContext(II, LHS, &DT)) {
3933  MDNode *MD = MDNode::get(II->getContext(), None);
3934  LHS->setMetadata(LLVMContext::MD_nonnull, MD);
3935  return eraseInstFromFunction(*II);
3936 
3937  // TODO: apply nonnull return attributes to calls and invokes
3938  // TODO: apply range metadata for range check patterns?
3939  }
3940 
3941  // If there is a dominating assume with the same condition as this one,
3942  // then this one is redundant, and should be removed.
3943  KnownBits Known(1);
3944  computeKnownBits(IIOperand, Known, 0, II);
3945  if (Known.isAllOnes())
3946  return eraseInstFromFunction(*II);
3947 
3948  // Update the cache of affected values for this assumption (we might be
3949  // here because we just simplified the condition).
3950  AC.updateAffectedValues(II);
3951  break;
3952  }
3953  case Intrinsic::experimental_gc_relocate: {
3954  // Translate facts known about a pointer before relocating into
3955  // facts about the relocate value, while being careful to
3956  // preserve relocation semantics.
3957  Value *DerivedPtr = cast<GCRelocateInst>(II)->getDerivedPtr();
3958 
3959  // Remove the relocation if unused; note that this check is required
3960  // to prevent the cases below from looping forever.
3961  if (II->use_empty())
3962  return eraseInstFromFunction(*II);
3963 
3964  // Undef is undef, even after relocation.
3965  // TODO: provide a hook for this in GCStrategy. This is clearly legal for
3966  // most practical collectors, but there was discussion in the review thread
3967  // about whether it was legal for all possible collectors.
3968  if (isa<UndefValue>(DerivedPtr))
3969  // Use undef of gc_relocate's type to replace it.
3970  return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3971 
3972  if (auto *PT = dyn_cast<PointerType>(II->getType())) {
3973  // The relocation of null will be null for most any collector.
3974  // TODO: provide a hook for this in GCStrategy. There might be some
3975  // weird collector this property does not hold for.
3976  if (isa<ConstantPointerNull>(DerivedPtr))
3977  // Use null-pointer of gc_relocate's type to replace it.
3978  return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
3979 
3980  // isKnownNonNull -> nonnull attribute
3981  if (!II->hasRetAttr(Attribute::NonNull) &&
3982  isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT)) {
3983  II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
3984  return II;
3985  }
3986  }
3987 
3988  // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
3989  // Canonicalize on the type from the uses to the defs
3990 
3991  // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
3992  break;
3993  }
3994 
3995  case Intrinsic::experimental_guard: {
3996  // Is this guard followed by another guard? We scan forward over a small
3997  // fixed window of instructions to handle common cases with conditions
3998  // computed between guards.
3999  Instruction *NextInst = II->getNextNode();
4000  for (unsigned i = 0; i < GuardWideningWindow; i++) {
4001  // Note: Using context-free form to avoid compile time blow up
4002  if (!isSafeToSpeculativelyExecute(NextInst))
4003  break;
4004  NextInst = NextInst->getNextNode();
4005  }
4006  Value *NextCond = nullptr;
4007  if (match(NextInst,
4008  m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
4009  Value *CurrCond = II->getArgOperand(0);
4010 
4011  // Remove a guard if it is immediately preceded by an identical guard.
4012  if (CurrCond == NextCond)
4013  return eraseInstFromFunction(*NextInst);
4014 
4015  // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
4016  Instruction* MoveI = II->getNextNode();
4017  while (MoveI != NextInst) {
4018  auto *Temp = MoveI;
4019  MoveI = MoveI->getNextNode();
4020  Temp->moveBefore(II);
4021  }
4022  II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
4023  return eraseInstFromFunction(*NextInst);
4024  }
4025  break;
4026  }
4027  }
4028  return visitCallBase(*II);
4029 }
4030 
4031 // Fence instruction simplification
4032 Instruction *InstCombiner::visitFenceInst(FenceInst &FI) {
4033  // Remove identical consecutive fences.
4034  Instruction *Next = FI.getNextNonDebugInstruction();
4035  if (auto *NFI = dyn_cast<FenceInst>(Next))
4036  if (FI.isIdenticalTo(NFI))
4037  return eraseInstFromFunction(FI);
4038  return nullptr;
4039 }
4040 
4041 // InvokeInst simplification
4042 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
4043  return visitCallBase(II);
4044 }
4045 
4046 // CallBrInst simplification
4047 Instruction *InstCombiner::visitCallBrInst(CallBrInst &CBI) {
4048  return visitCallBase(CBI);
4049 }
4050 
4051 /// If this cast does not affect the value passed through the varargs area, we
4052 /// can eliminate the use of the cast.
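 /// Illustrative example (hedged): an i8* argument bitcast to i32* and
 /// passed through a printf-style varargs area is a lossless pointer
 /// cast, so the original pointer can be passed directly.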
4053 static bool isSafeToEliminateVarargsCast(const CallBase &Call,
4054  const DataLayout &DL,
4055  const CastInst *const CI,
4056  const int ix) {
4057  if (!CI->isLosslessCast())
4058  return false;
4059 
4060  // If this is a GC intrinsic, avoid munging types. We need types for
4061  // statepoint reconstruction in SelectionDAG.
4062  // TODO: This is probably something which should be expanded to all
4063  // intrinsics since the entire point of intrinsics is that
4064  // they are understandable by the optimizer.
4065  if (isStatepoint(&Call) || isGCRelocate(&Call) || isGCResult(&Call))
4066  return false;
4067 
4068  // The size of ByVal or InAlloca arguments is derived from the type, so we
4069  // can't change to a type with a different size. If the size were
4070  // passed explicitly we could avoid this check.
4071  if (!Call.isByValOrInAllocaArgument(ix))
4072  return true;
4073 
4074  Type* SrcTy =
4075  cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
4076  Type *DstTy = Call.isByValArgument(ix)
4077  ? Call.getParamByValType(ix)
4078  : cast<PointerType>(CI->getType())->getElementType();
4079  if (!SrcTy->isSized() || !DstTy->isSized())
4080  return false;
4081  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
4082  return false;
4083  return true;
4084 }
4085 
4086 Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
4087  if (!CI->getCalledFunction()) return nullptr;
4088 
4089  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
4090  replaceInstUsesWith(*From, With);
4091  };
4092  auto InstCombineErase = [this](Instruction *I) {
4093  eraseInstFromFunction(*I);
4094  };
4095  LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
4096  InstCombineErase);
4097  if (Value *With = Simplifier.optimizeCall(CI)) {
4098  ++NumSimplified;
4099  return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
4100  }
4101 
4102  return nullptr;
4103 }
4104 
4105 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
4106  // Strip off at most one level of pointer casts, looking for an alloca. This
4107  // is good enough in practice and simpler than handling any number of casts.
4108  Value *Underlying = TrampMem->stripPointerCasts();
4109  if (Underlying != TrampMem &&
4110  (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
4111  return nullptr;
4112  if (!isa<AllocaInst>(Underlying))
4113  return nullptr;
4114 
4115  IntrinsicInst *InitTrampoline = nullptr;
4116  for (User *U : TrampMem->users()) {
4117  IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4118  if (!II)
4119  return nullptr;
4120  if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
4121  if (InitTrampoline)
4122  // More than one init_trampoline writes to this value. Give up.
4123  return nullptr;
4124  InitTrampoline = II;
4125  continue;
4126  }
4127  if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
4128  // Allow any number of calls to adjust.trampoline.
4129  continue;
4130  return nullptr;
4131  }
4132 
4133  // No call to init.trampoline found.
4134  if (!InitTrampoline)
4135  return nullptr;
4136 
4137  // Check that the alloca is being used in the expected way.
4138  if (InitTrampoline->getOperand(0) != TrampMem)
4139  return nullptr;
4140 
4141  return InitTrampoline;
4142 }
4143 
4144 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
4145  Value *TrampMem) {
4146  // Visit all the previous instructions in the basic block, and try to find a
4147  // init.trampoline which has a direct path to the adjust.trampoline.
4148  for (BasicBlock::iterator I = AdjustTramp->getIterator(),
4149  E = AdjustTramp->getParent()->begin();
4150  I != E;) {
4151  Instruction *Inst = &*--I;
4152  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
4153  if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
4154  II->getOperand(0) == TrampMem)
4155  return II;
4156  if (Inst->mayWriteToMemory())
4157  return nullptr;
4158  }
4159  return nullptr;
4160 }
4161 
4162 // Given a call to llvm.adjust.trampoline, find and return the corresponding
4163 // call to llvm.init.trampoline if the call to the trampoline can be optimized
4164 // to a direct call to a function. Otherwise return NULL.
4165 static IntrinsicInst *findInitTrampoline(Value *Callee) {
4166  Callee = Callee->stripPointerCasts();
4167  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4168  if (!AdjustTramp ||
4169  AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
4170  return nullptr;
4171 
4172  Value *TrampMem = AdjustTramp->getOperand(0);
4173 
4174  if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
4175  return IT;
4176  if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
4177  return IT;
4178  return nullptr;
4179 }
4180 
4181 static void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
4182  ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
4183  ConstantInt *Op1C = (Call.getNumArgOperands() == 1)
4184  ? nullptr
4185  : dyn_cast<ConstantInt>(Call.getOperand(1));
4186  // Bail out if the allocation size is zero.
4187  if ((Op0C && Op0C->isNullValue()) || (Op1C && Op1C->isNullValue()))
4188  return;
4189 
4190  if (isMallocLikeFn(&Call, TLI) && Op0C) {
4191  if (isOpNewLikeFn(&Call, TLI))
4192  Call.addAttribute(AttributeList::ReturnIndex,
4193  Attribute::getWithDereferenceableBytes(
4194  Call.getContext(), Op0C->getZExtValue()));
4195  else
4196  Call.addAttribute(AttributeList::ReturnIndex,
4197  Attribute::getWithDereferenceableOrNullBytes(
4198  Call.getContext(), Op0C->getZExtValue()));
4199  } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
4200  Call.addAttribute(AttributeList::ReturnIndex,
4201  Attribute::getWithDereferenceableOrNullBytes(
4202  Call.getContext(), Op1C->getZExtValue()));
4203  } else if (isCallocLikeFn(&Call, TLI) && Op0C && Op1C) {
4204  bool Overflow;
4205  const APInt &N = Op0C->getValue();
4206  APInt Size = N.umul_ov(Op1C->getValue(), Overflow);
4207  if (!Overflow)
4208  Call.addAttribute(AttributeList::ReturnIndex,
4209  Attribute::getWithDereferenceableOrNullBytes(
4210  Call.getContext(), Size.getZExtValue()));
4211  } else if (isStrdupLikeFn(&Call, TLI) && Call.getNumArgOperands() == 1) {
4212  // TODO: handle strndup
4213  if (uint64_t Len = GetStringLength(Call.getOperand(0)))
4214  Call.addAttribute(
4215  AttributeList::ReturnIndex,
4216  Attribute::getWithDereferenceableOrNullBytes(Call.getContext(), Len));
4217  }
4218 }
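 // Illustrative example (hedged): "malloc(i64 16)" gets a
 // dereferenceable_or_null(16) return attribute, while an "operator new"
 // style allocator, which cannot return null, gets dereferenceable(16).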
4219 
4220 /// Improvements for call, callbr and invoke instructions.
4221 Instruction *InstCombiner::visitCallBase(CallBase &Call) {
4222  if (isAllocationFn(&Call, &TLI))
4223  annotateAnyAllocSite(Call, &TLI);
4224 
4225  if (isAllocLikeFn(&Call, &TLI))
4226  return visitAllocSite(Call);
4227 
4228  bool Changed = false;
4229 
4230  // Mark any parameters that are known to be non-null with the nonnull
4231  // attribute. This is helpful for inlining calls to functions with null
4232  // checks on their arguments.
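 // Illustrative example (hedged): passing the address of an alloca is
 // known non-null, so that parameter is given the nonnull attribute here.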
4233  SmallVector<unsigned, 4> ArgNos;
4234  unsigned ArgNo = 0;
4235 
4236  for (Value *V : Call.args()) {
4237  if (V->getType()->isPointerTy() &&
4238  !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
4239  isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
4240  ArgNos.push_back(ArgNo);
4241  ArgNo++;
4242  }
4243 
4244  assert(ArgNo == Call.arg_size() && "sanity check");
4245 
4246  if (!ArgNos.empty()) {
4247  AttributeList AS = Call.getAttributes();
4248  LLVMContext &Ctx = Call.getContext();
4249  AS = AS.addParamAttribute(Ctx, ArgNos,
4250  Attribute::get(Ctx, Attribute::NonNull));
4251  Call.setAttributes(AS);
4252  Changed = true;
4253  }
4254 
4255  // If the callee is a pointer to a function, attempt to move any casts to the
4256  // arguments of the call/callbr/invoke.
4257  Value *Callee = Call.getCalledValue();
4258  if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
4259  return nullptr;
4260 
4261  if (Function *CalleeF = dyn_cast<Function>(Callee)) {
4262  // Remove the convergent attr on calls when the callee is not convergent.
4263  if (Call.isConvergent() && !CalleeF->isConvergent() &&
4264  !CalleeF->isIntrinsic()) {
4265  LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
4266  << "\n");
4267  Call.setNotConvergent();
4268  return &Call;
4269  }
4270 
4271  // If the call and callee calling conventions don't match, this call must
4272  // be unreachable, as the call is undefined.
4273  if (CalleeF->getCallingConv() != Call.getCallingConv() &&
4274  // Only do this for calls to a function with a body. A prototype may
4275  // not actually end up matching the implementation's calling conv for a
4276  // variety of reasons (e.g. it may be written in assembly).
4277  !CalleeF->isDeclaration()) {
4278  Instruction *OldCall = &Call;
4279  CreateNonTerminatorUnreachable(OldCall);
4280  // If OldCall does not return void then replaceAllUsesWith undef.
4281  // This allows ValueHandlers and custom metadata to adjust themselves.
4282  if (!OldCall->getType()->isVoidTy())
4283  replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
4284  if (isa<CallInst>(OldCall))
4285  return eraseInstFromFunction(*OldCall);
4286 
4287  // We cannot remove an invoke or a callbr, because it would change the
4288  // CFG, just change the callee to a null pointer.
4289  cast<CallBase>(OldCall)->setCalledFunction(
4290  CalleeF->getFunctionType(),
4291  Constant::getNullValue(CalleeF->getType()));
4292  return nullptr;
4293  }
4294  }
4295 
4296  if ((isa<ConstantPointerNull>(Callee) &&
4297  !NullPointerIsDefined(Call.getFunction())) ||
4298  isa<UndefValue>(Callee)) {
4299  // If Call does not return void then replaceAllUsesWith undef.
4300  // This allows ValueHandlers and custom metadata to adjust themselves.
4301  if (!Call.getType()->isVoidTy())
4302  replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
4303 
4304  if (Call.isTerminator()) {
4305  // Can't remove an invoke or callbr because we cannot change the CFG.
4306  return nullptr;
4307  }
4308 
4309  // This instruction is not reachable, just remove it.
4310  CreateNonTerminatorUnreachable(&Call);
4311  return eraseInstFromFunction(Call);
4312  }
4313 
4314  if (IntrinsicInst *II = findInitTrampoline(Callee))
4315  return transformCallThroughTrampoline(Call, *II);
4316 
4317  PointerType *PTy = cast<PointerType>(Callee->getType());
4318  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
4319  if (FTy->isVarArg()) {
4320  int ix = FTy->getNumParams();
4321  // See if we can optimize any arguments passed through the varargs area of
4322  // the call.
4323  for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
4324  I != E; ++I, ++ix) {
4325  CastInst *CI = dyn_cast<CastInst>(*I);
4326  if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
4327  *I = CI->getOperand(0);
4328 
4329  // Update the byval type to match the argument type.
4330  if (Call.isByValArgument(ix)) {
4331  Call.removeParamAttr(ix, Attribute::ByVal);
4332  Call.addParamAttr(
4333  ix, Attribute::getWithByValType(
4334  Call.getContext(),
4335  CI->getOperand(0)->getType()->getPointerElementType()));
4336  }
4337  Changed = true;
4338  }
4339  }
4340  }
4341 
4342  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
4343  // Inline asm calls cannot throw - mark them 'nounwind'.
4344  Call.setDoesNotThrow();
4345  Changed = true;
4346  }
4347 
4348  // Try to optimize the call if possible, we require DataLayout for most of
4349  // this. None of these calls are seen as possibly dead so go ahead and
4350  // delete the instruction now.
4351  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
4352  Instruction *I = tryOptimizeCall(CI);
4353  // If we changed something, return the result. Otherwise fall
4354  // through to the checks below.
4355  if (I) return eraseInstFromFunction(*I);
4356  }
4357 
4358  return Changed ? &Call : nullptr;
4359 }
4360 
4361 /// If the callee is a constexpr cast of a function, attempt to move the cast to
4362 /// the arguments of the call/callbr/invoke.
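 /// Illustrative example (hedged):
 ///   call void bitcast (void (i8*)* @f to void (i32*)*)(i32* %p)
 /// becomes a direct call: call void @f(i8* bitcast (i32* %p to i8*)).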
4363 bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
4364  auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
4365  if (!Callee)
4366  return false;
4367 
4368  // If this is a call to a thunk function, don't remove the cast. Thunks are
4369  // used to transparently forward all incoming parameters and outgoing return
4370  // values, so it's important to leave the cast in place.
4371  if (Callee->hasFnAttribute("thunk"))
4372  return false;
4373 
4374  // If this is a musttail call, the callee's prototype must match the caller's
4375  // prototype with the exception of pointee types. The code below doesn't
4376  // implement that, so we can't do this transform.
4377  // TODO: Do the transform if it only requires adding pointer casts.
4378  if (Call.isMustTailCall())
4379  return false;
4380 
4381  Instruction *Caller = &Call;
4382  const AttributeList &CallerPAL = Call.getAttributes();
4383 
4384  // Okay, this is a cast from a function to a different type. Unless doing so
4385  // would cause a type conversion of one of our arguments, change this call to
4386  // be a direct call with arguments cast to the appropriate types.
4387  FunctionType *FT = Callee->getFunctionType();
4388  Type *OldRetTy = Caller->getType();
4389  Type *NewRetTy = FT->getReturnType();
4390 
4391  // Check to see if we are changing the return type...
4392  if (OldRetTy != NewRetTy) {
4393 
4394  if (NewRetTy->isStructTy())
4395  return false; // TODO: Handle multiple return values.
4396 
4397  if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
4398  if (Callee->isDeclaration())
4399  return false; // Cannot transform this return value.
4400 
4401  if (!Caller->use_empty() &&
4402  // void -> non-void is handled specially
4403  !NewRetTy->isVoidTy())
4404  return false; // Cannot transform this return value.
4405  }
4406 
4407  if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
4408  AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4409  if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
4410  return false; // Attribute not compatible with transformed value.
4411  }
4412 
4413  // If the callbase is an invoke/callbr instruction, and the return value is
4414  // used by a PHI node in a successor, we cannot change the return type of
4415  // the call because there is no place to put the cast instruction (without
4416  // breaking the critical edge). Bail out in this case.
4417  if (!Caller->use_empty()) {
4418  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
4419  for (User *U : II->users())
4420  if (PHINode *PN = dyn_cast<PHINode>(U))
4421  if (PN->getParent() == II->getNormalDest() ||
4422  PN->getParent() == II->getUnwindDest())
4423  return false;
4424  // FIXME: Be conservative for callbr to avoid a quadratic search.
4425  if (isa<CallBrInst>(Caller))
4426  return false;
4427  }
4428  }
4429 
4430  unsigned NumActualArgs = Call.arg_size();
4431  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
4432 
4433  // Prevent us turning:
4434  // declare void @takes_i32_inalloca(i32* inalloca)
4435  // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
4436  //
4437  // into:
4438  // call void @takes_i32_inalloca(i32* null)
4439  //
4440  // Similarly, avoid folding away bitcasts of byval calls.
4441  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
4442  Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
4443  return false;
4444 
4445  auto AI = Call.arg_begin();
4446  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
4447  Type *ParamTy = FT->getParamType(i);
4448  Type *ActTy = (*AI)->getType();
4449 
4450  if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
4451  return false; // Cannot transform this parameter value.
4452 
4453  if (AttrBuilder(CallerPAL.getParamAttributes(i))
4454  .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
4455  return false; // Attribute not compatible with transformed value.
4456 
4457  if (Call.isInAllocaArgument(i))
4458  return false; // Cannot transform to and from inalloca.
4459 
4460  // If the parameter is passed as a byval argument, then we have to have a
4461  // sized type and the sized type has to have the same size as the old type.
4462  if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4463  PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
4464  if (!ParamPTy || !ParamPTy->getElementType()->isSized())
4465  return false;
4466 
4467  Type *CurElTy = Call.getParamByValType(i);
4468  if (DL.getTypeAllocSize(CurElTy) !=
4469  DL.getTypeAllocSize(ParamPTy->getElementType()))
4470  return false;
4471  }
4472  }
4473 
4474  if (Callee->isDeclaration()) {
4475  // Do not delete arguments unless we have a function body.
4476  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
4477  return false;
4478 
4479  // If the callee is just a declaration, don't change the varargsness of the
4480  // call. We don't want to introduce a varargs call where one doesn't
4481  // already exist.
4482  PointerType *APTy = cast<PointerType>(Call.getCalledValue()->getType());
4483  if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
4484  return false;
4485 
4486  // If both the callee and the cast type are varargs, we still have to make
4487  // sure the number of fixed parameters are the same or we have the same
4488  // ABI issues as if we introduce a varargs call.
4489  if (FT->isVarArg() &&
4490  cast<FunctionType>(APTy->getElementType())->isVarArg() &&
4491  FT->getNumParams() !=
4492  cast<FunctionType>(APTy->getElementType())->getNumParams())
4493  return false;
4494  }
4495 
4496  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
4497  !CallerPAL.isEmpty()) {
4498  // In this case we have more arguments than the new function type, but we
4499  // won't be dropping them. Check that these extra arguments have attributes
4500  // that are compatible with being a vararg call argument.
4501  unsigned SRetIdx;
4502  if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
4503  SRetIdx > FT->getNumParams())
4504  return false;
4505  }
4506 
4507  // Okay, we decided that this is a safe thing to do: go ahead and start
4508  // inserting cast instructions as necessary.
4509  SmallVector<Value *, 8> Args;
4510  SmallVector<AttributeSet, 8> ArgAttrs;
4511  Args.reserve(NumActualArgs);
4512  ArgAttrs.reserve(NumActualArgs);
4513 
4514  // Get any return attributes.
4515  AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4516 
4517  // If the return value is not being used, the type may not be compatible
4518  // with the existing attributes. Wipe out any problematic attributes.
4519  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
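  // Hypothetical illustration: if the old call returned i8* with a nonnull
  // return attribute and NewRetTy is i32, nonnull appears in
  // AttributeFuncs::typeIncompatible(i32) and is dropped here instead of
  // producing invalid IR.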
4520 
4521  LLVMContext &Ctx = Call.getContext();
4522  AI = Call.arg_begin();
4523  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
4524  Type *ParamTy = FT->getParamType(i);
4525 
4526  Value *NewArg = *AI;
4527  if ((*AI)->getType() != ParamTy)
4528  NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
4529  Args.push_back(NewArg);
4530 
4531  // Add any parameter attributes.
4532  if (CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4533  AttrBuilder AB(CallerPAL.getParamAttributes(i));
4534  AB.addByValAttr(NewArg->getType()->getPointerElementType());
4535  ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
4536  } else
4537  ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4538  }
4539 
4540  // If the function takes more arguments than the call was taking, add them
4541  // now.
4542  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
4543  Args.push_back(Constant::getNullValue(FT->getParamType(i)));
4544  ArgAttrs.push_back(AttributeSet());
4545  }
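  // Hypothetical illustration: if the call passed one argument but FT
  // declares two parameters, the second parameter is filled with the null
  // value of its type (i32 0, a null pointer, etc.) and an empty attribute
  // set.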
4546 
4547  // If the call supplies more arguments than the new function type accepts, the extras can only be passed to a varargs callee.
4548  if (FT->getNumParams() < NumActualArgs) {
4549  // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
4550  if (FT->isVarArg()) {
4551  // Add all of the arguments in their promoted form to the arg list.
4552  for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
4553  Type *PTy = getPromotedType((*AI)->getType());
4554  Value *NewArg = *AI;
4555  if (PTy != (*AI)->getType()) {
4556  // Must promote to pass through va_arg area!
4557  Instruction::CastOps opcode =
4558  CastInst::getCastOpcode(*AI, false, PTy, false);
4559  NewArg = Builder.CreateCast(opcode, *AI, PTy);
4560  }
4561  Args.push_back(NewArg);
4562 
4563  // Add any parameter attributes.
4564  ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4565  }
4566  }
4567  }
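  // Hypothetical illustration of the promotion above: an extra i8 argument
  // headed for the vararg area is widened to i32 by getPromotedType; with
  // both sides treated as unsigned, CastInst::getCastOpcode picks zext, so
  // the value is passed as "zext i8 %c to i32".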
4568 
4569  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
4570 
4571  if (NewRetTy->isVoidTy())
4572  Caller->setName(""); // Void type should not have a name.
4573 
4574  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
4575  "missing argument attributes");
4576  AttributeList NewCallerPAL = AttributeList::get(
4577  Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
4578 
4579  SmallVector<OperandBundleDef, 1> OpBundles;
4580  Call.getOperandBundlesAsDefs(OpBundles);
4581 
4582  CallBase *NewCall;
4583  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4584  NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
4585  II->getUnwindDest(), Args, OpBundles);
4586  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
4587  NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
4588  CBI->getIndirectDests(), Args, OpBundles);
4589  } else {
4590  NewCall = Builder.CreateCall(Callee, Args, OpBundles);
4591  cast<CallInst>(NewCall)->setTailCallKind(
4592  cast<CallInst>(Caller)->getTailCallKind());
4593  }
4594  NewCall->takeName(Caller);
4595  NewCall->setCallingConv(Call.getCallingConv());
4596  NewCall->setAttributes(NewCallerPAL);
4597 
4598  // Preserve the weight metadata for the new call instruction. The metadata
4599  // is used by SamplePGO to check the callsite's hotness.
4600  uint64_t W;
4601  if (Caller->extractProfTotalWeight(W))
4602  NewCall->setProfWeight(W);
4603 
4604  // Insert a cast of the return type as necessary.
4605  Instruction *NC = NewCall;
4606  Value *NV = NC;
4607  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
4608  if (!NV->getType()->isVoidTy()) {
4609  NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
4610  NC->setDebugLoc(Caller->getDebugLoc());
4611 
4612  // If this is an invoke/callbr instruction, we should insert it after the
4613  // first non-phi instruction in the normal successor block.
4614  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4615  BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
4616  InsertNewInstBefore(NC, *I);
4617  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
4618  BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
4619  InsertNewInstBefore(NC, *I);
4620  } else {
4621  // Otherwise, it's a call, just insert cast right after the call.
4622  InsertNewInstBefore(NC, *Caller);
4623  }
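  // Hypothetical illustration: if an invoke used to produce i8* and now
  // produces i32*, the compensating bitcast cannot be placed after the
  // invoke in the same block (an invoke is a terminator), so it lands at
  // the first insertion point of the normal destination instead.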
4624  Worklist.AddUsersToWorkList(*Caller);
4625  } else {
4626  NV = UndefValue::get(Caller->getType());
4627  }
4628  }
4629 
4630  if (!Caller->use_empty())
4631  replaceInstUsesWith(*Caller, NV);
4632  else if (Caller->hasValueHandle()) {
4633  if (OldRetTy == NV->getType())
4634  ValueHandleBase::ValueIsRAUWd(Caller, NV);
4635  else
4636  // We cannot call ValueIsRAUWd with a different type, and the
4637  // actual tracked value will disappear.
4638  ValueHandleBase::ValueIsDeleted(Caller);
4639  }
4640 
4641  eraseInstFromFunction(*Caller);
4642  return true;
4643 }
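// Net effect of the rewrite above, on a hypothetical example:
//   %r = call i8* bitcast (i32* (i32*)* @f to i8* (i8*)*)(i8* %p)
// becomes
//   %q = bitcast i8* %p to i32*
//   %v = call i32* @f(i32* %q)
//   %r = bitcast i32* %v to i8*
// with name, attributes, calling convention, and profile weight carried over
// as done above.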
4644 
4645 /// Turn a call to a function created by the llvm.init.trampoline /
4646 /// llvm.adjust.trampoline intrinsic pair into a direct call to the underlying function.
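/// For illustration (hypothetical IR):
///   call void @llvm.init.trampoline(i8* %tramp,
///                                   i8* bitcast (void (i8*, i32)* @f to i8*),
///                                   i8* %nval)
///   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
///   %fp = bitcast i8* %p to void (i32)*
///   call void %fp(i32 42)
/// becomes, when @f marks its first parameter 'nest':
///   call void @f(i8* nest %nval, i32 42)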
4647 Instruction *
4648 InstCombiner::transformCallThroughTrampoline(CallBase &Call,
4649  IntrinsicInst &Tramp) {
4650  Value *Callee = Call.getCalledValue();
4651  Type *CalleeTy = Callee->getType();
4652  FunctionType *FTy = Call.getFunctionType();
4653  AttributeList Attrs = Call.getAttributes();
4654 
4655  // If the call already has the 'nest' attribute somewhere then give up -
4656  // otherwise 'nest' would occur twice after splicing in the chain.
4657  if (Attrs.hasAttrSomewhere(Attribute::Nest))
4658  return nullptr;
4659 
4660  Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
4661  FunctionType *NestFTy = NestF->getFunctionType();
4662 
4663  AttributeList NestAttrs = NestF->getAttributes();
4664  if (!NestAttrs.isEmpty()) {
4665  unsigned NestArgNo = 0;
4666  Type *NestTy = nullptr;
4667  AttributeSet NestAttr;
4668 
4669  // Look for a parameter marked with the 'nest' attribute.
4670  for (FunctionType::param_iterator I = NestFTy->param_begin(),
4671  E = NestFTy->param_end();
4672  I != E; ++NestArgNo, ++I) {
4673  AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
4674  if (AS.hasAttribute(Attribute::Nest)) {
4675  // Record the parameter type and any other attributes.
4676  NestTy = *I;
4677  NestAttr = AS;
4678  break;
4679  }
4680  }
4681 
4682  if (NestTy) {
4683  std::vector<Value*> NewArgs;
4684  std::vector<AttributeSet> NewArgAttrs;
4685  NewArgs.reserve(Call.arg_size() + 1);
4686  NewArgAttrs.reserve(Call.arg_size());
4687 
4688  // Insert the nest argument into the call argument list, which may
4689  // mean appending it. Likewise for attributes.
4690 
4691  {
4692  unsigned ArgNo = 0;
4693  auto I = Call.arg_begin(), E = Call.arg_end();
4694  do {
4695  if (ArgNo == NestArgNo) {
4696  // Add the chain argument and attributes.
4697  Value *NestVal = Tramp.getArgOperand(2);
4698  if (NestVal->getType() != NestTy)
4699  NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
4700  NewArgs.push_back(NestVal);
4701  NewArgAttrs.push_back(NestAttr);
4702  }
4703 
4704  if (I == E)
4705  break;
4706 
4707  // Add the original argument and attributes.
4708  NewArgs.push_back(*I);
4709  NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
4710 
4711  ++ArgNo;
4712  ++I;
4713  } while (true);
4714  }
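      // Hypothetical illustration: with NestArgNo == 0 and original
      // arguments (i32 7, i32 8), the loop above yields
      // (i8* nest %nval, i32 7, i32 8); a NestArgNo equal to the original
      // argument count appends the chain instead.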
4715 
4716  // The trampoline may have been bitcast to a bogus type (FTy).
4717  // Handle this by synthesizing a new function type, equal to FTy
4718  // with the chain parameter inserted.
4719 
4720  std::vector<Type*> NewTypes;
4721  NewTypes.reserve(FTy->getNumParams()+1);
4722 
4723  // Insert the chain's type into the list of parameter types, which may
4724  // mean appending it.
4725  {
4726  unsigned ArgNo = 0;
4727  FunctionType::param_iterator I = FTy->param_begin(),
4728  E = FTy->param_end();
4729 
4730  do {
4731  if (ArgNo == NestArgNo)
4732  // Add the chain's type.
4733  NewTypes.push_back(NestTy);
4734 
4735  if (I == E)
4736  break;
4737 
4738  // Add the original type.
4739  NewTypes.push_back(*I);
4740 
4741  ++ArgNo;
4742  ++I;
4743  } while (true);
4744  }
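      // The type list is spliced the same way: for a hypothetical i8* chain
      // type and FTy == void (i32, i32), NewTypes becomes (i8*, i32, i32)
      // when NestArgNo == 0.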
4745 
4746  // Replace the trampoline call with a direct call. Let the generic
4747  // code sort out any function type mismatches.
4748  FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
4749  FTy->isVarArg());
4750  Constant *NewCallee =
4751  NestF->getType() == PointerType::getUnqual(NewFTy) ?
4752  NestF : ConstantExpr::getBitCast(NestF,
4753  PointerType::getUnqual(NewFTy));
4754  AttributeList NewPAL =
4755  AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
4756  Attrs.getRetAttributes(), NewArgAttrs);
4757 
4758  SmallVector<OperandBundleDef, 1> OpBundles;
4759  Call.getOperandBundlesAsDefs(OpBundles);
4760 
4761  Instruction *NewCaller;
4762  if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
4763  NewCaller = InvokeInst::Create(NewFTy, NewCallee,
4764  II->getNormalDest(), II->getUnwindDest(),
4765  NewArgs, OpBundles);
4766  cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
4767  cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
4768  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
4769  NewCaller =
4770  CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
4771  CBI->getIndirectDests(), NewArgs, OpBundles);
4772  cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
4773  cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
4774  } else {
4775  NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
4776  cast<CallInst>(NewCaller)->setTailCallKind(
4777  cast<CallInst>(Call).getTailCallKind());
4778  cast<CallInst>(NewCaller)->setCallingConv(
4779  cast<CallInst>(Call).getCallingConv());
4780  cast<CallInst>(NewCaller)->setAttributes(NewPAL);
4781  }
4782  NewCaller->setDebugLoc(Call.getDebugLoc());
4783 
4784  return NewCaller;
4785  }
4786  }
4787 
4788  // Replace the trampoline call with a direct call. Since there is no 'nest'
4789  // parameter, there is no need to adjust the argument list. Let the generic
4790  // code sort out any function type mismatches.
4791  Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
4792  Call.setCalledFunction(FTy, NewCallee);
4793  return &Call;
4794 }