//===- TruncInstCombine.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// TruncInstCombine - looks for expression graphs post-dominated by TruncInst
// and for each eligible graph, it will create a reduced bit-width expression,
// replace the old expression with this new one and remove the old expression.
// An eligible expression graph is one that:
//   1. Contains only supported instructions.
//   2. Supported leaves: ZExtInst, SExtInst, TruncInst and Constant value.
//   3. Can be evaluated into type with reduced legal bit-width.
//   4. All instructions in the graph must not have users outside the graph.
//      The only exception is for {ZExt, SExt}Inst with operand type equal to
//      the new reduced type evaluated in (3).
//
// The motivation for this optimization is that evaluating an expression using
// smaller bit-width is preferable, especially for vectorization where we can
// fit more values in one vectorized instruction. In addition, this optimization
// may decrease the number of cast instructions, but will not increase it.
//
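// As an illustrative sketch (assuming i16 is a legal integer type for the
// target), an expression graph such as:
//   %zx  = zext i16 %a to i32
//   %zy  = zext i16 %b to i32
//   %add = add i32 %zx, %zy
//   %tr  = trunc i32 %add to i16
// can be evaluated directly in i16 as:
//   %add.r = add i16 %a, %b
//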
//===----------------------------------------------------------------------===//

#include "AggressiveInstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

#define DEBUG_TYPE "aggressive-instcombine"

STATISTIC(NumExprsReduced, "Number of truncations eliminated by reducing bit "
                           "width of expression graph");
STATISTIC(NumInstrsReduced,
          "Number of instructions whose bit width was reduced");

/// Given an instruction and a container, it fills all the relevant operands of
/// that instruction, with respect to the Trunc expression graph optimization.
static void getRelevantOperands(Instruction *I,
                                SmallVectorImpl<Value *> &Ops) {
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // These CastInst are considered leaves of the evaluated expression, thus,
    // their operands are not relevant.
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::InsertElement:
    Ops.push_back(I->getOperand(0));
    Ops.push_back(I->getOperand(1));
    break;
  case Instruction::ExtractElement:
    Ops.push_back(I->getOperand(0));
    break;
  case Instruction::Select:
    Ops.push_back(I->getOperand(1));
    Ops.push_back(I->getOperand(2));
    break;
  case Instruction::PHI:
    for (Value *V : cast<PHINode>(I)->incoming_values())
      Ops.push_back(V);
    break;
  default:
    llvm_unreachable("Unreachable!");
  }
}

bool TruncInstCombine::buildTruncExpressionGraph() {
  SmallVector<Value *, 8> Worklist;
  SmallVector<Instruction *, 8> Stack;
  // Clear old instructions info.
  InstInfoMap.clear();

  Worklist.push_back(CurrentTruncInst->getOperand(0));

  while (!Worklist.empty()) {
    Value *Curr = Worklist.back();

    if (isa<Constant>(Curr)) {
      Worklist.pop_back();
      continue;
    }

    auto *I = dyn_cast<Instruction>(Curr);
    if (!I)
      return false;

    if (!Stack.empty() && Stack.back() == I) {
      // Already handled all instruction operands, can remove it from both the
      // Worklist and the Stack, and add it to the instruction info map.
      Worklist.pop_back();
      Stack.pop_back();
      // Insert I to the Info map.
      InstInfoMap.insert(std::make_pair(I, Info()));
      continue;
    }

    if (InstInfoMap.count(I)) {
      Worklist.pop_back();
      continue;
    }

    // Add the instruction to the stack before starting to handle its operands.
    Stack.push_back(I);

    unsigned Opc = I->getOpcode();
    switch (Opc) {
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // trunc(trunc(x)) -> trunc(x)
      // trunc(ext(x)) -> ext(x) if the source type is smaller than the new
      // dest
      // trunc(ext(x)) -> trunc(x) if the source type is larger than the new
      // dest
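      // (Illustrative, with a new dest of i16: trunc (zext i8 %x to i32) to
      //  i16 becomes zext i8 %x to i16, while trunc (zext i32 %x to i64) to
      //  i16 becomes trunc i32 %x to i16.)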
      break;
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::UDiv:
    case Instruction::URem:
    case Instruction::InsertElement:
    case Instruction::ExtractElement:
    case Instruction::Select: {
      SmallVector<Value *, 2> Operands;
      getRelevantOperands(I, Operands);
      append_range(Worklist, Operands);
      break;
    }
    case Instruction::PHI: {
      SmallVector<Value *, 2> Operands;
      getRelevantOperands(I, Operands);
      // Add only operands not in Stack to prevent cycle
      for (auto *Op : Operands)
        if (!llvm::is_contained(Stack, Op))
          Worklist.push_back(Op);
      break;
    }
    default:
      // TODO: Can handle more cases here:
      // 1. shufflevector
      // 2. sdiv, srem
      // ...
      return false;
    }
  }
  return true;
}

unsigned TruncInstCombine::getMinBitWidth() {
  SmallVector<Value *, 8> Worklist;
  SmallVector<Instruction *, 8> Stack;

  Value *Src = CurrentTruncInst->getOperand(0);
  Type *DstTy = CurrentTruncInst->getType();
  unsigned TruncBitWidth = DstTy->getScalarSizeInBits();
  unsigned OrigBitWidth =
      CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();

  if (isa<Constant>(Src))
    return TruncBitWidth;

  Worklist.push_back(Src);
  InstInfoMap[cast<Instruction>(Src)].ValidBitWidth = TruncBitWidth;

  while (!Worklist.empty()) {
    Value *Curr = Worklist.back();

    if (isa<Constant>(Curr)) {
      Worklist.pop_back();
      continue;
    }

    // Otherwise, it must be an instruction.
    auto *I = cast<Instruction>(Curr);

    auto &Info = InstInfoMap[I];

    SmallVector<Value *, 2> Operands;
    getRelevantOperands(I, Operands);

    if (!Stack.empty() && Stack.back() == I) {
      // Already handled all instruction operands, can remove it from both the
      // Worklist and the Stack, and update MinBitWidth.
      Worklist.pop_back();
      Stack.pop_back();
      for (auto *Operand : Operands)
        if (auto *IOp = dyn_cast<Instruction>(Operand))
          Info.MinBitWidth =
              std::max(Info.MinBitWidth, InstInfoMap[IOp].MinBitWidth);
      continue;
    }

    // Add the instruction to the stack before starting to handle its operands.
    Stack.push_back(I);
    unsigned ValidBitWidth = Info.ValidBitWidth;

    // Update minimum bit-width before handling its operands. This is required
    // when the instruction is part of a loop.
    Info.MinBitWidth = std::max(Info.MinBitWidth, Info.ValidBitWidth);

    for (auto *Operand : Operands)
      if (auto *IOp = dyn_cast<Instruction>(Operand)) {
        // If we already calculated the minimum bit-width for this valid
        // bit-width, or for a smaller valid bit-width, then just keep the
        // answer we already calculated.
        unsigned IOpBitwidth = InstInfoMap.lookup(IOp).ValidBitWidth;
        if (IOpBitwidth >= ValidBitWidth)
          continue;
        InstInfoMap[IOp].ValidBitWidth = ValidBitWidth;
        Worklist.push_back(IOp);
      }
  }
  unsigned MinBitWidth = InstInfoMap.lookup(cast<Instruction>(Src)).MinBitWidth;
  assert(MinBitWidth >= TruncBitWidth);

  if (MinBitWidth > TruncBitWidth) {
    // In this case reducing expression with vector type might generate a new
    // vector type, which is not preferable as it might result in generating
    // sub-optimal code.
    if (DstTy->isVectorTy())
      return OrigBitWidth;
    // Use the smallest integer type in the range [MinBitWidth, OrigBitWidth).
    Type *Ty = DL.getSmallestLegalIntType(DstTy->getContext(), MinBitWidth);
    // Update minimum bit-width with the new destination type bit-width if we
    // succeeded to find such a type, otherwise, with the original bit-width.
    MinBitWidth = Ty ? Ty->getScalarSizeInBits() : OrigBitWidth;
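    // (Illustrative: with MinBitWidth == 9 on a typical target where i16 and
    //  i32 are legal, this picks i16; if no legal integer type of at least
    //  MinBitWidth bits exists, the original bit-width is kept.)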
  } else { // MinBitWidth == TruncBitWidth
    // In this case the expression can be evaluated with the trunc instruction
    // destination type, and trunc instruction can be omitted. However, we
    // should not perform the evaluation if the original type is a legal scalar
    // type and the target type is illegal.
    bool FromLegal = MinBitWidth == 1 || DL.isLegalInteger(OrigBitWidth);
    bool ToLegal = MinBitWidth == 1 || DL.isLegalInteger(MinBitWidth);
    if (!DstTy->isVectorTy() && FromLegal && !ToLegal)
      return OrigBitWidth;
  }
  return MinBitWidth;
}

Type *TruncInstCombine::getBestTruncatedType() {
  if (!buildTruncExpressionGraph())
    return nullptr;

  // We don't want to duplicate instructions, which isn't profitable. Thus, we
  // can't shrink something that has multiple users, unless all users are
  // post-dominated by the trunc instruction, i.e., were visited during the
  // expression evaluation.
  unsigned DesiredBitWidth = 0;
  for (auto Itr : InstInfoMap) {
    Instruction *I = Itr.first;
    if (I->hasOneUse())
      continue;
    bool IsExtInst = (isa<ZExtInst>(I) || isa<SExtInst>(I));
    for (auto *U : I->users())
      if (auto *UI = dyn_cast<Instruction>(U))
        if (UI != CurrentTruncInst && !InstInfoMap.count(UI)) {
          if (!IsExtInst)
            return nullptr;
          // If this is an extension from the dest type, we can eliminate it,
          // even if it has multiple users. Thus, update the DesiredBitWidth
          // and validate that all extension instructions agree on the same
          // DesiredBitWidth.
          unsigned ExtInstBitWidth =
              I->getOperand(0)->getType()->getScalarSizeInBits();
          if (DesiredBitWidth && DesiredBitWidth != ExtInstBitWidth)
            return nullptr;
          DesiredBitWidth = ExtInstBitWidth;
        }
  }

  unsigned OrigBitWidth =
      CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();

  // Initialize MinBitWidth for shift instructions with the minimum number
  // that is greater than shift amount (i.e. shift amount + 1).
  // For `lshr` adjust MinBitWidth so that all potentially truncated
  // bits of the value-to-be-shifted are zeros.
  // For `ashr` adjust MinBitWidth so that all potentially truncated
  // bits of the value-to-be-shifted are sign bits (all zeros or ones)
  // and even the first (most significant) untruncated bit is a sign bit.
  // Exit early if MinBitWidth is not less than the original bitwidth.
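  // (Illustrative: for `lshr i32 %x, 3` where %x is known to fit in 12 bits,
  //  the shift amount requires MinBitWidth >= 4, and requiring all truncated
  //  bits of %x to be known zero raises it to MinBitWidth >= 12.)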
  for (auto &Itr : InstInfoMap) {
    Instruction *I = Itr.first;
    if (I->isShift()) {
      KnownBits KnownRHS = computeKnownBits(I->getOperand(1));
      unsigned MinBitWidth = KnownRHS.getMaxValue()
                                 .uadd_sat(APInt(OrigBitWidth, 1))
                                 .getLimitedValue(OrigBitWidth);
      if (MinBitWidth == OrigBitWidth)
        return nullptr;
      if (I->getOpcode() == Instruction::LShr) {
        KnownBits KnownLHS = computeKnownBits(I->getOperand(0));
        MinBitWidth =
            std::max(MinBitWidth, KnownLHS.getMaxValue().getActiveBits());
      }
      if (I->getOpcode() == Instruction::AShr) {
        unsigned NumSignBits = ComputeNumSignBits(I->getOperand(0));
        MinBitWidth = std::max(MinBitWidth, OrigBitWidth - NumSignBits + 1);
      }
      if (MinBitWidth >= OrigBitWidth)
        return nullptr;
      Itr.second.MinBitWidth = MinBitWidth;
    }
    if (I->getOpcode() == Instruction::UDiv ||
        I->getOpcode() == Instruction::URem) {
      unsigned MinBitWidth = 0;
      for (const auto &Op : I->operands()) {
        KnownBits Known = computeKnownBits(Op);
        MinBitWidth =
            std::max(Known.getMaxValue().getActiveBits(), MinBitWidth);
        if (MinBitWidth >= OrigBitWidth)
          return nullptr;
      }
      Itr.second.MinBitWidth = MinBitWidth;
    }
  }

  // Calculate the minimum bit-width allowed for shrinking the currently
  // visited truncate's operand.
  unsigned MinBitWidth = getMinBitWidth();

  // Check that we can shrink to a smaller bit-width than the original one and
  // that it matches the DesiredBitWidth, if such exists.
  if (MinBitWidth >= OrigBitWidth ||
      (DesiredBitWidth && DesiredBitWidth != MinBitWidth))
    return nullptr;

  return IntegerType::get(CurrentTruncInst->getContext(), MinBitWidth);
}

/// Given a reduced scalar type \p Ty and a \p V value, return a reduced type
/// for \p V: if it is a vector type, return the vector version of \p Ty,
/// otherwise return \p Ty.
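/// (For example, a <4 x i32> \p V with an i16 \p Ty yields <4 x i16>.)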
static Type *getReducedType(Value *V, Type *Ty) {
  assert(Ty && !Ty->isVectorTy() && "Expect Scalar Type");
  if (auto *VTy = dyn_cast<VectorType>(V->getType()))
    return VectorType::get(Ty, VTy->getElementCount());
  return Ty;
}

Value *TruncInstCombine::getReducedOperand(Value *V, Type *SclTy) {
  Type *Ty = getReducedType(V, SclTy);
  if (auto *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, false);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  auto *I = cast<Instruction>(V);
  Info Entry = InstInfoMap.lookup(I);
  assert(Entry.NewValue);
  return Entry.NewValue;
}

void TruncInstCombine::ReduceExpressionGraph(Type *SclTy) {
  NumInstrsReduced += InstInfoMap.size();
  // Pairs of old and new phi-nodes
  SmallVector<std::pair<PHINode *, PHINode *>, 2> OldNewPHINodes;
  for (auto &Itr : InstInfoMap) { // Forward
    Instruction *I = Itr.first;
    TruncInstCombine::Info &NodeInfo = Itr.second;

    assert(!NodeInfo.NewValue && "Instruction has been evaluated");

    IRBuilder<> Builder(I);
    Value *Res = nullptr;
    unsigned Opc = I->getOpcode();
    switch (Opc) {
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt: {
      Type *Ty = getReducedType(I, SclTy);
      // If the source type of the cast is the type we're trying for then we
      // can just return the source. There's no need to insert it because it
      // is not new.
      if (I->getOperand(0)->getType() == Ty) {
        assert(!isa<TruncInst>(I) && "Cannot reach here with TruncInst");
        NodeInfo.NewValue = I->getOperand(0);
        continue;
      }
      // Otherwise, must be the same type of cast, so just reinsert a new one.
      // This also handles the case of zext(trunc(x)) -> zext(x).
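      // (Illustrative: when reducing to i16, a `sext i8 %x to i32` leaf is
      //  re-emitted here as `sext i8 %x to i16`.)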
      Res = Builder.CreateIntCast(I->getOperand(0), Ty,
                                  Opc == Instruction::SExt);

      // Update Worklist entries with new value if needed.
      // There are three possible changes to the Worklist:
      // 1. Update Old-TruncInst -> New-TruncInst.
      // 2. Remove Old-TruncInst (if New node is not TruncInst).
      // 3. Add New-TruncInst (if Old node was not TruncInst).
      auto *Entry = find(Worklist, I);
      if (Entry != Worklist.end()) {
        if (auto *NewCI = dyn_cast<TruncInst>(Res))
          *Entry = NewCI;
        else
          Worklist.erase(Entry);
      } else if (auto *NewCI = dyn_cast<TruncInst>(Res))
        Worklist.push_back(NewCI);
      break;
    }
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::UDiv:
    case Instruction::URem: {
      Value *LHS = getReducedOperand(I->getOperand(0), SclTy);
      Value *RHS = getReducedOperand(I->getOperand(1), SclTy);
      Res = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
      // Preserve `exact` flag since truncation doesn't change exactness
      if (auto *PEO = dyn_cast<PossiblyExactOperator>(I))
        if (auto *ResI = dyn_cast<Instruction>(Res))
          ResI->setIsExact(PEO->isExact());
      break;
    }
    case Instruction::ExtractElement: {
      Value *Vec = getReducedOperand(I->getOperand(0), SclTy);
      Value *Idx = I->getOperand(1);
      Res = Builder.CreateExtractElement(Vec, Idx);
      break;
    }
    case Instruction::InsertElement: {
      Value *Vec = getReducedOperand(I->getOperand(0), SclTy);
      Value *NewElt = getReducedOperand(I->getOperand(1), SclTy);
      Value *Idx = I->getOperand(2);
      Res = Builder.CreateInsertElement(Vec, NewElt, Idx);
      break;
    }
    case Instruction::Select: {
      Value *Op0 = I->getOperand(0);
      Value *LHS = getReducedOperand(I->getOperand(1), SclTy);
      Value *RHS = getReducedOperand(I->getOperand(2), SclTy);
      Res = Builder.CreateSelect(Op0, LHS, RHS);
      break;
    }
    case Instruction::PHI: {
      Res = Builder.CreatePHI(getReducedType(I, SclTy), I->getNumOperands());
      OldNewPHINodes.push_back(
          std::make_pair(cast<PHINode>(I), cast<PHINode>(Res)));
      break;
    }
    default:
      llvm_unreachable("Unhandled instruction");
    }

    NodeInfo.NewValue = Res;
    if (auto *ResI = dyn_cast<Instruction>(Res))
      ResI->takeName(I);
  }

  for (auto &Node : OldNewPHINodes) {
    PHINode *OldPN = Node.first;
    PHINode *NewPN = Node.second;
    for (auto Incoming : zip(OldPN->incoming_values(), OldPN->blocks()))
      NewPN->addIncoming(getReducedOperand(std::get<0>(Incoming), SclTy),
                         std::get<1>(Incoming));
  }

  Value *Res = getReducedOperand(CurrentTruncInst->getOperand(0), SclTy);
  Type *DstTy = CurrentTruncInst->getType();
  if (Res->getType() != DstTy) {
    IRBuilder<> Builder(CurrentTruncInst);
    Res = Builder.CreateIntCast(Res, DstTy, false);
    if (auto *ResI = dyn_cast<Instruction>(Res))
      ResI->takeName(CurrentTruncInst);
  }
  CurrentTruncInst->replaceAllUsesWith(Res);

  // Erase the old expression graph, which was replaced by the reduced
  // expression graph.
  CurrentTruncInst->eraseFromParent();
  // First, erase the old phi-nodes and their uses.
  for (auto &Node : OldNewPHINodes) {
    PHINode *OldPN = Node.first;
    OldPN->replaceAllUsesWith(PoisonValue::get(OldPN->getType()));
    InstInfoMap.erase(OldPN);
    OldPN->eraseFromParent();
  }
  // Now the expression graph has turned into a DAG.
  // We iterate backward, which means we visit an instruction before we visit
  // any of its operands; this way, when we get to an operand, we have already
  // removed the instructions (from the expression DAG) that use it.
  for (auto &I : llvm::reverse(InstInfoMap)) {
    // We still need to check that the instruction has no users before we erase
    // it, because a {SExt, ZExt}Inst instruction might have other users that
    // were not reduced; in that case, we need to keep that instruction.
    if (I.first->use_empty())
      I.first->eraseFromParent();
    else
      assert((isa<SExtInst>(I.first) || isa<ZExtInst>(I.first)) &&
             "Only {SExt, ZExt}Inst might have unreduced users");
  }
}

bool TruncInstCombine::run(Function &F) {
  bool MadeIRChange = false;

  // Collect all TruncInst in the function into the Worklist for evaluating.
  for (auto &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    for (auto &I : BB)
      if (auto *CI = dyn_cast<TruncInst>(&I))
        Worklist.push_back(CI);
  }

  // Process all TruncInst in the Worklist, for each instruction:
  //   1. Check if it dominates an eligible expression graph to be reduced.
  //   2. Create a reduced expression graph and replace the old one with it.
  while (!Worklist.empty()) {
    CurrentTruncInst = Worklist.pop_back_val();

    if (Type *NewDstSclTy = getBestTruncatedType()) {
      LLVM_DEBUG(
          dbgs() << "ICE: TruncInstCombine reducing type of expression graph "
                    "dominated by: "
                 << CurrentTruncInst << '\n');
      ReduceExpressionGraph(NewDstSclTy);
      ++NumExprsReduced;
      MadeIRChange = true;
    }
  }

  return MadeIRChange;
}