LLVM  9.0.0svn
ARMCodeGenPrepare.cpp
1 //===----- ARMCodeGenPrepare.cpp ------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This pass inserts intrinsics to handle small types that would otherwise be
11 /// promoted during legalization. Here we can manually promote types or insert
12 /// intrinsics which can handle narrow types that aren't supported by the
13 /// register classes.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "ARM.h"
18 #include "ARMSubtarget.h"
19 #include "ARMTargetMachine.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/CodeGen/Passes.h"
22 #include "llvm/CodeGen/TargetPassConfig.h"
23 #include "llvm/IR/Attributes.h"
24 #include "llvm/IR/BasicBlock.h"
25 #include "llvm/IR/IRBuilder.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/InstrTypes.h"
28 #include "llvm/IR/Instruction.h"
29 #include "llvm/IR/Instructions.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/Intrinsics.h"
32 #include "llvm/IR/Type.h"
33 #include "llvm/IR/Value.h"
34 #include "llvm/IR/Verifier.h"
35 #include "llvm/Pass.h"
36 #include "llvm/Support/Casting.h"
37 #include "llvm/Support/CommandLine.h"
38 
39 #define DEBUG_TYPE "arm-codegenprepare"
40 
41 using namespace llvm;
42 
43 static cl::opt<bool>
44 DisableCGP("arm-disable-cgp", cl::Hidden, cl::init(true),
45  cl::desc("Disable ARM specific CodeGenPrepare pass"));
46 
47 static cl::opt<bool>
48 EnableDSP("arm-enable-scalar-dsp", cl::Hidden, cl::init(false),
49  cl::desc("Use DSP instructions for scalar operations"));
50 
51 static cl::opt<bool>
52 EnableDSPWithImms("arm-enable-scalar-dsp-imms", cl::Hidden, cl::init(false),
53  cl::desc("Use DSP instructions for scalar operations\
54  with immediate operands"));
55 
56 // The goal of this pass is to enable more efficient code generation for
57 // operations on narrow types (i.e. types with < 32-bits) and this is a
58 // motivating IR code example:
59 //
60 // define hidden i32 @cmp(i8 zeroext) {
61 // %2 = add i8 %0, -49
62 // %3 = icmp ult i8 %2, 3
63 // ..
64 // }
65 //
66 // The issue here is that i8 is type-legalized to i32 because i8 is not a
67 // legal type. Thus, arithmetic is done in integer-precision, but then the
68 // byte value is masked out as follows:
69 //
70 // t19: i32 = add t4, Constant:i32<-49>
71 // t24: i32 = and t19, Constant:i32<255>
72 //
73 // Consequently, we generate code like this:
74 //
75 // subs r0, #49
76 // uxtb r1, r0
77 // cmp r1, #3
78 //
79 // This shows that masking out the byte value results in generation of
80 // the UXTB instruction. This is not optimal as r0 already contains the byte
81 // value we need, and so instead we can just generate:
82 //
83 // sub.w r1, r0, #49
84 // cmp r1, #3
85 //
86 // We achieve this by type promoting the IR to i32 like so for this example:
87 //
88 // define i32 @cmp(i8 zeroext %c) {
89 // %0 = zext i8 %c to i32
90 // %c.off = add i32 %0, -49
91 // %1 = icmp ult i32 %c.off, 3
92 // ..
93 // }
94 //
95 // For this to be valid and legal, we need to prove that the i32 add is
96 // producing the same value as the i8 addition, and that e.g. no overflow
97 // happens.
98 //
99 // A brief sketch of the algorithm and some terminology.
100 // We pattern match interesting IR patterns:
101 // - which have "sources": instructions producing narrow values (i8, i16), and
102 // - which have "sinks": instructions consuming these narrow values.
103 //
104 // We collect all instructions connecting sources and sinks in a worklist, so
105 // that we can mutate these instructions and perform type promotion when it is
106 // legal to do so.
107 
108 namespace {
109 class IRPromoter {
110  SmallPtrSet<Value*, 8> NewInsts;
111  SmallPtrSet<Instruction*, 4> InstsToRemove;
112  DenseMap<Value*, SmallVector<Type*, 4>> TruncTysMap;
113  SmallPtrSet<Value*, 8> Promoted;
114  Module *M = nullptr;
115  LLVMContext &Ctx;
116  IntegerType *ExtTy = nullptr;
117  IntegerType *OrigTy = nullptr;
118  SmallPtrSetImpl<Value*> *Visited;
119  SmallPtrSetImpl<Value*> *Sources;
120  SmallPtrSetImpl<Instruction*> *Sinks;
121  SmallPtrSetImpl<Instruction*> *SafeToPromote;
122 
123  void ReplaceAllUsersOfWith(Value *From, Value *To);
124  void PrepareConstants(void);
125  void ExtendSources(void);
126  void ConvertTruncs(void);
127  void PromoteTree(void);
128  void TruncateSinks(void);
129  void Cleanup(void);
130 
131 public:
132  IRPromoter(Module *M) : M(M), Ctx(M->getContext()),
133  ExtTy(Type::getInt32Ty(Ctx)) { }
134 
135 
136  void Mutate(Type *OrigTy,
137  SmallPtrSetImpl<Value*> &Visited,
138  SmallPtrSetImpl<Value*> &Sources,
139  SmallPtrSetImpl<Instruction*> &Sinks,
140  SmallPtrSetImpl<Instruction*> &SafeToPromote);
141 };
142 
143 class ARMCodeGenPrepare : public FunctionPass {
144  const ARMSubtarget *ST = nullptr;
145  IRPromoter *Promoter = nullptr;
146  std::set<Value*> AllVisited;
147  SmallPtrSet<Instruction*, 8> SafeToPromote;
148 
149  bool isSafeOverflow(Instruction *I);
150  bool isSupportedValue(Value *V);
151  bool isLegalToPromote(Value *V);
152  bool TryToPromote(Value *V);
153 
154 public:
155  static char ID;
156  static unsigned TypeSize;
157  Type *OrigTy = nullptr;
158 
159  ARMCodeGenPrepare() : FunctionPass(ID) {}
160 
161  void getAnalysisUsage(AnalysisUsage &AU) const override {
162  AU.addRequired<TargetPassConfig>();
163  }
164 
165  StringRef getPassName() const override { return "ARM IR optimizations"; }
166 
167  bool doInitialization(Module &M) override;
168  bool runOnFunction(Function &F) override;
169  bool doFinalization(Module &M) override;
170 };
171 
172 }
173 
174 static bool generateSignBits(Value *V) {
175  if (!isa<Instruction>(V))
176  return false;
177 
178  unsigned Opc = cast<Instruction>(V)->getOpcode();
179  return Opc == Instruction::AShr || Opc == Instruction::SDiv ||
180  Opc == Instruction::SRem;
181 }
182 
183 static bool EqualTypeSize(Value *V) {
184  return V->getType()->getScalarSizeInBits() == ARMCodeGenPrepare::TypeSize;
185 }
186 
187 static bool LessOrEqualTypeSize(Value *V) {
188  return V->getType()->getScalarSizeInBits() <= ARMCodeGenPrepare::TypeSize;
189 }
190 
191 static bool GreaterThanTypeSize(Value *V) {
192  return V->getType()->getScalarSizeInBits() > ARMCodeGenPrepare::TypeSize;
193 }
194 
195 static bool LessThanTypeSize(Value *V) {
196  return V->getType()->getScalarSizeInBits() < ARMCodeGenPrepare::TypeSize;
197 }
198 
199 /// Some instructions can use 8- and 16-bit operands, and we don't need to
200 /// promote anything larger. We disallow booleans to make life easier when
201 /// dealing with icmps but allow any other integer that is <= 16 bits. Void
202 /// types are accepted so we can handle switches.
203 static bool isSupportedType(Value *V) {
204  Type *Ty = V->getType();
205 
206  // Allow voids and pointers, these won't be promoted.
207  if (Ty->isVoidTy() || Ty->isPointerTy())
208  return true;
209 
210  if (auto *Ld = dyn_cast<LoadInst>(V))
211  Ty = cast<PointerType>(Ld->getPointerOperandType())->getElementType();
212 
213  if (!isa<IntegerType>(Ty) ||
214  cast<IntegerType>(V->getType())->getBitWidth() == 1)
215  return false;
216 
217  return LessOrEqualTypeSize(V);
218 }
219 
220 /// Return true if the given value is a source in the use-def chain, producing
221 /// a narrow 'TypeSize' value. These values will be zext to start the promotion
222 /// of the tree to i32. We guarantee that these won't populate the upper bits
223 /// of the register. ZExt on the loads will be free, and the same for call
224 /// return values because we only accept ones that guarantee a zeroext ret val.
225 /// Many arguments will have the zeroext attribute too, so those would be free
226 /// too.
227 static bool isSource(Value *V) {
228  if (!isa<IntegerType>(V->getType()))
229  return false;
230 
231  // TODO Allow zext to be sources.
232  if (isa<Argument>(V))
233  return true;
234  else if (isa<LoadInst>(V))
235  return true;
236  else if (isa<BitCastInst>(V))
237  return true;
238  else if (auto *Call = dyn_cast<CallInst>(V))
239  return Call->hasRetAttr(Attribute::AttrKind::ZExt);
240  else if (auto *Trunc = dyn_cast<TruncInst>(V))
241  return EqualTypeSize(Trunc);
242  return false;
243 }
244 
245 /// Return true if V will require any promoted values to be truncated for
246 /// the IR to remain valid. We can't mutate the value type of these
247 /// instructions.
248 static bool isSink(Value *V) {
249  // TODO The truncate also isn't actually necessary because we would have
250  // already proved that the data value is kept within the range of the
251  // original data type.
252 
253  // Sinks are:
254  // - points where the value in the register is being observed, such as an
255  // icmp, switch or store.
256  // - points where value types have to match, such as calls and returns.
257  // - zexts are included to ease the transformation and are generally removed
258  // later on.
259  if (auto *Store = dyn_cast<StoreInst>(V))
260  return LessOrEqualTypeSize(Store->getValueOperand());
261  if (auto *Return = dyn_cast<ReturnInst>(V))
262  return LessOrEqualTypeSize(Return->getReturnValue());
263  if (auto *ZExt = dyn_cast<ZExtInst>(V))
264  return GreaterThanTypeSize(ZExt);
265  if (auto *Switch = dyn_cast<SwitchInst>(V))
266  return LessThanTypeSize(Switch->getCondition());
267  if (auto *ICmp = dyn_cast<ICmpInst>(V))
268  return ICmp->isSigned() || LessThanTypeSize(ICmp->getOperand(0));
269 
270  return isa<CallInst>(V);
271 }
272 
273 /// Return whether the instruction can be promoted without any modifications to
274 /// its operands or result.
275 bool ARMCodeGenPrepare::isSafeOverflow(Instruction *I) {
276  // FIXME Do we need NSW too?
277  if (isa<OverflowingBinaryOperator>(I) && I->hasNoUnsignedWrap())
278  return true;
279 
280  // We can support a potentially overflowing instruction (I) if:
281  // - It is only used by an unsigned icmp.
282  // - The icmp uses a constant.
283  // - The overflowing value (I) is decreasing, i.e. it would underflow,
284  // wrapping around zero to become a larger number than before.
285  // - The underflowing instruction (I) also uses a constant.
286  //
287  // We can then use the two constants to calculate whether the result would
288  // wrap with respect to itself in the original bitwidth. If it doesn't wrap,
289  // but just underflows the range, the icmp would give the same result whether the
290  // result has been truncated or not. We calculate this by:
291  // - Zero extending both constants, if needed, to 32-bits.
292  // - Taking the absolute value of I's constant and adding it to the icmp const.
293  // - Checking that this value is not out of range for the small type. If it is, it
294  // means that it has underflowed enough to wrap around the icmp constant.
295  //
296  // For example:
297  //
298  // %sub = sub i8 %a, 2
299  // %cmp = icmp ule i8 %sub, 254
300  //
301  // If %a = 0, %sub = -2 == FE == 254
302  // But if this is evaluated as an i32
303  // %sub = -2 == FF FF FF FE == 4294967294
304  // So the unsigned compares (i8 and i32) would not yield the same result.
305  //
306  // Another way to look at it is:
307  // %a - 2 <= 254
308  // %a - 2 + 2 <= 254 + 2
309  // %a <= 256
310  // And we can't represent 256 in the i8 format, so we don't support it.
311  //
312  // Whereas:
313  //
314  // %sub = sub i8 %a, 1
315  // %cmp = icmp ule i8 %sub, 254
316  //
317  // If %a = 0, %sub = -1 == FF == 255
318  // As i32:
319  // %sub = -1 == FF FF FF FF == 4294967295
320  //
321  // In this case, the unsigned compare results would be the same and this
322  // would also be true for ult, uge and ugt:
323  // - (255 < 254) == (0xFFFFFFFF < 254) == false
324  // - (255 <= 254) == (0xFFFFFFFF <= 254) == false
325  // - (255 > 254) == (0xFFFFFFFF > 254) == true
326  // - (255 >= 254) == (0xFFFFFFFF >= 254) == true
327  //
328  // To demonstrate why we can't handle increasing values:
329  //
330  // %add = add i8 %a, 2
331  // %cmp = icmp ult i8 %add, 127
332  //
333  // If %a = 254, %add = 256 == (i8 0)
334  // As i32:
335  // %add = 256
336  //
337  // (0 < 127) != (256 < 127)
338 
339  unsigned Opc = I->getOpcode();
340  if (Opc != Instruction::Add && Opc != Instruction::Sub)
341  return false;
342 
343  if (!I->hasOneUse() ||
344  !isa<ICmpInst>(*I->user_begin()) ||
345  !isa<ConstantInt>(I->getOperand(1)))
346  return false;
347 
348  ConstantInt *OverflowConst = cast<ConstantInt>(I->getOperand(1));
349  bool NegImm = OverflowConst->isNegative();
350  bool IsDecreasing = ((Opc == Instruction::Sub) && !NegImm) ||
351  ((Opc == Instruction::Add) && NegImm);
352  if (!IsDecreasing)
353  return false;
354 
355  // Don't support an icmp that deals with sign bits.
356  auto *CI = cast<ICmpInst>(*I->user_begin());
357  if (CI->isSigned() || CI->isEquality())
358  return false;
359 
360  ConstantInt *ICmpConst = nullptr;
361  if (auto *Const = dyn_cast<ConstantInt>(CI->getOperand(0)))
362  ICmpConst = Const;
363  else if (auto *Const = dyn_cast<ConstantInt>(CI->getOperand(1)))
364  ICmpConst = Const;
365  else
366  return false;
367 
368  // Now check that the result can't wrap on itself.
369  APInt Total = ICmpConst->getValue().getBitWidth() < 32 ?
370  ICmpConst->getValue().zext(32) : ICmpConst->getValue();
371 
372  Total += OverflowConst->getValue().getBitWidth() < 32 ?
373  OverflowConst->getValue().abs().zext(32) : OverflowConst->getValue().abs();
374 
375  APInt Max = APInt::getAllOnesValue(ARMCodeGenPrepare::TypeSize);
376 
377  if (Total.getBitWidth() > Max.getBitWidth()) {
378  if (Total.ugt(Max.zext(Total.getBitWidth())))
379  return false;
380  } else if (Max.getBitWidth() > Total.getBitWidth()) {
381  if (Total.zext(Max.getBitWidth()).ugt(Max))
382  return false;
383  } else if (Total.ugt(Max))
384  return false;
385 
386  LLVM_DEBUG(dbgs() << "ARM CGP: Allowing safe overflow for " << *I << "\n");
387  return true;
388 }
389 
390 static bool shouldPromote(Value *V) {
391  if (!isa<IntegerType>(V->getType()) || isSink(V))
392  return false;
393 
394  if (isSource(V))
395  return true;
396 
397  auto *I = dyn_cast<Instruction>(V);
398  if (!I)
399  return false;
400 
401  if (isa<ICmpInst>(I))
402  return false;
403 
404  return true;
405 }
406 
407 /// Return whether we can safely mutate V's type to ExtTy without having to be
408 /// concerned with zero extending or truncation.
409 static bool isPromotedResultSafe(Value *V) {
410  if (!isa<Instruction>(V))
411  return true;
412 
413  if (generateSignBits(V))
414  return false;
415 
416  return !isa<OverflowingBinaryOperator>(V);
417 }
418 
419 /// Return the intrinsic for the instruction that can perform the same
420 /// operation but on a narrow type. This is using the parallel dsp intrinsics
421 /// on scalar values.
422 static Intrinsic::ID getNarrowIntrinsic(Instruction *I) {
423  // Whether we use the signed or unsigned versions of these intrinsics
424  // doesn't matter because we're not using the GE bits that they set in
425  // the APSR.
426  switch(I->getOpcode()) {
427  default:
428  break;
429  case Instruction::Add:
430  return ARMCodeGenPrepare::TypeSize == 16 ? Intrinsic::arm_uadd16 :
431  Intrinsic::arm_uadd8;
432  case Instruction::Sub:
433  return ARMCodeGenPrepare::TypeSize == 16 ? Intrinsic::arm_usub16 :
434  Intrinsic::arm_usub8;
435  }
436  llvm_unreachable("unhandled opcode for narrow intrinsic");
437 }
438 
439 void IRPromoter::ReplaceAllUsersOfWith(Value *From, Value *To) {
440  SmallVector<Instruction*, 4> Users;
441  Instruction *InstTo = dyn_cast<Instruction>(To);
442  bool ReplacedAll = true;
443 
444  LLVM_DEBUG(dbgs() << "ARM CGP: Replacing " << *From << " with " << *To
445  << "\n");
446 
447  for (Use &U : From->uses()) {
448  auto *User = cast<Instruction>(U.getUser());
449  if (InstTo && User->isIdenticalTo(InstTo)) {
450  ReplacedAll = false;
451  continue;
452  }
453  Users.push_back(User);
454  }
455 
456  for (auto *U : Users)
457  U->replaceUsesOfWith(From, To);
458 
459  if (ReplacedAll)
460  if (auto *I = dyn_cast<Instruction>(From))
461  InstsToRemove.insert(I);
462 }
463 
464 void IRPromoter::PrepareConstants() {
465  IRBuilder<> Builder{Ctx};
466  // First step is to prepare the instructions for mutation. Most constants
467  // just need to be zero extended into their new type, but complications arise
468  // because:
469  // - For nuw binary operators, negative immediates would need sign extending;
470  // however, instead we'll change them to positive and zext them. We can do
471  // this because:
472  // > The operators that can wrap are: add, sub, mul and shl.
473  // > shl interprets its second operand as unsigned and if the first operand
474  // is an immediate, it will need zext to be nuw.
475  // > I'm assuming mul has to interpret immediates as unsigned for nuw.
476  // > Which leaves the nuw add and sub to be handled; as with shl, if an
477  // immediate is used as operand 0, it will need zext to be nuw.
478  // - We also allow add and sub to safely overflow in certain circumstances
479  // and only when the value (operand 0) is being decreased.
480  //
481  // For adds and subs that are either nuw or safely wrap and use a negative
482  // immediate as operand 1, we create an equivalent instruction using a
483  // positive immediate. That positive immediate can then be zext along with
484  // all the other immediates later.
485  for (auto *V : *Visited) {
486  if (!isa<Instruction>(V))
487  continue;
488 
489  auto *I = cast<Instruction>(V);
490  if (SafeToPromote->count(I)) {
491 
492  if (!isa<OverflowingBinaryOperator>(I))
493  continue;
494 
495  if (auto *Const = dyn_cast<ConstantInt>(I->getOperand(1))) {
496  if (!Const->isNegative())
497  break;
498 
499  unsigned Opc = I->getOpcode();
500  if (Opc != Instruction::Add && Opc != Instruction::Sub)
501  continue;
502 
503  LLVM_DEBUG(dbgs() << "ARM CGP: Adjusting " << *I << "\n");
504  auto *NewConst = ConstantInt::get(Ctx, Const->getValue().abs());
505  Builder.SetInsertPoint(I);
506  Value *NewVal = Opc == Instruction::Sub ?
507  Builder.CreateAdd(I->getOperand(0), NewConst) :
508  Builder.CreateSub(I->getOperand(0), NewConst);
509  LLVM_DEBUG(dbgs() << "ARM CGP: New equivalent: " << *NewVal << "\n");
510 
511  if (auto *NewInst = dyn_cast<Instruction>(NewVal)) {
512  NewInst->copyIRFlags(I);
513  NewInsts.insert(NewInst);
514  }
515  InstsToRemove.insert(I);
516  I->replaceAllUsesWith(NewVal);
517  }
518  }
519  }
520  for (auto *I : NewInsts)
521  Visited->insert(I);
522 }
523 
524 void IRPromoter::ExtendSources() {
525  IRBuilder<> Builder{Ctx};
526 
527  auto InsertZExt = [&](Value *V, Instruction *InsertPt) {
528  assert(V->getType() != ExtTy && "zext already extends to i32");
529  LLVM_DEBUG(dbgs() << "ARM CGP: Inserting ZExt for " << *V << "\n");
530  Builder.SetInsertPoint(InsertPt);
531  if (auto *I = dyn_cast<Instruction>(V))
532  Builder.SetCurrentDebugLocation(I->getDebugLoc());
533 
534  Value *ZExt = Builder.CreateZExt(V, ExtTy);
535  if (auto *I = dyn_cast<Instruction>(ZExt)) {
536  if (isa<Argument>(V))
537  I->moveBefore(InsertPt);
538  else
539  I->moveAfter(InsertPt);
540  NewInsts.insert(I);
541  }
542 
543  ReplaceAllUsersOfWith(V, ZExt);
544  };
545 
546  // Now, insert extending instructions between the sources and their users.
547  LLVM_DEBUG(dbgs() << "ARM CGP: Promoting sources:\n");
548  for (auto V : *Sources) {
549  LLVM_DEBUG(dbgs() << " - " << *V << "\n");
550  if (auto *I = dyn_cast<Instruction>(V))
551  InsertZExt(I, I);
552  else if (auto *Arg = dyn_cast<Argument>(V)) {
553  BasicBlock &BB = Arg->getParent()->front();
554  InsertZExt(Arg, &*BB.getFirstInsertionPt());
555  } else {
556  llvm_unreachable("unhandled source that needs extending");
557  }
558  Promoted.insert(V);
559  }
560 }
561 
562 void IRPromoter::PromoteTree() {
563  LLVM_DEBUG(dbgs() << "ARM CGP: Mutating the tree..\n");
564 
565  IRBuilder<> Builder{Ctx};
566 
567  // Mutate the types of the instructions within the tree. Here we handle
568  // constant operands.
569  for (auto *V : *Visited) {
570  if (Sources->count(V))
571  continue;
572 
573  auto *I = cast<Instruction>(V);
574  if (Sinks->count(I))
575  continue;
576 
577  for (unsigned i = 0, e = I->getNumOperands(); i < e; ++i) {
578  Value *Op = I->getOperand(i);
579  if ((Op->getType() == ExtTy) || !isa<IntegerType>(Op->getType()))
580  continue;
581 
582  if (auto *Const = dyn_cast<ConstantInt>(Op)) {
583  Constant *NewConst = ConstantExpr::getZExt(Const, ExtTy);
584  I->setOperand(i, NewConst);
585  } else if (isa<UndefValue>(Op))
586  I->setOperand(i, UndefValue::get(ExtTy));
587  }
588 
589  if (shouldPromote(I)) {
590  I->mutateType(ExtTy);
591  Promoted.insert(I);
592  }
593  }
594 
595  // Finally, any instructions that should be promoted but haven't yet been,
596  // need to be handled using intrinsics.
597  for (auto *V : *Visited) {
598  auto *I = dyn_cast<Instruction>(V);
599  if (!I)
600  continue;
601 
602  if (Sources->count(I) || Sinks->count(I))
603  continue;
604 
605  if (!shouldPromote(I) || SafeToPromote->count(I) || NewInsts.count(I))
606  continue;
607 
608  assert(EnableDSP && "DSP intrinsic insertion not enabled!");
609 
610  // Replace unsafe instructions with appropriate intrinsic calls.
611  LLVM_DEBUG(dbgs() << "ARM CGP: Inserting DSP intrinsic for "
612  << *I << "\n");
613  Function *DSPInst =
614  Intrinsic::getDeclaration(M, getNarrowIntrinsic(I));
615  Builder.SetInsertPoint(I);
616  Builder.SetCurrentDebugLocation(I->getDebugLoc());
617  Value *Args[] = { I->getOperand(0), I->getOperand(1) };
618  CallInst *Call = Builder.CreateCall(DSPInst, Args);
619  NewInsts.insert(Call);
620  ReplaceAllUsersOfWith(I, Call);
621  }
622 }
623 
624 void IRPromoter::TruncateSinks() {
625  LLVM_DEBUG(dbgs() << "ARM CGP: Fixing up the sinks:\n");
626 
627  IRBuilder<> Builder{Ctx};
628 
629  auto InsertTrunc = [&](Value *V, Type *TruncTy) -> Instruction* {
630  if (!isa<Instruction>(V) || !isa<IntegerType>(V->getType()))
631  return nullptr;
632 
633  if ((!Promoted.count(V) && !NewInsts.count(V)) || Sources->count(V))
634  return nullptr;
635 
636  LLVM_DEBUG(dbgs() << "ARM CGP: Creating " << *TruncTy << " Trunc for "
637  << *V << "\n");
638  Builder.SetInsertPoint(cast<Instruction>(V));
639  auto *Trunc = dyn_cast<Instruction>(Builder.CreateTrunc(V, TruncTy));
640  if (Trunc)
641  NewInsts.insert(Trunc);
642  return Trunc;
643  };
644 
645  // Fix up any stores or returns that use the results of the promoted
646  // chain.
647  for (auto I : *Sinks) {
648  LLVM_DEBUG(dbgs() << "ARM CGP: For Sink: " << *I << "\n");
649 
650  // Handle calls separately as we need to iterate over arg operands.
651  if (auto *Call = dyn_cast<CallInst>(I)) {
652  for (unsigned i = 0; i < Call->getNumArgOperands(); ++i) {
653  Value *Arg = Call->getArgOperand(i);
654  Type *Ty = TruncTysMap[Call][i];
655  if (Instruction *Trunc = InsertTrunc(Arg, Ty)) {
656  Trunc->moveBefore(Call);
657  Call->setArgOperand(i, Trunc);
658  }
659  }
660  continue;
661  }
662 
663  // Special case switches because we need to truncate the condition.
664  if (auto *Switch = dyn_cast<SwitchInst>(I)) {
665  Type *Ty = TruncTysMap[Switch][0];
666  if (Instruction *Trunc = InsertTrunc(Switch->getCondition(), Ty)) {
667  Trunc->moveBefore(Switch);
668  Switch->setCondition(Trunc);
669  }
670  continue;
671  }
672 
673  // Now handle the others.
674  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
675  Type *Ty = TruncTysMap[I][i];
676  if (Instruction *Trunc = InsertTrunc(I->getOperand(i), Ty)) {
677  Trunc->moveBefore(I);
678  I->setOperand(i, Trunc);
679  }
680  }
681  }
682 }
683 
684 void IRPromoter::Cleanup() {
685  // Some zexts will now have become redundant, along with their trunc
686  // operands, so remove them.
687  for (auto V : *Visited) {
688  if (!isa<CastInst>(V))
689  continue;
690 
691  auto ZExt = cast<CastInst>(V);
692  if (ZExt->getDestTy() != ExtTy)
693  continue;
694 
695  Value *Src = ZExt->getOperand(0);
696  if (ZExt->getSrcTy() == ZExt->getDestTy()) {
697  LLVM_DEBUG(dbgs() << "ARM CGP: Removing unnecessary cast: " << *ZExt
698  << "\n");
699  ReplaceAllUsersOfWith(ZExt, Src);
700  continue;
701  }
702 
703  // For any truncs that we insert to handle zexts, we can replace the
704  // result of the zext with the input to the trunc.
705  if (NewInsts.count(Src) && isa<ZExtInst>(V) && isa<TruncInst>(Src)) {
706  auto *Trunc = cast<TruncInst>(Src);
707  assert(Trunc->getOperand(0)->getType() == ExtTy &&
708  "expected inserted trunc to be operating on i32");
709  ReplaceAllUsersOfWith(ZExt, Trunc->getOperand(0));
710  }
711  }
712 
713  for (auto *I : InstsToRemove) {
714  LLVM_DEBUG(dbgs() << "ARM CGP: Removing " << *I << "\n");
715  I->dropAllReferences();
716  I->eraseFromParent();
717  }
718 
719  InstsToRemove.clear();
720  NewInsts.clear();
721  TruncTysMap.clear();
722  Promoted.clear();
723 }
724 
725 void IRPromoter::ConvertTruncs() {
726  IRBuilder<> Builder{Ctx};
727 
728  for (auto *V : *Visited) {
729  if (!isa<TruncInst>(V) || Sources->count(V))
730  continue;
731 
732  auto *Trunc = cast<TruncInst>(V);
733  assert(LessThanTypeSize(Trunc) && "expected narrow trunc");
734 
735  Builder.SetInsertPoint(Trunc);
736  unsigned NumBits =
737  cast<IntegerType>(Trunc->getType())->getScalarSizeInBits();
738  ConstantInt *Mask = ConstantInt::get(Ctx, APInt::getMaxValue(NumBits));
739  Value *Masked = Builder.CreateAnd(Trunc->getOperand(0), Mask);
740 
741  if (auto *I = dyn_cast<Instruction>(Masked))
742  NewInsts.insert(I);
743 
744  ReplaceAllUsersOfWith(Trunc, Masked);
745  }
746 }
747 
748 void IRPromoter::Mutate(Type *OrigTy,
749  SmallPtrSetImpl<Value*> &Visited,
750  SmallPtrSetImpl<Value*> &Sources,
751  SmallPtrSetImpl<Instruction*> &Sinks,
752  SmallPtrSetImpl<Instruction*> &SafeToPromote) {
753  LLVM_DEBUG(dbgs() << "ARM CGP: Promoting use-def chains from "
754  << ARMCodeGenPrepare::TypeSize << " to 32-bits\n");
755 
756  assert(isa<IntegerType>(OrigTy) && "expected integer type");
757  this->OrigTy = cast<IntegerType>(OrigTy);
758  assert(OrigTy->getPrimitiveSizeInBits() < ExtTy->getPrimitiveSizeInBits() &&
759  "original type not smaller than extended type");
760 
761  this->Visited = &Visited;
762  this->Sources = &Sources;
763  this->Sinks = &Sinks;
764  this->SafeToPromote = &SafeToPromote;
765 
766  // Cache original types of the values that will likely need truncating
767  for (auto *I : Sinks) {
768  if (auto *Call = dyn_cast<CallInst>(I)) {
769  for (unsigned i = 0; i < Call->getNumArgOperands(); ++i) {
770  Value *Arg = Call->getArgOperand(i);
771  TruncTysMap[Call].push_back(Arg->getType());
772  }
773  } else if (auto *Switch = dyn_cast<SwitchInst>(I))
774  TruncTysMap[I].push_back(Switch->getCondition()->getType());
775  else {
776  for (unsigned i = 0; i < I->getNumOperands(); ++i)
777  TruncTysMap[I].push_back(I->getOperand(i)->getType());
778  }
779  }
780 
781  // Convert adds and subs using negative immediates to equivalent instructions
782  // that use positive constants.
783  PrepareConstants();
784 
785  // Insert zext instructions between sources and their users.
786  ExtendSources();
787 
788  // Convert any truncs that aren't sources into AND masks.
789  ConvertTruncs();
790 
791  // Promote visited instructions, mutating their types in place. Also insert
792  // DSP intrinsics, if enabled, for adds and subs which would be unsafe to
793  // promote.
794  PromoteTree();
795 
796  // Insert trunc instructions for use by calls, stores etc...
797  TruncateSinks();
798 
799  // Finally, remove unnecessary zexts and truncs, delete old instructions and
800  // clear the data structures.
801  Cleanup();
802 
803  LLVM_DEBUG(dbgs() << "ARM CGP: Mutation complete\n");
804 }
805 
806 /// We accept most instructions, as well as Arguments and ConstantInts. We
807 /// disallow casts other than zexts and truncs and only allow calls if their
808 /// return value is zeroext. We don't allow opcodes that can introduce sign
809 /// bits.
810 bool ARMCodeGenPrepare::isSupportedValue(Value *V) {
811  if (auto *I = dyn_cast<ICmpInst>(V)) {
812  // Now that we allow smaller types than TypeSize, only allow icmps of
813  // TypeSize because they will require a trunc to be legalised.
814  // TODO: Allow icmp of smaller types, and calculate at the end
815  // whether the transform would be beneficial.
816  if (isa<PointerType>(I->getOperand(0)->getType()))
817  return true;
818  return EqualTypeSize(I->getOperand(0));
819  }
820 
821  // Memory instructions
822  if (isa<StoreInst>(V) || isa<GetElementPtrInst>(V))
823  return true;
824 
825  // Branches and targets.
826  if (isa<BranchInst>(V) || isa<SwitchInst>(V) || isa<BasicBlock>(V))
827  return true;
828 
829  // Non-instruction values that we can handle.
830  if ((isa<Constant>(V) && !isa<ConstantExpr>(V)) || isa<Argument>(V))
831  return isSupportedType(V);
832 
833  if (isa<PHINode>(V) || isa<SelectInst>(V) || isa<ReturnInst>(V) ||
834  isa<LoadInst>(V))
835  return isSupportedType(V);
836 
837  if (isa<SExtInst>(V))
838  return false;
839 
840  if (auto *Cast = dyn_cast<CastInst>(V))
841  return isSupportedType(Cast) || isSupportedType(Cast->getOperand(0));
842 
843  // Special cases for calls as we need to check for zeroext
844  // TODO We should accept calls even if they don't have zeroext, as they can
845  // still be sinks.
846  if (auto *Call = dyn_cast<CallInst>(V))
847  return isSupportedType(Call) &&
848  Call->hasRetAttr(Attribute::AttrKind::ZExt);
849 
850  if (!isa<BinaryOperator>(V))
851  return false;
852 
853  if (!isSupportedType(V))
854  return false;
855 
856  if (generateSignBits(V)) {
857  LLVM_DEBUG(dbgs() << "ARM CGP: No, instruction can generate sign bits.\n");
858  return false;
859  }
860  return true;
861 }
862 
863 /// Check that the type of V would be promoted and that the original type is
864 /// smaller than the targeted promoted type. Check that we're not trying to
865 /// promote something larger than our base 'TypeSize' type.
866 bool ARMCodeGenPrepare::isLegalToPromote(Value *V) {
867 
868  auto *I = dyn_cast<Instruction>(V);
869  if (!I)
870  return true;
871 
872  if (SafeToPromote.count(I))
873  return true;
874 
875  if (isPromotedResultSafe(V) || isSafeOverflow(I)) {
876  SafeToPromote.insert(I);
877  return true;
878  }
879 
880  if (I->getOpcode() != Instruction::Add && I->getOpcode() != Instruction::Sub)
881  return false;
882 
883  // If promotion is not safe, can we use a DSP instruction to natively
884  // handle the narrow type?
885  if (!ST->hasDSP() || !EnableDSP || !isSupportedType(I))
886  return false;
887 
888  if (ST->isThumb() && !ST->hasThumb2())
889  return false;
890 
891  // TODO
892  // Would it be profitable? For Thumb code, these parallel DSP instructions
893  // are only Thumb-2, so we wouldn't be able to dual issue on Cortex-M33. For
894  // Cortex-A, specifically Cortex-A72, the latency is double and throughput is
895  // halved. They also do not take immediates as operands.
896  for (auto &Op : I->operands()) {
897  if (isa<Constant>(Op)) {
898  if (!EnableDSPWithImms)
899  return false;
900  }
901  }
902  LLVM_DEBUG(dbgs() << "ARM CGP: Will use an intrinsic for: " << *I << "\n");
903  return true;
904 }
905 
906 bool ARMCodeGenPrepare::TryToPromote(Value *V) {
907  OrigTy = V->getType();
908  TypeSize = OrigTy->getPrimitiveSizeInBits();
909  if (TypeSize > 16 || TypeSize < 8)
910  return false;
911 
912  SafeToPromote.clear();
913 
914  if (!isSupportedValue(V) || !shouldPromote(V) || !isLegalToPromote(V))
915  return false;
916 
917  LLVM_DEBUG(dbgs() << "ARM CGP: TryToPromote: " << *V << ", TypeSize = "
918  << TypeSize << "\n");
919 
920  SetVector<Value*> WorkList;
921  SmallPtrSet<Value*, 8> Sources;
922  SmallPtrSet<Instruction*, 4> Sinks;
923  SmallPtrSet<Value*, 16> CurrentVisited;
924  WorkList.insert(V);
925 
926  // Return true if V was added to the worklist as a supported instruction,
927  // if it was already visited, or if we don't need to explore it (e.g.
928  // pointer values and GEPs), and false otherwise.
929  auto AddLegalInst = [&](Value *V) {
930  if (CurrentVisited.count(V))
931  return true;
932 
933  // Ignore GEPs because they don't need promoting and the constant indices
934  // will prevent the transformation.
935  if (isa<GetElementPtrInst>(V))
936  return true;
937 
938  if (!isSupportedValue(V) || (shouldPromote(V) && !isLegalToPromote(V))) {
939  LLVM_DEBUG(dbgs() << "ARM CGP: Can't handle: " << *V << "\n");
940  return false;
941  }
942 
943  WorkList.insert(V);
944  return true;
945  };
946 
947  // Iterate through, and add to, a tree of operands and users in the use-def.
948  while (!WorkList.empty()) {
949  Value *V = WorkList.back();
950  WorkList.pop_back();
951  if (CurrentVisited.count(V))
952  continue;
953 
954  // Ignore non-instructions, other than arguments.
955  if (!isa<Instruction>(V) && !isSource(V))
956  continue;
957 
958  // If we've already visited this value from somewhere, bail now because
959  // the tree has already been explored.
960  // TODO: This could limit the transform, i.e. if we try to promote something
961  // from an i8 and fail first, before trying an i16.
962  if (AllVisited.count(V))
963  return false;
964 
965  CurrentVisited.insert(V);
966  AllVisited.insert(V);
967 
968  // Calls can be both sources and sinks.
969  if (isSink(V))
970  Sinks.insert(cast<Instruction>(V));
971 
972  if (isSource(V))
973  Sources.insert(V);
974 
975  if (!isSink(V) && !isSource(V)) {
976  if (auto *I = dyn_cast<Instruction>(V)) {
977  // Visit operands of any instruction visited.
978  for (auto &U : I->operands()) {
979  if (!AddLegalInst(U))
980  return false;
981  }
982  }
983  }
984 
985  // Don't visit users of a node which isn't going to be mutated unless it is
986  // a source.
987  if (isSource(V) || shouldPromote(V)) {
988  for (Use &U : V->uses()) {
989  if (!AddLegalInst(U.getUser()))
990  return false;
991  }
992  }
993  }
994 
995  LLVM_DEBUG(dbgs() << "ARM CGP: Visited nodes:\n";
996  for (auto *I : CurrentVisited)
997  I->dump();
998  );
999  unsigned ToPromote = 0;
1000  for (auto *V : CurrentVisited) {
1001  if (Sources.count(V))
1002  continue;
1003  if (Sinks.count(cast<Instruction>(V)))
1004  continue;
1005  ++ToPromote;
1006  }
1007 
1008  if (ToPromote < 2)
1009  return false;
1010 
1011  Promoter->Mutate(OrigTy, CurrentVisited, Sources, Sinks, SafeToPromote);
1012  return true;
1013 }
1014 
1015 bool ARMCodeGenPrepare::doInitialization(Module &M) {
1016  Promoter = new IRPromoter(&M);
1017  return false;
1018 }
1019 
1020 bool ARMCodeGenPrepare::runOnFunction(Function &F) {
1021  if (skipFunction(F) || DisableCGP)
1022  return false;
1023 
1024  auto *TPC = &getAnalysis<TargetPassConfig>();
1025  if (!TPC)
1026  return false;
1027 
1028  const TargetMachine &TM = TPC->getTM<TargetMachine>();
1029  ST = &TM.getSubtarget<ARMSubtarget>(F);
1030  bool MadeChange = false;
1031  LLVM_DEBUG(dbgs() << "ARM CGP: Running on " << F.getName() << "\n");
1032 
1033  // Search up from icmps to try to promote their operands.
1034  for (BasicBlock &BB : F) {
1035  auto &Insts = BB.getInstList();
1036  for (auto &I : Insts) {
1037  if (AllVisited.count(&I))
1038  continue;
1039 
1040  if (isa<ICmpInst>(I)) {
1041  auto &CI = cast<ICmpInst>(I);
1042 
1043  // Skip signed or pointer compares
1044  if (CI.isSigned() || !isa<IntegerType>(CI.getOperand(0)->getType()))
1045  continue;
1046 
1047  LLVM_DEBUG(dbgs() << "ARM CGP: Searching from: " << CI << "\n");
1048 
1049  for (auto &Op : CI.operands()) {
1050  if (auto *I = dyn_cast<Instruction>(Op))
1051  MadeChange |= TryToPromote(I);
1052  }
1053  }
1054  }
1055  LLVM_DEBUG(if (verifyFunction(F, &dbgs())) {
1056  dbgs() << F;
1057  report_fatal_error("Broken function after type promotion");
1058  });
1059  }
1060  if (MadeChange)
1061  LLVM_DEBUG(dbgs() << "After ARMCodeGenPrepare: " << F << "\n");
1062 
1063  return MadeChange;
1064 }
1065 
1066 bool ARMCodeGenPrepare::doFinalization(Module &M) {
1067  delete Promoter;
1068  return false;
1069 }
1070 
1071 INITIALIZE_PASS_BEGIN(ARMCodeGenPrepare, DEBUG_TYPE,
1072  "ARM IR optimizations", false, false)
1073 INITIALIZE_PASS_END(ARMCodeGenPrepare, DEBUG_TYPE, "ARM IR optimizations",
1074  false, false)
1075 
1076 char ARMCodeGenPrepare::ID = 0;
1077 unsigned ARMCodeGenPrepare::TypeSize = 0;
1078 
1079 FunctionPass *llvm::createARMCodeGenPreparePass() {
1080  return new ARMCodeGenPrepare();
1081 }