1 //===- FastISel.cpp - Implementation of the FastISel class ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the FastISel class.
11 //
12 // "Fast" instruction selection is designed to emit very poor code quickly.
13 // Also, it is not designed to be able to do much lowering, so most illegal
14 // types (e.g. i64 on 32-bit targets) and operations are not supported. It is
15 // also not intended to be able to do much optimization, except in a few cases
16 // where doing optimizations reduces overall compile time. For example, folding
17 // constants into immediate fields is often done, because it's cheap and it
18 // reduces the number of instructions later phases have to examine.
19 //
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
24 //
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
32 // in -O0 compiles.
33 //
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
39 //
40 //===----------------------------------------------------------------------===//
41 
42 #include "llvm/CodeGen/FastISel.h"
43 #include "llvm/ADT/APFloat.h"
44 #include "llvm/ADT/APSInt.h"
45 #include "llvm/ADT/DenseMap.h"
46 #include "llvm/ADT/Optional.h"
47 #include "llvm/ADT/SmallPtrSet.h"
48 #include "llvm/ADT/SmallString.h"
49 #include "llvm/ADT/SmallVector.h"
50 #include "llvm/ADT/Statistic.h"
53 #include "llvm/CodeGen/Analysis.h"
65 #include "llvm/CodeGen/StackMaps.h"
70 #include "llvm/IR/Argument.h"
71 #include "llvm/IR/Attributes.h"
72 #include "llvm/IR/BasicBlock.h"
73 #include "llvm/IR/CallSite.h"
74 #include "llvm/IR/CallingConv.h"
75 #include "llvm/IR/Constant.h"
76 #include "llvm/IR/Constants.h"
77 #include "llvm/IR/DataLayout.h"
78 #include "llvm/IR/DebugInfo.h"
79 #include "llvm/IR/DebugLoc.h"
80 #include "llvm/IR/DerivedTypes.h"
81 #include "llvm/IR/Function.h"
83 #include "llvm/IR/GlobalValue.h"
84 #include "llvm/IR/InlineAsm.h"
85 #include "llvm/IR/InstrTypes.h"
86 #include "llvm/IR/Instruction.h"
87 #include "llvm/IR/Instructions.h"
88 #include "llvm/IR/IntrinsicInst.h"
89 #include "llvm/IR/LLVMContext.h"
90 #include "llvm/IR/Mangler.h"
91 #include "llvm/IR/Metadata.h"
92 #include "llvm/IR/Operator.h"
93 #include "llvm/IR/Type.h"
94 #include "llvm/IR/User.h"
95 #include "llvm/IR/Value.h"
96 #include "llvm/MC/MCContext.h"
97 #include "llvm/MC/MCInstrDesc.h"
98 #include "llvm/MC/MCRegisterInfo.h"
99 #include "llvm/Support/Casting.h"
100 #include "llvm/Support/Debug.h"
102 #include "llvm/Support/MathExtras.h"
106 #include <algorithm>
107 #include <cassert>
108 #include <cstdint>
109 #include <iterator>
110 #include <utility>
111 
112 using namespace llvm;
113 
114 #define DEBUG_TYPE "isel"
115 
116 STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
117  "target-independent selector");
118 STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
119  "target-specific selector");
120 STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
121 
122 /// Set the current block to which generated machine instructions will be
123 /// appended, and clear the local CSE map.
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}
155 
void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}
162 
bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
165  const Instruction *I = dyn_cast<Instruction>(V);
166  if (!I)
167  return false;
168 
169  // No-op casts are trivially coalesced by fast-isel.
170  if (const auto *Cast = dyn_cast<CastInst>(I))
171  if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
172  return false;
173 
  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel might fold the use into another instruction, in which case there
  // is more than one use at the Machine Instruction level.
177  unsigned Reg = lookUpRegForValue(V);
178  if (Reg && !MRI.use_empty(Reg))
179  return false;
180 
181  // GEPs with all zero indices are trivially coalesced by fast-isel.
182  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
183  if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
184  return false;
185 
186  // Only instructions with a single use in the same basic block are considered
187  // to have trivial kills.
188  return I->hasOneUse() &&
189  !(I->getOpcode() == Instruction::BitCast ||
190  I->getOpcode() == Instruction::PtrToInt ||
191  I->getOpcode() == Instruction::IntToPtr) &&
192  cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
193 }
194 
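/// Return a virtual register holding the given IR value, creating one for
/// instructions that have not been selected yet and materializing constants
/// and other values into the local value area.  Returns 0 for types or
/// values that fast-isel cannot handle.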
195 unsigned FastISel::getRegForValue(const Value *V) {
196  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
197  // Don't handle non-simple values in FastISel.
198  if (!RealVT.isSimple())
199  return 0;
200 
201  // Ignore illegal types. We must do this before looking up the value
202  // in ValueMap because Arguments are given virtual registers regardless
203  // of whether FastISel can handle them.
204  MVT VT = RealVT.getSimpleVT();
205  if (!TLI.isTypeLegal(VT)) {
206  // Handle integer promotions, though, because they're common and easy.
207  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
208  VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
209  else
210  return 0;
211  }
212 
213  // Look up the value to see if we already have a register for it.
214  unsigned Reg = lookUpRegForValue(V);
215  if (Reg)
216  return Reg;
217 
218  // In bottom-up mode, just create the virtual register which will be used
219  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

225  SavePoint SaveInsertPt = enterLocalValueArea();
226 
227  // Materialize the value in a register. Emit any instructions in the
228  // local value area.
229  Reg = materializeRegForValue(V, VT);
230 
231  leaveLocalValueArea(SaveInsertPt);
232 
233  return Reg;
234 }
235 
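/// Target-independent constant materialization: integers via fastEmit_i,
/// floating-point constants via fastEmit_f (or an integer materialization
/// followed by SINT_TO_FP), null pointers as integer zero, and undef via
/// IMPLICIT_DEF.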
236 unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
237  unsigned Reg = 0;
238  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
239  if (CI->getValue().getActiveBits() <= 64)
240  Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
241  } else if (isa<AllocaInst>(V))
242  Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg = getRegForValue(
        Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
249  if (CF->isNullValue())
250  Reg = fastMaterializeFloatZero(CF);
251  else
252  // Try to emit the constant directly.
253  Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
254 
255  if (!Reg) {
256  // Try to emit the constant by using an integer constant with a cast.
257  const APFloat &Flt = CF->getValueAPF();
258  EVT IntVT = TLI.getPointerTy(DL);
259  uint32_t IntBitWidth = IntVT.getSizeInBits();
260  APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
261  bool isExact;
262  (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
263  if (isExact) {
        unsigned IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg != 0)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
                           /*Kill=*/false);
269  }
270  }
271  } else if (const auto *Op = dyn_cast<Operator>(V)) {
272  if (!selectOperator(Op, Op->getOpcode()))
273  if (!isa<Instruction>(Op) ||
274  !fastSelectInstruction(cast<Instruction>(Op)))
275  return 0;
276  Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
282  return Reg;
283 }
284 
285 /// Helper for getRegForValue. This function is called when the value isn't
286 /// already available in a register and must be materialized with new
287 /// instructions.
288 unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
289  unsigned Reg = 0;
290  // Give the target-specific code a try first.
291  if (isa<Constant>(V))
292  Reg = fastMaterializeConstant(cast<Constant>(V));
293 
294  // If target-specific code couldn't or didn't want to handle the value, then
295  // give target-independent code a try.
296  if (!Reg)
297  Reg = materializeConstant(V, VT);
298 
299  // Don't cache constant materializations in the general ValueMap.
300  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
305  return Reg;
306 }
307 
308 unsigned FastISel::lookUpRegForValue(const Value *V) {
309  // Look up the value to see if we already have a register for it. We
310  // cache values defined by Instructions across blocks, and other values
311  // only locally. This is because Instructions already have the SSA
312  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
315  return I->second;
316  return LocalValueMap[V];
317 }
318 
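/// Record that the given IR value lives in \p Reg (and the NumRegs-1
/// registers that follow it).  If the value already had registers assigned,
/// remember a fixup so the previously assigned registers are rewritten to
/// the new ones.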
319 void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
320  if (!isa<Instruction>(I)) {
321  LocalValueMap[I] = Reg;
322  return;
323  }
324 
325  unsigned &AssignedReg = FuncInfo.ValueMap[I];
326  if (AssignedReg == 0)
327  // Use the new register.
328  AssignedReg = Reg;
329  else if (Reg != AssignedReg) {
330  // Arrange for uses of AssignedReg to be replaced by uses of Reg.
331  for (unsigned i = 0; i < NumRegs; i++)
332  FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
333 
334  AssignedReg = Reg;
335  }
336 }
337 
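/// Return the register holding a GEP index operand, sign-extending or
/// truncating it to pointer width when needed.  The bool part of the result
/// tells whether the register can be marked as killed by the user.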
338 std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
339  unsigned IdxN = getRegForValue(Idx);
340  if (IdxN == 0)
341  // Unhandled operand. Halt "fast" selection and bail.
342  return std::pair<unsigned, bool>(0, false);
343 
344  bool IdxNIsKill = hasTrivialKill(Idx);
345 
346  // If the index is smaller or larger than intptr_t, truncate or extend it.
347  MVT PtrVT = TLI.getPointerTy(DL);
348  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
349  if (IdxVT.bitsLT(PtrVT)) {
350  IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
351  IdxNIsKill);
352  IdxNIsKill = true;
353  } else if (IdxVT.bitsGT(PtrVT)) {
354  IdxN =
355  fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
356  IdxNIsKill = true;
357  }
358  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
359 }
360 
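/// Reset FuncInfo.InsertPt to immediately after the last local value
/// instruction (or to the first non-PHI instruction of the block), then skip
/// past any EH_LABELs, which must remain at the start of the block.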
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

369  // Now skip past any EH_LABELs, which must remain at the beginning.
370  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
371  FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
372  ++FuncInfo.InsertPt;
373 }
374 
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}
387 
FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}
396 
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
399  LastLocalValue = &*std::prev(FuncInfo.InsertPt);
400 
401  // Restore the previous insert position.
402  FuncInfo.InsertPt = OldInsertPt.InsertPt;
403  DbgLoc = OldInsertPt.DL;
404 }
405 
406 bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
407  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
408  if (VT == MVT::Other || !VT.isSimple())
409  // Unhandled type. Halt "fast" selection and bail.
410  return false;
411 
412  // We only handle legal types. For example, on x86-32 the instruction
413  // selector contains all of the 64-bit instructions from x86-64,
414  // under the assumption that i64 won't be used if the target doesn't
415  // support it.
416  if (!TLI.isTypeLegal(VT)) {
417  // MVT::i1 is special. Allow AND, OR, or XOR because they
418  // don't require additional zeroing, which makes them easy.
419  if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
420  ISDOpcode == ISD::XOR))
421  VT = TLI.getTypeToTransformTo(I->getContext(), VT);
422  else
423  return false;
424  }
425 
426  // Check if the first operand is a constant, and handle it as "ri". At -O0,
427  // we don't have anything that canonicalizes operand order.
428  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
429  if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
430  unsigned Op1 = getRegForValue(I->getOperand(1));
431  if (!Op1)
432  return false;
433  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
434 
435  unsigned ResultReg =
436  fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
437  CI->getZExtValue(), VT.getSimpleVT());
438  if (!ResultReg)
439  return false;
440 
441  // We successfully emitted code for the given LLVM Instruction.
442  updateValueMap(I, ResultReg);
443  return true;
444  }
445 
446  unsigned Op0 = getRegForValue(I->getOperand(0));
447  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
448  return false;
449  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
450 
451  // Check if the second operand is a constant and handle it appropriately.
452  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
453  uint64_t Imm = CI->getSExtValue();
454 
455  // Transform "sdiv exact X, 8" -> "sra X, 3".
456  if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
457  cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
458  Imm = Log2_64(Imm);
459  ISDOpcode = ISD::SRA;
460  }
461 
462  // Transform "urem x, pow2" -> "and x, pow2-1".
463  if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
464  isPowerOf2_64(Imm)) {
465  --Imm;
466  ISDOpcode = ISD::AND;
467  }
468 
469  unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
470  Op0IsKill, Imm, VT.getSimpleVT());
471  if (!ResultReg)
472  return false;
473 
474  // We successfully emitted code for the given LLVM Instruction.
475  updateValueMap(I, ResultReg);
476  return true;
477  }
478 
479  unsigned Op1 = getRegForValue(I->getOperand(1));
480  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
481  return false;
482  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
483 
484  // Now we have both operands in registers. Emit the instruction.
485  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
486  ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
487  if (!ResultReg)
488  // Target-specific code wasn't able to find a machine opcode for
489  // the given ISD opcode and type. Halt "fast" selection and bail.
490  return false;
491 
492  // We successfully emitted code for the given LLVM Instruction.
493  updateValueMap(I, ResultReg);
494  return true;
495 }
496 
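/// Lower a getelementptr by folding constant offsets into a running total
/// and adding scaled register indices as they are encountered, so chains of
/// constant indices collapse into a single N = N + TotalOffset addition.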
bool FastISel::selectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
499  if (!N) // Unhandled operand. Halt "fast" selection and bail.
500  return false;
501  bool NIsKill = hasTrivialKill(I->getOperand(0));
502 
503  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
504  // into a single N = N + TotalOffset.
505  uint64_t TotalOffs = 0;
506  // FIXME: What's a good SWAG number for MaxOffs?
507  uint64_t MaxOffs = 2048;
508  MVT VT = TLI.getPointerTy(DL);
509  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
510  GTI != E; ++GTI) {
511  const Value *Idx = GTI.getOperand();
512  if (StructType *StTy = GTI.getStructTypeOrNull()) {
513  uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
514  if (Field) {
515  // N = N + Offset
516  TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
517  if (TotalOffs >= MaxOffs) {
518  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
519  if (!N) // Unhandled operand. Halt "fast" selection and bail.
520  return false;
521  NIsKill = true;
522  TotalOffs = 0;
523  }
524  }
525  } else {
526  Type *Ty = GTI.getIndexedType();
527 
528  // If this is a constant subscript, handle it quickly.
529  if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
530  if (CI->isZero())
531  continue;
532  // N = N + Offset
533  uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
534  TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
535  if (TotalOffs >= MaxOffs) {
536  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
537  if (!N) // Unhandled operand. Halt "fast" selection and bail.
538  return false;
539  NIsKill = true;
540  TotalOffs = 0;
541  }
542  continue;
543  }
544  if (TotalOffs) {
545  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
546  if (!N) // Unhandled operand. Halt "fast" selection and bail.
547  return false;
548  NIsKill = true;
549  TotalOffs = 0;
550  }
551 
552  // N = N + Idx * ElementSize;
553  uint64_t ElementSize = DL.getTypeAllocSize(Ty);
554  std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
555  unsigned IdxN = Pair.first;
556  bool IdxNIsKill = Pair.second;
557  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
558  return false;
559 
560  if (ElementSize != 1) {
561  IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
562  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
563  return false;
564  IdxNIsKill = true;
565  }
566  N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
567  if (!N) // Unhandled operand. Halt "fast" selection and bail.
568  return false;
569  }
570  }
571  if (TotalOffs) {
572  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
573  if (!N) // Unhandled operand. Halt "fast" selection and bail.
574  return false;
575  }
576 
577  // We successfully emitted code for the given LLVM Instruction.
578  updateValueMap(I, N);
579  return true;
580 }
581 
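/// Emit the machine operands describing the live variables of a stackmap or
/// patchpoint, starting at call argument StartIdx: constants get a
/// StackMaps::ConstantOp prefix, static allocas become frame indices, and
/// everything else is passed in a register.  Returns false if an operand
/// cannot be handled.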
582 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
583  const CallInst *CI, unsigned StartIdx) {
584  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
585  Value *Val = CI->getArgOperand(i);
586  // Check for constants and encode them with a StackMaps::ConstantOp prefix.
587  if (const auto *C = dyn_cast<ConstantInt>(Val)) {
588  Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
589  Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
590  } else if (isa<ConstantPointerNull>(Val)) {
591  Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
594  // Values coming from a stack location also require a special encoding,
595  // but that is added later on by the target specific frame index
596  // elimination implementation.
597  auto SI = FuncInfo.StaticAllocaMap.find(AI);
598  if (SI != FuncInfo.StaticAllocaMap.end())
599  Ops.push_back(MachineOperand::CreateFI(SI->second));
600  else
601  return false;
602  } else {
603  unsigned Reg = getRegForValue(Val);
604  if (!Reg)
605  return false;
606  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
607  }
608  }
609  return true;
610 }
611 
bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
614  // [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");
617 
618  // The stackmap intrinsic only records the live variables (the arguments
619  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
620  // intrinsic, this won't be lowered to a function call. This means we don't
621  // have to worry about calling conventions and target-specific lowering code.
622  // Instead we perform the call lowering right here.
623  //
624  // CALLSEQ_START(0, 0...)
625  // STACKMAP(id, nbytes, ...)
626  // CALLSEQ_END(0, 0)
627  //
  SmallVector<MachineOperand, 32> Ops;

630  // Add the <id> and <numBytes> constants.
631  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
632  "Expected a constant integer.");
633  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
634  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
635 
636  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
637  "Expected a constant integer.");
638  const auto *NumBytes =
639  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
640  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
641 
642  // Push live variables for the stack map (skipping the first two arguments
643  // <id> and <numBytes>).
644  if (!addStackMapLiveVars(Ops, I, 2))
645  return false;
646 
647  // We are not adding any register mask info here, because the stackmap doesn't
648  // clobber anything.
649 
650  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
653  for (unsigned i = 0; ScratchRegs[i]; ++i)
654  Ops.push_back(MachineOperand::CreateReg(
655  ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
656  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
657 
658  // Issue CALLSEQ_START
659  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
660  auto Builder =
661  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
662  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
663  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
664  Builder.addImm(0);
665 
666  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
669  for (auto const &MO : Ops)
670  MIB.add(MO);
671 
672  // Issue CALLSEQ_END
673  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
674  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
675  .addImm(0)
676  .addImm(0);
677 
678  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

681  return true;
682 }
683 
684 /// \brief Lower an argument list according to the target calling convention.
685 ///
686 /// This is a helper for lowering intrinsics that follow a target calling
687 /// convention or require stack pointer adjustment. Only a subset of the
688 /// intrinsic's operands need to participate in the calling convention.
689 bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
690  unsigned NumArgs, const Value *Callee,
691  bool ForceRetVoidTy, CallLoweringInfo &CLI) {
692  ArgListTy Args;
693  Args.reserve(NumArgs);
694 
695  // Populate the argument list.
696  ImmutableCallSite CS(CI);
697  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
698  Value *V = CI->getOperand(ArgI);
699 
700  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
701 
702  ArgListEntry Entry;
703  Entry.Val = V;
704  Entry.Ty = V->getType();
705  Entry.setAttributes(&CS, ArgIdx);
706  Args.push_back(Entry);
707  }
708 
709  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
710  : CI->getType();
711  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
712 
713  return lowerCallTo(CLI);
714 }
715 
FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
718  StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
719  SmallString<32> MangledName;
720  Mangler::getNameWithPrefix(MangledName, Target, DL);
721  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
722  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
723 }
724 
bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
727  // i32 <numBytes>,
728  // i8* <target>,
729  // i32 <numArgs>,
730  // [Args...],
731  // [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

737  // Get the real number of arguments participating in the call <numArgs>
738  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
739  "Expected a constant integer.");
740  const auto *NumArgsVal =
741  cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
742  unsigned NumArgs = NumArgsVal->getZExtValue();
743 
744  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
745  // This includes all meta-operands up to but not including CC.
746  unsigned NumMetaOpers = PatchPointOpers::CCPos;
747  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
748  "Not enough arguments provided to the patchpoint intrinsic");
749 
750  // For AnyRegCC the arguments are lowered later on manually.
751  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
752  CallLoweringInfo CLI;
753  CLI.setIsPatchPoint();
754  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
755  return false;
756 
757  assert(CLI.Call && "No call instruction specified.");
758 
  SmallVector<MachineOperand, 32> Ops;

761  // Add an explicit result reg if we use the anyreg calling convention.
762  if (IsAnyRegCC && HasDef) {
763  assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
766  Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
767  }
768 
769  // Add the <id> and <numBytes> constants.
770  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
771  "Expected a constant integer.");
772  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
773  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
774 
775  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
776  "Expected a constant integer.");
777  const auto *NumBytes =
778  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
779  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
780 
781  // Add the call target.
782  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
783  uint64_t CalleeConstAddr =
784  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
785  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
786  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
787  if (C->getOpcode() == Instruction::IntToPtr) {
788  uint64_t CalleeConstAddr =
789  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
790  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
791  } else
792  llvm_unreachable("Unsupported ConstantExpr.");
793  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
798  llvm_unreachable("Unsupported callee address.");
799 
800  // Adjust <numArgs> to account for any arguments that have been passed on
801  // the stack instead.
802  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
803  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
804 
805  // Add the calling convention
806  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
807 
808  // Add the arguments we omitted previously. The register allocator should
809  // place these in any free register.
810  if (IsAnyRegCC) {
811  for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
812  unsigned Reg = getRegForValue(I->getArgOperand(i));
813  if (!Reg)
814  return false;
815  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
816  }
817  }
818 
819  // Push the arguments from the call instruction.
820  for (auto Reg : CLI.OutRegs)
821  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
822 
823  // Push live variables for the stack map.
824  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
825  return false;
826 
827  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

831  // Add scratch registers as implicit def and early clobber.
832  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
833  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
836  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
837 
838  // Add implicit defs (return values).
839  for (auto Reg : CLI.InRegs)
840  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
841  /*IsImpl=*/true));
842 
843  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));
846 
847  for (auto &MO : Ops)
848  MIB.add(MO);
849 
850  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
851 
852  // Delete the original call instruction.
853  CLI.Call->eraseFromParent();
854 
855  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
861 }
862 
bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*IsDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*IsDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
875  for (auto &MO : Ops)
876  MIB.add(MO);
877  // Insert the Patchable Event Call instruction, that gets lowered properly.
878  return true;
879 }
880 
881 
882 /// Returns an AttributeList representing the attributes applied to the return
883 /// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
887  Attrs.push_back(Attribute::SExt);
888  if (CLI.RetZExt)
889  Attrs.push_back(Attribute::ZExt);
890  if (CLI.IsInReg)
891  Attrs.push_back(Attribute::InReg);
892 
  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
895 }
896 
897 bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
898  unsigned NumArgs) {
899  MCContext &Ctx = MF->getContext();
900  SmallString<32> MangledName;
901  Mangler::getNameWithPrefix(MangledName, SymName, DL);
902  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
903  return lowerCallTo(CI, Sym, NumArgs);
904 }
905 
bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
908  ImmutableCallSite CS(CI);
909 
910  FunctionType *FTy = CS.getFunctionType();
911  Type *RetTy = CS.getType();
912 
913  ArgListTy Args;
914  Args.reserve(NumArgs);
915 
916  // Populate the argument list.
917  // Attributes for args start at offset 1, after the return attribute.
918  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
919  Value *V = CI->getOperand(ArgI);
920 
921  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
922 
923  ArgListEntry Entry;
924  Entry.Val = V;
925  Entry.Ty = V->getType();
926  Entry.setAttributes(&CS, ArgI);
927  Args.push_back(Entry);
928  }
930 
931  CallLoweringInfo CLI;
932  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
933 
934  return lowerCallTo(CLI);
935 }
936 
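/// Common call-lowering path: compute the flags for the return value and
/// each outgoing argument, hand the actual lowering to the target's
/// fastLowerCall hook, and mark unused physical register defs on the call as
/// dead.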
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
939  CLI.clearIns();
940  SmallVector<EVT, 4> RetTys;
941  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
942 
  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
945 
946  bool CanLowerReturn = TLI.CanLowerReturn(
947  CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
948 
949  // FIXME: sret demotion isn't supported yet - bail out.
950  if (!CanLowerReturn)
951  return false;
952 
953  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
954  EVT VT = RetTys[I];
955  MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
956  unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
957  for (unsigned i = 0; i != NumRegs; ++i) {
958  ISD::InputArg MyFlags;
959  MyFlags.VT = RegisterVT;
960  MyFlags.ArgVT = VT;
961  MyFlags.Used = CLI.IsReturnValueUsed;
962  if (CLI.RetSExt)
963  MyFlags.Flags.setSExt();
964  if (CLI.RetZExt)
965  MyFlags.Flags.setZExt();
966  if (CLI.IsInReg)
967  MyFlags.Flags.setInReg();
968  CLI.Ins.push_back(MyFlags);
969  }
970  }
971 
972  // Handle all of the outgoing arguments.
973  CLI.clearOuts();
974  for (auto &Arg : CLI.getArgs()) {
975  Type *FinalType = Arg.Ty;
976  if (Arg.IsByVal)
977  FinalType = cast<PointerType>(Arg.Ty)->getElementType();
978  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
979  FinalType, CLI.CallConv, CLI.IsVarArg);
980 
981  ISD::ArgFlagsTy Flags;
982  if (Arg.IsZExt)
983  Flags.setZExt();
984  if (Arg.IsSExt)
985  Flags.setSExt();
986  if (Arg.IsInReg)
987  Flags.setInReg();
988  if (Arg.IsSRet)
989  Flags.setSRet();
990  if (Arg.IsSwiftSelf)
991  Flags.setSwiftSelf();
992  if (Arg.IsSwiftError)
993  Flags.setSwiftError();
994  if (Arg.IsByVal)
995  Flags.setByVal();
996  if (Arg.IsInAlloca) {
997  Flags.setInAlloca();
998  // Set the byval flag for CCAssignFn callbacks that don't know about
999  // inalloca. This way we can know how many bytes we should've allocated
1000  // and how many bytes a callee cleanup function will pop. If we port
1001  // inalloca to more targets, we'll have to add custom inalloca handling in
1002  // the various CC lowering callbacks.
1003  Flags.setByVal();
1004  }
1005  if (Arg.IsByVal || Arg.IsInAlloca) {
1006  PointerType *Ty = cast<PointerType>(Arg.Ty);
1007  Type *ElementTy = Ty->getElementType();
1008  unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
1009  // For ByVal, alignment should come from FE. BE will guess if this info is
1010  // not there, but there are cases it cannot get right.
1011  unsigned FrameAlign = Arg.Alignment;
1012  if (!FrameAlign)
1013  FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
1014  Flags.setByValSize(FrameSize);
1015  Flags.setByValAlign(FrameAlign);
1016  }
1017  if (Arg.IsNest)
1018  Flags.setNest();
1019  if (NeedsRegBlock)
1020  Flags.setInConsecutiveRegs();
1021  unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
1022  Flags.setOrigAlign(OriginalAlignment);
1023 
1024  CLI.OutVals.push_back(Arg.Val);
1025  CLI.OutFlags.push_back(Flags);
1026  }
1027 
1028  if (!fastLowerCall(CLI))
1029  return false;
1030 
1031  // Set all unused physreg defs as dead.
1032  assert(CLI.Call && "No call instruction specified.");
1033  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
1034 
1035  if (CLI.NumResultRegs && CLI.CS)
    updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

1038  return true;
1039 }
1040 
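/// Lower an ordinary call instruction: build the argument list from the call
/// site, check the target-independent tail-call constraints, and forward to
/// lowerCallTo.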
bool FastISel::lowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);
1043 
1044  FunctionType *FuncTy = CS.getFunctionType();
1045  Type *RetTy = CS.getType();
1046 
1047  ArgListTy Args;
1048  ArgListEntry Entry;
1049  Args.reserve(CS.arg_size());
1050 
1051  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1052  i != e; ++i) {
1053  Value *V = *i;
1054 
1055  // Skip empty types
1056  if (V->getType()->isEmptyTy())
1057  continue;
1058 
1059  Entry.Val = V;
1060  Entry.Ty = V->getType();
1061 
1062  // Skip the first return-type Attribute to get to params.
1063  Entry.setAttributes(&CS, i - CS.arg_begin());
1064  Args.push_back(Entry);
1065  }
1066 
1067  // Check if target-independent constraints permit a tail call here.
1068  // Target-dependent constraints are checked within fastLowerCall.
1069  bool IsTailCall = CI->isTailCall();
1070  if (IsTailCall && !isInTailCallPosition(CS, TM))
1071  IsTailCall = false;
1072 
1073  CallLoweringInfo CLI;
1074  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
1075  .setTailCall(IsTailCall);
1076 
1077  return lowerCallTo(CLI);
1078 }
1079 
bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);
1082 
1083  // Handle simple inline asms.
1084  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
1085  // If the inline asm has side effects, then make sure that no local value
1086  // lives across by flushing the local value map.
1087  if (IA->hasSideEffects())
1088  flushLocalValueMap();
1089 
1090  // Don't attempt to handle constraints.
1091  if (!IA->getConstraintString().empty())
1092  return false;
1093 
1094  unsigned ExtraInfo = 0;
1095  if (IA->hasSideEffects())
1096  ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1097  if (IA->isAlignStack())
1098  ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1099 
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
        .addExternalSymbol(IA->getAsmString().c_str())
1103  .addImm(ExtraInfo);
1104  return true;
1105  }
1106 
1107  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
1108  computeUsesVAFloatArgument(*Call, MMI);
1109 
1110  // Handle intrinsic function calls.
1111  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1112  return selectIntrinsicCall(II);
1113 
1114  // Usually, it does not make sense to initialize a value,
1115  // make an unrelated function call and use the value, because
1116  // it tends to be spilled on the stack. So, we move the pointer
1117  // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized appear after the
  // call. It also makes sense to skip intrinsics since they tend to be
  // inlined.
1121  flushLocalValueMap();
1122 
1123  return lowerCall(Call);
1124 }
1125 
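/// Lower the intrinsics that fast-isel handles in a target-independent way
/// (debug info, lifetime markers, stackmaps/patchpoints, and a few others);
/// anything not handled here is forwarded to the target hook
/// fastLowerIntrinsicCall.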
bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
1128  default:
1129  break;
1130  // At -O0 we don't care about the lifetime intrinsics.
1131  case Intrinsic::lifetime_start:
1132  case Intrinsic::lifetime_end:
1133  // The donothing intrinsic does, well, nothing.
1134  case Intrinsic::donothing:
1135  // Neither does the sideeffect intrinsic.
1136  case Intrinsic::sideeffect:
1137  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1138  case Intrinsic::assume:
1139  return true;
1140  case Intrinsic::dbg_declare: {
1141  const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1142  assert(DI->getVariable() && "Missing variable");
1143  if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1144  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1145  return true;
1146  }
1147 
1148  const Value *Address = DI->getAddress();
1149  if (!Address || isa<UndefValue>(Address)) {
1150  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1151  return true;
1152  }
1153 
1154  // Byval arguments with frame indices were already handled after argument
1155  // lowering and before isel.
1156  const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
1159  return true;
1160 
    Optional<MachineOperand> Op;
    if (unsigned Reg = lookUpRegForValue(Address))
1163  Op = MachineOperand::CreateReg(Reg, false);
1164 
1165  // If we have a VLA that has a "use" in a metadata node that's then used
1166  // here but it has no other uses, then we have a problem. E.g.,
1167  //
1168  // int foo (const int *x) {
1169  // char a[*x];
1170  // return 0;
1171  // }
1172  //
1173  // If we assign 'a' a vreg and fast isel later on has to use the selection
1174  // DAG isel, it will want to copy the value to the vreg. However, there are
1175  // no uses, which goes counter to what selection DAG isel expects.
1176  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1177  (!isa<AllocaInst>(Address) ||
1178  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);
1181 
1182  if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
1185  if (Op->isReg()) {
1186  Op->setIsDebug(true);
1187  // A dbg.declare describes the address of a source variable, so lower it
1188  // into an indirect DBG_VALUE.
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
1191  Op->getReg(), DI->getVariable(), DI->getExpression());
1192  } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE))
1195  .add(*Op)
1196  .addImm(0)
1197  .addMetadata(DI->getVariable())
1198  .addMetadata(DI->getExpression());
1199  } else {
1200  // We can't yet handle anything else here because it would require
1201  // generating code, thus altering codegen because of debug info.
1202  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1203  }
1204  return true;
1205  }
1206  case Intrinsic::dbg_value: {
1207  // This form of DBG_VALUE is target-independent.
1208  const DbgValueInst *DI = cast<DbgValueInst>(II);
1209  const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1210  const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
1213  if (!V) {
1214  // Currently the optimizer can produce this; insert an undef to
1215  // help debugging. Probably the optimizer should not do this.
1216  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
1217  DI->getVariable(), DI->getExpression());
1218  } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1219  if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
1222  .addImm(0U)
1223  .addMetadata(DI->getVariable())
1224  .addMetadata(DI->getExpression());
1225  else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
1228  .addImm(0U)
1229  .addMetadata(DI->getVariable())
1230  .addMetadata(DI->getExpression());
1231  } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
1234  .addImm(0U)
1235  .addMetadata(DI->getVariable())
1236  .addMetadata(DI->getExpression());
1237  } else if (unsigned Reg = lookUpRegForValue(V)) {
1238  // FIXME: This does not handle register-indirect values at offset 0.
1239  bool IsIndirect = false;
1240  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1241  DI->getVariable(), DI->getExpression());
1242  } else {
1243  // We can't yet handle anything else here because it would require
1244  // generating code, thus altering codegen because of debug info.
1245  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1246  }
1247  return true;
1248  }
1249  case Intrinsic::objectsize: {
1250  ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
1251  unsigned long long Res = CI->isZero() ? -1ULL : 0;
1252  Constant *ResCI = ConstantInt::get(II->getType(), Res);
1253  unsigned ResultReg = getRegForValue(ResCI);
1254  if (!ResultReg)
1255  return false;
1256  updateValueMap(II, ResultReg);
1257  return true;
1258  }
1259  case Intrinsic::invariant_group_barrier:
1260  case Intrinsic::expect: {
1261  unsigned ResultReg = getRegForValue(II->getArgOperand(0));
1262  if (!ResultReg)
1263  return false;
1264  updateValueMap(II, ResultReg);
1265  return true;
1266  }
1267  case Intrinsic::experimental_stackmap:
1268  return selectStackmap(II);
1269  case Intrinsic::experimental_patchpoint_void:
1270  case Intrinsic::experimental_patchpoint_i64:
1271  return selectPatchpoint(II);
1272 
1273  case Intrinsic::xray_customevent:
1274  return selectXRayCustomEvent(II);
1275  }
1276 
1277  return fastLowerIntrinsicCall(II);
1278 }
1279 
1280 bool FastISel::selectCast(const User *I, unsigned Opcode) {
1281  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1282  EVT DstVT = TLI.getValueType(DL, I->getType());
1283 
1284  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1285  !DstVT.isSimple())
1286  // Unhandled type. Halt "fast" selection and bail.
1287  return false;
1288 
1289  // Check if the destination type is legal.
1290  if (!TLI.isTypeLegal(DstVT))
1291  return false;
1292 
1293  // Check if the source operand is legal.
1294  if (!TLI.isTypeLegal(SrcVT))
1295  return false;
1296 
1297  unsigned InputReg = getRegForValue(I->getOperand(0));
1298  if (!InputReg)
1299  // Unhandled operand. Halt "fast" selection and bail.
1300  return false;
1301 
1302  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
1303 
1304  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1305  Opcode, InputReg, InputRegIsKill);
1306  if (!ResultReg)
1307  return false;
1308 
1309  updateValueMap(I, ResultReg);
1310  return true;
1311 }
1312 
bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
1315  if (I->getType() == I->getOperand(0)->getType()) {
1316  unsigned Reg = getRegForValue(I->getOperand(0));
1317  if (!Reg)
1318  return false;
1319  updateValueMap(I, Reg);
1320  return true;
1321  }
1322 
1323  // Bitcasts of other values become reg-reg copies or BITCAST operators.
1324  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1325  EVT DstEVT = TLI.getValueType(DL, I->getType());
1326  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1327  !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1328  // Unhandled type. Halt "fast" selection and bail.
1329  return false;
1330 
1331  MVT SrcVT = SrcEVT.getSimpleVT();
1332  MVT DstVT = DstEVT.getSimpleVT();
1333  unsigned Op0 = getRegForValue(I->getOperand(0));
1334  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1335  return false;
1336  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1337 
1338  // First, try to perform the bitcast by inserting a reg-reg copy.
1339  unsigned ResultReg = 0;
1340  if (SrcVT == DstVT) {
1341  const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1342  const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1343  // Don't attempt a cross-class copy. It will likely fail.
1344  if (SrcClass == DstClass) {
1345  ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1348  }
1349  }
1350 
1351  // If the reg-reg copy failed, select a BITCAST opcode.
1352  if (!ResultReg)
1353  ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1354 
1355  if (!ResultReg)
1356  return false;
1357 
1358  updateValueMap(I, ResultReg);
1359  return true;
1360 }
1361 
1362 // Remove local value instructions starting from the instruction after
1363 // SavedLastLocalValue to the current function insert point.
1364 void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1365 {
1366  MachineInstr *CurLastLocalValue = getLastLocalValue();
1367  if (CurLastLocalValue != SavedLastLocalValue) {
1368  // Find the first local value instruction to be deleted.
1369  // This is the instruction after SavedLastLocalValue if it is non-NULL.
1370  // Otherwise it's the first instruction in the block.
1371  MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1372  if (SavedLastLocalValue)
1373  ++FirstDeadInst;
1374  else
1375  FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1376  setLastLocalValue(SavedLastLocalValue);
1377  removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1378  }
1379 }
1380 
bool FastISel::selectInstruction(const Instruction *I) {
  MachineInstr *SavedLastLocalValue = getLastLocalValue();
1383  // Just before the terminator instruction, insert instructions to
1384  // feed PHI nodes in successor blocks.
1385  if (isa<TerminatorInst>(I)) {
1386  if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1387  // PHI node handling may have generated local value instructions,
1388  // even though it failed to handle all PHI nodes.
1389  // We remove these instructions because SelectionDAGISel will generate
1390  // them again.
1391  removeDeadLocalValueCode(SavedLastLocalValue);
1392  return false;
1393  }
1394  }
1395 
1396  // FastISel does not handle any operand bundles except OB_funclet.
  if (ImmutableCallSite CS = ImmutableCallSite(I))
    for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
1399  if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1400  return false;
1401 
1402  DbgLoc = I->getDebugLoc();
1403 
1404  SavedInsertPt = FuncInfo.InsertPt;
1405 
1406  if (const auto *Call = dyn_cast<CallInst>(I)) {
1407  const Function *F = Call->getCalledFunction();
1408  LibFunc Func;
1409 
1410  // As a special case, don't handle calls to builtin library functions that
1411  // may be translated directly to target instructions.
1412  if (F && !F->hasLocalLinkage() && F->hasName() &&
1413  LibInfo->getLibFunc(F->getName(), Func) &&
      LibInfo->hasOptimizedCodeGen(Func))
    return false;
1416 
1417  // Don't handle Intrinsic::trap if a trap function is specified.
1418  if (F && F->getIntrinsicID() == Intrinsic::trap &&
1419  Call->hasFnAttr("trap-func-name"))
1420  return false;
1421  }
1422 
1423  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
1426  ++NumFastIselSuccessIndependent;
1427  DbgLoc = DebugLoc();
1428  return true;
1429  }
1430  // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
1433  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1434  SavedInsertPt = FuncInfo.InsertPt;
1435  }
1436  // Next, try calling the target to attempt to handle the instruction.
1437  if (fastSelectInstruction(I)) {
1438  ++NumFastIselSuccessTarget;
1439  DbgLoc = DebugLoc();
1440  return true;
1441  }
1442  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
1445  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1446 
1447  DbgLoc = DebugLoc();
1448  // Undo phi node updates, because they will be added again by SelectionDAG.
1449  if (isa<TerminatorInst>(I)) {
1450  // PHI node handling may have generated local value instructions.
1451  // We remove them because SelectionDAGISel will generate them again.
1452  removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
1455  return false;
1456 }
1457 
1458 /// Emit an unconditional branch to the given block, unless it is the immediate
1459 /// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
1462  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
1463  FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information, if this would be the only
    // instruction in the block then emit it; otherwise we have the
    // unconditional fall-through case, which needs no instructions.
1467  } else {
1468  // The unconditional branch case.
1469  TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1470  SmallVector<MachineOperand, 0>(), DbgLoc);
1471  }
1472  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}
1479 
void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
1482  MachineBasicBlock *FalseMBB) {
1483  // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
1484  // happen in degenerate IR and MachineIR forbids to have a block twice in the
1485  // successor/predecessor lists.
1486  if (TrueMBB != FalseMBB) {
1487  if (FuncInfo.BPI) {
1488  auto BranchProbability =
1489  FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }
1494 
1495  fastEmitBranch(FalseMBB, DbgLoc);
1496 }
1497 
1498 /// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
1501  if (!OpReg)
1502  return false;
1503  bool OpRegIsKill = hasTrivialKill(I);
1504 
1505  // If the target has ISD::FNEG, use it.
1506  EVT VT = TLI.getValueType(DL, I->getType());
1507  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1508  OpReg, OpRegIsKill);
1509  if (ResultReg) {
1510  updateValueMap(I, ResultReg);
1511  return true;
1512  }
1513 
1514  // Bitcast the value to integer, twiddle the sign bit with xor,
1515  // and then bitcast it back to floating-point.
1516  if (VT.getSizeInBits() > 64)
1517  return false;
1518  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1519  if (!TLI.isTypeLegal(IntVT))
1520  return false;
1521 
1522  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1523  ISD::BITCAST, OpReg, OpRegIsKill);
1524  if (!IntReg)
1525  return false;
1526 
1527  unsigned IntResultReg = fastEmit_ri_(
1528  IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1529  UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1530  if (!IntResultReg)
1531  return false;
1532 
1533  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1534  IntResultReg, /*IsKill=*/true);
1535  if (!ResultReg)
1536  return false;
1537 
1538  updateValueMap(I, ResultReg);
1539  return true;
1540 }
1541 
bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1544  if (!EVI)
1545  return false;
1546 
1547  // Make sure we only try to handle extracts with a legal result. But also
1548  // allow i1 because it's easy.
1549  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1550  if (!RealVT.isSimple())
1551  return false;
1552  MVT VT = RealVT.getSimpleVT();
1553  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1554  return false;
1555 
1556  const Value *Op0 = EVI->getOperand(0);
1557  Type *AggTy = Op0->getType();
1558 
1559  // Get the base result register.
1560  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
1563  ResultReg = I->second;
1564  else if (isa<Instruction>(Op0))
1565  ResultReg = FuncInfo.InitializeRegForValue(Op0);
1566  else
1567  return false; // fast-isel can't handle aggregate constants at the moment
1568 
1569  // Get the actual result register, which is an offset from the base register.
1570  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1571 
1572  SmallVector<EVT, 4> AggValueVTs;
1573  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1574 
1575  for (unsigned i = 0; i < VTIndex; i++)
1576  ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1577 
1578  updateValueMap(EVI, ResultReg);
1579  return true;
1580 }
1581 
1582 bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1583  switch (Opcode) {
1584  case Instruction::Add:
1585  return selectBinaryOp(I, ISD::ADD);
1586  case Instruction::FAdd:
1587  return selectBinaryOp(I, ISD::FADD);
1588  case Instruction::Sub:
1589  return selectBinaryOp(I, ISD::SUB);
1590  case Instruction::FSub:
1591  // FNeg is currently represented in LLVM IR as a special case of FSub.
1592  if (BinaryOperator::isFNeg(I))
1593  return selectFNeg(I);
1594  return selectBinaryOp(I, ISD::FSUB);
1595  case Instruction::Mul:
1596  return selectBinaryOp(I, ISD::MUL);
1597  case Instruction::FMul:
1598  return selectBinaryOp(I, ISD::FMUL);
1599  case Instruction::SDiv:
1600  return selectBinaryOp(I, ISD::SDIV);
1601  case Instruction::UDiv:
1602  return selectBinaryOp(I, ISD::UDIV);
1603  case Instruction::FDiv:
1604  return selectBinaryOp(I, ISD::FDIV);
1605  case Instruction::SRem:
1606  return selectBinaryOp(I, ISD::SREM);
1607  case Instruction::URem:
1608  return selectBinaryOp(I, ISD::UREM);
1609  case Instruction::FRem:
1610  return selectBinaryOp(I, ISD::FREM);
1611  case Instruction::Shl:
1612  return selectBinaryOp(I, ISD::SHL);
1613  case Instruction::LShr:
1614  return selectBinaryOp(I, ISD::SRL);
1615  case Instruction::AShr:
1616  return selectBinaryOp(I, ISD::SRA);
1617  case Instruction::And:
1618  return selectBinaryOp(I, ISD::AND);
1619  case Instruction::Or:
1620  return selectBinaryOp(I, ISD::OR);
1621  case Instruction::Xor:
1622  return selectBinaryOp(I, ISD::XOR);
1623 
1624  case Instruction::GetElementPtr:
1625  return selectGetElementPtr(I);
1626 
1627  case Instruction::Br: {
1628  const BranchInst *BI = cast<BranchInst>(I);
1629 
1630  if (BI->isUnconditional()) {
1631  const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1632  MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1633  fastEmitBranch(MSucc, BI->getDebugLoc());
1634  return true;
1635  }
1636 
  // Conditional branches are not handled yet.
1638  // Halt "fast" selection and bail.
1639  return false;
1640  }
1641 
1642  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1645  else
1646  return true;
1647 
1648  case Instruction::Alloca:
1649  // FunctionLowering has the static-sized case covered.
1650  if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1651  return true;
1652 
1653  // Dynamic-sized alloca is not handled yet.
1654  return false;
1655 
1656  case Instruction::Call:
1657  return selectCall(I);
1658 
1659  case Instruction::BitCast:
1660  return selectBitCast(I);
1661 
1662  case Instruction::FPToSI:
1663  return selectCast(I, ISD::FP_TO_SINT);
1664  case Instruction::ZExt:
1665  return selectCast(I, ISD::ZERO_EXTEND);
1666  case Instruction::SExt:
1667  return selectCast(I, ISD::SIGN_EXTEND);
1668  case Instruction::Trunc:
1669  return selectCast(I, ISD::TRUNCATE);
1670  case Instruction::SIToFP:
1671  return selectCast(I, ISD::SINT_TO_FP);
1672 
1673  case Instruction::IntToPtr: // Deliberate fall-through.
1674  case Instruction::PtrToInt: {
1675  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1676  EVT DstVT = TLI.getValueType(DL, I->getType());
1677  if (DstVT.bitsGT(SrcVT))
1678  return selectCast(I, ISD::ZERO_EXTEND);
1679  if (DstVT.bitsLT(SrcVT))
1680  return selectCast(I, ISD::TRUNCATE);
1681  unsigned Reg = getRegForValue(I->getOperand(0));
1682  if (!Reg)
1683  return false;
1684  updateValueMap(I, Reg);
1685  return true;
1686  }
1687 
1688  case Instruction::ExtractValue:
1689  return selectExtractValue(I);
1690 
1691  case Instruction::PHI:
1692  llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1693 
1694  default:
1695  // Unhandled instruction. Halt "fast" selection and bail.
1696  return false;
1697  }
1698 }
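// ----- Editor's note: the block below is an illustrative sketch added for
// ----- exposition; it is not part of FastISel.cpp. It restates the width rule
// ----- used by the IntToPtr/PtrToInt cases above: a wider destination
// ----- zero-extends, a narrower one truncates, and equal widths are a plain
// ----- register copy. Helper names are invented; a 64-bit target is assumed.
#include <cstdint>

static uint32_t ptrToI32(void *P) {                  // like ptrtoint i8* -> i32
  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(P));   // truncate
}

static void *i32ToPtr(uint32_t X) {                  // like inttoptr i32 -> i8*
  return reinterpret_cast<void *>(static_cast<uintptr_t>(X));     // zero-extend
}
// An i64 <-> i8* conversion on the same 64-bit target changes no bits at all,
// which is why the code above simply reuses the source register in that case.
// ----- End editor's note ----------------------------------------------------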
1699 
1700 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1701  const TargetLibraryInfo *LibInfo,
1702  bool SkipTargetIndependentISel)
1703  : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1704  MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1705  TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1706  TII(*MF->getSubtarget().getInstrInfo()),
1707  TLI(*MF->getSubtarget().getTargetLowering()),
1708  TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1709  SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1710 
1711 FastISel::~FastISel() = default;
1712 
1713 bool FastISel::fastLowerArguments() { return false; }
1714 
1715 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1716 
1717 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1718  return false;
1719 }
1720 
1721 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1722 
1723 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
1724  bool /*Op0IsKill*/) {
1725  return 0;
1726 }
1727 
1728 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1729  bool /*Op0IsKill*/, unsigned /*Op1*/,
1730  bool /*Op1IsKill*/) {
1731  return 0;
1732 }
1733 
1734 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1735  return 0;
1736 }
1737 
1738 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1739  const ConstantFP * /*FPImm*/) {
1740  return 0;
1741 }
1742 
1743 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1744  bool /*Op0IsKill*/, uint64_t /*Imm*/) {
1745  return 0;
1746 }
1747 
1748 /// This method is a wrapper of fastEmit_ri. It first tries to emit an
1749 /// instruction with an immediate operand using fastEmit_ri.
1750 /// If that fails, it materializes the immediate into a register and tries
1751 /// fastEmit_rr instead.
1752 unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1753  bool Op0IsKill, uint64_t Imm, MVT ImmType) {
1754  // If this is a multiply by a power of two, emit this as a shift left.
1755  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1756  Opcode = ISD::SHL;
1757  Imm = Log2_64(Imm);
1758  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1759  // div x, 8 -> srl x, 3
1760  Opcode = ISD::SRL;
1761  Imm = Log2_64(Imm);
1762  }
1763 
1764  // Horrible hack (to be removed), check to make sure shift amounts are
1765  // in-range.
1766  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1767  Imm >= VT.getSizeInBits())
1768  return 0;
1769 
1770  // First check if immediate type is legal. If not, we can't use the ri form.
1771  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1772  if (ResultReg)
1773  return ResultReg;
1774  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1775  bool IsImmKill = true;
1776  if (!MaterialReg) {
1777  // This is a bit ugly/slow, but failing here means falling out of
1778  // fast-isel, which would be very slow.
1779  IntegerType *ITy =
1780  IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1781  MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1782  if (!MaterialReg)
1783  return 0;
1784  // FIXME: If the materialized register here has no uses yet then this
1785  // will be the first use and we should be able to mark it as killed.
1786  // However, the local value area for materialising constant expressions
1787  // grows down, not up, which means that any constant expressions we generate
1788  // later which also use 'Imm' could be after this instruction and therefore
1789  // after this kill.
1790  IsImmKill = false;
1791  }
1792  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
1793 }
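// ----- Editor's note: illustrative sketch, not part of FastISel.cpp. It
// ----- mirrors the strength reduction at the top of fastEmit_ri_ above:
// ----- a multiply or unsigned divide by a power of two becomes a shift.
// ----- The helper name and signature are invented for illustration.
#include <cstdint>

static bool strengthReduceToShift(uint64_t Imm, unsigned &ShiftAmt) {
  if (Imm == 0 || (Imm & (Imm - 1)) != 0)
    return false;                  // not a power of two, leave the op alone
  ShiftAmt = 0;
  while ((Imm >>= 1) != 0)
    ++ShiftAmt;                    // ShiftAmt = log2(original Imm)
  return true;
}
// Example: Imm = 8 gives ShiftAmt = 3, so "mul x, 8" becomes "shl x, 3" and
// "udiv x, 8" becomes "srl x, 3", matching the ISD::MUL/ISD::UDIV rewrites above.
// ----- End editor's note ----------------------------------------------------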
1794 
1795 unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
1796  return MRI.createVirtualRegister(RC);
1797 }
1798 
1799 unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
1800  unsigned OpNum) {
1801  if (TargetRegisterInfo::isVirtualRegister(Op)) {
1802  const TargetRegisterClass *RegClass =
1803  TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1804  if (!MRI.constrainRegClass(Op, RegClass)) {
1805  // If it's not legal to COPY between the register classes, something
1806  // has gone very wrong before we got here.
1807  unsigned NewOp = createResultReg(RegClass);
1808  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1809  TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1810  return NewOp;
1811  }
1812  }
1813  return Op;
1814 }
1815 
1816 unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1817  const TargetRegisterClass *RC) {
1818  unsigned ResultReg = createResultReg(RC);
1819  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1820 
1821  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
1822  return ResultReg;
1823 }
1824 
1825 unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1826  const TargetRegisterClass *RC, unsigned Op0,
1827  bool Op0IsKill) {
1828  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1829 
1830  unsigned ResultReg = createResultReg(RC);
1831  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1832 
1833  if (II.getNumDefs() >= 1)
1834  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1835  .addReg(Op0, getKillRegState(Op0IsKill));
1836  else {
1837  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1838  .addReg(Op0, getKillRegState(Op0IsKill));
1839  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1840  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1841  }
1842 
1843  return ResultReg;
1844 }
1845 
1846 unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1847  const TargetRegisterClass *RC, unsigned Op0,
1848  bool Op0IsKill, unsigned Op1,
1849  bool Op1IsKill) {
1850  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1851 
1852  unsigned ResultReg = createResultReg(RC);
1853  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1854  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1855 
1856  if (II.getNumDefs() >= 1)
1857  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1858  .addReg(Op0, getKillRegState(Op0IsKill))
1859  .addReg(Op1, getKillRegState(Op1IsKill));
1860  else {
1861  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1862  .addReg(Op0, getKillRegState(Op0IsKill))
1863  .addReg(Op1, getKillRegState(Op1IsKill));
1864  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1865  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1866  }
1867  return ResultReg;
1868 }
1869 
1870 unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1871  const TargetRegisterClass *RC, unsigned Op0,
1872  bool Op0IsKill, unsigned Op1,
1873  bool Op1IsKill, unsigned Op2,
1874  bool Op2IsKill) {
1875  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1876 
1877  unsigned ResultReg = createResultReg(RC);
1878  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1879  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1880  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1881 
1882  if (II.getNumDefs() >= 1)
1883  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1884  .addReg(Op0, getKillRegState(Op0IsKill))
1885  .addReg(Op1, getKillRegState(Op1IsKill))
1886  .addReg(Op2, getKillRegState(Op2IsKill));
1887  else {
1888  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1889  .addReg(Op0, getKillRegState(Op0IsKill))
1890  .addReg(Op1, getKillRegState(Op1IsKill))
1891  .addReg(Op2, getKillRegState(Op2IsKill));
1892  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1893  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1894  }
1895  return ResultReg;
1896 }
1897 
1898 unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
1899  const TargetRegisterClass *RC, unsigned Op0,
1900  bool Op0IsKill, uint64_t Imm) {
1901  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1902 
1903  unsigned ResultReg = createResultReg(RC);
1904  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1905 
1906  if (II.getNumDefs() >= 1)
1907  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1908  .addReg(Op0, getKillRegState(Op0IsKill))
1909  .addImm(Imm);
1910  else {
1911  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1912  .addReg(Op0, getKillRegState(Op0IsKill))
1913  .addImm(Imm);
1914  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1915  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1916  }
1917  return ResultReg;
1918 }
1919 
1920 unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
1921  const TargetRegisterClass *RC, unsigned Op0,
1922  bool Op0IsKill, uint64_t Imm1,
1923  uint64_t Imm2) {
1924  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1925 
1926  unsigned ResultReg = createResultReg(RC);
1927  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1928 
1929  if (II.getNumDefs() >= 1)
1930  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1931  .addReg(Op0, getKillRegState(Op0IsKill))
1932  .addImm(Imm1)
1933  .addImm(Imm2);
1934  else {
1935  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1936  .addReg(Op0, getKillRegState(Op0IsKill))
1937  .addImm(Imm1)
1938  .addImm(Imm2);
1939  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1940  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1941  }
1942  return ResultReg;
1943 }
1944 
1945 unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
1946  const TargetRegisterClass *RC,
1947  const ConstantFP *FPImm) {
1948  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1949 
1950  unsigned ResultReg = createResultReg(RC);
1951 
1952  if (II.getNumDefs() >= 1)
1953  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1954  .addFPImm(FPImm);
1955  else {
1956  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1957  .addFPImm(FPImm);
1958  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1959  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1960  }
1961  return ResultReg;
1962 }
1963 
1964 unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
1965  const TargetRegisterClass *RC, unsigned Op0,
1966  bool Op0IsKill, unsigned Op1,
1967  bool Op1IsKill, uint64_t Imm) {
1968  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1969 
1970  unsigned ResultReg = createResultReg(RC);
1971  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1972  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1973 
1974  if (II.getNumDefs() >= 1)
1975  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1976  .addReg(Op0, getKillRegState(Op0IsKill))
1977  .addReg(Op1, getKillRegState(Op1IsKill))
1978  .addImm(Imm);
1979  else {
1980  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1981  .addReg(Op0, getKillRegState(Op0IsKill))
1982  .addReg(Op1, getKillRegState(Op1IsKill))
1983  .addImm(Imm);
1984  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1985  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1986  }
1987  return ResultReg;
1988 }
1989 
1990 unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
1991  const TargetRegisterClass *RC, uint64_t Imm) {
1992  unsigned ResultReg = createResultReg(RC);
1993  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1994 
1995  if (II.getNumDefs() >= 1)
1996  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1997  .addImm(Imm);
1998  else {
1999  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
2000  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2001  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2002  }
2003  return ResultReg;
2004 }
2005 
2006 unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
2007  bool Op0IsKill, uint32_t Idx) {
2008  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2009  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
2010  "Cannot yet extract from physregs");
2011  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2012  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2013  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
2014  ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
2015  return ResultReg;
2016 }
2017 
2018 /// Emit MachineInstrs to compute the value of Op with all but the least
2019 /// significant bit set to zero.
2020 unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
2021  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
2022 }
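// ----- Editor's note: illustrative sketch, not part of FastISel.cpp. The
// ----- i1 zero-extension above is just an AND with 1; a standalone
// ----- equivalent (function name invented for illustration):
#include <cstdint>

static uint64_t zextFromI1(uint64_t RegValue) {
  return RegValue & 1;   // keep only the least significant bit
}
// Example: a register whose low byte happens to hold 0xFF zero-extends to 1,
// while 0xFE (low bit clear) zero-extends to 0.
// ----- End editor's note ----------------------------------------------------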
2023 
2024 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2025 /// Emit code to ensure constants are copied into registers when needed.
2026 /// Remember the virtual registers that need to be added to the Machine PHI
2027 /// nodes as input. We cannot just directly add them, because expansion
2028 /// might result in multiple MBB's for one BB. As such, the start of the
2029 /// BB might correspond to a different MBB than the end.
2030 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2031  const TerminatorInst *TI = LLVMBB->getTerminator();
2032 
2033  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2034  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2035 
2036  // Check successor nodes' PHI nodes that expect a constant to be available
2037  // from this block.
2038  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2039  const BasicBlock *SuccBB = TI->getSuccessor(succ);
2040  if (!isa<PHINode>(SuccBB->begin()))
2041  continue;
2042  MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2043 
2044  // If this terminator has multiple identical successors (common for
2045  // switches), only handle each succ once.
2046  if (!SuccsHandled.insert(SuccMBB).second)
2047  continue;
2048 
2049  MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2050 
2051  // At this point we know that there is a 1-1 correspondence between LLVM PHI
2052  // nodes and Machine PHI nodes, but the incoming operands have not been
2053  // emitted yet.
2054  for (BasicBlock::const_iterator I = SuccBB->begin();
2055  const auto *PN = dyn_cast<PHINode>(I); ++I) {
2056 
2057  // Ignore dead PHIs.
2058  if (PN->use_empty())
2059  continue;
2060 
2061  // Only handle legal types. Two interesting things to note here. First,
2062  // by bailing out early, we may leave behind some dead instructions,
2063  // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2064  // own moves. Second, this check is necessary because FastISel doesn't
2065  // use CreateRegs to create registers, so it always creates
2066  // exactly one register for each non-void instruction.
2067  EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
2068  if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2069  // Handle integer promotions, though, because they're common and easy.
2070  if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2071  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2072  return false;
2073  }
2074  }
2075 
2076  const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
2077 
2078  // Set the DebugLoc for the copy. Prefer the location of the operand
2079  // if there is one; use the location of the PHI otherwise.
2080  DbgLoc = PN->getDebugLoc();
2081  if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2082  DbgLoc = Inst->getDebugLoc();
2083 
2084  unsigned Reg = getRegForValue(PHIOp);
2085  if (!Reg) {
2086  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2087  return false;
2088  }
2089  FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2090  DbgLoc = DebugLoc();
2091  }
2092  }
2093 
2094  return true;
2095 }
2096 
2097 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2098  assert(LI->hasOneUse() &&
2099  "tryToFoldLoad expected a LoadInst with a single use");
2100  // We know that the load has a single use, but don't know what it is. If it
2101  // isn't one of the folded instructions, then we can't succeed here. Handle
2102  // this by scanning the single-use users of the load until we get to FoldInst.
2103  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2104 
2105  const Instruction *TheUser = LI->user_back();
2106  while (TheUser != FoldInst && // Scan up until we find FoldInst.
2107  // Stay in the right block.
2108  TheUser->getParent() == FoldInst->getParent() &&
2109  --MaxUsers) { // Don't scan too far.
2110  // If there are multiple or no uses of this instruction, then bail out.
2111  if (!TheUser->hasOneUse())
2112  return false;
2113 
2114  TheUser = TheUser->user_back();
2115  }
2116 
2117  // If we didn't find the fold instruction, then we failed to collapse the
2118  // sequence.
2119  if (TheUser != FoldInst)
2120  return false;
2121 
2122  // Don't try to fold volatile loads. Target has to deal with alignment
2123  // constraints.
2124  if (LI->isVolatile())
2125  return false;
2126 
2127  // Figure out which vreg this is going into. If there is no assigned vreg yet
2128  // then there actually was no reference to it. Perhaps the load is referenced
2129  // by a dead instruction.
2130  unsigned LoadReg = getRegForValue(LI);
2131  if (!LoadReg)
2132  return false;
2133 
2134  // We can't fold if this vreg has no uses or more than one use. Multiple uses
2135  // may mean that the instruction got lowered to multiple MIs, or the use of
2136  // the loaded value ended up being multiple operands of the result.
2137  if (!MRI.hasOneUse(LoadReg))
2138  return false;
2139 
2140  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2141  MachineInstr *User = RI->getParent();
2142 
2143  // Set the insertion point properly. Folding the load can cause generation of
2144  // other random instructions (like sign extends) for addressing modes; make
2145  // sure they get inserted in a logical place before the new instruction.
2146  FuncInfo.InsertPt = User;
2147  FuncInfo.MBB = User->getParent();
2148 
2149  // Ask the target to try folding the load.
2150  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2151 }
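// ----- Editor's note: illustrative sketch, not part of FastISel.cpp. The loop
// ----- in tryToFoldLoad above walks a chain of single-use instructions toward
// ----- FoldInst, giving up after a small bound (and, in the real code, as soon
// ----- as the chain leaves the basic block). The node type and field names
// ----- below are hypothetical, used only to show the shape of that walk.
struct UseChainNode {
  UseChainNode *OnlyUser = nullptr;  // the single user, when HasOneUse is true
  bool HasOneUse = false;
};

static bool singleUseChainReaches(const UseChainNode *Start,
                                  const UseChainNode *Target,
                                  unsigned MaxSteps = 6) {
  const UseChainNode *Cur = Start;
  while (Cur != Target && MaxSteps-- > 0) {
    if (!Cur->HasOneUse)
      return false;                  // multiple (or zero) uses: cannot fold
    Cur = Cur->OnlyUser;             // follow the unique use edge
  }
  return Cur == Target;              // false if the bound ran out first
}
// ----- End editor's note ----------------------------------------------------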
2152 
2153 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2154  // Must be an add.
2155  if (!isa<AddOperator>(Add))
2156  return false;
2157  // Type size needs to match.
2158  if (DL.getTypeSizeInBits(GEP->getType()) !=
2159  DL.getTypeSizeInBits(Add->getType()))
2160  return false;
2161  // Must be in the same basic block.
2162  if (isa<Instruction>(Add) &&
2163  FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2164  return false;
2165  // Must have a constant operand.
2166  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2167 }
2168 
2169 MachineMemOperand *
2170 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2171  const Value *Ptr;
2172  Type *ValTy;
2173  unsigned Alignment;
2174  MachineMemOperand::Flags Flags;
2175  bool IsVolatile;
2176 
2177  if (const auto *LI = dyn_cast<LoadInst>(I)) {
2178  Alignment = LI->getAlignment();
2179  IsVolatile = LI->isVolatile();
2180  Flags = MachineMemOperand::MOLoad;
2181  Ptr = LI->getPointerOperand();
2182  ValTy = LI->getType();
2183  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2184  Alignment = SI->getAlignment();
2185  IsVolatile = SI->isVolatile();
2186  Flags = MachineMemOperand::MOStore;
2187  Ptr = SI->getPointerOperand();
2188  ValTy = SI->getValueOperand()->getType();
2189  } else
2190  return nullptr;
2191 
2192  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2193  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
2194  bool IsDereferenceable =
2195  I->getMetadata(LLVMContext::MD_dereferenceable);
2196  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2197 
2198  AAMDNodes AAInfo;
2199  I->getAAMetadata(AAInfo);
2200 
2201  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
2202  Alignment = DL.getABITypeAlignment(ValTy);
2203 
2204  unsigned Size = DL.getTypeStoreSize(ValTy);
2205 
2206  if (IsVolatile)
2207  Flags |= MachineMemOperand::MOVolatile;
2208  if (IsNonTemporal)
2209  Flags |= MachineMemOperand::MONonTemporal;
2210  if (IsDereferenceable)
2211  Flags |= MachineMemOperand::MODereferenceable;
2212  if (IsInvariant)
2213  Flags |= MachineMemOperand::MOInvariant;
2214 
2215  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2216  Alignment, AAInfo, Ranges);
2217 }
2218 
2219 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2220  // If both operands are the same, then try to optimize or fold the cmp.
2221  CmpInst::Predicate Predicate = CI->getPredicate();
2222  if (CI->getOperand(0) != CI->getOperand(1))
2223  return Predicate;
2224 
2225  switch (Predicate) {
2226  default: llvm_unreachable("Invalid predicate!");
2227  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2228  case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2229  case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2230  case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2231  case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2232  case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2233  case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2234  case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2235  case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2236  case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2237  case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2238  case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2239  case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2240  case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2241  case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2242  case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2243 
2244  case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2245  case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2246  case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2247  case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2248  case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2249  case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2250  case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2251  case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2252  case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2253  case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2254  }
2255 
2256  return Predicate;
2257 }
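// ----- Editor's note: illustrative sketch, not part of FastISel.cpp. When
// ----- both operands of a comparison are the same value V, only NaN can
// ----- distinguish the predicates, which is why the switch above collapses
// ----- e.g. FCMP_OEQ to FCMP_ORD and FCMP_UNE to FCMP_UNO. Standalone checks
// ----- (function names invented for illustration):
static bool sameOperandOEQ(double V) { return V == V; }  // false only for NaN
static bool sameOperandUNE(double V) { return V != V; }  // true only for NaN
// "V == V" is exactly the ordered test (FCMP_ORD) and "V != V" the unordered
// test (FCMP_UNO) on identical operands, matching the rewrites above.
// ----- End editor's note ----------------------------------------------------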
void setHasStackMap(bool s=true)
uint64_t CallInst * C
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:545
unsigned fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, unsigned Op2, bool Op2IsKill)
Emit a MachineInstr with three register operands and a result register in the given register class...
Definition: FastISel.cpp:1870
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
void setByValAlign(unsigned A)
const MachineInstrBuilder & add(const MachineOperand &MO) const
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:109
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:843
unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero...
Definition: FastISel.cpp:2020
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
bool hasLocalLinkage() const
Definition: GlobalValue.h:427
This instruction extracts a struct member or array element value from an aggregate value...
MachineConstantPool & MCP
Definition: FastISel.h:208
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1041
unsigned arg_size() const
Definition: CallSite.h:219
static const Value * getFNegArgument(const Value *BinOp)
bool hasDebugInfo() const
Returns true if valid debug info is present.
CallingConv::ID getCallingConv() const
Get the calling convention of the call.
Definition: CallSite.h:312
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
virtual unsigned fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:475
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
BasicBlock * getSuccessor(unsigned idx) const
Return the specified successor.
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
constexpr char IsVolatile[]
Key for Kernel::Arg::Metadata::mIsVolatile.
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
ImmutableCallSite * CS
Definition: FastISel.h:90
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:497
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:397
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:562
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
unsigned getReg() const
getReg - Returns the register number.
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:612
This class represents a function call, abstracting a target machine&#39;s calling convention.
This file contains the declarations for metadata subclasses.
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:298
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:253
gep_type_iterator gep_type_end(const User *GEP)
unsigned less or equal
Definition: InstrTypes.h:879
unsigned less than
Definition: InstrTypes.h:878
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:859
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2170
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:728
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:869
void addSuccessorWithoutProb(MachineBasicBlock *Succ)
Add Succ as a successor of this MachineBasicBlock.
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1721
BasicBlock * getSuccessor(unsigned i) const
arg_iterator arg_end()
Definition: Function.h:612
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
STATISTIC(NumFunctions, "Total number of functions")
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:862
F(f)
MachineModuleInfo & getMMI() const
SmallVector< unsigned, 4 > InRegs
Definition: FastISel.h:99
unsigned getCallFrameDestroyOpcode() const
An instruction for reading from memory.
Definition: Instructions.h:164
Hexagon Common GEP
bool CanLowerReturn
CanLowerReturn - true iff the function&#39;s return value can be lowered to registers.
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1734
CallingConv::ID getCallingConv() const
getCallingConv/setCallingConv - Get or set the calling convention of this function call...
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF)
Emit the floating-point constant +0.0 in a register using target- specific logic. ...
Definition: FastISel.h:482
void setPhysRegsDeadExcept(ArrayRef< unsigned > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
void computeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo &MMI)
Determine if any floating-point values are being passed to this variadic function, and set the MachineModuleInfo&#39;s usesVAFloatArgument flag if so.
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1715
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:130
static Constant * getNullValue(Type *Ty)
Constructor to create a &#39;0&#39; constant of arbitrary type.
Definition: Constants.cpp:207
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:252
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1381
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:1920
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
Definition: APFloat.h:1069
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:864
MachineFunction * MF
Definition: FastISel.h:205
DenseMap< const Value *, unsigned > LocalValueMap
Definition: FastISel.h:203
unsigned fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:1898
ArrayRef< unsigned > getIndices() const
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:238
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:863
unsigned fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:1964
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2219
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:217
A description of a memory reference used in the backend.
void setHasPatchPoint(bool s=true)
unsigned getNumArgOperands() const
Return the number of call arguments.
TargetLoweringBase::ArgListTy ArgListTy
Definition: FastISel.h:70
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
Shift and rotation operations.
Definition: ISDOpcodes.h:379
Class to represent struct types.
Definition: DerivedTypes.h:201
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
unsigned fastEmitInst_i(unsigned MachineInstrOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:1990
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2153
DenseMap< const Value *, unsigned > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
IterTy arg_end() const
Definition: CallSite.h:575
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
unsigned fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill)
Emit a MachineInstr with one register operand and a result register in the given register class...
Definition: FastISel.cpp:1825
MachineInstr * EmitStartPt
The top most instruction in the current block that is allowed for emitting local variables.
Definition: FastISel.h:227
Reg
All possible values of the reg field in the ModR/M byte.
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:860
This file contains the simple types necessary to represent the attributes associated with functions a...
InstrTy * getInstruction() const
Definition: CallSite.h:92
The memory access is dereferenceable (i.e., doesn&#39;t trap).
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
void setByValSize(unsigned S)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const TargetMachine & TM
Definition: FastISel.h:210
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:634
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1126
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1280
unsigned getSizeInBits() const
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
Context object for machine code objects.
Definition: MCContext.h:59
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
Class to represent function types.
Definition: DerivedTypes.h:103
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:98
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:1799
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:455
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr), and append generated machine instructions to the current block.
Definition: FastISel.cpp:1582
ArchType getArch() const
getArch - Get the parsed architecture type of this triple.
Definition: Triple.h:285
unsigned getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value...
Definition: FastISel.cpp:195
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
unsigned fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:1816
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:194
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
bool hasTrivialKill(const Value *V)
Test whether the given value has exactly one use.
Definition: FastISel.cpp:163
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
void setOrigAlign(unsigned A)
amdgpu Simplify well known AMD library false Value * Callee
MachineInstr * getLastLocalValue()
Return the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:234
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:85
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
Value * getOperand(unsigned i) const
Definition: User.h:154
Class to represent pointers.
Definition: DerivedTypes.h:467
unsigned getKillRegState(bool B)
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:498
unsigned lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:308
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:146
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:229
MCContext & getContext() const
void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes...
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
The memory access is volatile.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
Definition: DataLayout.cpp:702
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void getAAMetadata(AAMDNodes &N, bool Merge=false) const
Fills the AAMDNodes structure with AA metadata from this instruction.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:150
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
virtual ~FastISel()
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:54
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, ImmutableCallSite &Call)
Definition: FastISel.h:105
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Machine Value Type.
bool hasName() const
Definition: Value.h:251
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
Simple binary floating point operators.
Definition: ISDOpcodes.h:259
Conditional or Unconditional Branch instruction.
Value * getAddress() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:375
Value * getValue() const
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:96
This file contains the declarations for the subclasses of Constant, which represent the different fla...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:264
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
const MCPhysReg * ImplicitDefs
Definition: MCInstrDesc.h:173
size_t size() const
Definition: BasicBlock.h:262
MachineFrameInfo & MFI
Definition: FastISel.h:207
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1723
bool SkipTargetIndependentISel
Definition: FastISel.h:216
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:426
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:161
DILocalVariable * getVariable() const
Definition: IntrinsicInst.h:80
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We&#39;re checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2097
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:136
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:853
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned char TargetFlags=0)
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:734
const Triple & getTargetTriple() const
DIExpression * getExpression() const
Definition: IntrinsicInst.h:84
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:862
arg_iterator arg_begin()
Definition: Function.h:603
The memory access is non-temporal.
Class to represent integer types.
Definition: DerivedTypes.h:40
bool selectXRayCustomEvent(const CallInst *II)
Definition: FastISel.cpp:863
const TargetRegisterInfo & TRI
Definition: FastISel.h:214
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:870
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:194
Extended Value Type.
Definition: ValueTypes.h:34
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:558
bool selectFNeg(const User *I)
Emit an FNeg operation.
Definition: FastISel.cpp:1499
This class contains a discriminated union of information about pointers in memory operands...
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:868
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:95
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call...
Definition: FastISel.cpp:884
const TargetInstrInfo & TII
Definition: FastISel.h:212
MachineBasicBlock * MBB
MBB - The current block.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:472
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
signed greater than
Definition: InstrTypes.h:880
MachineInstr * LastLocalValue
The position of the last instruction for materializing constants for use in the current block...
Definition: FastISel.h:222
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:639
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge&#39;s probability, relative to other out-edges of the Src.
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:361
The memory access writes data.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:51
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:857
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:240
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1728
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1743
DenseMap< unsigned, unsigned > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
IterTy arg_begin() const
Definition: CallSite.h:571
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:867
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:63
Provides information about what library functions are available for the current target.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:27
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtains the branch weight and adds TrueMBB and FalseMBB to ...
Definition: FastISel.cpp:1480
const TargetLibraryInfo * LibInfo
Definition: FastISel.h:215
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:682
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:572
signed less than
Definition: InstrTypes.h:882
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:642
reg_iterator reg_begin(unsigned RegNo) const
unsigned fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
Emit a MachineInstr with two register operands and a result register in the given register class...
Definition: FastISel.cpp:1846
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:560
void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:319
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:225
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:175
void startNewBlock()
Set the current block to which generated machine instructions will be appended, and clear the local C...
Definition: FastISel.cpp:124
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
signed less or equal
Definition: InstrTypes.h:883
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1313
Target - Wrapper for Target specific information.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1738
SmallVector< unsigned, 16 > OutRegs
Definition: FastISel.h:97
const DataLayout & DL
Definition: FastISel.h:211
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:406
BranchProbabilityInfo * BPI
This file defines the FastISel class.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:241
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:445
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
bool use_empty(unsigned RegNo) const
use_empty - Return true if there are no instructions using the specified register.
bool isTailCall() const
DebugLoc DbgLoc
Definition: FastISel.h:209
bool selectCall(const User *Call)
Definition: FastISel.cpp:1080
Flags
Flags values. These may be or&#39;d together.
amdgpu Simplify well known AMD library false Value Value * Arg
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:139
The memory access reads data.
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:530
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:388
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:403
Function * getCalledFunction() const
Return the function called, or null if this is an indirect function invocation.
Representation of each machine instruction.
Definition: MachineInstr.h:59
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:927
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering...
Definition: FastISel.cpp:1717
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:725
bool selectExtractValue(const User *I)
Definition: FastISel.cpp:1542
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:284
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:362
MachineRegisterInfo & MRI
Definition: FastISel.h:206
bool hasOneUse(unsigned RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:513
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:121
unsigned greater or equal
Definition: InstrTypes.h:877
This represents the llvm.dbg.value instruction.
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:906
static bool isFNeg(const Value *V, bool IgnoreZeroSign=false)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
StringRef getName() const
Return a constant reference to the value&#39;s name.
Definition: Value.cpp:220
TargetOptions Options
Definition: TargetMachine.h:96
Establish a view to a call site for examination.
Definition: CallSite.h:713
static MachineOperand CreateImm(int64_t Val)
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
The memory access always returns the same value (or traps).
virtual unsigned fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:478
iterator end()
Definition: DenseMap.h:79
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:193
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:861
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool isUnconditional() const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block...
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:865
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:311
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const
Return the number of registers that this ValueType will eventually require.
Type * getType() const
Return the type of the instruction that generated this call site.
Definition: CallSite.h:264
const TargetLowering & TLI
Definition: FastISel.h:213
bool isReg() const
isReg - Tests if this is a MO_Register operand.
unsigned createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:1795
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:183
unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1752
unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:2006
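Illustrative use of the extract_subreg helper above: narrowing a 32-bit value to its low 16 bits by copying from a subregister. The subregister index is target-specific; X86::sub_16bit is shown only as an example.
  unsigned Lo16 = fastEmitInst_extractsubreg(MVT::i16, Reg32,
                                             /*Op0IsKill=*/true,
                                             X86::sub_16bit);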
MachineBasicBlock::iterator InsertPt
Definition: FastISel.h:312
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef())
void GetReturnInfo(Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags...
user_iterator user_begin()
Definition: Value.h:377
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1700
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual bool fastLowerArguments()
This method is called by target-independent code to do target-specific argument lowering.
Definition: FastISel.cpp:1713
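An assumed shape of a target override of this hook; the class name and the legality check are illustrative. The idea is to claim the formal arguments only when every one is trivially handled, and otherwise return false so the SelectionDAG argument lowering runs instead.
  bool MyTargetFastISel::fastLowerArguments() {
    // Only fast-path functions whose arguments are all plain i32 values.
    for (const Argument &Arg : FuncInfo.Fn->args())
      if (!Arg.getType()->isIntegerTy(32))
        return false;
    // ... copy the incoming physical registers into virtual registers and
    // record them in FuncInfo.ValueMap here ...
    return true;
  }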
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:856
LLVM Value Representation.
Definition: Value.h:73
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:866
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:386
FunctionType * getFunctionType() const
Definition: CallSite.h:320
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
static const Function * getParent(const Value *V)
#define DEBUG(X)
Definition: Debug.h:118
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:109
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
bool hasOneUse() const
Return true if there is exactly one user of this value.
Definition: Value.h:414
unsigned greater than
Definition: InstrTypes.h:876
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:49
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
virtual bool fastSelectInstruction(const Instruction *I)=0
This method is called by target-independent code when the normal FastISel process fails to select an ...
unsigned fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
Definition: FastISel.cpp:1945
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty...
Definition: Type.cpp:98
Conversion operators.
Definition: ISDOpcodes.h:442
const TerminatorInst * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:120
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:204
const Value * stripInBoundsConstantOffsets() const
Strip off pointer casts and all-constant inbounds GEPs.
Definition: Value.cpp:566
void setIsDebug(bool Val=true)
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:451
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:126
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:858
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition: Analysis.cpp:37
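A small sketch of the linear-index helper above, in the style of the aggregate (extractvalue/insertvalue) lowering that uses it; AggTy and the index path are assumptions for the example.
  // Flatten the index path {1} inside AggTy into a single linear index.
  unsigned Indices[] = {1};
  unsigned Linear = ComputeLinearIndex(AggTy, Indices,
                                       Indices + array_lengthof(Indices));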
void fastEmitBranch(MachineBasicBlock *MBB, const DebugLoc &DL)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor...
Definition: FastISel.cpp:1460
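A minimal sketch of how fastEmitBranch is driven when selecting an unconditional IR branch, mirroring the target-independent handling; BI is assumed to be the BranchInst being selected.
  if (BI->isUnconditional()) {
    MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
    // Emits nothing if TargetMBB is the fall-through successor.
    fastEmitBranch(TargetMBB, BI->getDebugLoc());
    return true;
  }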
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
This represents the llvm.dbg.declare instruction.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
Definition: ISDOpcodes.h:561
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
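A short sketch combining getIntegerVT with the getNumRegisters hook listed earlier: build an integer EVT of a given width and ask the target lowering how many registers it will occupy (for example, an i64 typically needs two registers on a 32-bit target). TLI is FastISel's TargetLowering member and I an instruction assumed to be in scope.
  EVT VT = EVT::getIntegerVT(I->getContext(), 64);
  unsigned NumRegs = TLI.getNumRegisters(I->getContext(), VT);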
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
static MachineOperand CreateFI(int Idx)
bool use_empty() const
Definition: Value.h:328
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:537
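A quick illustration of the documented behavior, e.g. when turning a multiplication by a power of two into a shift; the constant is arbitrary.
  // Log2_64(64) == 6; a zero input yields (unsigned)-1 per the comment above.
  uint64_t C = 64;
  if (isPowerOf2_64(C)) {
    unsigned ShiftAmt = Log2_64(C);
    // ... emit a shift by ShiftAmt instead of a multiply ...
  }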
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Type * getElementType() const
Definition: DerivedTypes.h:486
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
Definition: Attributes.cpp:868
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:855
signed greater or equal
Definition: InstrTypes.h:881
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
This class contains meta information specific to a module.
This file describes how to lower LLVM code to machine code.
const BasicBlock * getParent() const
Definition: Instruction.h:66
unsigned InitializeRegForValue(const Value *V)
gep_type_iterator gep_type_begin(const User *GEP)
std::pair< unsigned, bool > getRegForGEPIndex(const Value *V)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:338
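A hedged sketch of the usual call pattern for this wrapper when walking GEP indices; Idx is assumed to be the index Value being visited. The returned pair carries the register and whether this use kills it.
  std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
  unsigned IdxReg = Pair.first;
  bool IdxIsKill = Pair.second;
  if (!IdxReg)
    return false;  // the index could not be placed in a register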