FastISel.cpp (LLVM 7.0.0svn)
1 //===- FastISel.cpp - Implementation of the FastISel class ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the FastISel class.
11 //
12 // "Fast" instruction selection is designed to emit very poor code quickly.
13 // Also, it is not designed to be able to do much lowering, so most illegal
14 // types (e.g. i64 on 32-bit targets) and operations are not supported. It is
15 // also not intended to be able to do much optimization, except in a few cases
16 // where doing optimizations reduces overall compile time. For example, folding
17 // constants into immediate fields is often done, because it's cheap and it
18 // reduces the number of instructions later phases have to examine.
19 //
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
24 //
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
32 // in -O0 compiles.
33 //
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
39 //
40 //===----------------------------------------------------------------------===//
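//
// As an illustration only (a simplified sketch, not the actual driver code;
// the loop shape and the name InstructionsToSelect are placeholders), the
// per-instruction fallback described above is driven roughly like this from
// the instruction selector:
//
//   FastISel *FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
//   for (const Instruction &I : InstructionsToSelect) {
//     if (FastIS && FastIS->selectInstruction(&I))
//       continue;                // "fast" selection succeeded
//     // ...otherwise hand this instruction over to the SelectionDAG
//     // selector, which handles anything FastISel gave up on.
//   }
//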
41 
42 #include "llvm/CodeGen/FastISel.h"
43 #include "llvm/ADT/APFloat.h"
44 #include "llvm/ADT/APSInt.h"
45 #include "llvm/ADT/DenseMap.h"
46 #include "llvm/ADT/Optional.h"
47 #include "llvm/ADT/SmallPtrSet.h"
48 #include "llvm/ADT/SmallString.h"
49 #include "llvm/ADT/SmallVector.h"
50 #include "llvm/ADT/Statistic.h"
53 #include "llvm/CodeGen/Analysis.h"
65 #include "llvm/CodeGen/StackMaps.h"
70 #include "llvm/IR/Argument.h"
71 #include "llvm/IR/Attributes.h"
72 #include "llvm/IR/BasicBlock.h"
73 #include "llvm/IR/CallSite.h"
74 #include "llvm/IR/CallingConv.h"
75 #include "llvm/IR/Constant.h"
76 #include "llvm/IR/Constants.h"
77 #include "llvm/IR/DataLayout.h"
78 #include "llvm/IR/DebugInfo.h"
79 #include "llvm/IR/DebugLoc.h"
80 #include "llvm/IR/DerivedTypes.h"
81 #include "llvm/IR/Function.h"
83 #include "llvm/IR/GlobalValue.h"
84 #include "llvm/IR/InlineAsm.h"
85 #include "llvm/IR/InstrTypes.h"
86 #include "llvm/IR/Instruction.h"
87 #include "llvm/IR/Instructions.h"
88 #include "llvm/IR/IntrinsicInst.h"
89 #include "llvm/IR/LLVMContext.h"
90 #include "llvm/IR/Mangler.h"
91 #include "llvm/IR/Metadata.h"
92 #include "llvm/IR/Operator.h"
93 #include "llvm/IR/Type.h"
94 #include "llvm/IR/User.h"
95 #include "llvm/IR/Value.h"
96 #include "llvm/MC/MCContext.h"
97 #include "llvm/MC/MCInstrDesc.h"
98 #include "llvm/MC/MCRegisterInfo.h"
99 #include "llvm/Support/Casting.h"
100 #include "llvm/Support/Debug.h"
102 #include "llvm/Support/MathExtras.h"
106 #include <algorithm>
107 #include <cassert>
108 #include <cstdint>
109 #include <iterator>
110 #include <utility>
111 
112 using namespace llvm;
113 
114 #define DEBUG_TYPE "isel"
115 
116 STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
117  "target-independent selector");
118 STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
119  "target-specific selector");
120 STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
121 
122 /// Set the current block to which generated machine instructions will be
123 /// appended, and clear the local CSE map.
126 
127  // Instructions are appended to FuncInfo.MBB. If the basic block already
128  // contains labels or copies, use the last instruction as the last local
129  // value.
130  EmitStartPt = nullptr;
131  if (!FuncInfo.MBB->empty())
134 }
135 
138  // Fallback to SDISel argument lowering code to deal with sret pointer
139  // parameter.
140  return false;
141 
142  if (!fastLowerArguments())
143  return false;
144 
145  // Enter arguments into ValueMap for uses in non-entry BBs.
147  E = FuncInfo.Fn->arg_end();
148  I != E; ++I) {
150  assert(VI != LocalValueMap.end() && "Missed an argument?");
151  FuncInfo.ValueMap[&*I] = VI->second;
152  }
153  return true;
154 }
155 
156 void FastISel::flushLocalValueMap() {
160  SavedInsertPt = FuncInfo.InsertPt;
161 }
162 
164  // Don't consider constants or arguments to have trivial kills.
165  const Instruction *I = dyn_cast<Instruction>(V);
166  if (!I)
167  return false;
168 
169  // No-op casts are trivially coalesced by fast-isel.
170  if (const auto *Cast = dyn_cast<CastInst>(I))
171  if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
172  return false;
173 
174  // Even if the value has only one use in the LLVM IR, it is possible that
175  // FastISel might fold the use into another instruction so that there is
176  // more than one use at the Machine Instruction level.
177  unsigned Reg = lookUpRegForValue(V);
178  if (Reg && !MRI.use_empty(Reg))
179  return false;
180 
181  // GEPs with all zero indices are trivially coalesced by fast-isel.
182  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
183  if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
184  return false;
185 
186  // Only instructions with a single use in the same basic block are considered
187  // to have trivial kills.
188  return I->hasOneUse() &&
189  !(I->getOpcode() == Instruction::BitCast ||
190  I->getOpcode() == Instruction::PtrToInt ||
191  I->getOpcode() == Instruction::IntToPtr) &&
192  cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
193 }
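
// For illustration only (not from the original source): given IR such as
//
//   %a = add i32 %x, %y
//   %b = mul i32 %a, %z      ; the only use of %a, in the same basic block
//
// %a has a trivial kill, so its register can be marked killed at the mul.
// A value with more than one use, or whose use is in another block, is not
// considered trivially killed; no-op casts and all-zero GEPs get the extra
// checks above because fast-isel coalesces them with their operands.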
194 
195 unsigned FastISel::getRegForValue(const Value *V) {
196  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
197  // Don't handle non-simple values in FastISel.
198  if (!RealVT.isSimple())
199  return 0;
200 
201  // Ignore illegal types. We must do this before looking up the value
202  // in ValueMap because Arguments are given virtual registers regardless
203  // of whether FastISel can handle them.
204  MVT VT = RealVT.getSimpleVT();
205  if (!TLI.isTypeLegal(VT)) {
206  // Handle integer promotions, though, because they're common and easy.
207  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
208  VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
209  else
210  return 0;
211  }
212 
213  // Look up the value to see if we already have a register for it.
214  unsigned Reg = lookUpRegForValue(V);
215  if (Reg)
216  return Reg;
217 
218  // In bottom-up mode, just create the virtual register which will be used
219  // to hold the value. It will be materialized later.
220  if (isa<Instruction>(V) &&
221  (!isa<AllocaInst>(V) ||
222  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
224 
225  SavePoint SaveInsertPt = enterLocalValueArea();
226 
227  // Materialize the value in a register. Emit any instructions in the
228  // local value area.
229  Reg = materializeRegForValue(V, VT);
230 
231  leaveLocalValueArea(SaveInsertPt);
232 
233  return Reg;
234 }
235 
236 unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
237  unsigned Reg = 0;
238  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
239  if (CI->getValue().getActiveBits() <= 64)
240  Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
241  } else if (isa<AllocaInst>(V))
242  Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
243  else if (isa<ConstantPointerNull>(V))
244  // Translate this as an integer zero so that it can be
245  // local-CSE'd with actual integer zeros.
246  Reg = getRegForValue(
248  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
249  if (CF->isNullValue())
250  Reg = fastMaterializeFloatZero(CF);
251  else
252  // Try to emit the constant directly.
253  Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
254 
255  if (!Reg) {
256  // Try to emit the constant by using an integer constant with a cast.
257  const APFloat &Flt = CF->getValueAPF();
258  EVT IntVT = TLI.getPointerTy(DL);
259  uint32_t IntBitWidth = IntVT.getSizeInBits();
260  APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
261  bool isExact;
262  (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
263  if (isExact) {
264  unsigned IntegerReg =
266  if (IntegerReg != 0)
267  Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
268  /*Kill=*/false);
269  }
270  }
271  } else if (const auto *Op = dyn_cast<Operator>(V)) {
272  if (!selectOperator(Op, Op->getOpcode()))
273  if (!isa<Instruction>(Op) ||
274  !fastSelectInstruction(cast<Instruction>(Op)))
275  return 0;
276  Reg = lookUpRegForValue(Op);
277  } else if (isa<UndefValue>(V)) {
280  TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
281  }
282  return Reg;
283 }
284 
285 /// Helper for getRegForValue. This function is called when the value isn't
286 /// already available in a register and must be materialized with new
287 /// instructions.
288 unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
289  unsigned Reg = 0;
290  // Give the target-specific code a try first.
291  if (isa<Constant>(V))
292  Reg = fastMaterializeConstant(cast<Constant>(V));
293 
294  // If target-specific code couldn't or didn't want to handle the value, then
295  // give target-independent code a try.
296  if (!Reg)
297  Reg = materializeConstant(V, VT);
298 
299  // Don't cache constant materializations in the general ValueMap.
300  // To do so would require tracking what uses they dominate.
301  if (Reg) {
302  LocalValueMap[V] = Reg;
304  }
305  return Reg;
306 }
307 
308 unsigned FastISel::lookUpRegForValue(const Value *V) {
309  // Look up the value to see if we already have a register for it. We
310  // cache values defined by Instructions across blocks, and other values
311  // only locally. This is because Instructions already have the SSA
312  // def-dominates-use requirement enforced.
314  if (I != FuncInfo.ValueMap.end())
315  return I->second;
316  return LocalValueMap[V];
317 }
318 
319 void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
320  if (!isa<Instruction>(I)) {
321  LocalValueMap[I] = Reg;
322  return;
323  }
324 
325  unsigned &AssignedReg = FuncInfo.ValueMap[I];
326  if (AssignedReg == 0)
327  // Use the new register.
328  AssignedReg = Reg;
329  else if (Reg != AssignedReg) {
330  // Arrange for uses of AssignedReg to be replaced by uses of Reg.
331  for (unsigned i = 0; i < NumRegs; i++)
332  FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
333 
334  AssignedReg = Reg;
335  }
336 }
337 
338 std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
339  unsigned IdxN = getRegForValue(Idx);
340  if (IdxN == 0)
341  // Unhandled operand. Halt "fast" selection and bail.
342  return std::pair<unsigned, bool>(0, false);
343 
344  bool IdxNIsKill = hasTrivialKill(Idx);
345 
346  // If the index is smaller or larger than intptr_t, truncate or extend it.
347  MVT PtrVT = TLI.getPointerTy(DL);
348  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
349  if (IdxVT.bitsLT(PtrVT)) {
350  IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
351  IdxNIsKill);
352  IdxNIsKill = true;
353  } else if (IdxVT.bitsGT(PtrVT)) {
354  IdxN =
355  fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
356  IdxNIsKill = true;
357  }
358  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
359 }
360 
362  if (getLastLocalValue()) {
364  FuncInfo.MBB = FuncInfo.InsertPt->getParent();
365  ++FuncInfo.InsertPt;
366  } else
368 
369  // Now skip past any EH_LABELs, which must remain at the beginning.
370  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
371  FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
372  ++FuncInfo.InsertPt;
373 }
374 
377  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
378  "Invalid iterator!");
379  while (I != E) {
380  MachineInstr *Dead = &*I;
381  ++I;
382  Dead->eraseFromParent();
383  ++NumFastIselDead;
384  }
386 }
387 
390  DebugLoc OldDL = DbgLoc;
392  DbgLoc = DebugLoc();
393  SavePoint SP = {OldInsertPt, OldDL};
394  return SP;
395 }
396 
398  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
399  LastLocalValue = &*std::prev(FuncInfo.InsertPt);
400 
401  // Restore the previous insert position.
402  FuncInfo.InsertPt = OldInsertPt.InsertPt;
403  DbgLoc = OldInsertPt.DL;
404 }
405 
406 bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
407  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
408  if (VT == MVT::Other || !VT.isSimple())
409  // Unhandled type. Halt "fast" selection and bail.
410  return false;
411 
412  // We only handle legal types. For example, on x86-32 the instruction
413  // selector contains all of the 64-bit instructions from x86-64,
414  // under the assumption that i64 won't be used if the target doesn't
415  // support it.
416  if (!TLI.isTypeLegal(VT)) {
417  // MVT::i1 is special. Allow AND, OR, or XOR because they
418  // don't require additional zeroing, which makes them easy.
419  if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
420  ISDOpcode == ISD::XOR))
421  VT = TLI.getTypeToTransformTo(I->getContext(), VT);
422  else
423  return false;
424  }
425 
426  // Check if the first operand is a constant, and handle it as "ri". At -O0,
427  // we don't have anything that canonicalizes operand order.
428  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
429  if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
430  unsigned Op1 = getRegForValue(I->getOperand(1));
431  if (!Op1)
432  return false;
433  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
434 
435  unsigned ResultReg =
436  fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
437  CI->getZExtValue(), VT.getSimpleVT());
438  if (!ResultReg)
439  return false;
440 
441  // We successfully emitted code for the given LLVM Instruction.
442  updateValueMap(I, ResultReg);
443  return true;
444  }
445 
446  unsigned Op0 = getRegForValue(I->getOperand(0));
447  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
448  return false;
449  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
450 
451  // Check if the second operand is a constant and handle it appropriately.
452  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
453  uint64_t Imm = CI->getSExtValue();
454 
455  // Transform "sdiv exact X, 8" -> "sra X, 3".
456  if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
457  cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
458  Imm = Log2_64(Imm);
459  ISDOpcode = ISD::SRA;
460  }
461 
462  // Transform "urem x, pow2" -> "and x, pow2-1".
463  if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
464  isPowerOf2_64(Imm)) {
465  --Imm;
466  ISDOpcode = ISD::AND;
467  }
468 
469  unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
470  Op0IsKill, Imm, VT.getSimpleVT());
471  if (!ResultReg)
472  return false;
473 
474  // We successfully emitted code for the given LLVM Instruction.
475  updateValueMap(I, ResultReg);
476  return true;
477  }
478 
479  unsigned Op1 = getRegForValue(I->getOperand(1));
480  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
481  return false;
482  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
483 
484  // Now we have both operands in registers. Emit the instruction.
485  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
486  ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
487  if (!ResultReg)
488  // Target-specific code wasn't able to find a machine opcode for
489  // the given ISD opcode and type. Halt "fast" selection and bail.
490  return false;
491 
492  // We successfully emitted code for the given LLVM Instruction.
493  updateValueMap(I, ResultReg);
494  return true;
495 }
496 
498  unsigned N = getRegForValue(I->getOperand(0));
499  if (!N) // Unhandled operand. Halt "fast" selection and bail.
500  return false;
501  bool NIsKill = hasTrivialKill(I->getOperand(0));
502 
503  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
504  // into a single N = N + TotalOffset.
505  uint64_t TotalOffs = 0;
506  // FIXME: What's a good SWAG number for MaxOffs?
507  uint64_t MaxOffs = 2048;
508  MVT VT = TLI.getPointerTy(DL);
509  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
510  GTI != E; ++GTI) {
511  const Value *Idx = GTI.getOperand();
512  if (StructType *StTy = GTI.getStructTypeOrNull()) {
513  uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
514  if (Field) {
515  // N = N + Offset
516  TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
517  if (TotalOffs >= MaxOffs) {
518  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
519  if (!N) // Unhandled operand. Halt "fast" selection and bail.
520  return false;
521  NIsKill = true;
522  TotalOffs = 0;
523  }
524  }
525  } else {
526  Type *Ty = GTI.getIndexedType();
527 
528  // If this is a constant subscript, handle it quickly.
529  if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
530  if (CI->isZero())
531  continue;
532  // N = N + Offset
533  uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
534  TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
535  if (TotalOffs >= MaxOffs) {
536  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
537  if (!N) // Unhandled operand. Halt "fast" selection and bail.
538  return false;
539  NIsKill = true;
540  TotalOffs = 0;
541  }
542  continue;
543  }
544  if (TotalOffs) {
545  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
546  if (!N) // Unhandled operand. Halt "fast" selection and bail.
547  return false;
548  NIsKill = true;
549  TotalOffs = 0;
550  }
551 
552  // N = N + Idx * ElementSize;
553  uint64_t ElementSize = DL.getTypeAllocSize(Ty);
554  std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
555  unsigned IdxN = Pair.first;
556  bool IdxNIsKill = Pair.second;
557  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
558  return false;
559 
560  if (ElementSize != 1) {
561  IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
562  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
563  return false;
564  IdxNIsKill = true;
565  }
566  N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
567  if (!N) // Unhandled operand. Halt "fast" selection and bail.
568  return false;
569  }
570  }
571  if (TotalOffs) {
572  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
573  if (!N) // Unhandled operand. Halt "fast" selection and bail.
574  return false;
575  }
576 
577  // We successfully emitted code for the given LLVM Instruction.
578  updateValueMap(I, N);
579  return true;
580 }
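
// For illustration only (not from the original source): a GEP such as
//
//   %p = getelementptr { i32, [4 x i64] }, { i32, [4 x i64] }* %b, i32 0, i32 1, i64 %i
//
// is lowered by the loop above into roughly
//
//   N   = <reg for %b>
//   N   = ADD N, 8            ; accumulated constant offset of field 1
//   Idx = SIGN_EXTEND/TRUNCATE %i to the pointer width
//   Idx = MUL Idx, 8          ; element size of i64
//   N   = ADD N, Idx
//
// with constant offsets accumulated in TotalOffs and only flushed once they
// exceed MaxOffs or a variable index needs the running value.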
581 
582 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
583  const CallInst *CI, unsigned StartIdx) {
584  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
585  Value *Val = CI->getArgOperand(i);
586  // Check for constants and encode them with a StackMaps::ConstantOp prefix.
587  if (const auto *C = dyn_cast<ConstantInt>(Val)) {
588  Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
589  Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
590  } else if (isa<ConstantPointerNull>(Val)) {
591  Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
593  } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
594  // Values coming from a stack location also require a special encoding,
595  // but that is added later on by the target specific frame index
596  // elimination implementation.
597  auto SI = FuncInfo.StaticAllocaMap.find(AI);
598  if (SI != FuncInfo.StaticAllocaMap.end())
599  Ops.push_back(MachineOperand::CreateFI(SI->second));
600  else
601  return false;
602  } else {
603  unsigned Reg = getRegForValue(Val);
604  if (!Reg)
605  return false;
606  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
607  }
608  }
609  return true;
610 }
611 
613  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
614  // [live variables...])
616  "Stackmap cannot return a value.");
617 
618  // The stackmap intrinsic only records the live variables (the arguments
619  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
620  // intrinsic, this won't be lowered to a function call. This means we don't
621  // have to worry about calling conventions and target-specific lowering code.
622  // Instead we perform the call lowering right here.
623  //
624  // CALLSEQ_START(0, 0...)
625  // STACKMAP(id, nbytes, ...)
626  // CALLSEQ_END(0, 0)
627  //
629 
630  // Add the <id> and <numBytes> constants.
631  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
632  "Expected a constant integer.");
633  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
634  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
635 
636  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
637  "Expected a constant integer.");
638  const auto *NumBytes =
639  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
640  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
641 
642  // Push live variables for the stack map (skipping the first two arguments
643  // <id> and <numBytes>).
644  if (!addStackMapLiveVars(Ops, I, 2))
645  return false;
646 
647  // We are not adding any register mask info here, because the stackmap doesn't
648  // clobber anything.
649 
650  // Add scratch registers as implicit def and early clobber.
652  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
653  for (unsigned i = 0; ScratchRegs[i]; ++i)
654  Ops.push_back(MachineOperand::CreateReg(
655  ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
656  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
657 
658  // Issue CALLSEQ_START
659  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
660  auto Builder =
661  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
662  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
663  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
664  Builder.addImm(0);
665 
666  // Issue STACKMAP.
668  TII.get(TargetOpcode::STACKMAP));
669  for (auto const &MO : Ops)
670  MIB.add(MO);
671 
672  // Issue CALLSEQ_END
673  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
674  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
675  .addImm(0)
676  .addImm(0);
677 
678  // Inform the Frame Information that we have a stackmap in this function.
680 
681  return true;
682 }
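
// For illustration only (not from the original source): a call such as
//
//   call void @llvm.experimental.stackmap(i64 42, i32 8, i32 %x, i64* %p)
//
// is lowered by selectStackmap into approximately
//
//   CALLSEQ_START 0, 0
//   STACKMAP 42, 8, <operands for %x and %p>, <scratch regs: implicit-def,
//            early-clobber>
//   CALLSEQ_END 0, 0
//
// where constant live variables are encoded with a StackMaps::ConstantOp
// prefix and static allocas become frame-index operands.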
683 
684 /// \brief Lower an argument list according to the target calling convention.
685 ///
686 /// This is a helper for lowering intrinsics that follow a target calling
687 /// convention or require stack pointer adjustment. Only a subset of the
688 /// intrinsic's operands need to participate in the calling convention.
689 bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
690  unsigned NumArgs, const Value *Callee,
691  bool ForceRetVoidTy, CallLoweringInfo &CLI) {
692  ArgListTy Args;
693  Args.reserve(NumArgs);
694 
695  // Populate the argument list.
696  ImmutableCallSite CS(CI);
697  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
698  Value *V = CI->getOperand(ArgI);
699 
700  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
701 
702  ArgListEntry Entry;
703  Entry.Val = V;
704  Entry.Ty = V->getType();
705  Entry.setAttributes(&CS, ArgIdx);
706  Args.push_back(Entry);
707  }
708 
709  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
710  : CI->getType();
711  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
712 
713  return lowerCallTo(CLI);
714 }
715 
717  const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
718  StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
719  SmallString<32> MangledName;
720  Mangler::getNameWithPrefix(MangledName, Target, DL);
721  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
722  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
723 }
724 
726  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
727  // i32 <numBytes>,
728  // i8* <target>,
729  // i32 <numArgs>,
730  // [Args...],
731  // [live variables...])
733  bool IsAnyRegCC = CC == CallingConv::AnyReg;
734  bool HasDef = !I->getType()->isVoidTy();
736 
737  // Get the real number of arguments participating in the call <numArgs>
738  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
739  "Expected a constant integer.");
740  const auto *NumArgsVal =
741  cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
742  unsigned NumArgs = NumArgsVal->getZExtValue();
743 
744  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
745  // This includes all meta-operands up to but not including CC.
746  unsigned NumMetaOpers = PatchPointOpers::CCPos;
747  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
748  "Not enough arguments provided to the patchpoint intrinsic");
749 
750  // For AnyRegCC the arguments are lowered later on manually.
751  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
752  CallLoweringInfo CLI;
753  CLI.setIsPatchPoint();
754  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
755  return false;
756 
757  assert(CLI.Call && "No call instruction specified.");
758 
760 
761  // Add an explicit result reg if we use the anyreg calling convention.
762  if (IsAnyRegCC && HasDef) {
763  assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
765  CLI.NumResultRegs = 1;
766  Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
767  }
768 
769  // Add the <id> and <numBytes> constants.
770  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
771  "Expected a constant integer.");
772  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
773  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
774 
775  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
776  "Expected a constant integer.");
777  const auto *NumBytes =
778  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
779  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
780 
781  // Add the call target.
782  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
783  uint64_t CalleeConstAddr =
784  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
785  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
786  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
787  if (C->getOpcode() == Instruction::IntToPtr) {
788  uint64_t CalleeConstAddr =
789  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
790  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
791  } else
792  llvm_unreachable("Unsupported ConstantExpr.");
793  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
795  } else if (isa<ConstantPointerNull>(Callee))
797  else
798  llvm_unreachable("Unsupported callee address.");
799 
800  // Adjust <numArgs> to account for any arguments that have been passed on
801  // the stack instead.
802  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
803  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
804 
805  // Add the calling convention
806  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
807 
808  // Add the arguments we omitted previously. The register allocator should
809  // place these in any free register.
810  if (IsAnyRegCC) {
811  for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
812  unsigned Reg = getRegForValue(I->getArgOperand(i));
813  if (!Reg)
814  return false;
815  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
816  }
817  }
818 
819  // Push the arguments from the call instruction.
820  for (auto Reg : CLI.OutRegs)
821  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
822 
823  // Push live variables for the stack map.
824  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
825  return false;
826 
827  // Push the register mask info.
830 
831  // Add scratch registers as implicit def and early clobber.
832  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
833  for (unsigned i = 0; ScratchRegs[i]; ++i)
835  ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
836  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
837 
838  // Add implicit defs (return values).
839  for (auto Reg : CLI.InRegs)
840  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
841  /*IsImpl=*/true));
842 
843  // Insert the patchpoint instruction before the call generated by the target.
845  TII.get(TargetOpcode::PATCHPOINT));
846 
847  for (auto &MO : Ops)
848  MIB.add(MO);
849 
850  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
851 
852  // Delete the original call instruction.
853  CLI.Call->eraseFromParent();
854 
855  // Inform the Frame Information that we have a patchpoint in this function.
857 
858  if (CLI.NumResultRegs)
860  return true;
861 }
862 
864  const auto &Triple = TM.getTargetTriple();
866  return true; // don't do anything to this instruction.
869  /*IsDef=*/false));
871  /*IsDef=*/false));
872  MachineInstrBuilder MIB =
874  TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
875  for (auto &MO : Ops)
876  MIB.add(MO);
877 
878  // Insert the Patchable Event Call instruction, which gets lowered properly.
879  return true;
880 }
881 
882 
883 /// Returns an AttributeList representing the attributes applied to the return
884 /// value of the given call.
887  if (CLI.RetSExt)
888  Attrs.push_back(Attribute::SExt);
889  if (CLI.RetZExt)
890  Attrs.push_back(Attribute::ZExt);
891  if (CLI.IsInReg)
892  Attrs.push_back(Attribute::InReg);
893 
895  Attrs);
896 }
897 
898 bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
899  unsigned NumArgs) {
900  MCContext &Ctx = MF->getContext();
901  SmallString<32> MangledName;
902  Mangler::getNameWithPrefix(MangledName, SymName, DL);
903  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
904  return lowerCallTo(CI, Sym, NumArgs);
905 }
906 
908  unsigned NumArgs) {
909  ImmutableCallSite CS(CI);
910 
911  FunctionType *FTy = CS.getFunctionType();
912  Type *RetTy = CS.getType();
913 
914  ArgListTy Args;
915  Args.reserve(NumArgs);
916 
917  // Populate the argument list.
918  // Attributes for args start at offset 1, after the return attribute.
919  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
920  Value *V = CI->getOperand(ArgI);
921 
922  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
923 
924  ArgListEntry Entry;
925  Entry.Val = V;
926  Entry.Ty = V->getType();
927  Entry.setAttributes(&CS, ArgI);
928  Args.push_back(Entry);
929  }
931 
932  CallLoweringInfo CLI;
933  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
934 
935  return lowerCallTo(CLI);
936 }
937 
939  // Handle the incoming return values from the call.
940  CLI.clearIns();
941  SmallVector<EVT, 4> RetTys;
942  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
943 
945  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
946 
947  bool CanLowerReturn = TLI.CanLowerReturn(
948  CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
949 
950  // FIXME: sret demotion isn't supported yet - bail out.
951  if (!CanLowerReturn)
952  return false;
953 
954  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
955  EVT VT = RetTys[I];
956  MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
957  unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
958  for (unsigned i = 0; i != NumRegs; ++i) {
959  ISD::InputArg MyFlags;
960  MyFlags.VT = RegisterVT;
961  MyFlags.ArgVT = VT;
962  MyFlags.Used = CLI.IsReturnValueUsed;
963  if (CLI.RetSExt)
964  MyFlags.Flags.setSExt();
965  if (CLI.RetZExt)
966  MyFlags.Flags.setZExt();
967  if (CLI.IsInReg)
968  MyFlags.Flags.setInReg();
969  CLI.Ins.push_back(MyFlags);
970  }
971  }
972 
973  // Handle all of the outgoing arguments.
974  CLI.clearOuts();
975  for (auto &Arg : CLI.getArgs()) {
976  Type *FinalType = Arg.Ty;
977  if (Arg.IsByVal)
978  FinalType = cast<PointerType>(Arg.Ty)->getElementType();
979  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
980  FinalType, CLI.CallConv, CLI.IsVarArg);
981 
982  ISD::ArgFlagsTy Flags;
983  if (Arg.IsZExt)
984  Flags.setZExt();
985  if (Arg.IsSExt)
986  Flags.setSExt();
987  if (Arg.IsInReg)
988  Flags.setInReg();
989  if (Arg.IsSRet)
990  Flags.setSRet();
991  if (Arg.IsSwiftSelf)
992  Flags.setSwiftSelf();
993  if (Arg.IsSwiftError)
994  Flags.setSwiftError();
995  if (Arg.IsByVal)
996  Flags.setByVal();
997  if (Arg.IsInAlloca) {
998  Flags.setInAlloca();
999  // Set the byval flag for CCAssignFn callbacks that don't know about
1000  // inalloca. This way we can know how many bytes we should've allocated
1001  // and how many bytes a callee cleanup function will pop. If we port
1002  // inalloca to more targets, we'll have to add custom inalloca handling in
1003  // the various CC lowering callbacks.
1004  Flags.setByVal();
1005  }
1006  if (Arg.IsByVal || Arg.IsInAlloca) {
1007  PointerType *Ty = cast<PointerType>(Arg.Ty);
1008  Type *ElementTy = Ty->getElementType();
1009  unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
1010  // For ByVal, alignment should come from the frontend. The backend will
1011  // guess if this info is not there, but there are cases it cannot get right.
1012  unsigned FrameAlign = Arg.Alignment;
1013  if (!FrameAlign)
1014  FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
1015  Flags.setByValSize(FrameSize);
1016  Flags.setByValAlign(FrameAlign);
1017  }
1018  if (Arg.IsNest)
1019  Flags.setNest();
1020  if (NeedsRegBlock)
1021  Flags.setInConsecutiveRegs();
1022  unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
1023  Flags.setOrigAlign(OriginalAlignment);
1024 
1025  CLI.OutVals.push_back(Arg.Val);
1026  CLI.OutFlags.push_back(Flags);
1027  }
1028 
1029  if (!fastLowerCall(CLI))
1030  return false;
1031 
1032  // Set all unused physreg defs as dead.
1033  assert(CLI.Call && "No call instruction specified.");
1034  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
1035 
1036  if (CLI.NumResultRegs && CLI.CS)
1038 
1039  return true;
1040 }
1041 
1043  ImmutableCallSite CS(CI);
1044 
1045  FunctionType *FuncTy = CS.getFunctionType();
1046  Type *RetTy = CS.getType();
1047 
1048  ArgListTy Args;
1049  ArgListEntry Entry;
1050  Args.reserve(CS.arg_size());
1051 
1052  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1053  i != e; ++i) {
1054  Value *V = *i;
1055 
1056  // Skip empty types
1057  if (V->getType()->isEmptyTy())
1058  continue;
1059 
1060  Entry.Val = V;
1061  Entry.Ty = V->getType();
1062 
1063  // Skip the first return-type Attribute to get to params.
1064  Entry.setAttributes(&CS, i - CS.arg_begin());
1065  Args.push_back(Entry);
1066  }
1067 
1068  // Check if target-independent constraints permit a tail call here.
1069  // Target-dependent constraints are checked within fastLowerCall.
1070  bool IsTailCall = CI->isTailCall();
1071  if (IsTailCall && !isInTailCallPosition(CS, TM))
1072  IsTailCall = false;
1073 
1074  CallLoweringInfo CLI;
1075  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
1076  .setTailCall(IsTailCall);
1077 
1078  return lowerCallTo(CLI);
1079 }
1080 
1082  const CallInst *Call = cast<CallInst>(I);
1083 
1084  // Handle simple inline asms.
1085  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
1086  // If the inline asm has side effects, then make sure that no local value
1087  // lives across by flushing the local value map.
1088  if (IA->hasSideEffects())
1089  flushLocalValueMap();
1090 
1091  // Don't attempt to handle constraints.
1092  if (!IA->getConstraintString().empty())
1093  return false;
1094 
1095  unsigned ExtraInfo = 0;
1096  if (IA->hasSideEffects())
1097  ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1098  if (IA->isAlignStack())
1099  ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1100 
1103  .addExternalSymbol(IA->getAsmString().c_str())
1104  .addImm(ExtraInfo);
1105  return true;
1106  }
1107 
1108  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
1109  computeUsesVAFloatArgument(*Call, MMI);
1110 
1111  // Handle intrinsic function calls.
1112  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1113  return selectIntrinsicCall(II);
1114 
1115  // Usually it does not make sense to initialize a value, make an
1116  // unrelated function call, and then use the value, because
1117  // the value tends to be spilled on the stack. So we move the pointer
1118  // to the last local value to the beginning of the block, so that
1119  // all the values which have already been materialized
1120  // appear after the call. It also makes sense to skip intrinsics
1121  // since they tend to be inlined.
1122  flushLocalValueMap();
1123 
1124  return lowerCall(Call);
1125 }
1126 
1128  switch (II->getIntrinsicID()) {
1129  default:
1130  break;
1131  // At -O0 we don't care about the lifetime intrinsics.
1132  case Intrinsic::lifetime_start:
1133  case Intrinsic::lifetime_end:
1134  // The donothing intrinsic does, well, nothing.
1135  case Intrinsic::donothing:
1136  // Neither does the sideeffect intrinsic.
1137  case Intrinsic::sideeffect:
1138  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1139  case Intrinsic::assume:
1140  return true;
1141  case Intrinsic::dbg_declare: {
1142  const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1143  assert(DI->getVariable() && "Missing variable");
1144  if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1145  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1146  return true;
1147  }
1148 
1149  const Value *Address = DI->getAddress();
1150  if (!Address || isa<UndefValue>(Address)) {
1151  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1152  return true;
1153  }
1154 
1155  // Byval arguments with frame indices were already handled after argument
1156  // lowering and before isel.
1157  const auto *Arg =
1159  if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
1160  return true;
1161 
1163  if (unsigned Reg = lookUpRegForValue(Address))
1164  Op = MachineOperand::CreateReg(Reg, false);
1165 
1166  // If we have a VLA that has a "use" in a metadata node that's then used
1167  // here but it has no other uses, then we have a problem. E.g.,
1168  //
1169  // int foo (const int *x) {
1170  // char a[*x];
1171  // return 0;
1172  // }
1173  //
1174  // If we assign 'a' a vreg and fast isel later on has to use the selection
1175  // DAG isel, it will want to copy the value to the vreg. However, there are
1176  // no uses, which goes counter to what selection DAG isel expects.
1177  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1178  (!isa<AllocaInst>(Address) ||
1179  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1181  false);
1182 
1183  if (Op) {
1185  "Expected inlined-at fields to agree");
1186  if (Op->isReg()) {
1187  Op->setIsDebug(true);
1188  // A dbg.declare describes the address of a source variable, so lower it
1189  // into an indirect DBG_VALUE.
1191  TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
1192  Op->getReg(), DI->getVariable(), DI->getExpression());
1193  } else
1195  TII.get(TargetOpcode::DBG_VALUE))
1196  .add(*Op)
1197  .addImm(0)
1198  .addMetadata(DI->getVariable())
1199  .addMetadata(DI->getExpression());
1200  } else {
1201  // We can't yet handle anything else here because it would require
1202  // generating code, thus altering codegen because of debug info.
1203  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1204  }
1205  return true;
1206  }
1207  case Intrinsic::dbg_value: {
1208  // This form of DBG_VALUE is target-independent.
1209  const DbgValueInst *DI = cast<DbgValueInst>(II);
1210  const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1211  const Value *V = DI->getValue();
1213  "Expected inlined-at fields to agree");
1214  if (!V) {
1215  // Currently the optimizer can produce this; insert an undef to
1216  // help debugging. Probably the optimizer should not do this.
1217  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
1218  DI->getVariable(), DI->getExpression());
1219  } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1220  if (CI->getBitWidth() > 64)
1222  .addCImm(CI)
1223  .addImm(0U)
1224  .addMetadata(DI->getVariable())
1225  .addMetadata(DI->getExpression());
1226  else
1228  .addImm(CI->getZExtValue())
1229  .addImm(0U)
1230  .addMetadata(DI->getVariable())
1231  .addMetadata(DI->getExpression());
1232  } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1234  .addFPImm(CF)
1235  .addImm(0U)
1236  .addMetadata(DI->getVariable())
1237  .addMetadata(DI->getExpression());
1238  } else if (unsigned Reg = getRegForValue(V)) {
1239  // FIXME: This does not handle register-indirect values at offset 0.
1240  bool IsIndirect = false;
1241  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1242  DI->getVariable(), DI->getExpression());
1243  } else {
1244  // We can't yet handle anything else here because it would require
1245  // generating code, thus altering codegen because of debug info.
1246  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1247  }
1248  return true;
1249  }
1250  case Intrinsic::objectsize: {
1251  ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
1252  unsigned long long Res = CI->isZero() ? -1ULL : 0;
1253  Constant *ResCI = ConstantInt::get(II->getType(), Res);
1254  unsigned ResultReg = getRegForValue(ResCI);
1255  if (!ResultReg)
1256  return false;
1257  updateValueMap(II, ResultReg);
1258  return true;
1259  }
1260  case Intrinsic::invariant_group_barrier:
1261  case Intrinsic::expect: {
1262  unsigned ResultReg = getRegForValue(II->getArgOperand(0));
1263  if (!ResultReg)
1264  return false;
1265  updateValueMap(II, ResultReg);
1266  return true;
1267  }
1268  case Intrinsic::experimental_stackmap:
1269  return selectStackmap(II);
1270  case Intrinsic::experimental_patchpoint_void:
1271  case Intrinsic::experimental_patchpoint_i64:
1272  return selectPatchpoint(II);
1273 
1274  case Intrinsic::xray_customevent:
1275  return selectXRayCustomEvent(II);
1276  }
1277 
1278  return fastLowerIntrinsicCall(II);
1279 }
1280 
1281 bool FastISel::selectCast(const User *I, unsigned Opcode) {
1282  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1283  EVT DstVT = TLI.getValueType(DL, I->getType());
1284 
1285  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1286  !DstVT.isSimple())
1287  // Unhandled type. Halt "fast" selection and bail.
1288  return false;
1289 
1290  // Check if the destination type is legal.
1291  if (!TLI.isTypeLegal(DstVT))
1292  return false;
1293 
1294  // Check if the source operand is legal.
1295  if (!TLI.isTypeLegal(SrcVT))
1296  return false;
1297 
1298  unsigned InputReg = getRegForValue(I->getOperand(0));
1299  if (!InputReg)
1300  // Unhandled operand. Halt "fast" selection and bail.
1301  return false;
1302 
1303  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
1304 
1305  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1306  Opcode, InputReg, InputRegIsKill);
1307  if (!ResultReg)
1308  return false;
1309 
1310  updateValueMap(I, ResultReg);
1311  return true;
1312 }
1313 
1315  // If the bitcast doesn't change the type, just use the operand value.
1316  if (I->getType() == I->getOperand(0)->getType()) {
1317  unsigned Reg = getRegForValue(I->getOperand(0));
1318  if (!Reg)
1319  return false;
1320  updateValueMap(I, Reg);
1321  return true;
1322  }
1323 
1324  // Bitcasts of other values become reg-reg copies or BITCAST operators.
1325  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1326  EVT DstEVT = TLI.getValueType(DL, I->getType());
1327  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1328  !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1329  // Unhandled type. Halt "fast" selection and bail.
1330  return false;
1331 
1332  MVT SrcVT = SrcEVT.getSimpleVT();
1333  MVT DstVT = DstEVT.getSimpleVT();
1334  unsigned Op0 = getRegForValue(I->getOperand(0));
1335  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1336  return false;
1337  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1338 
1339  // First, try to perform the bitcast by inserting a reg-reg copy.
1340  unsigned ResultReg = 0;
1341  if (SrcVT == DstVT) {
1342  const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1343  const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1344  // Don't attempt a cross-class copy. It will likely fail.
1345  if (SrcClass == DstClass) {
1346  ResultReg = createResultReg(DstClass);
1348  TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1349  }
1350  }
1351 
1352  // If the reg-reg copy failed, select a BITCAST opcode.
1353  if (!ResultReg)
1354  ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1355 
1356  if (!ResultReg)
1357  return false;
1358 
1359  updateValueMap(I, ResultReg);
1360  return true;
1361 }
1362 
1363 // Remove local value instructions, from the instruction after
1364 // SavedLastLocalValue up to the current function insert point.
1365 void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1366 {
1367  MachineInstr *CurLastLocalValue = getLastLocalValue();
1368  if (CurLastLocalValue != SavedLastLocalValue) {
1369  // Find the first local value instruction to be deleted.
1370  // This is the instruction after SavedLastLocalValue if it is non-NULL.
1371  // Otherwise it's the first instruction in the block.
1372  MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1373  if (SavedLastLocalValue)
1374  ++FirstDeadInst;
1375  else
1376  FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1377  setLastLocalValue(SavedLastLocalValue);
1378  removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1379  }
1380 }
1381 
1383  MachineInstr *SavedLastLocalValue = getLastLocalValue();
1384  // Just before the terminator instruction, insert instructions to
1385  // feed PHI nodes in successor blocks.
1386  if (isa<TerminatorInst>(I)) {
1387  if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1388  // PHI node handling may have generated local value instructions,
1389  // even though it failed to handle all PHI nodes.
1390  // We remove these instructions because SelectionDAGISel will generate
1391  // them again.
1392  removeDeadLocalValueCode(SavedLastLocalValue);
1393  return false;
1394  }
1395  }
1396 
1397  // FastISel does not handle any operand bundles except OB_funclet.
1399  for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
1400  if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1401  return false;
1402 
1403  DbgLoc = I->getDebugLoc();
1404 
1405  SavedInsertPt = FuncInfo.InsertPt;
1406 
1407  if (const auto *Call = dyn_cast<CallInst>(I)) {
1408  const Function *F = Call->getCalledFunction();
1409  LibFunc Func;
1410 
1411  // As a special case, don't handle calls to builtin library functions that
1412  // may be translated directly to target instructions.
1413  if (F && !F->hasLocalLinkage() && F->hasName() &&
1414  LibInfo->getLibFunc(F->getName(), Func) &&
1416  return false;
1417 
1418  // Don't handle Intrinsic::trap if a trap function is specified.
1419  if (F && F->getIntrinsicID() == Intrinsic::trap &&
1420  Call->hasFnAttr("trap-func-name"))
1421  return false;
1422  }
1423 
1424  // First, try doing target-independent selection.
1426  if (selectOperator(I, I->getOpcode())) {
1427  ++NumFastIselSuccessIndependent;
1428  DbgLoc = DebugLoc();
1429  return true;
1430  }
1431  // Remove dead code.
1433  if (SavedInsertPt != FuncInfo.InsertPt)
1434  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1435  SavedInsertPt = FuncInfo.InsertPt;
1436  }
1437  // Next, try calling the target to attempt to handle the instruction.
1438  if (fastSelectInstruction(I)) {
1439  ++NumFastIselSuccessTarget;
1440  DbgLoc = DebugLoc();
1441  return true;
1442  }
1443  // Remove dead code.
1445  if (SavedInsertPt != FuncInfo.InsertPt)
1446  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1447 
1448  DbgLoc = DebugLoc();
1449  // Undo phi node updates, because they will be added again by SelectionDAG.
1450  if (isa<TerminatorInst>(I)) {
1451  // PHI node handling may have generated local value instructions.
1452  // We remove them because SelectionDAGISel will generate them again.
1453  removeDeadLocalValueCode(SavedLastLocalValue);
1455  }
1456  return false;
1457 }
1458 
1459 /// Emit an unconditional branch to the given block, unless it is the immediate
1460 /// (fall-through) successor, and update the CFG.
1462  const DebugLoc &DbgLoc) {
1463  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
1464  FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1465  // For more accurate line information, if this is the only instruction
1466  // in the block then emit it; otherwise we have the unconditional
1467  // fall-through case, which needs no instructions.
1468  } else {
1469  // The unconditional branch case.
1470  TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1471  SmallVector<MachineOperand, 0>(), DbgLoc);
1472  }
1473  if (FuncInfo.BPI) {
1475  FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
1477  } else
1479 }
1480 
1482  MachineBasicBlock *TrueMBB,
1483  MachineBasicBlock *FalseMBB) {
1484  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
1485  // happen in degenerate IR, and MachineIR forbids having a block twice in
1486  // the successor/predecessor lists.
1487  if (TrueMBB != FalseMBB) {
1488  if (FuncInfo.BPI) {
1489  auto BranchProbability =
1490  FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1492  } else
1494  }
1495 
1496  fastEmitBranch(FalseMBB, DbgLoc);
1497 }
1498 
1499 /// Emit an FNeg operation.
1501  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
1502  if (!OpReg)
1503  return false;
1504  bool OpRegIsKill = hasTrivialKill(I);
1505 
1506  // If the target has ISD::FNEG, use it.
1507  EVT VT = TLI.getValueType(DL, I->getType());
1508  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1509  OpReg, OpRegIsKill);
1510  if (ResultReg) {
1511  updateValueMap(I, ResultReg);
1512  return true;
1513  }
1514 
1515  // Bitcast the value to integer, twiddle the sign bit with xor,
1516  // and then bitcast it back to floating-point.
1517  if (VT.getSizeInBits() > 64)
1518  return false;
1519  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1520  if (!TLI.isTypeLegal(IntVT))
1521  return false;
1522 
1523  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1524  ISD::BITCAST, OpReg, OpRegIsKill);
1525  if (!IntReg)
1526  return false;
1527 
1528  unsigned IntResultReg = fastEmit_ri_(
1529  IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1530  UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1531  if (!IntResultReg)
1532  return false;
1533 
1534  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1535  IntResultReg, /*IsKill=*/true);
1536  if (!ResultReg)
1537  return false;
1538 
1539  updateValueMap(I, ResultReg);
1540  return true;
1541 }
1542 
1544  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1545  if (!EVI)
1546  return false;
1547 
1548  // Make sure we only try to handle extracts with a legal result. But also
1549  // allow i1 because it's easy.
1550  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1551  if (!RealVT.isSimple())
1552  return false;
1553  MVT VT = RealVT.getSimpleVT();
1554  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1555  return false;
1556 
1557  const Value *Op0 = EVI->getOperand(0);
1558  Type *AggTy = Op0->getType();
1559 
1560  // Get the base result register.
1561  unsigned ResultReg;
1563  if (I != FuncInfo.ValueMap.end())
1564  ResultReg = I->second;
1565  else if (isa<Instruction>(Op0))
1566  ResultReg = FuncInfo.InitializeRegForValue(Op0);
1567  else
1568  return false; // fast-isel can't handle aggregate constants at the moment
1569 
1570  // Get the actual result register, which is an offset from the base register.
1571  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1572 
1573  SmallVector<EVT, 4> AggValueVTs;
1574  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1575 
1576  for (unsigned i = 0; i < VTIndex; i++)
1577  ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1578 
1579  updateValueMap(EVI, ResultReg);
1580  return true;
1581 }
1582 
1583 bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1584  switch (Opcode) {
1585  case Instruction::Add:
1586  return selectBinaryOp(I, ISD::ADD);
1587  case Instruction::FAdd:
1588  return selectBinaryOp(I, ISD::FADD);
1589  case Instruction::Sub:
1590  return selectBinaryOp(I, ISD::SUB);
1591  case Instruction::FSub:
1592  // FNeg is currently represented in LLVM IR as a special case of FSub.
1593  if (BinaryOperator::isFNeg(I))
1594  return selectFNeg(I);
1595  return selectBinaryOp(I, ISD::FSUB);
1596  case Instruction::Mul:
1597  return selectBinaryOp(I, ISD::MUL);
1598  case Instruction::FMul:
1599  return selectBinaryOp(I, ISD::FMUL);
1600  case Instruction::SDiv:
1601  return selectBinaryOp(I, ISD::SDIV);
1602  case Instruction::UDiv:
1603  return selectBinaryOp(I, ISD::UDIV);
1604  case Instruction::FDiv:
1605  return selectBinaryOp(I, ISD::FDIV);
1606  case Instruction::SRem:
1607  return selectBinaryOp(I, ISD::SREM);
1608  case Instruction::URem:
1609  return selectBinaryOp(I, ISD::UREM);
1610  case Instruction::FRem:
1611  return selectBinaryOp(I, ISD::FREM);
1612  case Instruction::Shl:
1613  return selectBinaryOp(I, ISD::SHL);
1614  case Instruction::LShr:
1615  return selectBinaryOp(I, ISD::SRL);
1616  case Instruction::AShr:
1617  return selectBinaryOp(I, ISD::SRA);
1618  case Instruction::And:
1619  return selectBinaryOp(I, ISD::AND);
1620  case Instruction::Or:
1621  return selectBinaryOp(I, ISD::OR);
1622  case Instruction::Xor:
1623  return selectBinaryOp(I, ISD::XOR);
1624 
1625  case Instruction::GetElementPtr:
1626  return selectGetElementPtr(I);
1627 
1628  case Instruction::Br: {
1629  const BranchInst *BI = cast<BranchInst>(I);
1630 
1631  if (BI->isUnconditional()) {
1632  const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1633  MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1634  fastEmitBranch(MSucc, BI->getDebugLoc());
1635  return true;
1636  }
1637 
1638  // Conditional branches are not handled yet.
1639  // Halt "fast" selection and bail.
1640  return false;
1641  }
1642 
1643  case Instruction::Unreachable:
1645  return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1646  else
1647  return true;
1648 
1649  case Instruction::Alloca:
1650  // FunctionLowering has the static-sized case covered.
1651  if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1652  return true;
1653 
1654  // Dynamic-sized alloca is not handled yet.
1655  return false;
1656 
1657  case Instruction::Call:
1658  return selectCall(I);
1659 
1660  case Instruction::BitCast:
1661  return selectBitCast(I);
1662 
1663  case Instruction::FPToSI:
1664  return selectCast(I, ISD::FP_TO_SINT);
1665  case Instruction::ZExt:
1666  return selectCast(I, ISD::ZERO_EXTEND);
1667  case Instruction::SExt:
1668  return selectCast(I, ISD::SIGN_EXTEND);
1669  case Instruction::Trunc:
1670  return selectCast(I, ISD::TRUNCATE);
1671  case Instruction::SIToFP:
1672  return selectCast(I, ISD::SINT_TO_FP);
1673 
1674  case Instruction::IntToPtr: // Deliberate fall-through.
1675  case Instruction::PtrToInt: {
1676  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1677  EVT DstVT = TLI.getValueType(DL, I->getType());
1678  if (DstVT.bitsGT(SrcVT))
1679  return selectCast(I, ISD::ZERO_EXTEND);
1680  if (DstVT.bitsLT(SrcVT))
1681  return selectCast(I, ISD::TRUNCATE);
1682  unsigned Reg = getRegForValue(I->getOperand(0));
1683  if (!Reg)
1684  return false;
1685  updateValueMap(I, Reg);
1686  return true;
1687  }
1688 
1689  case Instruction::ExtractValue:
1690  return selectExtractValue(I);
1691 
1692  case Instruction::PHI:
1693  llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1694 
1695  default:
1696  // Unhandled instruction. Halt "fast" selection and bail.
1697  return false;
1698  }
1699 }
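
The IntToPtr/PtrToInt case above reduces pointer casts to a width comparison: widen with a zero-extend, narrow with a truncate, otherwise reuse the source register. A small sketch of that decision; the 32/64-bit widths are chosen purely for illustration.

#include <cassert>

// Width-based decision mirrored from the IntToPtr/PtrToInt case above.
enum class CastKind { ZExt, Trunc, Copy };

static CastKind classifyPtrIntCast(unsigned SrcBits, unsigned DstBits) {
  if (DstBits > SrcBits)
    return CastKind::ZExt;  // destination wider: zero-extend
  if (DstBits < SrcBits)
    return CastKind::Trunc; // destination narrower: truncate
  return CastKind::Copy;    // same width: just reuse the value
}

int main() {
  assert(classifyPtrIntCast(32, 64) == CastKind::ZExt);
  assert(classifyPtrIntCast(64, 32) == CastKind::Trunc);
  assert(classifyPtrIntCast(64, 64) == CastKind::Copy);
  return 0;
}
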
1700 
1701 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1702  const TargetLibraryInfo *LibInfo,
1703  bool SkipTargetIndependentISel)
1704  : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1705  MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1706  TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1707  TII(*MF->getSubtarget().getInstrInfo()),
1708  TLI(*MF->getSubtarget().getTargetLowering()),
1709  TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1710  SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1711 
1712 FastISel::~FastISel() = default;
1713 
1714 bool FastISel::fastLowerArguments() { return false; }
1715 
1716 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1717 
1718 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1719  return false;
1720 }
1721 
1722 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1723 
1724 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
1725  bool /*Op0IsKill*/) {
1726  return 0;
1727 }
1728 
1729 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1730  bool /*Op0IsKill*/, unsigned /*Op1*/,
1731  bool /*Op1IsKill*/) {
1732  return 0;
1733 }
1734 
1735 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1736  return 0;
1737 }
1738 
1739 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1740  const ConstantFP * /*FPImm*/) {
1741  return 0;
1742 }
1743 
1744 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1745  bool /*Op0IsKill*/, uint64_t /*Imm*/) {
1746  return 0;
1747 }
1748 
1749 /// This method is a wrapper of fastEmit_ri. It first tries to emit an
1750 /// instruction with an immediate operand using fastEmit_ri.
1751 /// If that fails, it materializes the immediate into a register and tries
1752 /// fastEmit_rr instead.
1753 unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1754  bool Op0IsKill, uint64_t Imm, MVT ImmType) {
1755  // If this is a multiply by a power of two, emit this as a shift left.
1756  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1757  Opcode = ISD::SHL;
1758  Imm = Log2_64(Imm);
1759  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1760  // div x, 8 -> srl x, 3
1761  Opcode = ISD::SRL;
1762  Imm = Log2_64(Imm);
1763  }
1764 
1765  // Horrible hack (to be removed): check that shift amounts are
1766  // in range.
1767  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1768  Imm >= VT.getSizeInBits())
1769  return 0;
1770 
1771  // First check if immediate type is legal. If not, we can't use the ri form.
1772  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1773  if (ResultReg)
1774  return ResultReg;
1775  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1776  bool IsImmKill = true;
1777  if (!MaterialReg) {
1778  // This is a bit ugly/slow, but failing here means falling out of
1779  // fast-isel, which would be very slow.
1780  IntegerType *ITy =
1781  IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1782  MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1783  if (!MaterialReg)
1784  return 0;
1785  // FIXME: If the materialized register here has no uses yet then this
1786  // will be the first use and we should be able to mark it as killed.
1787  // However, the local value area for materialising constant expressions
1788  // grows down, not up, which means that any constant expressions we generate
1789  // later which also use 'Imm' could be after this instruction and therefore
1790  // after this kill.
1791  IsImmKill = false;
1792  }
1793  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
1794 }
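
A quick check of the strength reductions applied above, using arbitrary example values; the shift-amount clamp (Imm < bit width) is what the range check above enforces.

#include <cassert>
#include <cstdint>

int main() {
  // The rewrites applied for power-of-two immediates:
  //   mul x, 2^k  ->  shl x, k      udiv x, 2^k  ->  srl x, k
  uint64_t X = 100, Imm = 8; // Imm = 2^3, so Log2_64(Imm) == 3
  unsigned K = 3;
  assert(X * Imm == (X << K));
  assert(X / Imm == (X >> K));
  return 0;
}
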
1795 
1796 unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
1797  return MRI.createVirtualRegister(RC);
1798 }
1799 
1800 unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
1801  unsigned OpNum) {
1802  if (TargetRegisterInfo::isVirtualRegister(Op)) {
1803  const TargetRegisterClass *RegClass =
1804  TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1805  if (!MRI.constrainRegClass(Op, RegClass)) {
1806  // If it's not legal to COPY between the register classes, something
1807  // has gone very wrong before we got here.
1808  unsigned NewOp = createResultReg(RegClass);
1809  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1810  TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1811  return NewOp;
1812  }
1813  }
1814  return Op;
1815 }
1816 
1817 unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1818  const TargetRegisterClass *RC) {
1819  unsigned ResultReg = createResultReg(RC);
1820  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1821 
1822  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
1823  return ResultReg;
1824 }
1825 
1826 unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1827  const TargetRegisterClass *RC, unsigned Op0,
1828  bool Op0IsKill) {
1829  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1830 
1831  unsigned ResultReg = createResultReg(RC);
1832  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1833 
1834  if (II.getNumDefs() >= 1)
1835  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1836  .addReg(Op0, getKillRegState(Op0IsKill));
1837  else {
1838  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1839  .addReg(Op0, getKillRegState(Op0IsKill));
1840  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1841  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1842  }
1843 
1844  return ResultReg;
1845 }
1846 
1847 unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1848  const TargetRegisterClass *RC, unsigned Op0,
1849  bool Op0IsKill, unsigned Op1,
1850  bool Op1IsKill) {
1851  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1852 
1853  unsigned ResultReg = createResultReg(RC);
1854  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1855  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1856 
1857  if (II.getNumDefs() >= 1)
1858  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1859  .addReg(Op0, getKillRegState(Op0IsKill))
1860  .addReg(Op1, getKillRegState(Op1IsKill));
1861  else {
1862  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1863  .addReg(Op0, getKillRegState(Op0IsKill))
1864  .addReg(Op1, getKillRegState(Op1IsKill));
1865  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1866  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1867  }
1868  return ResultReg;
1869 }
1870 
1871 unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1872  const TargetRegisterClass *RC, unsigned Op0,
1873  bool Op0IsKill, unsigned Op1,
1874  bool Op1IsKill, unsigned Op2,
1875  bool Op2IsKill) {
1876  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1877 
1878  unsigned ResultReg = createResultReg(RC);
1879  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1880  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1881  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1882 
1883  if (II.getNumDefs() >= 1)
1884  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1885  .addReg(Op0, getKillRegState(Op0IsKill))
1886  .addReg(Op1, getKillRegState(Op1IsKill))
1887  .addReg(Op2, getKillRegState(Op2IsKill));
1888  else {
1889  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1890  .addReg(Op0, getKillRegState(Op0IsKill))
1891  .addReg(Op1, getKillRegState(Op1IsKill))
1892  .addReg(Op2, getKillRegState(Op2IsKill));
1893  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1894  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1895  }
1896  return ResultReg;
1897 }
1898 
1899 unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
1900  const TargetRegisterClass *RC, unsigned Op0,
1901  bool Op0IsKill, uint64_t Imm) {
1902  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1903 
1904  unsigned ResultReg = createResultReg(RC);
1905  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1906 
1907  if (II.getNumDefs() >= 1)
1908  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1909  .addReg(Op0, getKillRegState(Op0IsKill))
1910  .addImm(Imm);
1911  else {
1912  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1913  .addReg(Op0, getKillRegState(Op0IsKill))
1914  .addImm(Imm);
1915  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1916  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1917  }
1918  return ResultReg;
1919 }
1920 
1921 unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
1922  const TargetRegisterClass *RC, unsigned Op0,
1923  bool Op0IsKill, uint64_t Imm1,
1924  uint64_t Imm2) {
1925  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1926 
1927  unsigned ResultReg = createResultReg(RC);
1928  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1929 
1930  if (II.getNumDefs() >= 1)
1931  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1932  .addReg(Op0, getKillRegState(Op0IsKill))
1933  .addImm(Imm1)
1934  .addImm(Imm2);
1935  else {
1936  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1937  .addReg(Op0, getKillRegState(Op0IsKill))
1938  .addImm(Imm1)
1939  .addImm(Imm2);
1940  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1941  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1942  }
1943  return ResultReg;
1944 }
1945 
1946 unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
1947  const TargetRegisterClass *RC,
1948  const ConstantFP *FPImm) {
1949  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1950 
1951  unsigned ResultReg = createResultReg(RC);
1952 
1953  if (II.getNumDefs() >= 1)
1954  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1955  .addFPImm(FPImm);
1956  else {
1957  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1958  .addFPImm(FPImm);
1959  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1960  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1961  }
1962  return ResultReg;
1963 }
1964 
1965 unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
1966  const TargetRegisterClass *RC, unsigned Op0,
1967  bool Op0IsKill, unsigned Op1,
1968  bool Op1IsKill, uint64_t Imm) {
1969  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1970 
1971  unsigned ResultReg = createResultReg(RC);
1972  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1973  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1974 
1975  if (II.getNumDefs() >= 1)
1976  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1977  .addReg(Op0, getKillRegState(Op0IsKill))
1978  .addReg(Op1, getKillRegState(Op1IsKill))
1979  .addImm(Imm);
1980  else {
1981  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1982  .addReg(Op0, getKillRegState(Op0IsKill))
1983  .addReg(Op1, getKillRegState(Op1IsKill))
1984  .addImm(Imm);
1985  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1986  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1987  }
1988  return ResultReg;
1989 }
1990 
1991 unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
1992  const TargetRegisterClass *RC, uint64_t Imm) {
1993  unsigned ResultReg = createResultReg(RC);
1994  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1995 
1996  if (II.getNumDefs() >= 1)
1997  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1998  .addImm(Imm);
1999  else {
2000  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
2001  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2002  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2003  }
2004  return ResultReg;
2005 }
2006 
2007 unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
2008  bool Op0IsKill, uint32_t Idx) {
2009  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2010  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
2011  "Cannot yet extract from physregs");
2012  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2013  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2014  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
2015  ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
2016  return ResultReg;
2017 }
2018 
2019 /// Emit MachineInstrs to compute the value of Op with all but the least
2020 /// significant bit set to zero.
2021 unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
2022  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
2023 }
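
The doc comment above describes the effect; as a sanity check with made-up register contents, the AND with 1 keeps only the boolean bit.

#include <cassert>
#include <cstdint>

int main() {
  // Zero-extending an i1 is just clearing everything above the low bit,
  // which is why the helper above emits a single AND with 1.
  uint32_t Op0 = 0xFFFFFFFE; // only the low bit carries the i1 value
  uint32_t One = 0x00000001;
  assert((Op0 & 1) == 0);
  assert((One & 1) == 1);
  return 0;
}
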
2024 
2025 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2026 /// Emit code to ensure constants are copied into registers when needed.
2027 /// Remember the virtual registers that need to be added to the Machine PHI
2028 /// nodes as input. We cannot just directly add them, because expansion
2029 /// might result in multiple MBB's for one BB. As such, the start of the
2030 /// BB might correspond to a different MBB than the end.
2031 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2032  const TerminatorInst *TI = LLVMBB->getTerminator();
2033 
2034  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2035  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2036 
2037  // Check successor nodes' PHI nodes that expect a constant to be available
2038  // from this block.
2039  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2040  const BasicBlock *SuccBB = TI->getSuccessor(succ);
2041  if (!isa<PHINode>(SuccBB->begin()))
2042  continue;
2043  MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2044 
2045  // If this terminator has multiple identical successors (common for
2046  // switches), only handle each succ once.
2047  if (!SuccsHandled.insert(SuccMBB).second)
2048  continue;
2049 
2050  MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2051 
2052  // At this point we know that there is a 1-1 correspondence between LLVM PHI
2053  // nodes and Machine PHI nodes, but the incoming operands have not been
2054  // emitted yet.
2055  for (const PHINode &PN : SuccBB->phis()) {
2056  // Ignore dead phi's.
2057  if (PN.use_empty())
2058  continue;
2059 
2060  // Only handle legal types. Two interesting things to note here. First,
2061  // by bailing out early, we may leave behind some dead instructions,
2062  // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2063  // own moves. Second, this check is necessary because FastISel doesn't
2064  // use CreateRegs to create registers, so it always creates
2065  // exactly one register for each non-void instruction.
2066  EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2067  if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2068  // Handle integer promotions, though, because they're common and easy.
2069  if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2070  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2071  return false;
2072  }
2073  }
2074 
2075  const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2076 
2077  // Set the DebugLoc for the copy. Prefer the location of the operand
2078  // if there is one; use the location of the PHI otherwise.
2079  DbgLoc = PN.getDebugLoc();
2080  if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2081  DbgLoc = Inst->getDebugLoc();
2082 
2083  unsigned Reg = getRegForValue(PHIOp);
2084  if (!Reg) {
2085  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2086  return false;
2087  }
2088  FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2089  DbgLoc = DebugLoc();
2090  }
2091  }
2092 
2093  return true;
2094 }
2095 
2096 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2097  assert(LI->hasOneUse() &&
2098  "tryToFoldLoad expected a LoadInst with a single use");
2099  // We know that the load has a single use, but don't know what it is. If it
2100  // isn't one of the folded instructions, then we can't succeed here. Handle
2101  // this by scanning the single-use users of the load until we get to FoldInst.
2102  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2103 
2104  const Instruction *TheUser = LI->user_back();
2105  while (TheUser != FoldInst && // Scan up until we find FoldInst.
2106  // Stay in the right block.
2107  TheUser->getParent() == FoldInst->getParent() &&
2108  --MaxUsers) { // Don't scan too far.
2109  // If there are multiple or no uses of this instruction, then bail out.
2110  if (!TheUser->hasOneUse())
2111  return false;
2112 
2113  TheUser = TheUser->user_back();
2114  }
2115 
2116  // If we didn't find the fold instruction, then we failed to collapse the
2117  // sequence.
2118  if (TheUser != FoldInst)
2119  return false;
2120 
2121  // Don't try to fold volatile loads. Target has to deal with alignment
2122  // constraints.
2123  if (LI->isVolatile())
2124  return false;
2125 
2126  // Figure out which vreg this is going into. If there is no assigned vreg yet
2127  // then there actually was no reference to it. Perhaps the load is referenced
2128  // by a dead instruction.
2129  unsigned LoadReg = getRegForValue(LI);
2130  if (!LoadReg)
2131  return false;
2132 
2133  // We can't fold if this vreg has no uses or more than one use. Multiple uses
2134  // may mean that the instruction got lowered to multiple MIs, or the use of
2135  // the loaded value ended up being multiple operands of the result.
2136  if (!MRI.hasOneUse(LoadReg))
2137  return false;
2138 
2139  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2140  MachineInstr *User = RI->getParent();
2141 
2142  // Set the insertion point properly. Folding the load can cause generation of
2143  // other random instructions (like sign extends) for addressing modes; make
2144  // sure they get inserted in a logical place before the new instruction.
2145  FuncInfo.InsertPt = User;
2146  FuncInfo.MBB = User->getParent();
2147 
2148  // Ask the target to try folding the load.
2149  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2150 }
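
The chain walk above can be pictured with a toy graph; the sketch below keeps only the single-use and hop-limit logic and drops the same-basic-block check. Node and reachesViaSingleUses are illustrative names, not LLVM API.

#include <cassert>

// Stand-in for "an instruction with at most one user"; null means the value
// has zero or multiple users in this sketch.
struct Node {
  Node *OnlyUser = nullptr;
};

static bool reachesViaSingleUses(Node *Start, Node *Target,
                                 unsigned MaxUsers = 6) {
  Node *Cur = Start;
  while (Cur != Target && MaxUsers--) { // don't scan down huge chains
    if (!Cur->OnlyUser)
      return false; // zero or multiple uses: cannot fold
    Cur = Cur->OnlyUser;
  }
  return Cur == Target;
}

int main() {
  Node A, B, C;
  A.OnlyUser = &B; // A -> B -> C is a single-use chain
  B.OnlyUser = &C;
  assert(reachesViaSingleUses(&A, &C));
  assert(reachesViaSingleUses(&A, &B));
  Node D; // unrelated node, never reached
  assert(!reachesViaSingleUses(&A, &D));
  return 0;
}
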
2151 
2152 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2153  // Must be an add.
2154  if (!isa<AddOperator>(Add))
2155  return false;
2156  // Type size needs to match.
2157  if (DL.getTypeSizeInBits(GEP->getType()) !=
2158  DL.getTypeSizeInBits(Add->getType()))
2159  return false;
2160  // Must be in the same basic block.
2161  if (isa<Instruction>(Add) &&
2162  FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2163  return false;
2164  // Must have a constant operand.
2165  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2166 }
2167 
2168 MachineMemOperand *
2169 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2170  const Value *Ptr;
2171  Type *ValTy;
2172  unsigned Alignment;
2173  MachineMemOperand::Flags Flags;
2174  bool IsVolatile;
2175 
2176  if (const auto *LI = dyn_cast<LoadInst>(I)) {
2177  Alignment = LI->getAlignment();
2178  IsVolatile = LI->isVolatile();
2179  Flags = MachineMemOperand::MOLoad;
2180  Ptr = LI->getPointerOperand();
2181  ValTy = LI->getType();
2182  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2183  Alignment = SI->getAlignment();
2184  IsVolatile = SI->isVolatile();
2185  Flags = MachineMemOperand::MOStore;
2186  Ptr = SI->getPointerOperand();
2187  ValTy = SI->getValueOperand()->getType();
2188  } else
2189  return nullptr;
2190 
2191  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2192  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
2193  bool IsDereferenceable =
2194  I->getMetadata(LLVMContext::MD_dereferenceable);
2195  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2196 
2197  AAMDNodes AAInfo;
2198  I->getAAMetadata(AAInfo);
2199 
2200  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
2201  Alignment = DL.getABITypeAlignment(ValTy);
2202 
2203  unsigned Size = DL.getTypeStoreSize(ValTy);
2204 
2205  if (IsVolatile)
2206  Flags |= MachineMemOperand::MOVolatile;
2207  if (IsNonTemporal)
2208  Flags |= MachineMemOperand::MONonTemporal;
2209  if (IsDereferenceable)
2210  Flags |= MachineMemOperand::MODereferenceable;
2211  if (IsInvariant)
2212  Flags |= MachineMemOperand::MOInvariant;
2213 
2214  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2215  Alignment, AAInfo, Ranges);
2216 }
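
A compact sketch of the metadata-to-flags translation above, using stand-in flag values rather than the real MachineMemOperand enum.

#include <cassert>

int main() {
  // The flag accumulation above is plain bit-OR composition. These values are
  // illustrative; the real flags live in llvm/CodeGen/MachineMemOperand.h.
  enum Flag : unsigned {
    Load = 1u << 0,
    Store = 1u << 1,
    Volatile = 1u << 2,
    NonTemporal = 1u << 3,
    Dereferenceable = 1u << 4,
    Invariant = 1u << 5
  };
  unsigned Flags = Load; // a load instruction...
  bool IsVolatile = true, IsNonTemporal = false;
  if (IsVolatile)
    Flags |= Volatile;
  if (IsNonTemporal)
    Flags |= NonTemporal;
  assert((Flags & Load) && (Flags & Volatile) && !(Flags & NonTemporal));
  return 0;
}
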
2217 
2218 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2219  // If both operands are the same, then try to optimize or fold the cmp.
2220  CmpInst::Predicate Predicate = CI->getPredicate();
2221  if (CI->getOperand(0) != CI->getOperand(1))
2222  return Predicate;
2223 
2224  switch (Predicate) {
2225  default: llvm_unreachable("Invalid predicate!");
2226  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2227  case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2228  case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2229  case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2230  case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2231  case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2232  case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2233  case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2234  case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2235  case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2236  case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2237  case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2238  case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2239  case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2240  case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2241  case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2242 
2243  case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2244  case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2245  case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2246  case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2247  case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2248  case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2249  case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2250  case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2251  case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2252  case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2253  }
2254 
2255  return Predicate;
2256 }
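
The fold for identical operands hinges on IEEE NaN behavior; a short check of two representative rows from the switch above (oeq folds to "ordered", une folds to "unordered"), in plain C++.

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  // With identical operands, an ordered equality holds exactly when the value
  // is not NaN, and an unordered inequality holds exactly when it is NaN.
  double X = 1.0;
  double NaN = std::numeric_limits<double>::quiet_NaN();
  assert((X == X) == !std::isnan(X));       // fcmp oeq x, x  ->  ordered check
  assert((NaN == NaN) == !std::isnan(NaN)); // false for NaN
  assert((NaN != NaN) == std::isnan(NaN));  // fcmp une x, x  ->  unordered check
  return 0;
}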
void setHasStackMap(bool s=true)
uint64_t CallInst * C
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:546
unsigned fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, unsigned Op2, bool Op2IsKill)
Emit a MachineInstr with three register operands and a result register in the given register class...
Definition: FastISel.cpp:1871
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
void setByValAlign(unsigned A)
const MachineInstrBuilder & add(const MachineOperand &MO) const
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:843
unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero...
Definition: FastISel.cpp:2021
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
bool hasLocalLinkage() const
Definition: GlobalValue.h:435
This instruction extracts a struct member or array element value from an aggregate value...
MachineConstantPool & MCP
Definition: FastISel.h:208
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1042
unsigned arg_size() const
Definition: CallSite.h:219
static const Value * getFNegArgument(const Value *BinOp)
bool hasDebugInfo() const
Returns true if valid debug info is present.
CallingConv::ID getCallingConv() const
Get the calling convention of the call.
Definition: CallSite.h:312
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
virtual unsigned fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:475
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
BasicBlock * getSuccessor(unsigned idx) const
Return the specified successor.
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
constexpr char IsVolatile[]
Key for Kernel::Arg::Metadata::mIsVolatile.
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
ImmutableCallSite * CS
Definition: FastISel.h:90
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:497
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:397
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:577
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
unsigned getReg() const
getReg - Returns the register number.
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:612
This class represents a function call, abstracting a target machine&#39;s calling convention.
This file contains the declarations for metadata subclasses.
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:298
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:253
gep_type_iterator gep_type_end(const User *GEP)
unsigned less or equal
Definition: InstrTypes.h:879
unsigned less than
Definition: InstrTypes.h:878
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:859
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2169
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:738
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:869
void addSuccessorWithoutProb(MachineBasicBlock *Succ)
Add Succ as a successor of this MachineBasicBlock.
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1722
BasicBlock * getSuccessor(unsigned i) const
arg_iterator arg_end()
Definition: Function.h:658
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
STATISTIC(NumFunctions, "Total number of functions")
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:862
F(f)
MachineModuleInfo & getMMI() const
SmallVector< unsigned, 4 > InRegs
Definition: FastISel.h:99
unsigned getCallFrameDestroyOpcode() const
An instruction for reading from memory.
Definition: Instructions.h:164
Hexagon Common GEP
bool CanLowerReturn
CanLowerReturn - true iff the function&#39;s return value can be lowered to registers.
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1735
CallingConv::ID getCallingConv() const
getCallingConv/setCallingConv - Get or set the calling convention of this function call...
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF)
Emit the floating-point constant +0.0 in a register using target- specific logic. ...
Definition: FastISel.h:482
void setPhysRegsDeadExcept(ArrayRef< unsigned > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
void computeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo &MMI)
Determine if any floating-point values are being passed to this variadic function, and set the MachineModuleInfo&#39;s usesVAFloatArgument flag if so.
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1716
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:130
static Constant * getNullValue(Type *Ty)
Constructor to create a &#39;0&#39; constant of arbitrary type.
Definition: Constants.cpp:232
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:252
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1382
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:1921
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
Definition: APFloat.h:1069
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:864
MachineFunction * MF
Definition: FastISel.h:205
DenseMap< const Value *, unsigned > LocalValueMap
Definition: FastISel.h:203
unsigned fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:1899
ArrayRef< unsigned > getIndices() const
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:238
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:863
unsigned fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:1965
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2218
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:217
A description of a memory reference used in the backend.
void setHasPatchPoint(bool s=true)
unsigned getNumArgOperands() const
Return the number of call arguments.
TargetLoweringBase::ArgListTy ArgListTy
Definition: FastISel.h:70
Shift and rotation operations.
Definition: ISDOpcodes.h:380
Class to represent struct types.
Definition: DerivedTypes.h:201
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
unsigned fastEmitInst_i(unsigned MachineInstrOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:1991
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2152
DenseMap< const Value *, unsigned > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
IterTy arg_end() const
Definition: CallSite.h:575
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
unsigned fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill)
Emit a MachineInstr with one register operand and a result register in the given register class...
Definition: FastISel.cpp:1826
MachineInstr * EmitStartPt
The top most instruction in the current block that is allowed for emitting local variables.
Definition: FastISel.h:227
Reg
All possible values of the reg field in the ModR/M byte.
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:860
This file contains the simple types necessary to represent the attributes associated with functions a...
InstrTy * getInstruction() const
Definition: CallSite.h:92
The memory access is dereferenceable (i.e., doesn&#39;t trap).
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
void setByValSize(unsigned S)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const TargetMachine & TM
Definition: FastISel.h:210
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:635
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1127
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1281
unsigned getSizeInBits() const
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
Context object for machine code objects.
Definition: MCContext.h:61
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
Class to represent function types.
Definition: DerivedTypes.h:103
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:98
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:1800
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:456
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr), and append generated machine instructions to the current block.
Definition: FastISel.cpp:1583
ArchType getArch() const
getArch - Get the parsed architecture type of this triple.
Definition: Triple.h:285
unsigned getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value...
Definition: FastISel.cpp:195
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:201
unsigned fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:1817
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:195
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:126
bool hasTrivialKill(const Value *V)
Test whether the given value has exactly one use.
Definition: FastISel.cpp:163
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
void setOrigAlign(unsigned A)
amdgpu Simplify well known AMD library false Value * Callee
MachineInstr * getLastLocalValue()
Return the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:234
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:84
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
Value * getOperand(unsigned i) const
Definition: User.h:154
Class to represent pointers.
Definition: DerivedTypes.h:467
unsigned getKillRegState(bool B)
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:499
unsigned lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:308
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:146
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:229
MCContext & getContext() const
void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes...
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
The memory access is volatile.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
Definition: DataLayout.cpp:733
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void getAAMetadata(AAMDNodes &N, bool Merge=false) const
Fills the AAMDNodes structure with AA metadata from this instruction.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:150
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
virtual ~FastISel()
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:54
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, ImmutableCallSite &Call)
Definition: FastISel.h:105
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Machine Value Type.
bool hasName() const
Definition: Value.h:251
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
Simple binary floating point operators.
Definition: ISDOpcodes.h:260
Conditional or Unconditional Branch instruction.
Value * getAddress() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:375
Value * getValue() const
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:96
This file contains the declarations for the subclasses of Constant, which represent the different fla...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:264
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:371
const MCPhysReg * ImplicitDefs
Definition: MCInstrDesc.h:173
size_t size() const
Definition: BasicBlock.h:262
MachineFrameInfo & MFI
Definition: FastISel.h:207
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1724
bool SkipTargetIndependentISel
Definition: FastISel.h:216
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:426
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:161
DILocalVariable * getVariable() const
Definition: IntrinsicInst.h:80
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We&#39;re checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2096
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:136
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:853
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned char TargetFlags=0)
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:735
const Triple & getTargetTriple() const
DIExpression * getExpression() const
Definition: IntrinsicInst.h:84
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:862
arg_iterator arg_begin()
Definition: Function.h:649
The memory access is non-temporal.
Class to represent integer types.
Definition: DerivedTypes.h:40
bool selectXRayCustomEvent(const CallInst *II)
Definition: FastISel.cpp:863
const TargetRegisterInfo & TRI
Definition: FastISel.h:214
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:870
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:194
Extended Value Type.
Definition: ValueTypes.h:34
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:567
bool selectFNeg(const User *I)
Emit an FNeg operation.
Definition: FastISel.cpp:1500
This class contains a discriminated union of information about pointers in memory operands...
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:868
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:95
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call...
Definition: FastISel.cpp:885
const TargetInstrInfo & TII
Definition: FastISel.h:212
MachineBasicBlock * MBB
MBB - The current block.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:471
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
signed greater than
Definition: InstrTypes.h:880
MachineInstr * LastLocalValue
The position of the last instruction for materializing constants for use in the current block...
Definition: FastISel.h:222
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:640
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge&#39;s probability, relative to other out-edges of the Src.
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:361
The memory access writes data.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:51
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:857
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:240
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1729
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:418
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1744
DenseMap< unsigned, unsigned > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
IterTy arg_begin() const
Definition: CallSite.h:571
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:867
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:862
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:64
Provides information about what library functions are available for the current target.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:27
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtains the branch weight and adds TrueMBB and FalseMBB to ...
Definition: FastISel.cpp:1481
const TargetLibraryInfo * LibInfo
Definition: FastISel.h:215
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:713
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:572
signed less than
Definition: InstrTypes.h:882
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:642
reg_iterator reg_begin(unsigned RegNo) const
unsigned fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
Emit a MachineInstr with two register operands and a result register in the given register class...
Definition: FastISel.cpp:1847
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:585
void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:319
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:225
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:175
void startNewBlock()
Set the current block to which generated machine instructions will be appended, and clear the local C...
Definition: FastISel.cpp:124
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
signed less or equal
Definition: InstrTypes.h:883
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1314
Target - Wrapper for Target specific information.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1739
SmallVector< unsigned, 16 > OutRegs
Definition: FastISel.h:97
const DataLayout & DL
Definition: FastISel.h:211
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:406
BranchProbabilityInfo * BPI
This file defines the FastISel class.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:241
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:446
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
bool use_empty(unsigned RegNo) const
use_empty - Return true if there are no instructions using the specified register.
bool isTailCall() const
DebugLoc DbgLoc
Definition: FastISel.h:209
bool selectCall(const User *Call)
Definition: FastISel.cpp:1081
Flags
Flags values. These may be or&#39;d together.
amdgpu Simplify well known AMD library false Value Value * Arg
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:142
The memory access reads data.
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:550
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:388
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:418
Function * getCalledFunction() const
Return the function called, or null if this is an indirect function invocation.
Representation of each machine instruction.
Definition: MachineInstr.h:60
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:927
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering...
Definition: FastISel.cpp:1718
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:725
bool selectExtractValue(const User *I)
Definition: FastISel.cpp:1543
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:285
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:363
MachineRegisterInfo & MRI
Definition: FastISel.h:206
bool hasOneUse(unsigned RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:533
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:121
unsigned greater or equal
Definition: InstrTypes.h:877
This represents the llvm.dbg.value instruction.
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:907
static bool isFNeg(const Value *V, bool IgnoreZeroSign=false)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
StringRef getName() const
Return a constant reference to the value&#39;s name.
Definition: Value.cpp:224
TargetOptions Options
Definition: TargetMachine.h:98
Establish a view to a call site for examination.
Definition: CallSite.h:713
static MachineOperand CreateImm(int64_t Val)
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
The memory access always returns the same value (or traps).
virtual unsigned fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:478
iterator end()
Definition: DenseMap.h:79
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:193
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:861
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:308
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool isUnconditional() const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block...
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:865
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:312
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const
Return the number of registers that this ValueType will eventually require.
Type * getType() const
Return the type of the instruction that generated this call site.
Definition: CallSite.h:264
const TargetLowering & TLI
Definition: FastISel.h:213
bool isReg() const
isReg - Tests if this is a MO_Register operand.
unsigned createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:1796
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:183
unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1753
unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:2007
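A common use of fastEmitInst_extractsubreg is narrowing a wide register to its low part, e.g. taking an i32 view of an i64 value. A minimal sketch, where Op0 and SubRegIdx are placeholders; the actual subregister index is target-specific:

  // Sketch: copy the low 32 bits of a 64-bit value into a fresh i32 register.
  // SubRegIdx must name a target subregister index; treat its exact value as
  // an assumption here.
  unsigned LoReg =
      fastEmitInst_extractsubreg(MVT::i32, Op0, /*Op0IsKill=*/true, SubRegIdx);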
MachineBasicBlock::iterator InsertPt
Definition: FastISel.h:312
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef())
void GetReturnInfo(Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags...
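GetReturnInfo feeds the Outs array that CanLowerReturn (listed above) consumes, which is how the return portion of a call is vetted before selection. A minimal sketch, assuming FastISel member context (TLI, DL, FuncInfo) and placeholder RetTy, RetAttrs, and CC values:

  // Sketch: compute the return-value pieces and ask the target about them.
  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(RetTy, RetAttrs, Outs, TLI, DL);
  bool CanLower = TLI.CanLowerReturn(CC, *FuncInfo.MF, /*IsVarArg=*/false,
                                     Outs, RetTy->getContext());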
user_iterator user_begin()
Definition: Value.h:381
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1701
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual bool fastLowerArguments()
This method is called by target-independent code to do target-specific argument lowering.
Definition: FastISel.cpp:1714
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:856
LLVM Value Representation.
Definition: Value.h:73
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:866
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:401
FunctionType * getFunctionType() const
Definition: CallSite.h:320
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
static const Function * getParent(const Value *V)
#define DEBUG(X)
Definition: Debug.h:118
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:109
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
bool hasOneUse() const
Return true if there is exactly one user of this value.
Definition: Value.h:418
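Single-use tests like hasOneUse gate most folding decisions: an instruction can only disappear into its user if nothing else reads it. A minimal sketch, with placeholder LI (a load) and FoldInst (its prospective user):

  // Sketch: only consider folding the load when it has exactly one user
  // and that user sits in the same basic block.
  if (LI->hasOneUse() && LI->getParent() == FoldInst->getParent()) {
    // ... attempt target-specific folding of LI into FoldInst ...
  }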
unsigned greater than
Definition: InstrTypes.h:876
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
virtual bool fastSelectInstruction(const Instruction *I)=0
This method is called by target-independent code when the normal FastISel process fails to select an ...
unsigned fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
Definition: FastISel.cpp:1946
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty...
Definition: Type.cpp:98
Conversion operators.
Definition: ISDOpcodes.h:443
const TerminatorInst * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:120
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:204
const Value * stripInBoundsConstantOffsets() const
Strip off pointer casts and all-constant inbounds GEPs.
Definition: Value.cpp:575
void setIsDebug(bool Val=true)
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:452
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:126
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:858
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition: Analysis.cpp:36
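ComputeLinearIndex flattens a nested aggregate path into a count of scalar leaves. For example, in the struct type { i32, { float, float } } the path {1, 1} lands on the third leaf, index 2. A minimal sketch, where AggTy is a placeholder Type* for that struct:

  // Sketch: for { i32, { float, float } } and path {1, 1}, Linear == 2.
  unsigned Indices[] = {1, 1};
  unsigned Linear =
      ComputeLinearIndex(AggTy, std::begin(Indices), std::end(Indices));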
void fastEmitBranch(MachineBasicBlock *MBB, const DebugLoc &DL)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor...
Definition: FastISel.cpp:1461
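fastEmitBranch pairs naturally with isUnconditional (listed earlier) and MBBMap: the IR successor is translated to its machine block and a branch is emitted only when it is not the fall-through. A minimal sketch, assuming FastISel member context and a placeholder BranchInst pointer BI:

  // Sketch: select an unconditional IR branch.
  if (BI->isUnconditional()) {
    MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
    fastEmitBranch(TargetMBB, BI->getDebugLoc());
    return true;
  }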
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
This represents the llvm.dbg.declare instruction.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
Definition: ISDOpcodes.h:562
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
static MachineOperand CreateFI(int Idx)
bool use_empty() const
Definition: Value.h:328
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:537
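Log2_64 is what turns a power-of-two scale factor into a shift amount, a cheap strength reduction that pays off even at -O0. A minimal sketch, where ElementSize is a placeholder:

  // Sketch: multiply-by-power-of-two becomes shift-left-by-log2.
  if (isPowerOf2_64(ElementSize)) {
    uint64_t ShiftAmt = Log2_64(ElementSize); // e.g. 8 -> 3
    // ... emit a left shift by ShiftAmt instead of a multiply ...
  }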
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Type * getElementType() const
Definition: DerivedTypes.h:486
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
Definition: Attributes.cpp:865
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:855
signed greater or equal
Definition: InstrTypes.h:881
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
This class contains meta information specific to a module.
This file describes how to lower LLVM code to machine code.
const BasicBlock * getParent() const
Definition: Instruction.h:67
unsigned InitializeRegForValue(const Value *V)
gep_type_iterator gep_type_begin(const User *GEP)
std::pair< unsigned, bool > getRegForGEPIndex(const Value *V)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:338
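getRegForGEPIndex hands back both the register and a kill flag, with the index already extended or truncated to pointer width. A minimal sketch of consuming the pair, where Idx is a placeholder GEP index operand:

  // Sketch: materialize a GEP index at pointer width.
  std::pair<unsigned, bool> IdxPair = getRegForGEPIndex(Idx);
  unsigned IdxReg = IdxPair.first;  // register holding the extended index
  bool IdxIsKill = IdxPair.second;  // whether this use kills that register
  if (!IdxReg)
    return false; // index could not be materialized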