LLVM 3.7.0
FastISel.cpp
1 //===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the FastISel class.
11 //
12 // "Fast" instruction selection is designed to emit very poor code quickly.
13 // Also, it is not designed to be able to do much lowering, so most illegal
14 // types (e.g. i64 on 32-bit targets) and operations are not supported. It is
15 // also not intended to be able to do much optimization, except in a few cases
16 // where doing optimizations reduces overall compile time. For example, folding
17 // constants into immediate fields is often done, because it's cheap and it
18 // reduces the number of instructions later phases have to examine.
19 //
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
24 //
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
32 // in -O0 compiles.
33 //
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
39 //
40 //===----------------------------------------------------------------------===//
41 
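The fallback contract described above is what target backends rely on: each target provides a FastISel subclass and overrides hooks such as fastSelectInstruction (the target-independent defaults for the other fast* hooks appear near the end of this file), returning false whenever an instruction is out of scope so the SelectionDAG selector can take over. A minimal sketch of that shape — the subclass name and helper are hypothetical, not part of this file:

// Hypothetical target subclass; only the override/fallback shape matters.
class MyTargetFastISel final : public FastISel {
public:
  MyTargetFastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo)
      : FastISel(FuncInfo, LibInfo) {}

  bool fastSelectInstruction(const Instruction *I) override {
    switch (I->getOpcode()) {
    case Instruction::Add:
      return selectAdd(I);   // hypothetical helper for one easy case
    default:
      return false;          // bail out; SelectionDAG handles it instead
    }
  }

private:
  bool selectAdd(const Instruction *I); // hypothetical
};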
42 #include "llvm/CodeGen/Analysis.h"
43 #include "llvm/ADT/Optional.h"
44 #include "llvm/ADT/Statistic.h"
46 #include "llvm/Analysis/Loads.h"
48 #include "llvm/CodeGen/Analysis.h"
49 #include "llvm/CodeGen/FastISel.h"
55 #include "llvm/CodeGen/StackMaps.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/DebugInfo.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/GlobalVariable.h"
60 #include "llvm/IR/Instructions.h"
61 #include "llvm/IR/IntrinsicInst.h"
62 #include "llvm/IR/Mangler.h"
63 #include "llvm/IR/Operator.h"
64 #include "llvm/Support/Debug.h"
71 using namespace llvm;
72 
73 #define DEBUG_TYPE "isel"
74 
75 STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
76  "target-independent selector");
77 STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
78  "target-specific selector");
79 STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
80 
82  unsigned AttrIdx) {
83  IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
84  IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
85  IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
87  IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
88  IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
91  Alignment = CS->getParamAlignment(AttrIdx);
92 }
93 
94 /// Set the current block to which generated machine instructions will be
95 /// appended, and clear the local CSE map.
96 void FastISel::startNewBlock() {
97   LocalValueMap.clear();
98 
99  // Instructions are appended to FuncInfo.MBB. If the basic block already
100  // contains labels or copies, use the last instruction as the last local
101  // value.
102  EmitStartPt = nullptr;
103  if (!FuncInfo.MBB->empty())
106 }
107 
108 bool FastISel::lowerArguments() {
109   if (!FuncInfo.CanLowerReturn)
110     // Fallback to SDISel argument lowering code to deal with sret pointer
111  // parameter.
112  return false;
113 
114  if (!fastLowerArguments())
115  return false;
116 
117  // Enter arguments into ValueMap for uses in non-entry BBs.
118   for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
119                                     E = FuncInfo.Fn->arg_end();
120  I != E; ++I) {
121     DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(I);
122     assert(VI != LocalValueMap.end() && "Missed an argument?");
123  FuncInfo.ValueMap[I] = VI->second;
124  }
125  return true;
126 }
127 
128 void FastISel::flushLocalValueMap() {
132  SavedInsertPt = FuncInfo.InsertPt;
133 }
134 
135 bool FastISel::hasTrivialKill(const Value *V) {
136   // Don't consider constants or arguments to have trivial kills.
137  const Instruction *I = dyn_cast<Instruction>(V);
138  if (!I)
139  return false;
140 
141  // No-op casts are trivially coalesced by fast-isel.
142  if (const auto *Cast = dyn_cast<CastInst>(I))
143  if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
144  !hasTrivialKill(Cast->getOperand(0)))
145  return false;
146 
147  // Even if the value has only one use in the LLVM IR, it is possible that
148  // FastISel might fold the use into another instruction, so that there is more
149  // than one use at the Machine Instruction level.
150  unsigned Reg = lookUpRegForValue(V);
151  if (Reg && !MRI.use_empty(Reg))
152  return false;
153 
154  // GEPs with all zero indices are trivially coalesced by fast-isel.
155  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
156  if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
157  return false;
158 
159  // Only instructions with a single use in the same basic block are considered
160  // to have trivial kills.
161  return I->hasOneUse() &&
162  !(I->getOpcode() == Instruction::BitCast ||
163  I->getOpcode() == Instruction::PtrToInt ||
164  I->getOpcode() == Instruction::IntToPtr) &&
165  cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
166 }
167 
168 unsigned FastISel::getRegForValue(const Value *V) {
169  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
170  // Don't handle non-simple values in FastISel.
171  if (!RealVT.isSimple())
172  return 0;
173 
174  // Ignore illegal types. We must do this before looking up the value
175  // in ValueMap because Arguments are given virtual registers regardless
176  // of whether FastISel can handle them.
177  MVT VT = RealVT.getSimpleVT();
178  if (!TLI.isTypeLegal(VT)) {
179  // Handle integer promotions, though, because they're common and easy.
180  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
181  VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
182  else
183  return 0;
184  }
185 
186  // Look up the value to see if we already have a register for it.
187  unsigned Reg = lookUpRegForValue(V);
188  if (Reg)
189  return Reg;
190 
191  // In bottom-up mode, just create the virtual register which will be used
192  // to hold the value. It will be materialized later.
193  if (isa<Instruction>(V) &&
194  (!isa<AllocaInst>(V) ||
195  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
196     return FuncInfo.InitializeRegForValue(V);
197 
198  SavePoint SaveInsertPt = enterLocalValueArea();
199 
200  // Materialize the value in a register. Emit any instructions in the
201  // local value area.
202  Reg = materializeRegForValue(V, VT);
203 
204  leaveLocalValueArea(SaveInsertPt);
205 
206  return Reg;
207 }
208 
209 unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
210  unsigned Reg = 0;
211  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
212  if (CI->getValue().getActiveBits() <= 64)
213  Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
214  } else if (isa<AllocaInst>(V))
215  Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
216  else if (isa<ConstantPointerNull>(V))
217  // Translate this as an integer zero so that it can be
218  // local-CSE'd with actual integer zeros.
219  Reg = getRegForValue(
221  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
222  if (CF->isNullValue())
223  Reg = fastMaterializeFloatZero(CF);
224  else
225  // Try to emit the constant directly.
226  Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
227 
228  if (!Reg) {
229  // Try to emit the constant by using an integer constant with a cast.
230  const APFloat &Flt = CF->getValueAPF();
231  EVT IntVT = TLI.getPointerTy(DL);
232 
233  uint64_t x[2];
234  uint32_t IntBitWidth = IntVT.getSizeInBits();
235  bool isExact;
236  (void)Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
237  APFloat::rmTowardZero, &isExact);
238  if (isExact) {
239  APInt IntVal(IntBitWidth, x);
240 
241  unsigned IntegerReg =
242             getRegForValue(ConstantInt::get(V->getContext(), IntVal));
243         if (IntegerReg != 0)
244  Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
245  /*Kill=*/false);
246  }
247  }
248  } else if (const auto *Op = dyn_cast<Operator>(V)) {
249  if (!selectOperator(Op, Op->getOpcode()))
250  if (!isa<Instruction>(Op) ||
251  !fastSelectInstruction(cast<Instruction>(Op)))
252  return 0;
253  Reg = lookUpRegForValue(Op);
254  } else if (isa<UndefValue>(V)) {
258  }
259  return Reg;
260 }
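As a concrete illustration of the float-through-integer path above: a constant like 42.0 converts to the integer 42 exactly, so it can be emitted as an integer constant followed by SINT_TO_FP, whereas 0.5 fails the isExact test and falls through. A self-contained sketch of that exactness check using only APFloat/APInt — assumed to mirror the logic above, not taken verbatim from it:

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include <cstdint>

// Returns true and fills IntVal when F holds an exact integer value
// (e.g. 42.0), i.e. when the SINT_TO_FP materialization path is usable.
static bool floatHasExactIntValue(const llvm::APFloat &F, unsigned BitWidth,
                                  llvm::APInt &IntVal) {
  uint64_t Parts[2];
  bool IsExact;
  (void)F.convertToInteger(Parts, BitWidth, /*isSigned=*/true,
                           llvm::APFloat::rmTowardZero, &IsExact);
  if (!IsExact)
    return false;
  IntVal = llvm::APInt(BitWidth, Parts);
  return true;
}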
261 
262 /// Helper for getRegForValue. This function is called when the value isn't
263 /// already available in a register and must be materialized with new
264 /// instructions.
265 unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
266  unsigned Reg = 0;
267  // Give the target-specific code a try first.
268  if (isa<Constant>(V))
269  Reg = fastMaterializeConstant(cast<Constant>(V));
270 
271  // If target-specific code couldn't or didn't want to handle the value, then
272  // give target-independent code a try.
273  if (!Reg)
274  Reg = materializeConstant(V, VT);
275 
276  // Don't cache constant materializations in the general ValueMap.
277  // To do so would require tracking what uses they dominate.
278  if (Reg) {
279  LocalValueMap[V] = Reg;
281  }
282  return Reg;
283 }
284 
285 unsigned FastISel::lookUpRegForValue(const Value *V) {
286  // Look up the value to see if we already have a register for it. We
287  // cache values defined by Instructions across blocks, and other values
288  // only locally. This is because Instructions already have the SSA
289  // def-dominates-use requirement enforced.
290   DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
291   if (I != FuncInfo.ValueMap.end())
292  return I->second;
293  return LocalValueMap[V];
294 }
295 
296 void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
297  if (!isa<Instruction>(I)) {
298  LocalValueMap[I] = Reg;
299  return;
300  }
301 
302  unsigned &AssignedReg = FuncInfo.ValueMap[I];
303  if (AssignedReg == 0)
304  // Use the new register.
305  AssignedReg = Reg;
306  else if (Reg != AssignedReg) {
307  // Arrange for uses of AssignedReg to be replaced by uses of Reg.
308  for (unsigned i = 0; i < NumRegs; i++)
309  FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
310 
311  AssignedReg = Reg;
312  }
313 }
314 
315 std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
316  unsigned IdxN = getRegForValue(Idx);
317  if (IdxN == 0)
318  // Unhandled operand. Halt "fast" selection and bail.
319  return std::pair<unsigned, bool>(0, false);
320 
321  bool IdxNIsKill = hasTrivialKill(Idx);
322 
323  // If the index is smaller or larger than intptr_t, truncate or extend it.
324  MVT PtrVT = TLI.getPointerTy(DL);
325  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
326  if (IdxVT.bitsLT(PtrVT)) {
327  IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
328  IdxNIsKill);
329  IdxNIsKill = true;
330  } else if (IdxVT.bitsGT(PtrVT)) {
331  IdxN =
332  fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
333  IdxNIsKill = true;
334  }
335  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
336 }
337 
339  if (getLastLocalValue()) {
341  FuncInfo.MBB = FuncInfo.InsertPt->getParent();
342  ++FuncInfo.InsertPt;
343  } else
345 
346  // Now skip past any EH_LABELs, which must remain at the beginning.
347  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
348  FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
349  ++FuncInfo.InsertPt;
350 }
351 
354  assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!");
355  while (I != E) {
356  MachineInstr *Dead = &*I;
357  ++I;
358  Dead->eraseFromParent();
359  ++NumFastIselDead;
360  }
362 }
363 
366  DebugLoc OldDL = DbgLoc;
368  DbgLoc = DebugLoc();
369  SavePoint SP = {OldInsertPt, OldDL};
370  return SP;
371 }
372 
373 void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
374   if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
375  LastLocalValue = std::prev(FuncInfo.InsertPt);
376 
377  // Restore the previous insert position.
378  FuncInfo.InsertPt = OldInsertPt.InsertPt;
379  DbgLoc = OldInsertPt.DL;
380 }
381 
382 bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
383  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
384  if (VT == MVT::Other || !VT.isSimple())
385  // Unhandled type. Halt "fast" selection and bail.
386  return false;
387 
388  // We only handle legal types. For example, on x86-32 the instruction
389  // selector contains all of the 64-bit instructions from x86-64,
390  // under the assumption that i64 won't be used if the target doesn't
391  // support it.
392  if (!TLI.isTypeLegal(VT)) {
393  // MVT::i1 is special. Allow AND, OR, or XOR because they
394  // don't require additional zeroing, which makes them easy.
395  if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
396  ISDOpcode == ISD::XOR))
397  VT = TLI.getTypeToTransformTo(I->getContext(), VT);
398  else
399  return false;
400  }
401 
402  // Check if the first operand is a constant, and handle it as "ri". At -O0,
403  // we don't have anything that canonicalizes operand order.
404  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
405  if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
406  unsigned Op1 = getRegForValue(I->getOperand(1));
407  if (!Op1)
408  return false;
409  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
410 
411  unsigned ResultReg =
412  fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
413  CI->getZExtValue(), VT.getSimpleVT());
414  if (!ResultReg)
415  return false;
416 
417  // We successfully emitted code for the given LLVM Instruction.
418  updateValueMap(I, ResultReg);
419  return true;
420  }
421 
422  unsigned Op0 = getRegForValue(I->getOperand(0));
423  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
424  return false;
425  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
426 
427  // Check if the second operand is a constant and handle it appropriately.
428  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
429  uint64_t Imm = CI->getSExtValue();
430 
431  // Transform "sdiv exact X, 8" -> "sra X, 3".
432  if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
433  cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
434  Imm = Log2_64(Imm);
435  ISDOpcode = ISD::SRA;
436  }
437 
438  // Transform "urem x, pow2" -> "and x, pow2-1".
439  if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
440  isPowerOf2_64(Imm)) {
441  --Imm;
442  ISDOpcode = ISD::AND;
443  }
444 
445  unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
446  Op0IsKill, Imm, VT.getSimpleVT());
447  if (!ResultReg)
448  return false;
449 
450  // We successfully emitted code for the given LLVM Instruction.
451  updateValueMap(I, ResultReg);
452  return true;
453  }
454 
455  // Check if the second operand is a constant float.
456  if (const auto *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
457  unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
458  ISDOpcode, Op0, Op0IsKill, CF);
459  if (ResultReg) {
460  // We successfully emitted code for the given LLVM Instruction.
461  updateValueMap(I, ResultReg);
462  return true;
463  }
464  }
465 
466  unsigned Op1 = getRegForValue(I->getOperand(1));
467  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
468  return false;
469  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
470 
471  // Now we have both operands in registers. Emit the instruction.
472  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
473  ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
474  if (!ResultReg)
475  // Target-specific code wasn't able to find a machine opcode for
476  // the given ISD opcode and type. Halt "fast" selection and bail.
477  return false;
478 
479  // We successfully emitted code for the given LLVM Instruction.
480  updateValueMap(I, ResultReg);
481  return true;
482 }
483 
484 bool FastISel::selectGetElementPtr(const User *I) {
485   unsigned N = getRegForValue(I->getOperand(0));
486  if (!N) // Unhandled operand. Halt "fast" selection and bail.
487  return false;
488  bool NIsKill = hasTrivialKill(I->getOperand(0));
489 
490  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
491  // into a single N = N + TotalOffset.
492  uint64_t TotalOffs = 0;
493  // FIXME: What's a good SWAG number for MaxOffs?
494  uint64_t MaxOffs = 2048;
495  Type *Ty = I->getOperand(0)->getType();
496  MVT VT = TLI.getPointerTy(DL);
498  E = I->op_end();
499  OI != E; ++OI) {
500  const Value *Idx = *OI;
501  if (auto *StTy = dyn_cast<StructType>(Ty)) {
502  uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
503  if (Field) {
504  // N = N + Offset
505  TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
506  if (TotalOffs >= MaxOffs) {
507  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
508  if (!N) // Unhandled operand. Halt "fast" selection and bail.
509  return false;
510  NIsKill = true;
511  TotalOffs = 0;
512  }
513  }
514  Ty = StTy->getElementType(Field);
515  } else {
516  Ty = cast<SequentialType>(Ty)->getElementType();
517 
518  // If this is a constant subscript, handle it quickly.
519  if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
520  if (CI->isZero())
521  continue;
522  // N = N + Offset
523  uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
524  TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
525  if (TotalOffs >= MaxOffs) {
526  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
527  if (!N) // Unhandled operand. Halt "fast" selection and bail.
528  return false;
529  NIsKill = true;
530  TotalOffs = 0;
531  }
532  continue;
533  }
534  if (TotalOffs) {
535  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
536  if (!N) // Unhandled operand. Halt "fast" selection and bail.
537  return false;
538  NIsKill = true;
539  TotalOffs = 0;
540  }
541 
542  // N = N + Idx * ElementSize;
543  uint64_t ElementSize = DL.getTypeAllocSize(Ty);
544  std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
545  unsigned IdxN = Pair.first;
546  bool IdxNIsKill = Pair.second;
547  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
548  return false;
549 
550  if (ElementSize != 1) {
551  IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
552  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
553  return false;
554  IdxNIsKill = true;
555  }
556  N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
557  if (!N) // Unhandled operand. Halt "fast" selection and bail.
558  return false;
559  }
560  }
561  if (TotalOffs) {
562  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
563  if (!N) // Unhandled operand. Halt "fast" selection and bail.
564  return false;
565  }
566 
567  // We successfully emitted code for the given LLVM Instruction.
568  updateValueMap(I, N);
569  return true;
570 }
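For intuition on the offset coalescing above: a GEP whose indices are all constants collapses into a single ADD of the accumulated byte offset, which is exactly what ordinary field access compiles to. A small standalone sketch — the struct and helper are made up for illustration:

#include <cstddef>
#include <cstdint>

// The GEP "getelementptr %S, %S* %P, i32 0, i32 1" reduces to one addition
// of the field's byte offset, the single "N = N + TotalOffs" emitted above.
struct S {
  int32_t A;
  int64_t B;
};

static uint8_t *fieldAddressOfB(S *P) {
  return reinterpret_cast<uint8_t *>(P) + offsetof(S, B);
}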
571 
572 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
573  const CallInst *CI, unsigned StartIdx) {
574  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
575  Value *Val = CI->getArgOperand(i);
576  // Check for constants and encode them with a StackMaps::ConstantOp prefix.
577  if (const auto *C = dyn_cast<ConstantInt>(Val)) {
579  Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
580  } else if (isa<ConstantPointerNull>(Val)) {
583  } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
584  // Values coming from a stack location also require a special encoding,
585  // but that is added later on by the target-specific frame index
586  // elimination implementation.
587  auto SI = FuncInfo.StaticAllocaMap.find(AI);
588  if (SI != FuncInfo.StaticAllocaMap.end())
589  Ops.push_back(MachineOperand::CreateFI(SI->second));
590  else
591  return false;
592  } else {
593  unsigned Reg = getRegForValue(Val);
594  if (!Reg)
595  return false;
596  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
597  }
598  }
599  return true;
600 }
601 
602 bool FastISel::selectStackmap(const CallInst *I) {
603   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
604  // [live variables...])
605  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
606  "Stackmap cannot return a value.");
607 
608  // The stackmap intrinsic only records the live variables (the arguments
609  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
610  // intrinsic, this won't be lowered to a function call. This means we don't
611  // have to worry about calling conventions and target-specific lowering code.
612  // Instead we perform the call lowering right here.
613  //
614  // CALLSEQ_START(0)
615  // STACKMAP(id, nbytes, ...)
616  // CALLSEQ_END(0, 0)
617  //
618   SmallVector<MachineOperand, 32> Ops;
619 
620  // Add the <id> and <numBytes> constants.
621  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
622  "Expected a constant integer.");
623  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
624  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
625 
626  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
627  "Expected a constant integer.");
628  const auto *NumBytes =
629  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
630  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
631 
632  // Push live variables for the stack map (skipping the first two arguments
633  // <id> and <numBytes>).
634  if (!addStackMapLiveVars(Ops, I, 2))
635  return false;
636 
637  // We are not adding any register mask info here, because the stackmap doesn't
638  // clobber anything.
639 
640  // Add scratch registers as implicit def and early clobber.
641   CallingConv::ID CC = I->getCallingConv();
642   const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
643  for (unsigned i = 0; ScratchRegs[i]; ++i)
644  Ops.push_back(MachineOperand::CreateReg(
645  ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
646  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
647 
648  // Issue CALLSEQ_START
649  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
650  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
651  .addImm(0);
652 
653  // Issue STACKMAP.
654   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
655                                     TII.get(TargetOpcode::STACKMAP));
656   for (auto const &MO : Ops)
657  MIB.addOperand(MO);
658 
659  // Issue CALLSEQ_END
660  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
661  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
662  .addImm(0)
663  .addImm(0);
664 
665  // Inform the Frame Information that we have a stackmap in this function.
666   FuncInfo.MF->getFrameInfo()->setHasStackMap();
667 
668  return true;
669 }
670 
671 /// \brief Lower an argument list according to the target calling convention.
672 ///
673 /// This is a helper for lowering intrinsics that follow a target calling
674 /// convention or require stack pointer adjustment. Only a subset of the
675 /// intrinsic's operands need to participate in the calling convention.
676 bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
677  unsigned NumArgs, const Value *Callee,
678  bool ForceRetVoidTy, CallLoweringInfo &CLI) {
679  ArgListTy Args;
680  Args.reserve(NumArgs);
681 
682  // Populate the argument list.
683  // Attributes for args start at offset 1, after the return attribute.
684  ImmutableCallSite CS(CI);
685  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
686  ArgI != ArgE; ++ArgI) {
687  Value *V = CI->getOperand(ArgI);
688 
689  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
690 
691  ArgListEntry Entry;
692  Entry.Val = V;
693  Entry.Ty = V->getType();
694  Entry.setAttributes(&CS, AttrI);
695  Args.push_back(Entry);
696  }
697 
698  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
699  : CI->getType();
700  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
701 
702  return lowerCallTo(CLI);
703 }
704 
706  const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
707  const char *Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
708  SmallString<32> MangledName;
709  Mangler::getNameWithPrefix(MangledName, Target, DL);
710  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
711  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
712 }
713 
714 bool FastISel::selectPatchpoint(const CallInst *I) {
715   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
716  // i32 <numBytes>,
717  // i8* <target>,
718  // i32 <numArgs>,
719  // [Args...],
720  // [live variables...])
721   CallingConv::ID CC = I->getCallingConv();
722   bool IsAnyRegCC = CC == CallingConv::AnyReg;
723  bool HasDef = !I->getType()->isVoidTy();
725 
726  // Get the real number of arguments participating in the call <numArgs>
727  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
728  "Expected a constant integer.");
729  const auto *NumArgsVal =
730  cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
731  unsigned NumArgs = NumArgsVal->getZExtValue();
732 
733  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
734  // This includes all meta-operands up to but not including CC.
735  unsigned NumMetaOpers = PatchPointOpers::CCPos;
736  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
737  "Not enough arguments provided to the patchpoint intrinsic");
738 
739  // For AnyRegCC the arguments are lowered later on manually.
740  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
741  CallLoweringInfo CLI;
742  CLI.setIsPatchPoint();
743  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
744  return false;
745 
746  assert(CLI.Call && "No call instruction specified.");
747 
749 
750  // Add an explicit result reg if we use the anyreg calling convention.
751  if (IsAnyRegCC && HasDef) {
752  assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
754  CLI.NumResultRegs = 1;
755  Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
756  }
757 
758  // Add the <id> and <numBytes> constants.
759  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
760  "Expected a constant integer.");
761  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
762  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
763 
764  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
765  "Expected a constant integer.");
766  const auto *NumBytes =
767  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
768  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
769 
770  // Add the call target.
771  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
772  uint64_t CalleeConstAddr =
773  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
774  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
775  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
776  if (C->getOpcode() == Instruction::IntToPtr) {
777  uint64_t CalleeConstAddr =
778  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
779  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
780  } else
781  llvm_unreachable("Unsupported ConstantExpr.");
782  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
784  } else if (isa<ConstantPointerNull>(Callee))
786  else
787  llvm_unreachable("Unsupported callee address.");
788 
789  // Adjust <numArgs> to account for any arguments that have been passed on
790  // the stack instead.
791  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
792  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
793 
794  // Add the calling convention
795  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
796 
797  // Add the arguments we omitted previously. The register allocator should
798  // place these in any free register.
799  if (IsAnyRegCC) {
800  for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
801  unsigned Reg = getRegForValue(I->getArgOperand(i));
802  if (!Reg)
803  return false;
804  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
805  }
806  }
807 
808  // Push the arguments from the call instruction.
809  for (auto Reg : CLI.OutRegs)
810  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
811 
812  // Push live variables for the stack map.
813  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
814  return false;
815 
816  // Push the register mask info.
819 
820  // Add scratch registers as implicit def and early clobber.
821  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
822  for (unsigned i = 0; ScratchRegs[i]; ++i)
824  ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
825  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
826 
827  // Add implicit defs (return values).
828  for (auto Reg : CLI.InRegs)
829  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
830  /*IsImpl=*/true));
831 
832  // Insert the patchpoint instruction before the call generated by the target.
835 
836  for (auto &MO : Ops)
837  MIB.addOperand(MO);
838 
839  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
840 
841  // Delete the original call instruction.
842  CLI.Call->eraseFromParent();
843 
844  // Inform the Frame Information that we have a patchpoint in this function.
845   FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
846 
847  if (CLI.NumResultRegs)
848     updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
849   return true;
850 }
851 
852 /// Returns an AttributeSet representing the attributes applied to the return
853 /// value of the given call.
856  if (CLI.RetSExt)
857  Attrs.push_back(Attribute::SExt);
858  if (CLI.RetZExt)
859  Attrs.push_back(Attribute::ZExt);
860  if (CLI.IsInReg)
861     Attrs.push_back(Attribute::InReg);
862 
863  return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
864  Attrs);
865 }
866 
867 bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
868  unsigned NumArgs) {
869  MCContext &Ctx = MF->getContext();
870  SmallString<32> MangledName;
871  Mangler::getNameWithPrefix(MangledName, SymName, DL);
872  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
873  return lowerCallTo(CI, Sym, NumArgs);
874 }
875 
876 bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
877                            unsigned NumArgs) {
878  ImmutableCallSite CS(CI);
879 
880  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
881  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
882  Type *RetTy = FTy->getReturnType();
883 
884  ArgListTy Args;
885  Args.reserve(NumArgs);
886 
887  // Populate the argument list.
888  // Attributes for args start at offset 1, after the return attribute.
889  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
890  Value *V = CI->getOperand(ArgI);
891 
892  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
893 
894  ArgListEntry Entry;
895  Entry.Val = V;
896  Entry.Ty = V->getType();
897  Entry.setAttributes(&CS, ArgI + 1);
898  Args.push_back(Entry);
899  }
900 
901  CallLoweringInfo CLI;
902  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
903 
904  return lowerCallTo(CLI);
905 }
906 
907 bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
908   // Handle the incoming return values from the call.
909  CLI.clearIns();
910  SmallVector<EVT, 4> RetTys;
911  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
912 
914  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
915 
916  bool CanLowerReturn = TLI.CanLowerReturn(
917  CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
918 
919  // FIXME: sret demotion isn't supported yet - bail out.
920  if (!CanLowerReturn)
921  return false;
922 
923  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
924  EVT VT = RetTys[I];
925  MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
926  unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
927  for (unsigned i = 0; i != NumRegs; ++i) {
928  ISD::InputArg MyFlags;
929  MyFlags.VT = RegisterVT;
930  MyFlags.ArgVT = VT;
931  MyFlags.Used = CLI.IsReturnValueUsed;
932  if (CLI.RetSExt)
933  MyFlags.Flags.setSExt();
934  if (CLI.RetZExt)
935  MyFlags.Flags.setZExt();
936  if (CLI.IsInReg)
937  MyFlags.Flags.setInReg();
938  CLI.Ins.push_back(MyFlags);
939  }
940  }
941 
942  // Handle all of the outgoing arguments.
943  CLI.clearOuts();
944  for (auto &Arg : CLI.getArgs()) {
945  Type *FinalType = Arg.Ty;
946  if (Arg.IsByVal)
947  FinalType = cast<PointerType>(Arg.Ty)->getElementType();
948  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
949  FinalType, CLI.CallConv, CLI.IsVarArg);
950 
952  if (Arg.IsZExt)
953  Flags.setZExt();
954  if (Arg.IsSExt)
955  Flags.setSExt();
956  if (Arg.IsInReg)
957  Flags.setInReg();
958  if (Arg.IsSRet)
959  Flags.setSRet();
960  if (Arg.IsByVal)
961  Flags.setByVal();
962  if (Arg.IsInAlloca) {
963  Flags.setInAlloca();
964  // Set the byval flag for CCAssignFn callbacks that don't know about
965  // inalloca. This way we can know how many bytes we should've allocated
966  // and how many bytes a callee cleanup function will pop. If we port
967  // inalloca to more targets, we'll have to add custom inalloca handling in
968  // the various CC lowering callbacks.
969  Flags.setByVal();
970  }
971  if (Arg.IsByVal || Arg.IsInAlloca) {
972  PointerType *Ty = cast<PointerType>(Arg.Ty);
973  Type *ElementTy = Ty->getElementType();
974  unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
975  // For ByVal, alignment should come from the frontend (FE); the backend (BE)
976  // will guess if this info is not there, but there are cases it cannot get right.
977  unsigned FrameAlign = Arg.Alignment;
978  if (!FrameAlign)
979  FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
980  Flags.setByValSize(FrameSize);
981  Flags.setByValAlign(FrameAlign);
982  }
983  if (Arg.IsNest)
984  Flags.setNest();
985  if (NeedsRegBlock)
986  Flags.setInConsecutiveRegs();
987  unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
988  Flags.setOrigAlign(OriginalAlignment);
989 
990  CLI.OutVals.push_back(Arg.Val);
991  CLI.OutFlags.push_back(Flags);
992  }
993 
994  if (!fastLowerCall(CLI))
995  return false;
996 
997  // Set all unused physreg defs as dead.
998  assert(CLI.Call && "No call instruction specified.");
999   CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
1000 
1001  if (CLI.NumResultRegs && CLI.CS)
1002     updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);
1003 
1004  return true;
1005 }
1006 
1007 bool FastISel::lowerCall(const CallInst *CI) {
1008   ImmutableCallSite CS(CI);
1009 
1010  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
1011  FunctionType *FuncTy = cast<FunctionType>(PT->getElementType());
1012  Type *RetTy = FuncTy->getReturnType();
1013 
1014  ArgListTy Args;
1015  ArgListEntry Entry;
1016  Args.reserve(CS.arg_size());
1017 
1018  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1019  i != e; ++i) {
1020  Value *V = *i;
1021 
1022  // Skip empty types
1023  if (V->getType()->isEmptyTy())
1024  continue;
1025 
1026  Entry.Val = V;
1027  Entry.Ty = V->getType();
1028 
1029  // Skip the first return-type Attribute to get to params.
1030  Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
1031  Args.push_back(Entry);
1032  }
1033 
1034  // Check if target-independent constraints permit a tail call here.
1035  // Target-dependent constraints are checked within fastLowerCall.
1036  bool IsTailCall = CI->isTailCall();
1037  if (IsTailCall && !isInTailCallPosition(CS, TM))
1038  IsTailCall = false;
1039 
1040  CallLoweringInfo CLI;
1041  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
1042  .setTailCall(IsTailCall);
1043 
1044  return lowerCallTo(CLI);
1045 }
1046 
1047 bool FastISel::selectCall(const User *I) {
1048   const CallInst *Call = cast<CallInst>(I);
1049 
1050  // Handle simple inline asms.
1051  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
1052  // If the inline asm has side effects, then make sure that no local value
1053  // lives across by flushing the local value map.
1054  if (IA->hasSideEffects())
1055  flushLocalValueMap();
1056 
1057  // Don't attempt to handle constraints.
1058  if (!IA->getConstraintString().empty())
1059  return false;
1060 
1061  unsigned ExtraInfo = 0;
1062  if (IA->hasSideEffects())
1063  ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1064  if (IA->isAlignStack())
1065  ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1066 
1069  .addExternalSymbol(IA->getAsmString().c_str())
1070  .addImm(ExtraInfo);
1071  return true;
1072  }
1073 
1074  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
1075  ComputeUsesVAFloatArgument(*Call, &MMI);
1076 
1077  // Handle intrinsic function calls.
1078  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1079  return selectIntrinsicCall(II);
1080 
1081  // Usually it does not make sense to initialize a value,
1082  // make an unrelated function call, and then use the value, because
1083  // the value tends to be spilled to the stack. So we move the pointer
1084  // to the last local value to the beginning of the block, so that
1085  // all the values which have already been materialized
1086  // appear after the call. It also makes sense to skip intrinsics
1087  // since they tend to be inlined.
1088  flushLocalValueMap();
1089 
1090  return lowerCall(Call);
1091 }
1092 
1093 bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
1094   switch (II->getIntrinsicID()) {
1095  default:
1096  break;
1097  // At -O0 we don't care about the lifetime intrinsics.
1098  case Intrinsic::lifetime_start:
1099  case Intrinsic::lifetime_end:
1100  // The donothing intrinsic does, well, nothing.
1101  case Intrinsic::donothing:
1102  return true;
1103  case Intrinsic::eh_actions: {
1104  unsigned ResultReg = getRegForValue(UndefValue::get(II->getType()));
1105  if (!ResultReg)
1106  return false;
1107  updateValueMap(II, ResultReg);
1108  return true;
1109  }
1110  case Intrinsic::dbg_declare: {
1111  const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1112  assert(DI->getVariable() && "Missing variable");
1113  if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1114  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1115  return true;
1116  }
1117 
1118  const Value *Address = DI->getAddress();
1119  if (!Address || isa<UndefValue>(Address)) {
1120  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1121  return true;
1122  }
1123 
1124  unsigned Offset = 0;
1125     Optional<MachineOperand> Op;
1126     if (const auto *Arg = dyn_cast<Argument>(Address))
1127  // Some arguments' frame index is recorded during argument lowering.
1128  Offset = FuncInfo.getArgumentFrameIndex(Arg);
1129  if (Offset)
1130  Op = MachineOperand::CreateFI(Offset);
1131  if (!Op)
1132  if (unsigned Reg = lookUpRegForValue(Address))
1133  Op = MachineOperand::CreateReg(Reg, false);
1134 
1135  // If we have a VLA that has a "use" in a metadata node that's then used
1136  // here but it has no other uses, then we have a problem. E.g.,
1137  //
1138  // int foo (const int *x) {
1139  // char a[*x];
1140  // return 0;
1141  // }
1142  //
1143  // If we assign 'a' a vreg and fast isel later on has to use the selection
1144  // DAG isel, it will want to copy the value to the vreg. However, there are
1145  // no uses, which goes counter to what selection DAG isel expects.
1146  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1147  (!isa<AllocaInst>(Address) ||
1148  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1150  false);
1151 
1152  if (Op) {
1154  "Expected inlined-at fields to agree");
1155  if (Op->isReg()) {
1156  Op->setIsDebug(true);
1158  TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
1159  DI->getVariable(), DI->getExpression());
1160  } else
1163  .addOperand(*Op)
1164  .addImm(0)
1165  .addMetadata(DI->getVariable())
1166  .addMetadata(DI->getExpression());
1167  } else {
1168  // We can't yet handle anything else here because it would require
1169  // generating code, thus altering codegen because of debug info.
1170  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1171  }
1172  return true;
1173  }
1174  case Intrinsic::dbg_value: {
1175  // This form of DBG_VALUE is target-independent.
1176  const DbgValueInst *DI = cast<DbgValueInst>(II);
1178  const Value *V = DI->getValue();
1180  "Expected inlined-at fields to agree");
1181  if (!V) {
1182  // Currently the optimizer can produce this; insert an undef to
1183  // help debugging. Probably the optimizer should not do this.
1185  .addReg(0U)
1186  .addImm(DI->getOffset())
1187  .addMetadata(DI->getVariable())
1188  .addMetadata(DI->getExpression());
1189  } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1190  if (CI->getBitWidth() > 64)
1192  .addCImm(CI)
1193  .addImm(DI->getOffset())
1194  .addMetadata(DI->getVariable())
1195  .addMetadata(DI->getExpression());
1196  else
1198  .addImm(CI->getZExtValue())
1199  .addImm(DI->getOffset())
1200  .addMetadata(DI->getVariable())
1201  .addMetadata(DI->getExpression());
1202  } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1204  .addFPImm(CF)
1205  .addImm(DI->getOffset())
1206  .addMetadata(DI->getVariable())
1207  .addMetadata(DI->getExpression());
1208  } else if (unsigned Reg = lookUpRegForValue(V)) {
1209  // FIXME: This does not handle register-indirect values at offset 0.
1210  bool IsIndirect = DI->getOffset() != 0;
1211  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1212  DI->getOffset(), DI->getVariable(), DI->getExpression());
1213  } else {
1214  // We can't yet handle anything else here because it would require
1215  // generating code, thus altering codegen because of debug info.
1216  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1217  }
1218  return true;
1219  }
1220  case Intrinsic::objectsize: {
1221  ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
1222  unsigned long long Res = CI->isZero() ? -1ULL : 0;
1223  Constant *ResCI = ConstantInt::get(II->getType(), Res);
1224  unsigned ResultReg = getRegForValue(ResCI);
1225  if (!ResultReg)
1226  return false;
1227  updateValueMap(II, ResultReg);
1228  return true;
1229  }
1230  case Intrinsic::expect: {
1231  unsigned ResultReg = getRegForValue(II->getArgOperand(0));
1232  if (!ResultReg)
1233  return false;
1234  updateValueMap(II, ResultReg);
1235  return true;
1236  }
1237  case Intrinsic::experimental_stackmap:
1238  return selectStackmap(II);
1239  case Intrinsic::experimental_patchpoint_void:
1240  case Intrinsic::experimental_patchpoint_i64:
1241  return selectPatchpoint(II);
1242  }
1243 
1244  return fastLowerIntrinsicCall(II);
1245 }
1246 
1247 bool FastISel::selectCast(const User *I, unsigned Opcode) {
1248  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1249  EVT DstVT = TLI.getValueType(DL, I->getType());
1250 
1251  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1252  !DstVT.isSimple())
1253  // Unhandled type. Halt "fast" selection and bail.
1254  return false;
1255 
1256  // Check if the destination type is legal.
1257  if (!TLI.isTypeLegal(DstVT))
1258  return false;
1259 
1260  // Check if the source operand is legal.
1261  if (!TLI.isTypeLegal(SrcVT))
1262  return false;
1263 
1264  unsigned InputReg = getRegForValue(I->getOperand(0));
1265  if (!InputReg)
1266  // Unhandled operand. Halt "fast" selection and bail.
1267  return false;
1268 
1269  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
1270 
1271  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1272  Opcode, InputReg, InputRegIsKill);
1273  if (!ResultReg)
1274  return false;
1275 
1276  updateValueMap(I, ResultReg);
1277  return true;
1278 }
1279 
1280 bool FastISel::selectBitCast(const User *I) {
1281   // If the bitcast doesn't change the type, just use the operand value.
1282  if (I->getType() == I->getOperand(0)->getType()) {
1283  unsigned Reg = getRegForValue(I->getOperand(0));
1284  if (!Reg)
1285  return false;
1286  updateValueMap(I, Reg);
1287  return true;
1288  }
1289 
1290  // Bitcasts of other values become reg-reg copies or BITCAST operators.
1291  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1292  EVT DstEVT = TLI.getValueType(DL, I->getType());
1293  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1294  !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1295  // Unhandled type. Halt "fast" selection and bail.
1296  return false;
1297 
1298  MVT SrcVT = SrcEVT.getSimpleVT();
1299  MVT DstVT = DstEVT.getSimpleVT();
1300  unsigned Op0 = getRegForValue(I->getOperand(0));
1301  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1302  return false;
1303  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1304 
1305  // First, try to perform the bitcast by inserting a reg-reg copy.
1306  unsigned ResultReg = 0;
1307  if (SrcVT == DstVT) {
1308  const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1309  const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1310  // Don't attempt a cross-class copy. It will likely fail.
1311  if (SrcClass == DstClass) {
1312  ResultReg = createResultReg(DstClass);
1313       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1314               TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1315  }
1316  }
1317 
1318  // If the reg-reg copy failed, select a BITCAST opcode.
1319  if (!ResultReg)
1320  ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1321 
1322  if (!ResultReg)
1323  return false;
1324 
1325  updateValueMap(I, ResultReg);
1326  return true;
1327 }
1328 
1330  // Just before the terminator instruction, insert instructions to
1331  // feed PHI nodes in successor blocks.
1332  if (isa<TerminatorInst>(I))
1333  if (!handlePHINodesInSuccessorBlocks(I->getParent()))
1334  return false;
1335 
1336  DbgLoc = I->getDebugLoc();
1337 
1338  SavedInsertPt = FuncInfo.InsertPt;
1339 
1340  if (const auto *Call = dyn_cast<CallInst>(I)) {
1341  const Function *F = Call->getCalledFunction();
1342     LibFunc::Func Func;
1343 
1344  // As a special case, don't handle calls to builtin library functions that
1345  // may be translated directly to target instructions.
1346  if (F && !F->hasLocalLinkage() && F->hasName() &&
1347  LibInfo->getLibFunc(F->getName(), Func) &&
1348       LibInfo->hasOptimizedCodeGen(Func))
1349       return false;
1350 
1351   // Don't handle Intrinsic::trap if a trap function is specified.
1352  if (F && F->getIntrinsicID() == Intrinsic::trap &&
1353  Call->hasFnAttr("trap-func-name"))
1354  return false;
1355  }
1356 
1357  // First, try doing target-independent selection.
1358   if (!SkipTargetIndependentISel) {
1359     if (selectOperator(I, I->getOpcode())) {
1360  ++NumFastIselSuccessIndependent;
1361  DbgLoc = DebugLoc();
1362  return true;
1363  }
1364  // Remove dead code.
1366  if (SavedInsertPt != FuncInfo.InsertPt)
1367  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1368  SavedInsertPt = FuncInfo.InsertPt;
1369  }
1370  // Next, try calling the target to attempt to handle the instruction.
1371  if (fastSelectInstruction(I)) {
1372  ++NumFastIselSuccessTarget;
1373  DbgLoc = DebugLoc();
1374  return true;
1375  }
1376  // Remove dead code.
1378  if (SavedInsertPt != FuncInfo.InsertPt)
1379  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1380 
1381  DbgLoc = DebugLoc();
1382  // Undo phi node updates, because they will be added again by SelectionDAG.
1383  if (isa<TerminatorInst>(I))
1385  return false;
1386 }
1387 
1388 /// Emit an unconditional branch to the given block, unless it is the immediate
1389 /// (fall-through) successor, and update the CFG.
1390 void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
1391   if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
1392  FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1393  // For more accurate line information if this is the only instruction
1394  // in the block then emit it, otherwise we have the unconditional
1395  // fall-through case, which needs no instructions.
1396  } else {
1397  // The unconditional branch case.
1398  TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
1399  SmallVector<MachineOperand, 0>(), DbgLoc);
1400  }
1401  uint32_t BranchWeight = 0;
1402  if (FuncInfo.BPI)
1403  BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(),
1404  MSucc->getBasicBlock());
1405  FuncInfo.MBB->addSuccessor(MSucc, BranchWeight);
1406 }
1407 
1408 /// Emit an FNeg operation.
1409 bool FastISel::selectFNeg(const User *I) {
1410   unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
1411  if (!OpReg)
1412  return false;
1413  bool OpRegIsKill = hasTrivialKill(I);
1414 
1415  // If the target has ISD::FNEG, use it.
1416  EVT VT = TLI.getValueType(DL, I->getType());
1417  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1418  OpReg, OpRegIsKill);
1419  if (ResultReg) {
1420  updateValueMap(I, ResultReg);
1421  return true;
1422  }
1423 
1424  // Bitcast the value to integer, twiddle the sign bit with xor,
1425  // and then bitcast it back to floating-point.
1426  if (VT.getSizeInBits() > 64)
1427  return false;
1428  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1429  if (!TLI.isTypeLegal(IntVT))
1430  return false;
1431 
1432  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1433  ISD::BITCAST, OpReg, OpRegIsKill);
1434  if (!IntReg)
1435  return false;
1436 
1437  unsigned IntResultReg = fastEmit_ri_(
1438  IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1439  UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1440  if (!IntResultReg)
1441  return false;
1442 
1443  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1444  IntResultReg, /*IsKill=*/true);
1445  if (!ResultReg)
1446  return false;
1447 
1448  updateValueMap(I, ResultReg);
1449  return true;
1450 }
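The xor trick above relies on IEEE-754 negation being a pure sign-bit flip: for a 64-bit float the mask is 1 << 63. A self-contained host-side sketch of the same bitcast/xor/bitcast sequence — illustrative only; the real code does this on virtual registers:

#include <cstdint>
#include <cstring>

// Negate a double by flipping its sign bit, mirroring the
// BITCAST -> XOR -> BITCAST sequence emitted by selectFNeg.
static double negateViaSignBit(double X) {
  uint64_t Bits;
  std::memcpy(&Bits, &X, sizeof(Bits));   // "bitcast" f64 -> i64
  Bits ^= UINT64_C(1) << 63;              // flip the sign bit
  std::memcpy(&X, &Bits, sizeof(Bits));   // "bitcast" i64 -> f64
  return X;
}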
1451 
1452 bool FastISel::selectExtractValue(const User *U) {
1453   const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1454  if (!EVI)
1455  return false;
1456 
1457  // Make sure we only try to handle extracts with a legal result. But also
1458  // allow i1 because it's easy.
1459  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1460  if (!RealVT.isSimple())
1461  return false;
1462  MVT VT = RealVT.getSimpleVT();
1463  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1464  return false;
1465 
1466  const Value *Op0 = EVI->getOperand(0);
1467  Type *AggTy = Op0->getType();
1468 
1469  // Get the base result register.
1470  unsigned ResultReg;
1472  if (I != FuncInfo.ValueMap.end())
1473  ResultReg = I->second;
1474  else if (isa<Instruction>(Op0))
1475  ResultReg = FuncInfo.InitializeRegForValue(Op0);
1476  else
1477  return false; // fast-isel can't handle aggregate constants at the moment
1478 
1479  // Get the actual result register, which is an offset from the base register.
1480  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1481 
1482  SmallVector<EVT, 4> AggValueVTs;
1483  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1484 
1485  for (unsigned i = 0; i < VTIndex; i++)
1486  ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1487 
1488  updateValueMap(EVI, ResultReg);
1489  return true;
1490 }
1491 
1492 bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1493  switch (Opcode) {
1494  case Instruction::Add:
1495  return selectBinaryOp(I, ISD::ADD);
1496  case Instruction::FAdd:
1497  return selectBinaryOp(I, ISD::FADD);
1498  case Instruction::Sub:
1499  return selectBinaryOp(I, ISD::SUB);
1500  case Instruction::FSub:
1501  // FNeg is currently represented in LLVM IR as a special case of FSub.
1502  if (BinaryOperator::isFNeg(I))
1503  return selectFNeg(I);
1504  return selectBinaryOp(I, ISD::FSUB);
1505  case Instruction::Mul:
1506  return selectBinaryOp(I, ISD::MUL);
1507  case Instruction::FMul:
1508  return selectBinaryOp(I, ISD::FMUL);
1509  case Instruction::SDiv:
1510  return selectBinaryOp(I, ISD::SDIV);
1511  case Instruction::UDiv:
1512  return selectBinaryOp(I, ISD::UDIV);
1513  case Instruction::FDiv:
1514  return selectBinaryOp(I, ISD::FDIV);
1515  case Instruction::SRem:
1516  return selectBinaryOp(I, ISD::SREM);
1517  case Instruction::URem:
1518  return selectBinaryOp(I, ISD::UREM);
1519  case Instruction::FRem:
1520  return selectBinaryOp(I, ISD::FREM);
1521  case Instruction::Shl:
1522  return selectBinaryOp(I, ISD::SHL);
1523  case Instruction::LShr:
1524  return selectBinaryOp(I, ISD::SRL);
1525  case Instruction::AShr:
1526  return selectBinaryOp(I, ISD::SRA);
1527  case Instruction::And:
1528  return selectBinaryOp(I, ISD::AND);
1529  case Instruction::Or:
1530  return selectBinaryOp(I, ISD::OR);
1531  case Instruction::Xor:
1532  return selectBinaryOp(I, ISD::XOR);
1533 
1534  case Instruction::GetElementPtr:
1535  return selectGetElementPtr(I);
1536 
1537  case Instruction::Br: {
1538  const BranchInst *BI = cast<BranchInst>(I);
1539 
1540  if (BI->isUnconditional()) {
1541  const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1542  MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1543  fastEmitBranch(MSucc, BI->getDebugLoc());
1544  return true;
1545  }
1546 
1547   // Conditional branches are not handled yet.
1548  // Halt "fast" selection and bail.
1549  return false;
1550  }
1551 
1552  case Instruction::Unreachable:
1553     if (TM.Options.TrapUnreachable)
1554       return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1555  else
1556  return true;
1557 
1558  case Instruction::Alloca:
1559  // FunctionLowering has the static-sized case covered.
1560  if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1561  return true;
1562 
1563  // Dynamic-sized alloca is not handled yet.
1564  return false;
1565 
1566  case Instruction::Call:
1567  return selectCall(I);
1568 
1569  case Instruction::BitCast:
1570  return selectBitCast(I);
1571 
1572  case Instruction::FPToSI:
1573  return selectCast(I, ISD::FP_TO_SINT);
1574  case Instruction::ZExt:
1575  return selectCast(I, ISD::ZERO_EXTEND);
1576  case Instruction::SExt:
1577  return selectCast(I, ISD::SIGN_EXTEND);
1578  case Instruction::Trunc:
1579  return selectCast(I, ISD::TRUNCATE);
1580  case Instruction::SIToFP:
1581  return selectCast(I, ISD::SINT_TO_FP);
1582 
1583  case Instruction::IntToPtr: // Deliberate fall-through.
1584  case Instruction::PtrToInt: {
1585  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1586  EVT DstVT = TLI.getValueType(DL, I->getType());
1587  if (DstVT.bitsGT(SrcVT))
1588  return selectCast(I, ISD::ZERO_EXTEND);
1589  if (DstVT.bitsLT(SrcVT))
1590  return selectCast(I, ISD::TRUNCATE);
1591  unsigned Reg = getRegForValue(I->getOperand(0));
1592  if (!Reg)
1593  return false;
1594  updateValueMap(I, Reg);
1595  return true;
1596  }
1597 
1598  case Instruction::ExtractValue:
1599  return selectExtractValue(I);
1600 
1601  case Instruction::PHI:
1602  llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1603 
1604  default:
1605  // Unhandled instruction. Halt "fast" selection and bail.
1606  return false;
1607  }
1608 }
1609 
1610 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1611                    const TargetLibraryInfo *LibInfo,
1612                    bool SkipTargetIndependentISel)
1613  : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1614  MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1615  TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1616  TII(*MF->getSubtarget().getInstrInfo()),
1617  TLI(*MF->getSubtarget().getTargetLowering()),
1618  TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1619  SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1620 
1621 FastISel::~FastISel() {}
1622 
1623 bool FastISel::fastLowerArguments() { return false; }
1624 
1625 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1626 
1628  return false;
1629 }
1630 
1631 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1632 
1633 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
1634  bool /*Op0IsKill*/) {
1635  return 0;
1636 }
1637 
1638 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1639  bool /*Op0IsKill*/, unsigned /*Op1*/,
1640  bool /*Op1IsKill*/) {
1641  return 0;
1642 }
1643 
1644 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1645  return 0;
1646 }
1647 
1648 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1649  const ConstantFP * /*FPImm*/) {
1650  return 0;
1651 }
1652 
1653 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1654  bool /*Op0IsKill*/, uint64_t /*Imm*/) {
1655  return 0;
1656 }
1657 
1658 unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
1659  bool /*Op0IsKill*/,
1660  const ConstantFP * /*FPImm*/) {
1661  return 0;
1662 }
1663 
1664 unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
1665  bool /*Op0IsKill*/, unsigned /*Op1*/,
1666  bool /*Op1IsKill*/, uint64_t /*Imm*/) {
1667  return 0;
1668 }
1669 
1670 /// This method is a wrapper of fastEmit_ri. It first tries to emit an
1671 /// instruction with an immediate operand using fastEmit_ri.
1672 /// If that fails, it materializes the immediate into a register and tries
1673 /// fastEmit_rr instead.
1674 unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1675  bool Op0IsKill, uint64_t Imm, MVT ImmType) {
1676  // If this is a multiply by a power of two, emit this as a shift left.
1677  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1678  Opcode = ISD::SHL;
1679  Imm = Log2_64(Imm);
1680  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1681  // div x, 8 -> srl x, 3
1682  Opcode = ISD::SRL;
1683  Imm = Log2_64(Imm);
1684  }
1685 
1686  // Horrible hack (to be removed): check to make sure shift amounts are
1687  // in-range.
1688  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1689  Imm >= VT.getSizeInBits())
1690  return 0;
1691 
1692  // First check if immediate type is legal. If not, we can't use the ri form.
1693  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1694  if (ResultReg)
1695  return ResultReg;
1696  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1697  bool IsImmKill = true;
1698  if (!MaterialReg) {
1699  // This is a bit ugly/slow, but failing here means falling out of
1700  // fast-isel, which would be very slow.
1701  IntegerType *ITy =
1702  IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1703  MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1704  if (!MaterialReg)
1705  return 0;
1706  // FIXME: If the materialized register here has no uses yet then this
1707  // will be the first use and we should be able to mark it as killed.
1708  // However, the local value area for materialising constant expressions
1709  // grows down, not up, which means that any constant expressions we generate
1710  // later which also use 'Imm' could be after this instruction and therefore
1711  // after this kill.
1712  IsImmKill = false;
1713  }
1714  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
1715 }
1716 
1717 unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
1718  return MRI.createVirtualRegister(RC);
1719 }
1720 
1721 unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
1722  unsigned OpNum) {
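 // Try to constrain the virtual register Op to the register class required
 // by operand OpNum of the instruction described by II. If the existing
 // class cannot be constrained, fall back to copying Op into a freshly
 // created register of the required class.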
1723  if (TargetRegisterInfo::isVirtualRegister(Op)) {
1724  const TargetRegisterClass *RegClass =
1725  TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1726  if (!MRI.constrainRegClass(Op, RegClass)) {
1727  // If it's not legal to COPY between the register classes, something
1728  // has gone very wrong before we got here.
1729  unsigned NewOp = createResultReg(RegClass);
1730  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1731  TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1732  return NewOp;
1733  }
1734  }
1735  return Op;
1736 }
1737 
1738 unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1739  const TargetRegisterClass *RC) {
1740  unsigned ResultReg = createResultReg(RC);
1741  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1742 
1743  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
1744  return ResultReg;
1745 }
1746 
1747 unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1748  const TargetRegisterClass *RC, unsigned Op0,
1749  bool Op0IsKill) {
1750  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1751 
1752  unsigned ResultReg = createResultReg(RC);
1753  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1754 
1755  if (II.getNumDefs() >= 1)
1756  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1757  .addReg(Op0, getKillRegState(Op0IsKill));
1758  else {
1759  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1760  .addReg(Op0, getKillRegState(Op0IsKill));
1761  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1762  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1763  }
1764 
1765  return ResultReg;
1766 }
1767 
1768 unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1769  const TargetRegisterClass *RC, unsigned Op0,
1770  bool Op0IsKill, unsigned Op1,
1771  bool Op1IsKill) {
1772  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1773 
1774  unsigned ResultReg = createResultReg(RC);
1775  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1776  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1777 
1778  if (II.getNumDefs() >= 1)
1779  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1780  .addReg(Op0, getKillRegState(Op0IsKill))
1781  .addReg(Op1, getKillRegState(Op1IsKill));
1782  else {
1783  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1784  .addReg(Op0, getKillRegState(Op0IsKill))
1785  .addReg(Op1, getKillRegState(Op1IsKill));
1786  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1787  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1788  }
1789  return ResultReg;
1790 }
1791 
1792 unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1793  const TargetRegisterClass *RC, unsigned Op0,
1794  bool Op0IsKill, unsigned Op1,
1795  bool Op1IsKill, unsigned Op2,
1796  bool Op2IsKill) {
1797  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1798 
1799  unsigned ResultReg = createResultReg(RC);
1800  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1801  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1802  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1803 
1804  if (II.getNumDefs() >= 1)
1805  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1806  .addReg(Op0, getKillRegState(Op0IsKill))
1807  .addReg(Op1, getKillRegState(Op1IsKill))
1808  .addReg(Op2, getKillRegState(Op2IsKill));
1809  else {
1810  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1811  .addReg(Op0, getKillRegState(Op0IsKill))
1812  .addReg(Op1, getKillRegState(Op1IsKill))
1813  .addReg(Op2, getKillRegState(Op2IsKill));
1814  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1815  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1816  }
1817  return ResultReg;
1818 }
1819 
1820 unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
1821  const TargetRegisterClass *RC, unsigned Op0,
1822  bool Op0IsKill, uint64_t Imm) {
1823  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1824 
1825  unsigned ResultReg = createResultReg(RC);
1826  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1827 
1828  if (II.getNumDefs() >= 1)
1829  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1830  .addReg(Op0, getKillRegState(Op0IsKill))
1831  .addImm(Imm);
1832  else {
1833  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1834  .addReg(Op0, getKillRegState(Op0IsKill))
1835  .addImm(Imm);
1836  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1837  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1838  }
1839  return ResultReg;
1840 }
1841 
1842 unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
1843  const TargetRegisterClass *RC, unsigned Op0,
1844  bool Op0IsKill, uint64_t Imm1,
1845  uint64_t Imm2) {
1846  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1847 
1848  unsigned ResultReg = createResultReg(RC);
1849  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1850 
1851  if (II.getNumDefs() >= 1)
1852  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1853  .addReg(Op0, getKillRegState(Op0IsKill))
1854  .addImm(Imm1)
1855  .addImm(Imm2);
1856  else {
1857  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1858  .addReg(Op0, getKillRegState(Op0IsKill))
1859  .addImm(Imm1)
1860  .addImm(Imm2);
1861  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1862  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1863  }
1864  return ResultReg;
1865 }
1866 
1867 unsigned FastISel::fastEmitInst_rf(unsigned MachineInstOpcode,
1868  const TargetRegisterClass *RC, unsigned Op0,
1869  bool Op0IsKill, const ConstantFP *FPImm) {
1870  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1871 
1872  unsigned ResultReg = createResultReg(RC);
1873  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1874 
1875  if (II.getNumDefs() >= 1)
1876  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1877  .addReg(Op0, getKillRegState(Op0IsKill))
1878  .addFPImm(FPImm);
1879  else {
1880  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1881  .addReg(Op0, getKillRegState(Op0IsKill))
1882  .addFPImm(FPImm);
1883  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1884  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1885  }
1886  return ResultReg;
1887 }
1888 
1889 unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
1890  const TargetRegisterClass *RC, unsigned Op0,
1891  bool Op0IsKill, unsigned Op1,
1892  bool Op1IsKill, uint64_t Imm) {
1893  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1894 
1895  unsigned ResultReg = createResultReg(RC);
1896  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1897  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1898 
1899  if (II.getNumDefs() >= 1)
1900  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1901  .addReg(Op0, getKillRegState(Op0IsKill))
1902  .addReg(Op1, getKillRegState(Op1IsKill))
1903  .addImm(Imm);
1904  else {
1905  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1906  .addReg(Op0, getKillRegState(Op0IsKill))
1907  .addReg(Op1, getKillRegState(Op1IsKill))
1908  .addImm(Imm);
1909  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1910  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1911  }
1912  return ResultReg;
1913 }
1914 
1915 unsigned FastISel::fastEmitInst_rrii(unsigned MachineInstOpcode,
1916  const TargetRegisterClass *RC,
1917  unsigned Op0, bool Op0IsKill, unsigned Op1,
1918  bool Op1IsKill, uint64_t Imm1,
1919  uint64_t Imm2) {
1920  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1921 
1922  unsigned ResultReg = createResultReg(RC);
1923  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1924  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1925 
1926  if (II.getNumDefs() >= 1)
1927  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1928  .addReg(Op0, getKillRegState(Op0IsKill))
1929  .addReg(Op1, getKillRegState(Op1IsKill))
1930  .addImm(Imm1)
1931  .addImm(Imm2);
1932  else {
1933  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1934  .addReg(Op0, getKillRegState(Op0IsKill))
1935  .addReg(Op1, getKillRegState(Op1IsKill))
1936  .addImm(Imm1)
1937  .addImm(Imm2);
1938  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1939  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1940  }
1941  return ResultReg;
1942 }
1943 
1944 unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
1945  const TargetRegisterClass *RC, uint64_t Imm) {
1946  unsigned ResultReg = createResultReg(RC);
1947  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1948 
1949  if (II.getNumDefs() >= 1)
1950  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1951  .addImm(Imm);
1952  else {
1953  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
1954  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1955  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1956  }
1957  return ResultReg;
1958 }
1959 
1960 unsigned FastISel::fastEmitInst_ii(unsigned MachineInstOpcode,
1961  const TargetRegisterClass *RC, uint64_t Imm1,
1962  uint64_t Imm2) {
1963  unsigned ResultReg = createResultReg(RC);
1964  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1965 
1966  if (II.getNumDefs() >= 1)
1967  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1968  .addImm(Imm1)
1969  .addImm(Imm2);
1970  else {
1971  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm1)
1972  .addImm(Imm2);
1973  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1974  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1975  }
1976  return ResultReg;
1977 }
1978 
1979 unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
1980  bool Op0IsKill, uint32_t Idx) {
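 // Illustrative note: Idx is a target-defined subregister index; on X86,
 // for example, sub_32bit names the low 32 bits of a 64-bit register, so this
 // helper can extract that half into a register of the requested type.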
1981  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
1982  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
1983  "Cannot yet extract from physregs");
1984  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
1985  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
1986  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
1987  ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
1988  return ResultReg;
1989 }
1990 
1991 /// Emit MachineInstrs to compute the value of Op with all but the least
1992 /// significant bit set to zero.
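/// For example, with VT = MVT::i32 this requests "AND Op0, 1", which clears
/// bits [31:1] and keeps only the boolean bit.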
1993 unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
1994  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
1995 }
1996 
1997 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
1998 /// Emit code to ensure constants are copied into registers when needed.
1999 /// Remember the virtual registers that need to be added to the Machine PHI
2000 /// nodes as input. We cannot just directly add them, because expansion
2001 /// might result in multiple MBB's for one BB. As such, the start of the
2002 /// BB might correspond to a different MBB than the end.
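/// For example, if a successor block contains "%p = phi i32 [ 7, %this ], ...",
/// the constant 7 must be materialized into a virtual register in this block
/// and the (machine PHI, register) pair recorded in PHINodesToUpdate so the
/// PHI operand can be filled in after all blocks have been selected.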
2003 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2004  const TerminatorInst *TI = LLVMBB->getTerminator();
2005 
2006  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2007  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2008 
2009  // Check successor nodes' PHI nodes that expect a constant to be available
2010  // from this block.
2011  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2012  const BasicBlock *SuccBB = TI->getSuccessor(succ);
2013  if (!isa<PHINode>(SuccBB->begin()))
2014  continue;
2015  MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2016 
2017  // If this terminator has multiple identical successors (common for
2018  // switches), only handle each succ once.
2019  if (!SuccsHandled.insert(SuccMBB).second)
2020  continue;
2021 
2022  MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2023 
2024  // At this point we know that there is a 1-1 correspondence between LLVM PHI
2025  // nodes and Machine PHI nodes, but the incoming operands have not been
2026  // emitted yet.
2027  for (BasicBlock::const_iterator I = SuccBB->begin();
2028  const auto *PN = dyn_cast<PHINode>(I); ++I) {
2029 
2030  // Ignore dead PHIs.
2031  if (PN->use_empty())
2032  continue;
2033 
2034  // Only handle legal types. Two interesting things to note here. First,
2035  // by bailing out early, we may leave behind some dead instructions,
2036  // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2037  // own moves. Second, this check is necessary because FastISel doesn't
2038  // use CreateRegs to create registers, so it always creates
2039  // exactly one register for each non-void instruction.
2040  EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
2041  if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2042  // Handle integer promotions, though, because they're common and easy.
2043  if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2044  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2045  return false;
2046  }
2047  }
2048 
2049  const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
2050 
2051  // Set the DebugLoc for the copy. Prefer the location of the operand
2052  // if there is one; use the location of the PHI otherwise.
2053  DbgLoc = PN->getDebugLoc();
2054  if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2055  DbgLoc = Inst->getDebugLoc();
2056 
2057  unsigned Reg = getRegForValue(PHIOp);
2058  if (!Reg) {
2059  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2060  return false;
2061  }
2062  FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
2063  DbgLoc = DebugLoc();
2064  }
2065  }
2066 
2067  return true;
2068 }
2069 
2070 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2071  assert(LI->hasOneUse() &&
2072  "tryToFoldLoad expected a LoadInst with a single use");
2073  // We know that the load has a single use, but don't know what it is. If it
2074  // isn't one of the folded instructions, then we can't succeed here. Handle
2075  // this by scanning the single-use users of the load until we get to FoldInst.
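 // For example, if "%v = load i32, i32* %p" feeds "%e = sext i32 %v to i64"
 // which in turn feeds FoldInst, the walk below follows the single-use chain
 // load -> sext -> FoldInst before asking the target to fold the load.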
2076  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2077 
2078  const Instruction *TheUser = LI->user_back();
2079  while (TheUser != FoldInst && // Scan up until we find FoldInst.
2080  // Stay in the right block.
2081  TheUser->getParent() == FoldInst->getParent() &&
2082  --MaxUsers) { // Don't scan too far.
2083  // If there are multiple or no uses of this instruction, then bail out.
2084  if (!TheUser->hasOneUse())
2085  return false;
2086 
2087  TheUser = TheUser->user_back();
2088  }
2089 
2090  // If we didn't find the fold instruction, then we failed to collapse the
2091  // sequence.
2092  if (TheUser != FoldInst)
2093  return false;
2094 
2095  // Don't try to fold volatile loads. Target has to deal with alignment
2096  // constraints.
2097  if (LI->isVolatile())
2098  return false;
2099 
2100  // Figure out which vreg this is going into. If there is no assigned vreg yet
2101  // then there actually was no reference to it. Perhaps the load is referenced
2102  // by a dead instruction.
2103  unsigned LoadReg = getRegForValue(LI);
2104  if (!LoadReg)
2105  return false;
2106 
2107  // We can't fold if this vreg has no uses or more than one use. Multiple uses
2108  // may mean that the instruction got lowered to multiple MIs, or the use of
2109  // the loaded value ended up being multiple operands of the result.
2110  if (!MRI.hasOneUse(LoadReg))
2111  return false;
2112 
2113  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2114  MachineInstr *User = RI->getParent();
2115 
2116  // Set the insertion point properly. Folding the load can cause generation of
2117  // other random instructions (like sign extends) for addressing modes; make
2118  // sure they get inserted in a logical place before the new instruction.
2119  FuncInfo.InsertPt = User;
2120  FuncInfo.MBB = User->getParent();
2121 
2122  // Ask the target to try folding the load.
2123  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2124 }
2125 
2126 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
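 // For example, an index computation "%sum = add i64 %base, 16" feeding a GEP
 // is a candidate because the constant 16 can be folded into the GEP's
 // immediate offset; the checks below reject adds of a mismatched width, adds
 // defined in another block, and adds without a constant second operand.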
2127  // Must be an add.
2128  if (!isa<AddOperator>(Add))
2129  return false;
2130  // Type size needs to match.
2131  if (DL.getTypeSizeInBits(GEP->getType()) !=
2132  DL.getTypeSizeInBits(Add->getType()))
2133  return false;
2134  // Must be in the same basic block.
2135  if (isa<Instruction>(Add) &&
2136  FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2137  return false;
2138  // Must have a constant operand.
2139  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2140 }
2141 
2142 MachineMemOperand *
2143 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2144  const Value *Ptr;
2145  Type *ValTy;
2146  unsigned Alignment;
2147  unsigned Flags;
2148  bool IsVolatile;
2149 
2150  if (const auto *LI = dyn_cast<LoadInst>(I)) {
2151  Alignment = LI->getAlignment();
2152  IsVolatile = LI->isVolatile();
2153  Flags = MachineMemOperand::MOLoad;
2154  Ptr = LI->getPointerOperand();
2155  ValTy = LI->getType();
2156  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2157  Alignment = SI->getAlignment();
2158  IsVolatile = SI->isVolatile();
2159  Flags = MachineMemOperand::MOStore;
2160  Ptr = SI->getPointerOperand();
2161  ValTy = SI->getValueOperand()->getType();
2162  } else
2163  return nullptr;
2164 
2165  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2166  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
2167  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2168 
2169  AAMDNodes AAInfo;
2170  I->getAAMetadata(AAInfo);
2171 
2172  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
2173  Alignment = DL.getABITypeAlignment(ValTy);
2174 
2175  unsigned Size = DL.getTypeStoreSize(ValTy);
2176 
2177  if (IsVolatile)
2178  Flags |= MachineMemOperand::MOVolatile;
2179  if (IsNonTemporal)
2180  Flags |= MachineMemOperand::MONonTemporal;
2181  if (IsInvariant)
2182  Flags |= MachineMemOperand::MOInvariant;
2183 
2184  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2185  Alignment, AAInfo, Ranges);
2186 }
2187 
2188 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2189  // If both operands are the same, then try to optimize or fold the cmp.
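 // For example, "fcmp oeq double %x, %x" is true exactly when %x is not a
 // NaN, so it folds to FCMP_ORD, while "icmp slt i32 %x, %x" can never be
 // true and folds to FCMP_FALSE.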
2190  CmpInst::Predicate Predicate = CI->getPredicate();
2191  if (CI->getOperand(0) != CI->getOperand(1))
2192  return Predicate;
2193 
2194  switch (Predicate) {
2195  default: llvm_unreachable("Invalid predicate!");
2196  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2197  case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2198  case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2199  case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2200  case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2201  case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2202  case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2203  case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2204  case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2205  case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2206  case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2207  case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2208  case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2209  case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2210  case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2211  case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2212 
2213  case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2214  case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2215  case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2216  case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2217  case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2218  case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2219  case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2220  case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2221  case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2222  case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2223  }
2224 
2225  return Predicate;
2226 }
void setHasStackMap(bool s=true)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:477
unsigned fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, unsigned Op2, bool Op2IsKill)
Emit a MachineInstr with three register operands and a result register in the given register class...
Definition: FastISel.cpp:1792
const Value * getCalledValue() const
getCalledValue - Get a pointer to the function that is invoked by this instruction.
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
void setByValAlign(unsigned A)
The memory access reads data.
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:104
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:679
unsigned fastEmitInst_rrii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with two register operands, two immediates operands, and a result register in the...
Definition: FastISel.cpp:1915
The memory access writes data.
std::vector< ArgListEntry > ArgListTy
Definition: FastISel.h:54
unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero...
Definition: FastISel.cpp:1993
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
ExtractValueInst - This instruction extracts a struct member or array element value from an aggregate...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:223
MachineConstantPool & MCP
Definition: FastISel.h:197
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1007
static const Value * getFNegArgument(const Value *BinOp)
bool hasName() const
Definition: Value.h:228
STATISTIC(NumFunctions,"Total number of functions")
virtual unsigned fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:484
Sign extended before/after call.
Definition: Attributes.h:105
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const
Return the number of registers that this ValueType will eventually require.
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:39
Force argument to be passed in register.
Definition: Attributes.h:78
InstrTy * getInstruction() const
Definition: CallSite.h:82
Intrinsic::ID getIntrinsicID() const
getIntrinsicID - Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:44
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2188
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:191
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void fastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor...
Definition: FastISel.cpp:1390
ImmutableCallSite * CS
Definition: FastISel.h:74
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:484
Nested function static chain.
Definition: Attributes.h:82
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:373
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:138
virtual unsigned fastEmit_rf(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1658
static bool isVirtualRegister(unsigned Reg)
isVirtualRegister - Return true if the specified register number is in the virtual register namespace...
A Stackmap instruction captures the location of live variables at its position in the instruction str...
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:602
CallInst - This class represents a function call, abstracting a target machine's calling convention...
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:285
void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes...
Definition: FastISel.cpp:81
unsigned less or equal
Definition: InstrTypes.h:723
unsigned less than
Definition: InstrTypes.h:722
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:703
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2143
unsigned getSizeInBits() const
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:713
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1631
Type * getReturnType() const
Definition: Function.cpp:233
arg_iterator arg_end()
Definition: Function.h:480
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:740
F(f)
SmallVector< unsigned, 4 > InRegs
Definition: FastISel.h:85
LoadInst - an instruction for reading from memory.
Definition: Instructions.h:177
Hexagon Common GEP
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1644
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF)
Emit the floating-point constant +0.0 in a register using target- specific logic. ...
Definition: FastISel.h:491
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
getMachineMemOperand - Allocate a new MachineMemOperand.
void setPhysRegsDeadExcept(ArrayRef< unsigned > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
uint64_t getOffset() const
op_iterator op_begin()
Definition: User.h:183
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
Definition: ValueTypes.h:189
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1625
static Constant * getNullValue(Type *Ty)
Definition: Constants.cpp:178
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:188
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:231
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1329
unsigned fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:1842
virtual unsigned fastEmit_rri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1664
COPY - Target-independent register copy.
Definition: TargetOpcodes.h:86
ArrayRef< unsigned > getIndices() const
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:708
IterTy arg_end() const
Definition: CallSite.h:157
MachineFunction * MF
Definition: FastISel.h:194
DenseMap< const Value *, unsigned > LocalValueMap
Definition: FastISel.h:192
unsigned fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:1820
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:707
unsigned fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:1889
bool isUnconditional() const
MachineMemOperand - A description of a memory reference used in the backend.
unsigned getCallFrameDestroyOpcode() const
void setHasPatchPoint(bool s=true)
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:551
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
Shift and rotation operations.
Definition: ISDOpcodes.h:332
opStatus convertToInteger(integerPart *, unsigned int, bool, roundingMode, bool *) const
Definition: APFloat.cpp:2191
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:98
A Use represents the edge between a Value definition and its users.
Definition: Use.h:69
bool hasOptimizedCodeGen(LibFunc::Func F) const
Tests if the function is both available and a candidate for optimized code generation.
unsigned fastEmitInst_i(unsigned MachineInstrOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:1944
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2126
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APInt.h:33
bool isReg() const
isReg - Tests if this is a MO_Register operand.
unsigned getNumArgOperands() const
getNumArgOperands - Return the number of call arguments.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
unsigned fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill)
Emit a MachineInstr with one register operand and a result register in the given register class...
Definition: FastISel.cpp:1747
MachineInstr * EmitStartPt
The top most instruction in the current block that is allowed for emitting local variables.
Definition: FastISel.h:216
const TargetRegisterClass * getRegClass(unsigned Reg) const
getRegClass - Return the register class of the specified virtual register.
Reg
All possible values of the reg field in the ModR/M byte.
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:704
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
bool hasDebugInfo() const
hasDebugInfo - Returns true if valid debug info is present.
void setByValSize(unsigned S)
The memory access is volatile.
uint16_t getParamAlignment(uint16_t i) const
Extract the alignment for a call or parameter (0=unknown).
Definition: CallSite.h:247
const MachineInstrBuilder & addImm(int64_t Val) const
addImm - Add a new immediate operand.
Hidden pointer to structure to return.
Definition: Attributes.h:114
const TargetMachine & TM
Definition: FastISel.h:199
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1093
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1247
Context object for machine code objects.
Definition: MCContext.h:48
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
FunctionType - Class to represent function types.
Definition: DerivedTypes.h:96
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:84
unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:1721
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:393
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr), and append generated machine instructions to the current block.
Definition: FastISel.cpp:1492
LLVMContext & getContext() const
getContext - Return the LLVMContext in which this type was uniqued.
Definition: Type.h:125
ValTy * getCalledValue() const
getCalledValue - Return the pointer to function that is being called.
Definition: CallSite.h:91
void GetReturnInfo(Type *ReturnType, AttributeSet attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags...
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
unsigned getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value...
Definition: FastISel.cpp:168
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:191
BasicBlock * getSuccessor(unsigned i) const
unsigned fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:1738
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:25
Pass structure by value.
Definition: Attributes.h:73
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
bool hasTrivialKill(const Value *V)
Test whether the given value has exactly one use.
Definition: FastISel.cpp:135
MCContext & getContext() const
void setOrigAlign(unsigned A)
Type * getElementType() const
Definition: DerivedTypes.h:323
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
MachineInstr * getLastLocalValue()
Return the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:221
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
PointerType - Class to represent pointers.
Definition: DerivedTypes.h:449
unsigned getKillRegState(bool B)
const BasicBlock * getBasicBlock() const
getBasicBlock - Return the LLVM basic block that this instance corresponded to originally.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:436
unsigned lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:285
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:120
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:491
The memory access is non-temporal.
DBG_VALUE - a mapping of the llvm.dbg.value intrinsic.
Definition: TargetOpcodes.h:69
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
Definition: InstrTypes.h:57
IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
Definition: TargetOpcodes.h:52
bundle_iterator< MachineInstr, instr_iterator > iterator
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
Definition: APFloat.h:122
bool getLibFunc(StringRef funcName, LibFunc::Func &F) const
Searches for a particular function name.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
Patchable call instruction - this instruction represents a call to a constant address, followed by a series of NOPs.
virtual ~FastISel()
Definition: FastISel.cpp:1621
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:35
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, ImmutableCallSite &Call)
Definition: FastISel.h:94
MVT - Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:65
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
BasicBlock * getSuccessor(unsigned idx) const
Return the specified successor.
Definition: InstrTypes.h:62
DIExpression * getExpression() const
Definition: IntrinsicInst.h:88
Simple binary floating point operators.
Definition: ISDOpcodes.h:237
BranchInst - Conditional or Unconditional Branch instruction.
This is an important base class in LLVM.
Definition: Constant.h:41
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:352
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:82
APInt Or(const APInt &LHS, const APInt &RHS)
Bitwise OR function for APInt.
Definition: APInt.h:1895
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:233
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:264
APInt Xor(const APInt &LHS, const APInt &RHS)
Bitwise XOR function for APInt.
Definition: APInt.h:1900
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
MachineFrameInfo & MFI
Definition: FastISel.h:196
Return value is always equal to this argument.
Definition: Attributes.h:103
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1633
const DebugLoc & getDebugLoc() const
getDebugLoc - Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:230
bool SkipTargetIndependentISel
Definition: FastISel.h:205
Pass structure in an alloca.
Definition: Attributes.h:74
op_iterator op_end()
Definition: User.h:185
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:225
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We're checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2070
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:108
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:697
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned char TargetFlags=0)
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:644
Value * getOperand(unsigned i) const
Definition: User.h:118
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
Zero extended before/after call.
Definition: Attributes.h:119
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:706
arg_iterator arg_begin()
Definition: Function.h:472
Class to represent integer types.
Definition: DerivedTypes.h:37
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:760
const TargetRegisterInfo & TRI
Definition: FastISel.h:203
MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL, const MCInstrDesc &MCID)
BuildMI - Builder interface.
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:714
static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeSet representing the attributes applied to the return value of the given call...
Definition: FastISel.cpp:854
EVT - Extended Value Type.
Definition: ValueTypes.h:31
static UndefValue * get(Type *T)
get() - Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1473
virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:519
bool selectFNeg(const User *I)
Emit an FNeg operation.
Definition: FastISel.cpp:1409
MachinePointerInfo - This class contains a discriminated union of information about pointers in memor...
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:712
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:81
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
const TargetInstrInfo & TII
Definition: FastISel.h:201
MachineBasicBlock * MBB
MBB - The current block.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
signed greater than
Definition: InstrTypes.h:724
MachineInstr * LastLocalValue
The position of the last instruction for materializing constants for use in the current block...
Definition: FastISel.h:211
The memory access is invariant.
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:338
bool bitsGT(EVT VT) const
bitsGT - Return true if this has more bits than VT.
Definition: ValueTypes.h:177
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
Definition: DataLayout.cpp:694
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:701
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:674
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:304
DIExpression * getExpression() const
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1638
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:299
This is the shared class of boolean and integer constants.
Definition: Constants.h:47
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:388
bool paramHasAttr(unsigned i, Attribute::AttrKind A) const
Return true if the call or the callee has the given attribute.
Definition: CallSite.h:242
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1653
DenseMap< unsigned, unsigned > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:711
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:861
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:222
Instruction * user_back()
user_back - Specialize the methods defined in Value, as we know that an instruction can only be used ...
Definition: Instruction.h:69
Provides information about what library functions are available for the current target.
MDNode * getMetadata(unsigned KindID) const
getMetadata - Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:167
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
bool isVolatile() const
isVolatile - Return true if this is a load from a volatile memory location.
Definition: Instructions.h:232
const TargetLibraryInfo * LibInfo
Definition: FastISel.h:204
signed less than
Definition: InstrTypes.h:726
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:548
bool hasOneUse(unsigned RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
unsigned arg_size() const
Definition: CallSite.h:162
Value * stripPointerCasts()
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:458
unsigned fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
Emit a MachineInstr with two register operands and a result register in the given register class...
Definition: FastISel.cpp:1768
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:582
Function * getCalledFunction() const
getCalledFunction - Return the function called, or null if this is an indirect function invocation...
MachineFrameInfo * getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:296
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:161
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:159
void startNewBlock()
Set the current block to which generated machine instructions will be appended, and clear the local C...
Definition: FastISel.cpp:96
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:123
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
signed less or equal
Definition: InstrTypes.h:727
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1280
Target - Wrapper for Target specific information.
Class for arbitrary precision integers.
Definition: APInt.h:73
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1648
SmallVector< unsigned, 16 > OutRegs
Definition: FastISel.h:83
const DataLayout & DL
Definition: FastISel.h:200
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:382
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getCallPreservedMask - Return a mask of call-preserved registers for the given calling convention on ...
BranchProbabilityInfo * BPI
This file defines the FastISel class.
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:383
bool isPowerOf2_64(uint64_t Value)
isPowerOf2_64 - This function returns true if the argument is a power of two 0 (64 bit edition...
Definition: MathExtras.h:360
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:285
DebugLoc DbgLoc
Definition: FastISel.h:198
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool selectCall(const User *Call)
Definition: FastISel.cpp:1047
APInt And(const APInt &LHS, const APInt &RHS)
Bitwise AND function for APInt.
Definition: APInt.h:1890
SI Fix SGPR Live Ranges
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:364
Representation of each machine instruction.
Definition: MachineInstr.h:51
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering...
Definition: FastISel.cpp:1627
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:714
bool selectExtractValue(const User *I)
Definition: FastISel.cpp:1452
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:321
MachineRegisterInfo & MRI
Definition: FastISel.h:195
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:111
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:108
unsigned greater or equal
Definition: InstrTypes.h:721
DbgValueInst - This represents the llvm.dbg.value instruction.
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:876
static bool isFNeg(const Value *V, bool IgnoreZeroSign=false)
void getAAMetadata(AAMDNodes &N, bool Merge=false) const
getAAMetadata - Fills the AAMDNodes structure with AA metadata from this instruction.
ImmutableCallSite - establish a view to a call site for examination.
Definition: CallSite.h:418
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
Definition: ValueTypes.h:233
static MachineOperand CreateImm(int64_t Val)
#define I(x, y, z)
Definition: MD5.cpp:54
#define N
TerminatorInst * getTerminator()
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:124
unsigned fastEmitInst_rf(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, const ConstantFP *FPImm)
Emit a MachineInstr with two register operands and a result register in the given register class...
Definition: FastISel.cpp:1867
bool hasOneUse() const
Return true if there is exactly one user of this value.
Definition: Value.h:311
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
virtual unsigned fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:487
iterator end()
Definition: DenseMap.h:68
bool isTailCall() const
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:705
iterator find(const KeyT &Val)
Definition: DenseMap.h:124
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, DebugLoc DL) const
Insert branch code into the end of the specified MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
size_t size() const
Definition: BasicBlock.h:241
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:371
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block...
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:709
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
getEVT - Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:277
const TargetLowering & TLI
Definition: FastISel.h:202
unsigned createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:1717
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool hasLocalLinkage() const
Definition: GlobalValue.h:280
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:172
unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1674
unsigned fastEmitInst_ii(unsigned MachineInstrOpcode, const TargetRegisterClass *RC, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with a two immediate operands.
Definition: FastISel.cpp:1960
unsigned getReg() const
getReg - Returns the register number.
bool use_empty() const
Definition: Value.h:275
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:1979
MachineBasicBlock::iterator InsertPt
Definition: FastISel.h:299
DILocalVariable * getVariable() const
Definition: IntrinsicInst.h:85
const uint16_t * ImplicitDefs
Definition: MCInstrDesc.h:148
user_iterator user_begin()
Definition: Value.h:294
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1610
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:94
bool isEmptyTy() const
isEmptyTy - Return true if this type is empty, that is, it has no elements or all its elements are em...
Definition: Type.cpp:102
virtual bool fastLowerArguments()
This method is called by target-independent code to do target- specific argument lowering.
Definition: FastISel.cpp:1623
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
getSubClassWithSubReg - Returns the largest legal sub-class of RC that supports the sub-register inde...
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:700
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
LLVM Value Representation.
Definition: Value.h:69
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:710
unsigned getOpcode() const
getOpcode() returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:112
Value * getAddress() const
DbgDeclareInst - This represents the llvm.dbg.declare instruction.
DILocalVariable * getVariable() const
static const Function * getParent(const Value *V)
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:507
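The truncated "Size examples:" table in DataLayout.h contrasts bit size with store size; a small sketch of the difference, assuming an LLVMContext Ctx and a DataLayout DL are in scope:

  Type *I19 = IntegerType::get(Ctx, 19);
  uint64_t Bits  = DL.getTypeSizeInBits(I19);  // 19
  uint64_t Bytes = DL.getTypeStoreSize(I19);   // 3: rounded up to whole bytes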
const Value * getValue() const
DbgValueInst - This represents the llvm.dbg.value instruction.
#define DEBUG(X)
Definition: Debug.h:92
uint32_t getEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get the raw edge weight calculated for the edge.
IterTy arg_begin() const
arg_begin/arg_end - Return iterators corresponding to the actual argument list for a call site...
Definition: CallSite.h:151
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
unsigned greater than
Definition: InstrTypes.h:720
MachineModuleInfo & getMMI() const
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
reg_iterator reg_begin(unsigned RegNo) const
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
virtual bool fastSelectInstruction(const Instruction *I)=0
This method is called by target-independent code when the normal FastISel process fails to select an ...
CallingConv::ID getCallingConv() const
getCallingConv/setCallingConv - Get or set the calling convention of this function call...
Conversion operators.
Definition: ISDOpcodes.h:380
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:193
void setIsDebug(bool Val=true)
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:389
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
isLayoutSuccessor - Return true if the specified MBB will be emitted immediately after this block...
void ComputeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo *MMI)
ComputeUsesVAFloatArgument - Determine if any floating-point values are being passed to this variadic...
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:702
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
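A sketch of what "linearized index" means here, assuming an LLVMContext Ctx: leaves of the aggregate are numbered left to right, so the index path {1, 1} into { i32, { float, double } } lands on leaf number 2.

  Type *Inner = StructType::get(Ctx, {Type::getFloatTy(Ctx), Type::getDoubleTy(Ctx)});
  Type *Outer = StructType::get(Ctx, {Type::getInt32Ty(Ctx), Inner});
  unsigned Path[] = {1, 1};
  unsigned Linear = ComputeLinearIndex(Outer, Path, Path + 2); // == 2 (the double)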
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
addReg - Add a new virtual register operand...
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
DbgDeclareInst - This represents the llvm.dbg.declare instruction.
Definition: IntrinsicInst.h:82
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
Definition: ISDOpcodes.h:506
DenseMap< const Value *, unsigned > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
getIntegerVT - Returns the EVT that represents an integer with the given number of bits...
Definition: ValueTypes.h:61
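Bit widths that match a machine value type come back as simple VTs; any other width becomes an extended EVT (a sketch, assuming an LLVMContext Ctx):

  EVT V32 = EVT::getIntegerVT(Ctx, 32);  // V32.isSimple() == true (MVT::i32)
  EVT V36 = EVT::getIntegerVT(Ctx, 36);  // V36.isSimple() == false (extended type)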
void addSuccessor(MachineBasicBlock *succ, uint32_t weight=0)
addSuccessor - Add succ as a successor of this MachineBasicBlock.
static MachineOperand CreateFI(int Idx)
unsigned Log2_64(uint64_t Value)
Log2_64 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
Definition: MathExtras.h:474
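FastISel-style selectors commonly use this for strength reduction; a small sketch of turning a multiply by a power of two into a shift amount:

  uint64_t C = 16;
  if (isPowerOf2_64(C)) {
    unsigned ShiftAmt = Log2_64(C);   // 4
    // ... emit a left shift by ShiftAmt instead of a multiply ...
  }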
const BasicBlock * getParent() const
Definition: Instruction.h:72
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:203
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:699
signed greater or equal
Definition: InstrTypes.h:725
IntrinsicInst - A useful wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:37
MachineModuleInfo - This class contains meta information specific to a module.
This file describes how to lower LLVM code to machine code.
bool isVoidTy() const
isVoidTy - Return true if this is 'void'.
Definition: Type.h:137
bool use_empty(unsigned RegNo) const
use_empty - Return true if there are no instructions using the specified register.
unsigned InitializeRegForValue(const Value *V)
std::pair< unsigned, bool > getRegForGEPIndex(const Value *V)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:315
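A hedged fragment of how a GEP-lowering loop inside a FastISel subclass might consume the result; Idx is assumed to be the const Value * for one GEP index operand:

  std::pair<unsigned, bool> IdxPair = getRegForGEPIndex(Idx);
  unsigned IdxReg    = IdxPair.first;   // index widened or truncated to pointer width
  bool     IdxIsKill = IdxPair.second;  // whether this use kills the register
  if (!IdxReg)
    return false;                       // selection failed; fall back to SelectionDAG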