FastISel.cpp (LLVM 6.0.0svn)
1 //===- FastISel.cpp - Implementation of the FastISel class ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the FastISel class.
11 //
12 // "Fast" instruction selection is designed to emit very poor code quickly.
13 // Also, it is not designed to be able to do much lowering, so most illegal
14 // types (e.g. i64 on 32-bit targets) and operations are not supported. It is
15 // also not intended to be able to do much optimization, except in a few cases
16 // where doing optimizations reduces overall compile time. For example, folding
17 // constants into immediate fields is often done, because it's cheap and it
18 // reduces the number of instructions later phases have to examine.
19 //
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
24 //
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
32 // in -O0 compiles.
33 //
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
39 //
40 //===----------------------------------------------------------------------===//
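// Illustrative sketch (not part of the original file): targets opt in to fast
// instruction selection by returning a FastISel subclass from their
// TargetLowering::createFastISel() hook and overriding the fastSelect*/
// fastLower*/fastMaterialize* entry points whose default (failing)
// implementations appear near the end of this file. A rough sketch, assuming
// a hypothetical target "Foo":
//
//   class FooFastISel final : public FastISel {
//   public:
//     FooFastISel(FunctionLoweringInfo &FuncInfo,
//                 const TargetLibraryInfo *LibInfo)
//         : FastISel(FuncInfo, LibInfo) {}
//     bool fastSelectInstruction(const Instruction *I) override;
//     bool fastLowerArguments() override;
//     unsigned fastMaterializeConstant(const Constant *C) override;
//   };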
41 
42 #include "llvm/CodeGen/FastISel.h"
43 #include "llvm/ADT/APFloat.h"
44 #include "llvm/ADT/APSInt.h"
45 #include "llvm/ADT/DenseMap.h"
46 #include "llvm/ADT/Optional.h"
47 #include "llvm/ADT/SmallPtrSet.h"
48 #include "llvm/ADT/SmallString.h"
49 #include "llvm/ADT/SmallVector.h"
50 #include "llvm/ADT/Statistic.h"
53 #include "llvm/CodeGen/Analysis.h"
65 #include "llvm/CodeGen/StackMaps.h"
67 #include "llvm/IR/Argument.h"
68 #include "llvm/IR/Attributes.h"
69 #include "llvm/IR/BasicBlock.h"
70 #include "llvm/IR/CallSite.h"
71 #include "llvm/IR/CallingConv.h"
72 #include "llvm/IR/Constant.h"
73 #include "llvm/IR/Constants.h"
74 #include "llvm/IR/DataLayout.h"
75 #include "llvm/IR/DebugInfo.h"
76 #include "llvm/IR/DebugLoc.h"
77 #include "llvm/IR/DerivedTypes.h"
78 #include "llvm/IR/Function.h"
80 #include "llvm/IR/GlobalValue.h"
81 #include "llvm/IR/InlineAsm.h"
82 #include "llvm/IR/InstrTypes.h"
83 #include "llvm/IR/Instruction.h"
84 #include "llvm/IR/Instructions.h"
85 #include "llvm/IR/IntrinsicInst.h"
86 #include "llvm/IR/LLVMContext.h"
87 #include "llvm/IR/Mangler.h"
88 #include "llvm/IR/Metadata.h"
89 #include "llvm/IR/Operator.h"
90 #include "llvm/IR/Type.h"
91 #include "llvm/IR/User.h"
92 #include "llvm/IR/Value.h"
93 #include "llvm/MC/MCContext.h"
94 #include "llvm/MC/MCInstrDesc.h"
95 #include "llvm/MC/MCRegisterInfo.h"
96 #include "llvm/Support/Casting.h"
97 #include "llvm/Support/Debug.h"
106 #include <algorithm>
107 #include <cassert>
108 #include <cstdint>
109 #include <iterator>
110 #include <utility>
111 
112 using namespace llvm;
113 
114 #define DEBUG_TYPE "isel"
115 
116 STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
117  "target-independent selector");
118 STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
119  "target-specific selector");
120 STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
121 
122 /// Set the current block to which generated machine instructions will be
123 /// appended, and clear the local CSE map.
126 
127  // Instructions are appended to FuncInfo.MBB. If the basic block already
128  // contains labels or copies, use the last instruction as the last local
129  // value.
130  EmitStartPt = nullptr;
131  if (!FuncInfo.MBB->empty())
134 }
135 
136 bool FastISel::lowerArguments() {
137  if (!FuncInfo.CanLowerReturn)
138  // Fallback to SDISel argument lowering code to deal with sret pointer
139  // parameter.
140  return false;
141 
142  if (!fastLowerArguments())
143  return false;
144 
145  // Enter arguments into ValueMap for uses in non-entry BBs.
146  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
147  E = FuncInfo.Fn->arg_end();
148  I != E; ++I) {
150  assert(VI != LocalValueMap.end() && "Missed an argument?");
151  FuncInfo.ValueMap[&*I] = VI->second;
152  }
153  return true;
154 }
155 
156 void FastISel::flushLocalValueMap() {
160  SavedInsertPt = FuncInfo.InsertPt;
161 }
162 
164  // Don't consider constants or arguments to have trivial kills.
165  const Instruction *I = dyn_cast<Instruction>(V);
166  if (!I)
167  return false;
168 
169  // No-op casts are trivially coalesced by fast-isel.
170  if (const auto *Cast = dyn_cast<CastInst>(I))
171  if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
172  !hasTrivialKill(Cast->getOperand(0)))
173  return false;
174 
175  // Even if the value has only one use in the LLVM IR, FastISel might fold
176  // the use into another instruction, in which case there is more than one
177  // use at the Machine Instruction level.
178  unsigned Reg = lookUpRegForValue(V);
179  if (Reg && !MRI.use_empty(Reg))
180  return false;
181 
182  // GEPs with all zero indices are trivially coalesced by fast-isel.
183  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
184  if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
185  return false;
186 
187  // Only instructions with a single use in the same basic block are considered
188  // to have trivial kills.
189  return I->hasOneUse() &&
190  !(I->getOpcode() == Instruction::BitCast ||
191  I->getOpcode() == Instruction::PtrToInt ||
192  I->getOpcode() == Instruction::IntToPtr) &&
193  cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
194 }
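// Illustrative example (not from the original source): given IR in one block
//   %y = add i32 %x, 1
//   %z = shl i32 %y, 2      ; the only use of %y, in the same block
// %y has a trivial kill, so the use of its vreg can be flagged as a kill;
// a value used in another block (or behind a no-op cast) would not.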
195 
196 unsigned FastISel::getRegForValue(const Value *V) {
197  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
198  // Don't handle non-simple values in FastISel.
199  if (!RealVT.isSimple())
200  return 0;
201 
202  // Ignore illegal types. We must do this before looking up the value
203  // in ValueMap because Arguments are given virtual registers regardless
204  // of whether FastISel can handle them.
205  MVT VT = RealVT.getSimpleVT();
206  if (!TLI.isTypeLegal(VT)) {
207  // Handle integer promotions, though, because they're common and easy.
208  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
209  VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
210  else
211  return 0;
212  }
213 
214  // Look up the value to see if we already have a register for it.
215  unsigned Reg = lookUpRegForValue(V);
216  if (Reg)
217  return Reg;
218 
219  // In bottom-up mode, just create the virtual register which will be used
220  // to hold the value. It will be materialized later.
221  if (isa<Instruction>(V) &&
222  (!isa<AllocaInst>(V) ||
223  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
225 
226  SavePoint SaveInsertPt = enterLocalValueArea();
227 
228  // Materialize the value in a register. Emit any instructions in the
229  // local value area.
230  Reg = materializeRegForValue(V, VT);
231 
232  leaveLocalValueArea(SaveInsertPt);
233 
234  return Reg;
235 }
236 
237 unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
238  unsigned Reg = 0;
239  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
240  if (CI->getValue().getActiveBits() <= 64)
241  Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
242  } else if (isa<AllocaInst>(V))
243  Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
244  else if (isa<ConstantPointerNull>(V))
245  // Translate this as an integer zero so that it can be
246  // local-CSE'd with actual integer zeros.
247  Reg = getRegForValue(
249  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
250  if (CF->isNullValue())
251  Reg = fastMaterializeFloatZero(CF);
252  else
253  // Try to emit the constant directly.
254  Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
255 
256  if (!Reg) {
257  // Try to emit the constant by using an integer constant with a cast.
258  const APFloat &Flt = CF->getValueAPF();
259  EVT IntVT = TLI.getPointerTy(DL);
260  uint32_t IntBitWidth = IntVT.getSizeInBits();
261  APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
262  bool isExact;
263  (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
264  if (isExact) {
265  unsigned IntegerReg =
267  if (IntegerReg != 0)
268  Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
269  /*Kill=*/false);
270  }
271  }
272  } else if (const auto *Op = dyn_cast<Operator>(V)) {
273  if (!selectOperator(Op, Op->getOpcode()))
274  if (!isa<Instruction>(Op) ||
275  !fastSelectInstruction(cast<Instruction>(Op)))
276  return 0;
277  Reg = lookUpRegForValue(Op);
278  } else if (isa<UndefValue>(V)) {
281  TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
282  }
283  return Reg;
284 }
285 
286 /// Helper for getRegForValue. This function is called when the value isn't
287 /// already available in a register and must be materialized with new
288 /// instructions.
289 unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
290  unsigned Reg = 0;
291  // Give the target-specific code a try first.
292  if (isa<Constant>(V))
293  Reg = fastMaterializeConstant(cast<Constant>(V));
294 
295  // If target-specific code couldn't or didn't want to handle the value, then
296  // give target-independent code a try.
297  if (!Reg)
298  Reg = materializeConstant(V, VT);
299 
300  // Don't cache constant materializations in the general ValueMap.
301  // To do so would require tracking what uses they dominate.
302  if (Reg) {
303  LocalValueMap[V] = Reg;
305  }
306  return Reg;
307 }
308 
309 unsigned FastISel::lookUpRegForValue(const Value *V) {
310  // Look up the value to see if we already have a register for it. We
311  // cache values defined by Instructions across blocks, and other values
312  // only locally. This is because Instructions already have the SSA
313  // def-dominates-use requirement enforced.
315  if (I != FuncInfo.ValueMap.end())
316  return I->second;
317  return LocalValueMap[V];
318 }
319 
320 void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
321  if (!isa<Instruction>(I)) {
322  LocalValueMap[I] = Reg;
323  return;
324  }
325 
326  unsigned &AssignedReg = FuncInfo.ValueMap[I];
327  if (AssignedReg == 0)
328  // Use the new register.
329  AssignedReg = Reg;
330  else if (Reg != AssignedReg) {
331  // Arrange for uses of AssignedReg to be replaced by uses of Reg.
332  for (unsigned i = 0; i < NumRegs; i++)
333  FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
334 
335  AssignedReg = Reg;
336  }
337 }
338 
339 std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
340  unsigned IdxN = getRegForValue(Idx);
341  if (IdxN == 0)
342  // Unhandled operand. Halt "fast" selection and bail.
343  return std::pair<unsigned, bool>(0, false);
344 
345  bool IdxNIsKill = hasTrivialKill(Idx);
346 
347  // If the index is smaller or larger than intptr_t, truncate or extend it.
348  MVT PtrVT = TLI.getPointerTy(DL);
349  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
350  if (IdxVT.bitsLT(PtrVT)) {
351  IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
352  IdxNIsKill);
353  IdxNIsKill = true;
354  } else if (IdxVT.bitsGT(PtrVT)) {
355  IdxN =
356  fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
357  IdxNIsKill = true;
358  }
359  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
360 }
361 
363  if (getLastLocalValue()) {
365  FuncInfo.MBB = FuncInfo.InsertPt->getParent();
366  ++FuncInfo.InsertPt;
367  } else
369 
370  // Now skip past any EH_LABELs, which must remain at the beginning.
371  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
372  FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
373  ++FuncInfo.InsertPt;
374 }
375 
378  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
379  "Invalid iterator!");
380  while (I != E) {
381  MachineInstr *Dead = &*I;
382  ++I;
383  Dead->eraseFromParent();
384  ++NumFastIselDead;
385  }
387 }
388 
391  DebugLoc OldDL = DbgLoc;
393  DbgLoc = DebugLoc();
394  SavePoint SP = {OldInsertPt, OldDL};
395  return SP;
396 }
397 
399  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
400  LastLocalValue = &*std::prev(FuncInfo.InsertPt);
401 
402  // Restore the previous insert position.
403  FuncInfo.InsertPt = OldInsertPt.InsertPt;
404  DbgLoc = OldInsertPt.DL;
405 }
406 
407 bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
408  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
409  if (VT == MVT::Other || !VT.isSimple())
410  // Unhandled type. Halt "fast" selection and bail.
411  return false;
412 
413  // We only handle legal types. For example, on x86-32 the instruction
414  // selector contains all of the 64-bit instructions from x86-64,
415  // under the assumption that i64 won't be used if the target doesn't
416  // support it.
417  if (!TLI.isTypeLegal(VT)) {
418  // MVT::i1 is special. Allow AND, OR, or XOR because they
419  // don't require additional zeroing, which makes them easy.
420  if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
421  ISDOpcode == ISD::XOR))
422  VT = TLI.getTypeToTransformTo(I->getContext(), VT);
423  else
424  return false;
425  }
426 
427  // Check if the first operand is a constant, and handle it as "ri". At -O0,
428  // we don't have anything that canonicalizes operand order.
429  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
430  if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
431  unsigned Op1 = getRegForValue(I->getOperand(1));
432  if (!Op1)
433  return false;
434  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
435 
436  unsigned ResultReg =
437  fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
438  CI->getZExtValue(), VT.getSimpleVT());
439  if (!ResultReg)
440  return false;
441 
442  // We successfully emitted code for the given LLVM Instruction.
443  updateValueMap(I, ResultReg);
444  return true;
445  }
446 
447  unsigned Op0 = getRegForValue(I->getOperand(0));
448  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
449  return false;
450  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
451 
452  // Check if the second operand is a constant and handle it appropriately.
453  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
454  uint64_t Imm = CI->getSExtValue();
455 
456  // Transform "sdiv exact X, 8" -> "sra X, 3".
457  if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
458  cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
459  Imm = Log2_64(Imm);
460  ISDOpcode = ISD::SRA;
461  }
462 
463  // Transform "urem x, pow2" -> "and x, pow2-1".
464  if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
465  isPowerOf2_64(Imm)) {
466  --Imm;
467  ISDOpcode = ISD::AND;
468  }
469 
470  unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
471  Op0IsKill, Imm, VT.getSimpleVT());
472  if (!ResultReg)
473  return false;
474 
475  // We successfully emitted code for the given LLVM Instruction.
476  updateValueMap(I, ResultReg);
477  return true;
478  }
479 
480  unsigned Op1 = getRegForValue(I->getOperand(1));
481  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
482  return false;
483  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
484 
485  // Now we have both operands in registers. Emit the instruction.
486  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
487  ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
488  if (!ResultReg)
489  // Target-specific code wasn't able to find a machine opcode for
490  // the given ISD opcode and type. Halt "fast" selection and bail.
491  return false;
492 
493  // We successfully emitted code for the given LLVM Instruction.
494  updateValueMap(I, ResultReg);
495  return true;
496 }
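// Illustrative examples (not from the original source) of the constant-operand
// folds above:
//   %r = sdiv exact i32 %x, 8   ; selected as a right shift:  sra %x, 3
//   %r = urem i32 %x, 16        ; selected as a mask:         and %x, 15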
497 
499  unsigned N = getRegForValue(I->getOperand(0));
500  if (!N) // Unhandled operand. Halt "fast" selection and bail.
501  return false;
502  bool NIsKill = hasTrivialKill(I->getOperand(0));
503 
504  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
505  // into a single N = N + TotalOffset.
506  uint64_t TotalOffs = 0;
507  // FIXME: What's a good SWAG number for MaxOffs?
508  uint64_t MaxOffs = 2048;
509  MVT VT = TLI.getPointerTy(DL);
510  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
511  GTI != E; ++GTI) {
512  const Value *Idx = GTI.getOperand();
513  if (StructType *StTy = GTI.getStructTypeOrNull()) {
514  uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
515  if (Field) {
516  // N = N + Offset
517  TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
518  if (TotalOffs >= MaxOffs) {
519  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
520  if (!N) // Unhandled operand. Halt "fast" selection and bail.
521  return false;
522  NIsKill = true;
523  TotalOffs = 0;
524  }
525  }
526  } else {
527  Type *Ty = GTI.getIndexedType();
528 
529  // If this is a constant subscript, handle it quickly.
530  if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
531  if (CI->isZero())
532  continue;
533  // N = N + Offset
534  uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
535  TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
536  if (TotalOffs >= MaxOffs) {
537  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
538  if (!N) // Unhandled operand. Halt "fast" selection and bail.
539  return false;
540  NIsKill = true;
541  TotalOffs = 0;
542  }
543  continue;
544  }
545  if (TotalOffs) {
546  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
547  if (!N) // Unhandled operand. Halt "fast" selection and bail.
548  return false;
549  NIsKill = true;
550  TotalOffs = 0;
551  }
552 
553  // N = N + Idx * ElementSize;
554  uint64_t ElementSize = DL.getTypeAllocSize(Ty);
555  std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
556  unsigned IdxN = Pair.first;
557  bool IdxNIsKill = Pair.second;
558  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
559  return false;
560 
561  if (ElementSize != 1) {
562  IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
563  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
564  return false;
565  IdxNIsKill = true;
566  }
567  N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
568  if (!N) // Unhandled operand. Halt "fast" selection and bail.
569  return false;
570  }
571  }
572  if (TotalOffs) {
573  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
574  if (!N) // Unhandled operand. Halt "fast" selection and bail.
575  return false;
576  }
577 
578  // We successfully emitted code for the given LLVM Instruction.
579  updateValueMap(I, N);
580  return true;
581 }
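// Illustrative example (not from the original source): for a GEP such as
//   %p = getelementptr {i32, [4 x i64]}, {i32, [4 x i64]}* %base,
//                      i64 0, i32 1, i64 %i
// the constant indices accumulate into TotalOffs (8 bytes for field 1), which
// is flushed as a single ADD before the variable index is scaled, roughly:
//   N = ADD N, 8        ; flushed TotalOffs for field 1
//   t = MUL idx_i, 8    ; scale the variable index by the element size
//   N = ADD N, t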
582 
583 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
584  const CallInst *CI, unsigned StartIdx) {
585  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
586  Value *Val = CI->getArgOperand(i);
587  // Check for constants and encode them with a StackMaps::ConstantOp prefix.
588  if (const auto *C = dyn_cast<ConstantInt>(Val)) {
589  Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
590  Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
591  } else if (isa<ConstantPointerNull>(Val)) {
592  Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
594  } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
595  // Values coming from a stack location also require a special encoding,
596  // but that is added later on by the target specific frame index
597  // elimination implementation.
598  auto SI = FuncInfo.StaticAllocaMap.find(AI);
599  if (SI != FuncInfo.StaticAllocaMap.end())
600  Ops.push_back(MachineOperand::CreateFI(SI->second));
601  else
602  return false;
603  } else {
604  unsigned Reg = getRegForValue(Val);
605  if (!Reg)
606  return false;
607  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
608  }
609  }
610  return true;
611 }
612 
614  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
615  // [live variables...])
617  "Stackmap cannot return a value.");
618 
619  // The stackmap intrinsic only records the live variables (the arguments
620  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
621  // intrinsic, this won't be lowered to a function call. This means we don't
622  // have to worry about calling conventions and target-specific lowering code.
623  // Instead we perform the call lowering right here.
624  //
625  // CALLSEQ_START(0, 0...)
626  // STACKMAP(id, nbytes, ...)
627  // CALLSEQ_END(0, 0)
628  //
630 
631  // Add the <id> and <numBytes> constants.
632  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
633  "Expected a constant integer.");
634  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
635  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
636 
637  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
638  "Expected a constant integer.");
639  const auto *NumBytes =
640  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
641  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
642 
643  // Push live variables for the stack map (skipping the first two arguments
644  // <id> and <numBytes>).
645  if (!addStackMapLiveVars(Ops, I, 2))
646  return false;
647 
648  // We are not adding any register mask info here, because the stackmap doesn't
649  // clobber anything.
650 
651  // Add scratch registers as implicit def and early clobber.
653  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
654  for (unsigned i = 0; ScratchRegs[i]; ++i)
655  Ops.push_back(MachineOperand::CreateReg(
656  ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
657  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
658 
659  // Issue CALLSEQ_START
660  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
661  auto Builder =
662  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
663  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
664  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
665  Builder.addImm(0);
666 
667  // Issue STACKMAP.
669  TII.get(TargetOpcode::STACKMAP));
670  for (auto const &MO : Ops)
671  MIB.add(MO);
672 
673  // Issue CALLSEQ_END
674  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
675  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
676  .addImm(0)
677  .addImm(0);
678 
679  // Inform the Frame Information that we have a stackmap in this function.
681 
682  return true;
683 }
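// Illustrative IR (not from the original source) that reaches this path:
//   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 42, i32 8,
//                                                          i32 %live0, i64 %live1)
// where 42 is the <id>, 8 is the number of shadow bytes, and the trailing
// operands are the live values recorded in the stack map.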
684 
685 /// \brief Lower an argument list according to the target calling convention.
686 ///
687 /// This is a helper for lowering intrinsics that follow a target calling
688 /// convention or require stack pointer adjustment. Only a subset of the
689 /// intrinsic's operands need to participate in the calling convention.
690 bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
691  unsigned NumArgs, const Value *Callee,
692  bool ForceRetVoidTy, CallLoweringInfo &CLI) {
693  ArgListTy Args;
694  Args.reserve(NumArgs);
695 
696  // Populate the argument list.
697  ImmutableCallSite CS(CI);
698  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
699  Value *V = CI->getOperand(ArgI);
700 
701  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
702 
703  ArgListEntry Entry;
704  Entry.Val = V;
705  Entry.Ty = V->getType();
706  Entry.setAttributes(&CS, ArgIdx);
707  Args.push_back(Entry);
708  }
709 
710  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
711  : CI->getType();
712  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
713 
714  return lowerCallTo(CLI);
715 }
716 
718  const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
719  StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
720  SmallString<32> MangledName;
721  Mangler::getNameWithPrefix(MangledName, Target, DL);
722  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
723  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
724 }
725 
727  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
728  // i32 <numBytes>,
729  // i8* <target>,
730  // i32 <numArgs>,
731  // [Args...],
732  // [live variables...])
734  bool IsAnyRegCC = CC == CallingConv::AnyReg;
735  bool HasDef = !I->getType()->isVoidTy();
737 
738  // Get the real number of arguments participating in the call <numArgs>
739  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
740  "Expected a constant integer.");
741  const auto *NumArgsVal =
742  cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
743  unsigned NumArgs = NumArgsVal->getZExtValue();
744 
745  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
746  // This includes all meta-operands up to but not including CC.
747  unsigned NumMetaOpers = PatchPointOpers::CCPos;
748  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
749  "Not enough arguments provided to the patchpoint intrinsic");
750 
751  // For AnyRegCC the arguments are lowered later on manually.
752  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
753  CallLoweringInfo CLI;
754  CLI.setIsPatchPoint();
755  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
756  return false;
757 
758  assert(CLI.Call && "No call instruction specified.");
759 
761 
762  // Add an explicit result reg if we use the anyreg calling convention.
763  if (IsAnyRegCC && HasDef) {
764  assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
766  CLI.NumResultRegs = 1;
767  Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
768  }
769 
770  // Add the <id> and <numBytes> constants.
771  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
772  "Expected a constant integer.");
773  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
774  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
775 
776  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
777  "Expected a constant integer.");
778  const auto *NumBytes =
779  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
780  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
781 
782  // Add the call target.
783  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
784  uint64_t CalleeConstAddr =
785  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
786  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
787  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
788  if (C->getOpcode() == Instruction::IntToPtr) {
789  uint64_t CalleeConstAddr =
790  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
791  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
792  } else
793  llvm_unreachable("Unsupported ConstantExpr.");
794  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
796  } else if (isa<ConstantPointerNull>(Callee))
798  else
799  llvm_unreachable("Unsupported callee address.");
800 
801  // Adjust <numArgs> to account for any arguments that have been passed on
802  // the stack instead.
803  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
804  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
805 
806  // Add the calling convention
807  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
808 
809  // Add the arguments we omitted previously. The register allocator should
810  // place these in any free register.
811  if (IsAnyRegCC) {
812  for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
813  unsigned Reg = getRegForValue(I->getArgOperand(i));
814  if (!Reg)
815  return false;
816  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
817  }
818  }
819 
820  // Push the arguments from the call instruction.
821  for (auto Reg : CLI.OutRegs)
822  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
823 
824  // Push live variables for the stack map.
825  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
826  return false;
827 
828  // Push the register mask info.
831 
832  // Add scratch registers as implicit def and early clobber.
833  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
834  for (unsigned i = 0; ScratchRegs[i]; ++i)
836  ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
837  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
838 
839  // Add implicit defs (return values).
840  for (auto Reg : CLI.InRegs)
841  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
842  /*IsImpl=*/true));
843 
844  // Insert the patchpoint instruction before the call generated by the target.
846  TII.get(TargetOpcode::PATCHPOINT));
847 
848  for (auto &MO : Ops)
849  MIB.add(MO);
850 
851  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
852 
853  // Delete the original call instruction.
854  CLI.Call->eraseFromParent();
855 
856  // Inform the Frame Information that we have a patchpoint in this function.
858 
859  if (CLI.NumResultRegs)
861  return true;
862 }
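// Illustrative IR (not from the original source) that reaches this path:
//   %r = call i64 (i64, i32, i8*, i32, ...)
//             @llvm.experimental.patchpoint.i64(i64 7, i32 15, i8* %target,
//                                               i32 2, i64 %a, i64 %b)
// i.e. <id> = 7, <numBytes> = 15, two call arguments, and no extra live
// values after them.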
863 
865  const auto &Triple = TM.getTargetTriple();
867  return true; // don't do anything to this instruction.
870  /*IsDef=*/false));
872  /*IsDef=*/false));
873  MachineInstrBuilder MIB =
875  TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
876  for (auto &MO : Ops)
877  MIB.add(MO);
878  // Insert the Patchable Event Call instruction, that gets lowered properly.
879  return true;
880 }
881 
882 
883 /// Returns an AttributeList representing the attributes applied to the return
884 /// value of the given call.
887  if (CLI.RetSExt)
888  Attrs.push_back(Attribute::SExt);
889  if (CLI.RetZExt)
890  Attrs.push_back(Attribute::ZExt);
891  if (CLI.IsInReg)
892  Attrs.push_back(Attribute::InReg);
893 
895  Attrs);
896 }
897 
898 bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
899  unsigned NumArgs) {
900  MCContext &Ctx = MF->getContext();
901  SmallString<32> MangledName;
902  Mangler::getNameWithPrefix(MangledName, SymName, DL);
903  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
904  return lowerCallTo(CI, Sym, NumArgs);
905 }
906 
908  unsigned NumArgs) {
909  ImmutableCallSite CS(CI);
910 
911  FunctionType *FTy = CS.getFunctionType();
912  Type *RetTy = CS.getType();
913 
914  ArgListTy Args;
915  Args.reserve(NumArgs);
916 
917  // Populate the argument list.
918  // Attributes for args start at offset 1, after the return attribute.
919  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
920  Value *V = CI->getOperand(ArgI);
921 
922  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
923 
924  ArgListEntry Entry;
925  Entry.Val = V;
926  Entry.Ty = V->getType();
927  Entry.setAttributes(&CS, ArgI);
928  Args.push_back(Entry);
929  }
931 
932  CallLoweringInfo CLI;
933  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
934 
935  return lowerCallTo(CLI);
936 }
937 
939  // Handle the incoming return values from the call.
940  CLI.clearIns();
941  SmallVector<EVT, 4> RetTys;
942  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
943 
945  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
946 
947  bool CanLowerReturn = TLI.CanLowerReturn(
948  CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
949 
950  // FIXME: sret demotion isn't supported yet - bail out.
951  if (!CanLowerReturn)
952  return false;
953 
954  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
955  EVT VT = RetTys[I];
956  MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
957  unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
958  for (unsigned i = 0; i != NumRegs; ++i) {
959  ISD::InputArg MyFlags;
960  MyFlags.VT = RegisterVT;
961  MyFlags.ArgVT = VT;
962  MyFlags.Used = CLI.IsReturnValueUsed;
963  if (CLI.RetSExt)
964  MyFlags.Flags.setSExt();
965  if (CLI.RetZExt)
966  MyFlags.Flags.setZExt();
967  if (CLI.IsInReg)
968  MyFlags.Flags.setInReg();
969  CLI.Ins.push_back(MyFlags);
970  }
971  }
972 
973  // Handle all of the outgoing arguments.
974  CLI.clearOuts();
975  for (auto &Arg : CLI.getArgs()) {
976  Type *FinalType = Arg.Ty;
977  if (Arg.IsByVal)
978  FinalType = cast<PointerType>(Arg.Ty)->getElementType();
979  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
980  FinalType, CLI.CallConv, CLI.IsVarArg);
981 
983  if (Arg.IsZExt)
984  Flags.setZExt();
985  if (Arg.IsSExt)
986  Flags.setSExt();
987  if (Arg.IsInReg)
988  Flags.setInReg();
989  if (Arg.IsSRet)
990  Flags.setSRet();
991  if (Arg.IsSwiftSelf)
992  Flags.setSwiftSelf();
993  if (Arg.IsSwiftError)
994  Flags.setSwiftError();
995  if (Arg.IsByVal)
996  Flags.setByVal();
997  if (Arg.IsInAlloca) {
998  Flags.setInAlloca();
999  // Set the byval flag for CCAssignFn callbacks that don't know about
1000  // inalloca. This way we can know how many bytes we should've allocated
1001  // and how many bytes a callee cleanup function will pop. If we port
1002  // inalloca to more targets, we'll have to add custom inalloca handling in
1003  // the various CC lowering callbacks.
1004  Flags.setByVal();
1005  }
1006  if (Arg.IsByVal || Arg.IsInAlloca) {
1007  PointerType *Ty = cast<PointerType>(Arg.Ty);
1008  Type *ElementTy = Ty->getElementType();
1009  unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
1010  // For ByVal, alignment should come from the frontend (FE); the backend (BE)
1011  // will guess if this info is not there, but there are cases it cannot get right.
1012  unsigned FrameAlign = Arg.Alignment;
1013  if (!FrameAlign)
1014  FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
1015  Flags.setByValSize(FrameSize);
1016  Flags.setByValAlign(FrameAlign);
1017  }
1018  if (Arg.IsNest)
1019  Flags.setNest();
1020  if (NeedsRegBlock)
1021  Flags.setInConsecutiveRegs();
1022  unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
1023  Flags.setOrigAlign(OriginalAlignment);
1024 
1025  CLI.OutVals.push_back(Arg.Val);
1026  CLI.OutFlags.push_back(Flags);
1027  }
1028 
1029  if (!fastLowerCall(CLI))
1030  return false;
1031 
1032  // Set all unused physreg defs as dead.
1033  assert(CLI.Call && "No call instruction specified.");
1034  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
1035 
1036  if (CLI.NumResultRegs && CLI.CS)
1038 
1039  return true;
1040 }
1041 
1043  ImmutableCallSite CS(CI);
1044 
1045  FunctionType *FuncTy = CS.getFunctionType();
1046  Type *RetTy = CS.getType();
1047 
1048  ArgListTy Args;
1049  ArgListEntry Entry;
1050  Args.reserve(CS.arg_size());
1051 
1052  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1053  i != e; ++i) {
1054  Value *V = *i;
1055 
1056  // Skip empty types
1057  if (V->getType()->isEmptyTy())
1058  continue;
1059 
1060  Entry.Val = V;
1061  Entry.Ty = V->getType();
1062 
1063  // Skip the first return-type Attribute to get to params.
1064  Entry.setAttributes(&CS, i - CS.arg_begin());
1065  Args.push_back(Entry);
1066  }
1067 
1068  // Check if target-independent constraints permit a tail call here.
1069  // Target-dependent constraints are checked within fastLowerCall.
1070  bool IsTailCall = CI->isTailCall();
1071  if (IsTailCall && !isInTailCallPosition(CS, TM))
1072  IsTailCall = false;
1073 
1074  CallLoweringInfo CLI;
1075  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
1076  .setTailCall(IsTailCall);
1077 
1078  return lowerCallTo(CLI);
1079 }
1080 
1082  const CallInst *Call = cast<CallInst>(I);
1083 
1084  // Handle simple inline asms.
1085  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
1086  // If the inline asm has side effects, then make sure that no local value
1087  // is live across it, by flushing the local value map.
1088  if (IA->hasSideEffects())
1089  flushLocalValueMap();
1090 
1091  // Don't attempt to handle constraints.
1092  if (!IA->getConstraintString().empty())
1093  return false;
1094 
1095  unsigned ExtraInfo = 0;
1096  if (IA->hasSideEffects())
1097  ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1098  if (IA->isAlignStack())
1099  ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1100 
1103  .addExternalSymbol(IA->getAsmString().c_str())
1104  .addImm(ExtraInfo);
1105  return true;
1106  }
1107 
1108  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
1109  computeUsesVAFloatArgument(*Call, MMI);
1110 
1111  // Handle intrinsic function calls.
1112  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1113  return selectIntrinsicCall(II);
1114 
1115  // Usually it does not make sense to initialize a value, make an unrelated
1116  // function call, and then use the value, because the value tends to be
1117  // spilled on the stack. So we move the pointer to the last local value to
1118  // the beginning of the block, so that all the values which have already
1119  // been materialized appear after the call.
1120  // It also makes sense to skip intrinsics,
1121  // since they tend to be inlined.
1122  flushLocalValueMap();
1123 
1124  return lowerCall(Call);
1125 }
1126 
1128  switch (II->getIntrinsicID()) {
1129  default:
1130  break;
1131  // At -O0 we don't care about the lifetime intrinsics.
1132  case Intrinsic::lifetime_start:
1133  case Intrinsic::lifetime_end:
1134  // The donothing intrinsic does, well, nothing.
1135  case Intrinsic::donothing:
1136  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1137  case Intrinsic::assume:
1138  return true;
1139  case Intrinsic::dbg_declare: {
1140  const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1141  assert(DI->getVariable() && "Missing variable");
1142  if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1143  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1144  return true;
1145  }
1146 
1147  const Value *Address = DI->getAddress();
1148  if (!Address || isa<UndefValue>(Address)) {
1149  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1150  return true;
1151  }
1152 
1153  // Byval arguments with frame indices were already handled after argument
1154  // lowering and before isel.
1155  const auto *Arg =
1157  if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
1158  return true;
1159 
1161  if (unsigned Reg = lookUpRegForValue(Address))
1162  Op = MachineOperand::CreateReg(Reg, false);
1163 
1164  // If we have a VLA that has a "use" in a metadata node that's then used
1165  // here but it has no other uses, then we have a problem. E.g.,
1166  //
1167  // int foo (const int *x) {
1168  // char a[*x];
1169  // return 0;
1170  // }
1171  //
1172  // If we assign 'a' a vreg and fast isel later on has to use the selection
1173  // DAG isel, it will want to copy the value to the vreg. However, there are
1174  // no uses, which goes counter to what selection DAG isel expects.
1175  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1176  (!isa<AllocaInst>(Address) ||
1177  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1179  false);
1180 
1181  if (Op) {
1183  "Expected inlined-at fields to agree");
1184  if (Op->isReg()) {
1185  Op->setIsDebug(true);
1186  // A dbg.declare describes the address of a source variable, so lower it
1187  // into an indirect DBG_VALUE.
1189  TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
1190  Op->getReg(), 0, DI->getVariable(), DI->getExpression());
1191  } else
1193  TII.get(TargetOpcode::DBG_VALUE))
1194  .add(*Op)
1195  .addImm(0)
1196  .addMetadata(DI->getVariable())
1197  .addMetadata(DI->getExpression());
1198  } else {
1199  // We can't yet handle anything else here because it would require
1200  // generating code, thus altering codegen because of debug info.
1201  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1202  }
1203  return true;
1204  }
1205  case Intrinsic::dbg_value: {
1206  // This form of DBG_VALUE is target-independent.
1207  const DbgValueInst *DI = cast<DbgValueInst>(II);
1208  const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1209  const Value *V = DI->getValue();
1211  "Expected inlined-at fields to agree");
1212  if (!V) {
1213  // Currently the optimizer can produce this; insert an undef to
1214  // help debugging. Probably the optimizer should not do this.
1216  .addReg(0U)
1217  .addImm(DI->getOffset())
1218  .addMetadata(DI->getVariable())
1219  .addMetadata(DI->getExpression());
1220  } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1221  if (CI->getBitWidth() > 64)
1223  .addCImm(CI)
1224  .addImm(DI->getOffset())
1225  .addMetadata(DI->getVariable())
1226  .addMetadata(DI->getExpression());
1227  else
1229  .addImm(CI->getZExtValue())
1230  .addImm(DI->getOffset())
1231  .addMetadata(DI->getVariable())
1232  .addMetadata(DI->getExpression());
1233  } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1235  .addFPImm(CF)
1236  .addImm(DI->getOffset())
1237  .addMetadata(DI->getVariable())
1238  .addMetadata(DI->getExpression());
1239  } else if (unsigned Reg = lookUpRegForValue(V)) {
1240  // FIXME: This does not handle register-indirect values at offset 0.
1241  bool IsIndirect = DI->getOffset() != 0;
1242  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1243  DI->getOffset(), DI->getVariable(), DI->getExpression());
1244  } else {
1245  // We can't yet handle anything else here because it would require
1246  // generating code, thus altering codegen because of debug info.
1247  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1248  }
1249  return true;
1250  }
1251  case Intrinsic::objectsize: {
1252  ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
1253  unsigned long long Res = CI->isZero() ? -1ULL : 0;
1254  Constant *ResCI = ConstantInt::get(II->getType(), Res);
1255  unsigned ResultReg = getRegForValue(ResCI);
1256  if (!ResultReg)
1257  return false;
1258  updateValueMap(II, ResultReg);
1259  return true;
1260  }
1261  case Intrinsic::invariant_group_barrier:
1262  case Intrinsic::expect: {
1263  unsigned ResultReg = getRegForValue(II->getArgOperand(0));
1264  if (!ResultReg)
1265  return false;
1266  updateValueMap(II, ResultReg);
1267  return true;
1268  }
1269  case Intrinsic::experimental_stackmap:
1270  return selectStackmap(II);
1271  case Intrinsic::experimental_patchpoint_void:
1272  case Intrinsic::experimental_patchpoint_i64:
1273  return selectPatchpoint(II);
1274 
1275  case Intrinsic::xray_customevent:
1276  return selectXRayCustomEvent(II);
1277  }
1278 
1279  return fastLowerIntrinsicCall(II);
1280 }
1281 
1282 bool FastISel::selectCast(const User *I, unsigned Opcode) {
1283  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1284  EVT DstVT = TLI.getValueType(DL, I->getType());
1285 
1286  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1287  !DstVT.isSimple())
1288  // Unhandled type. Halt "fast" selection and bail.
1289  return false;
1290 
1291  // Check if the destination type is legal.
1292  if (!TLI.isTypeLegal(DstVT))
1293  return false;
1294 
1295  // Check if the source operand is legal.
1296  if (!TLI.isTypeLegal(SrcVT))
1297  return false;
1298 
1299  unsigned InputReg = getRegForValue(I->getOperand(0));
1300  if (!InputReg)
1301  // Unhandled operand. Halt "fast" selection and bail.
1302  return false;
1303 
1304  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
1305 
1306  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1307  Opcode, InputReg, InputRegIsKill);
1308  if (!ResultReg)
1309  return false;
1310 
1311  updateValueMap(I, ResultReg);
1312  return true;
1313 }
1314 
1316  // If the bitcast doesn't change the type, just use the operand value.
1317  if (I->getType() == I->getOperand(0)->getType()) {
1318  unsigned Reg = getRegForValue(I->getOperand(0));
1319  if (!Reg)
1320  return false;
1321  updateValueMap(I, Reg);
1322  return true;
1323  }
1324 
1325  // Bitcasts of other values become reg-reg copies or BITCAST operators.
1326  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1327  EVT DstEVT = TLI.getValueType(DL, I->getType());
1328  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1329  !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1330  // Unhandled type. Halt "fast" selection and bail.
1331  return false;
1332 
1333  MVT SrcVT = SrcEVT.getSimpleVT();
1334  MVT DstVT = DstEVT.getSimpleVT();
1335  unsigned Op0 = getRegForValue(I->getOperand(0));
1336  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1337  return false;
1338  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1339 
1340  // First, try to perform the bitcast by inserting a reg-reg copy.
1341  unsigned ResultReg = 0;
1342  if (SrcVT == DstVT) {
1343  const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1344  const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1345  // Don't attempt a cross-class copy. It will likely fail.
1346  if (SrcClass == DstClass) {
1347  ResultReg = createResultReg(DstClass);
1349  TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1350  }
1351  }
1352 
1353  // If the reg-reg copy failed, select a BITCAST opcode.
1354  if (!ResultReg)
1355  ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1356 
1357  if (!ResultReg)
1358  return false;
1359 
1360  updateValueMap(I, ResultReg);
1361  return true;
1362 }
1363 
1364 // Remove local value instructions, from the instruction after
1365 // SavedLastLocalValue up to the current function insert point.
1366 void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1367 {
1368  MachineInstr *CurLastLocalValue = getLastLocalValue();
1369  if (CurLastLocalValue != SavedLastLocalValue) {
1370  // Find the first local value instruction to be deleted.
1371  // This is the instruction after SavedLastLocalValue if it is non-NULL.
1372  // Otherwise it's the first instruction in the block.
1373  MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1374  if (SavedLastLocalValue)
1375  ++FirstDeadInst;
1376  else
1377  FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1378  setLastLocalValue(SavedLastLocalValue);
1379  removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1380  }
1381 }
1382 
1384  MachineInstr *SavedLastLocalValue = getLastLocalValue();
1385  // Just before the terminator instruction, insert instructions to
1386  // feed PHI nodes in successor blocks.
1387  if (isa<TerminatorInst>(I)) {
1388  if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1389  // PHI node handling may have generated local value instructions,
1390  // even though it failed to handle all PHI nodes.
1391  // We remove these instructions because SelectionDAGISel will generate
1392  // them again.
1393  removeDeadLocalValueCode(SavedLastLocalValue);
1394  return false;
1395  }
1396  }
1397 
1398  // FastISel does not handle any operand bundles except OB_funclet.
1400  for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
1401  if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1402  return false;
1403 
1404  DbgLoc = I->getDebugLoc();
1405 
1406  SavedInsertPt = FuncInfo.InsertPt;
1407 
1408  if (const auto *Call = dyn_cast<CallInst>(I)) {
1409  const Function *F = Call->getCalledFunction();
1410  LibFunc Func;
1411 
1412  // As a special case, don't handle calls to builtin library functions that
1413  // may be translated directly to target instructions.
1414  if (F && !F->hasLocalLinkage() && F->hasName() &&
1415  LibInfo->getLibFunc(F->getName(), Func) &&
1417  return false;
1418 
1419  // Don't handle Intrinsic::trap if a trap function is specified.
1420  if (F && F->getIntrinsicID() == Intrinsic::trap &&
1421  Call->hasFnAttr("trap-func-name"))
1422  return false;
1423  }
1424 
1425  // First, try doing target-independent selection.
1427  if (selectOperator(I, I->getOpcode())) {
1428  ++NumFastIselSuccessIndependent;
1429  DbgLoc = DebugLoc();
1430  return true;
1431  }
1432  // Remove dead code.
1434  if (SavedInsertPt != FuncInfo.InsertPt)
1435  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1436  SavedInsertPt = FuncInfo.InsertPt;
1437  }
1438  // Next, try calling the target to attempt to handle the instruction.
1439  if (fastSelectInstruction(I)) {
1440  ++NumFastIselSuccessTarget;
1441  DbgLoc = DebugLoc();
1442  return true;
1443  }
1444  // Remove dead code.
1446  if (SavedInsertPt != FuncInfo.InsertPt)
1447  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1448 
1449  DbgLoc = DebugLoc();
1450  // Undo phi node updates, because they will be added again by SelectionDAG.
1451  if (isa<TerminatorInst>(I)) {
1452  // PHI node handling may have generated local value instructions.
1453  // We remove them because SelectionDAGISel will generate them again.
1454  removeDeadLocalValueCode(SavedLastLocalValue);
1456  }
1457  return false;
1458 }
1459 
1460 /// Emit an unconditional branch to the given block, unless it is the immediate
1461 /// (fall-through) successor, and update the CFG.
1463  const DebugLoc &DbgLoc) {
1464  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
1465  FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1466  // For more accurate line information, if this is the only instruction
1467  // in the block, then emit it; otherwise we have the unconditional
1468  // fall-through case, which needs no instructions.
1469  } else {
1470  // The unconditional branch case.
1471  TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1472  SmallVector<MachineOperand, 0>(), DbgLoc);
1473  }
1474  if (FuncInfo.BPI) {
1476  FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
1478  } else
1480 }
1481 
1483  MachineBasicBlock *TrueMBB,
1484  MachineBasicBlock *FalseMBB) {
1485  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
1486  // happen in degenerate IR, and MachineIR forbids having a block appear twice
1487  // in the successor/predecessor lists.
1488  if (TrueMBB != FalseMBB) {
1489  if (FuncInfo.BPI) {
1490  auto BranchProbability =
1491  FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1493  } else
1495  }
1496 
1497  fastEmitBranch(FalseMBB, DbgLoc);
1498 }
1499 
1500 /// Emit an FNeg operation.
1502  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
1503  if (!OpReg)
1504  return false;
1505  bool OpRegIsKill = hasTrivialKill(I);
1506 
1507  // If the target has ISD::FNEG, use it.
1508  EVT VT = TLI.getValueType(DL, I->getType());
1509  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1510  OpReg, OpRegIsKill);
1511  if (ResultReg) {
1512  updateValueMap(I, ResultReg);
1513  return true;
1514  }
1515 
1516  // Bitcast the value to integer, twiddle the sign bit with xor,
1517  // and then bitcast it back to floating-point.
1518  if (VT.getSizeInBits() > 64)
1519  return false;
1520  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1521  if (!TLI.isTypeLegal(IntVT))
1522  return false;
1523 
1524  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1525  ISD::BITCAST, OpReg, OpRegIsKill);
1526  if (!IntReg)
1527  return false;
1528 
1529  unsigned IntResultReg = fastEmit_ri_(
1530  IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1531  UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1532  if (!IntResultReg)
1533  return false;
1534 
1535  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1536  IntResultReg, /*IsKill=*/true);
1537  if (!ResultReg)
1538  return false;
1539 
1540  updateValueMap(I, ResultReg);
1541  return true;
1542 }
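// Illustrative example (not from the original source): for an f64 fneg on a
// target without ISD::FNEG, the fallback above emits roughly
//   i = BITCAST f64 -> i64
//   i = XOR i, 0x8000000000000000   ; flip the sign bit
//   r = BITCAST i64 -> f64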
1543 
1545  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1546  if (!EVI)
1547  return false;
1548 
1549  // Make sure we only try to handle extracts with a legal result. But also
1550  // allow i1 because it's easy.
1551  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1552  if (!RealVT.isSimple())
1553  return false;
1554  MVT VT = RealVT.getSimpleVT();
1555  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1556  return false;
1557 
1558  const Value *Op0 = EVI->getOperand(0);
1559  Type *AggTy = Op0->getType();
1560 
1561  // Get the base result register.
1562  unsigned ResultReg;
1564  if (I != FuncInfo.ValueMap.end())
1565  ResultReg = I->second;
1566  else if (isa<Instruction>(Op0))
1567  ResultReg = FuncInfo.InitializeRegForValue(Op0);
1568  else
1569  return false; // fast-isel can't handle aggregate constants at the moment
1570 
1571  // Get the actual result register, which is an offset from the base register.
1572  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1573 
1574  SmallVector<EVT, 4> AggValueVTs;
1575  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1576 
1577  for (unsigned i = 0; i < VTIndex; i++)
1578  ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1579 
1580  updateValueMap(EVI, ResultReg);
1581  return true;
1582 }
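// Illustrative example (not from the original source): for
//   %v = extractvalue {i32, {i64, float}} %agg, 1, 1
// ComputeLinearIndex flattens the aggregate to [i32, i64, float], giving
// VTIndex = 2, so ResultReg is the base register advanced past the registers
// used for the i32 and i64 members.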
1583 
1584 bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1585  switch (Opcode) {
1586  case Instruction::Add:
1587  return selectBinaryOp(I, ISD::ADD);
1588  case Instruction::FAdd:
1589  return selectBinaryOp(I, ISD::FADD);
1590  case Instruction::Sub:
1591  return selectBinaryOp(I, ISD::SUB);
1592  case Instruction::FSub:
1593  // FNeg is currently represented in LLVM IR as a special case of FSub.
1594  if (BinaryOperator::isFNeg(I))
1595  return selectFNeg(I);
1596  return selectBinaryOp(I, ISD::FSUB);
1597  case Instruction::Mul:
1598  return selectBinaryOp(I, ISD::MUL);
1599  case Instruction::FMul:
1600  return selectBinaryOp(I, ISD::FMUL);
1601  case Instruction::SDiv:
1602  return selectBinaryOp(I, ISD::SDIV);
1603  case Instruction::UDiv:
1604  return selectBinaryOp(I, ISD::UDIV);
1605  case Instruction::FDiv:
1606  return selectBinaryOp(I, ISD::FDIV);
1607  case Instruction::SRem:
1608  return selectBinaryOp(I, ISD::SREM);
1609  case Instruction::URem:
1610  return selectBinaryOp(I, ISD::UREM);
1611  case Instruction::FRem:
1612  return selectBinaryOp(I, ISD::FREM);
1613  case Instruction::Shl:
1614  return selectBinaryOp(I, ISD::SHL);
1615  case Instruction::LShr:
1616  return selectBinaryOp(I, ISD::SRL);
1617  case Instruction::AShr:
1618  return selectBinaryOp(I, ISD::SRA);
1619  case Instruction::And:
1620  return selectBinaryOp(I, ISD::AND);
1621  case Instruction::Or:
1622  return selectBinaryOp(I, ISD::OR);
1623  case Instruction::Xor:
1624  return selectBinaryOp(I, ISD::XOR);
1625 
1626  case Instruction::GetElementPtr:
1627  return selectGetElementPtr(I);
1628 
1629  case Instruction::Br: {
1630  const BranchInst *BI = cast<BranchInst>(I);
1631 
1632  if (BI->isUnconditional()) {
1633  const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1634  MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1635  fastEmitBranch(MSucc, BI->getDebugLoc());
1636  return true;
1637  }
1638 
1639  // Conditional branches are not handled yet.
1640  // Halt "fast" selection and bail.
1641  return false;
1642  }
1643 
1644  case Instruction::Unreachable:
1646  return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1647  else
1648  return true;
1649 
1650  case Instruction::Alloca:
1651  // FunctionLowering has the static-sized case covered.
1652  if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1653  return true;
1654 
1655  // Dynamic-sized alloca is not handled yet.
1656  return false;
1657 
1658  case Instruction::Call:
1659  return selectCall(I);
1660 
1661  case Instruction::BitCast:
1662  return selectBitCast(I);
1663 
1664  case Instruction::FPToSI:
1665  return selectCast(I, ISD::FP_TO_SINT);
1666  case Instruction::ZExt:
1667  return selectCast(I, ISD::ZERO_EXTEND);
1668  case Instruction::SExt:
1669  return selectCast(I, ISD::SIGN_EXTEND);
1670  case Instruction::Trunc:
1671  return selectCast(I, ISD::TRUNCATE);
1672  case Instruction::SIToFP:
1673  return selectCast(I, ISD::SINT_TO_FP);
1674 
1675  case Instruction::IntToPtr: // Deliberate fall-through.
1676  case Instruction::PtrToInt: {
1677  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1678  EVT DstVT = TLI.getValueType(DL, I->getType());
1679  if (DstVT.bitsGT(SrcVT))
1680  return selectCast(I, ISD::ZERO_EXTEND);
1681  if (DstVT.bitsLT(SrcVT))
1682  return selectCast(I, ISD::TRUNCATE);
1683  unsigned Reg = getRegForValue(I->getOperand(0));
1684  if (!Reg)
1685  return false;
1686  updateValueMap(I, Reg);
1687  return true;
1688  }
1689 
1690  case Instruction::ExtractValue:
1691  return selectExtractValue(I);
1692 
1693  case Instruction::PHI:
1694  llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1695 
1696  default:
1697  // Unhandled instruction. Halt "fast" selection and bail.
1698  return false;
1699  }
1700 }
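Editorial aside: the IntToPtr/PtrToInt case above reduces to a comparison of bit widths. A minimal sketch of that decision, using hypothetical names rather than the EVT bitsGT/bitsLT API:

    enum class CastKind { ZExt, Trunc, NoOp };

    // Widen with a zero-extend, narrow with a truncate, otherwise reuse the
    // source register unchanged, mirroring the checks in selectOperator.
    CastKind pickIntPtrCast(unsigned SrcBits, unsigned DstBits) {
      if (DstBits > SrcBits)
        return CastKind::ZExt;
      if (DstBits < SrcBits)
        return CastKind::Trunc;
      return CastKind::NoOp;
    }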
1701 
1702 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1703                    const TargetLibraryInfo *LibInfo,
1704                    bool SkipTargetIndependentISel)
1705     : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1706  MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1707  TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1708  TII(*MF->getSubtarget().getInstrInfo()),
1709  TLI(*MF->getSubtarget().getTargetLowering()),
1710  TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1711  SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1712 
1713 FastISel::~FastISel() = default;
1714 
1715 bool FastISel::fastLowerArguments() { return false; }
1716 
1717 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1718 
1719 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1720  return false;
1721 }
1722 
1723 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1724 
1725 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
1726  bool /*Op0IsKill*/) {
1727  return 0;
1728 }
1729 
1730 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1731  bool /*Op0IsKill*/, unsigned /*Op1*/,
1732  bool /*Op1IsKill*/) {
1733  return 0;
1734 }
1735 
1736 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1737  return 0;
1738 }
1739 
1740 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1741  const ConstantFP * /*FPImm*/) {
1742  return 0;
1743 }
1744 
1745 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1746  bool /*Op0IsKill*/, uint64_t /*Imm*/) {
1747  return 0;
1748 }
1749 
1750 /// This method is a wrapper of fastEmit_ri. It first tries to emit an
1751 /// instruction with an immediate operand using fastEmit_ri.
1752 /// If that fails, it materializes the immediate into a register and tries
1753 /// fastEmit_rr instead.
1754 unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1755  bool Op0IsKill, uint64_t Imm, MVT ImmType) {
1756  // If this is a multiply by a power of two, emit this as a shift left.
1757  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1758  Opcode = ISD::SHL;
1759  Imm = Log2_64(Imm);
1760  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1761  // div x, 8 -> srl x, 3
1762  Opcode = ISD::SRL;
1763  Imm = Log2_64(Imm);
1764  }
1765 
1766  // Horrible hack (to be removed), check to make sure shift amounts are
1767  // in-range.
1768  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1769  Imm >= VT.getSizeInBits())
1770  return 0;
1771 
1772  // First check if immediate type is legal. If not, we can't use the ri form.
1773  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1774  if (ResultReg)
1775  return ResultReg;
1776  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1777  bool IsImmKill = true;
1778  if (!MaterialReg) {
1779  // This is a bit ugly/slow, but failing here means falling out of
1780  // fast-isel, which would be very slow.
1781  IntegerType *ITy =
1782      IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1783  MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1784  if (!MaterialReg)
1785  return 0;
1786  // FIXME: If the materialized register here has no uses yet then this
1787  // will be the first use and we should be able to mark it as killed.
1788  // However, the local value area for materialising constant expressions
1789  // grows down, not up, which means that any constant expressions we generate
1790  // later which also use 'Imm' could be after this instruction and therefore
1791  // after this kill.
1792  IsImmKill = false;
1793  }
1794  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
1795 }
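Editorial aside: the rewrite at the top of fastEmit_ri_ is ordinary strength reduction. A standalone illustration in plain C++ (isPow2 and log2u64 are local helpers, not LLVM's isPowerOf2_64/Log2_64):

    #include <cassert>
    #include <cstdint>

    static bool isPow2(uint64_t V) { return V && (V & (V - 1)) == 0; }
    static unsigned log2u64(uint64_t V) {
      unsigned L = 0;
      while (V >>= 1)
        ++L;
      return L;
    }

    // mul x, 8 -> shl x, 3, and udiv x, 8 -> srl x, 3, as in the listing.
    uint64_t mulByPow2(uint64_t X, uint64_t Imm) {
      assert(isPow2(Imm) && "only valid for power-of-two immediates");
      return X << log2u64(Imm);
    }
    uint64_t udivByPow2(uint64_t X, uint64_t Imm) {
      assert(isPow2(Imm) && "only valid for power-of-two immediates");
      return X >> log2u64(Imm);
    }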
1796 
1797 unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
1798  return MRI.createVirtualRegister(RC);
1799 }
1800 
1801 unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
1802                                             unsigned OpNum) {
1803  if (TargetRegisterInfo::isVirtualRegister(Op)) {
1804   const TargetRegisterClass *RegClass =
1805       TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1806   if (!MRI.constrainRegClass(Op, RegClass)) {
1807    // If it's not legal to COPY between the register classes, something
1808    // has gone very wrong before we got here.
1809    unsigned NewOp = createResultReg(RegClass);
1810    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1811            TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1812  return NewOp;
1813  }
1814  }
1815  return Op;
1816 }
1817 
1818 unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1819  const TargetRegisterClass *RC) {
1820  unsigned ResultReg = createResultReg(RC);
1821  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1822 
1823  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
1824  return ResultReg;
1825 }
1826 
1827 unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1828  const TargetRegisterClass *RC, unsigned Op0,
1829  bool Op0IsKill) {
1830  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1831 
1832  unsigned ResultReg = createResultReg(RC);
1833  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1834 
1835  if (II.getNumDefs() >= 1)
1836  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1837  .addReg(Op0, getKillRegState(Op0IsKill));
1838  else {
1839   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1840       .addReg(Op0, getKillRegState(Op0IsKill));
1841   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1842           TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1843  }
1844 
1845  return ResultReg;
1846 }
1847 
1848 unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1849  const TargetRegisterClass *RC, unsigned Op0,
1850  bool Op0IsKill, unsigned Op1,
1851  bool Op1IsKill) {
1852  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1853 
1854  unsigned ResultReg = createResultReg(RC);
1855  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1856  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1857 
1858  if (II.getNumDefs() >= 1)
1859  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1860  .addReg(Op0, getKillRegState(Op0IsKill))
1861  .addReg(Op1, getKillRegState(Op1IsKill));
1862  else {
1863   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1864       .addReg(Op0, getKillRegState(Op0IsKill))
1865       .addReg(Op1, getKillRegState(Op1IsKill));
1866   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1867           TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1868  }
1869  return ResultReg;
1870 }
1871 
1872 unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1873  const TargetRegisterClass *RC, unsigned Op0,
1874  bool Op0IsKill, unsigned Op1,
1875  bool Op1IsKill, unsigned Op2,
1876  bool Op2IsKill) {
1877  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1878 
1879  unsigned ResultReg = createResultReg(RC);
1880  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1881  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1882  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1883 
1884  if (II.getNumDefs() >= 1)
1885  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1886  .addReg(Op0, getKillRegState(Op0IsKill))
1887  .addReg(Op1, getKillRegState(Op1IsKill))
1888  .addReg(Op2, getKillRegState(Op2IsKill));
1889  else {
1890   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1891       .addReg(Op0, getKillRegState(Op0IsKill))
1892       .addReg(Op1, getKillRegState(Op1IsKill))
1893       .addReg(Op2, getKillRegState(Op2IsKill));
1894   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1895           TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1896  }
1897  return ResultReg;
1898 }
1899 
1900 unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
1901  const TargetRegisterClass *RC, unsigned Op0,
1902  bool Op0IsKill, uint64_t Imm) {
1903  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1904 
1905  unsigned ResultReg = createResultReg(RC);
1906  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1907 
1908  if (II.getNumDefs() >= 1)
1909  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1910  .addReg(Op0, getKillRegState(Op0IsKill))
1911  .addImm(Imm);
1912  else {
1913   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1914       .addReg(Op0, getKillRegState(Op0IsKill))
1915       .addImm(Imm);
1916   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1917           TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1918  }
1919  return ResultReg;
1920 }
1921 
1922 unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
1923  const TargetRegisterClass *RC, unsigned Op0,
1924  bool Op0IsKill, uint64_t Imm1,
1925  uint64_t Imm2) {
1926  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1927 
1928  unsigned ResultReg = createResultReg(RC);
1929  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1930 
1931  if (II.getNumDefs() >= 1)
1932  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1933  .addReg(Op0, getKillRegState(Op0IsKill))
1934  .addImm(Imm1)
1935  .addImm(Imm2);
1936  else {
1937   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1938       .addReg(Op0, getKillRegState(Op0IsKill))
1939       .addImm(Imm1)
1940       .addImm(Imm2);
1941   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1942           TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1943  }
1944  return ResultReg;
1945 }
1946 
1947 unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
1948  const TargetRegisterClass *RC,
1949  const ConstantFP *FPImm) {
1950  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1951 
1952  unsigned ResultReg = createResultReg(RC);
1953 
1954  if (II.getNumDefs() >= 1)
1955  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1956  .addFPImm(FPImm);
1957  else {
1958   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1959       .addFPImm(FPImm);
1960   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1961           TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1962  }
1963  return ResultReg;
1964 }
1965 
1966 unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
1967  const TargetRegisterClass *RC, unsigned Op0,
1968  bool Op0IsKill, unsigned Op1,
1969  bool Op1IsKill, uint64_t Imm) {
1970  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1971 
1972  unsigned ResultReg = createResultReg(RC);
1973  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1974  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1975 
1976  if (II.getNumDefs() >= 1)
1977  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1978  .addReg(Op0, getKillRegState(Op0IsKill))
1979  .addReg(Op1, getKillRegState(Op1IsKill))
1980  .addImm(Imm);
1981  else {
1982   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1983       .addReg(Op0, getKillRegState(Op0IsKill))
1984       .addReg(Op1, getKillRegState(Op1IsKill))
1985       .addImm(Imm);
1986   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1987           TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1988  }
1989  return ResultReg;
1990 }
1991 
1992 unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
1993  const TargetRegisterClass *RC, uint64_t Imm) {
1994  unsigned ResultReg = createResultReg(RC);
1995  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1996 
1997  if (II.getNumDefs() >= 1)
1998  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1999  .addImm(Imm);
2000  else {
2001   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
2002   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2003           TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2004  }
2005  return ResultReg;
2006 }
2007 
2008 unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
2009  bool Op0IsKill, uint32_t Idx) {
2010  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2011  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
2012         "Cannot yet extract from physregs");
2013  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2014  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2015  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
2016  ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
2017  return ResultReg;
2018 }
2019 
2020 /// Emit MachineInstrs to compute the value of Op with all but the least
2021 /// significant bit set to zero.
2022 unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
2023  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
2024 }
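Editorial aside: zero-extending a boolean held in a wider register is just masking off everything but the low bit, which is the ISD::AND with immediate 1 requested above. A one-line standalone equivalent in plain C++:

    #include <cstdint>

    // All upper bits are forced to zero; only the boolean bit survives.
    uint32_t zextFromI1(uint32_t RegHoldingBool) { return RegHoldingBool & 1u; }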
2025 
2026 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2027 /// Emit code to ensure constants are copied into registers when needed.
2028 /// Remember the virtual registers that need to be added to the Machine PHI
2029 /// nodes as input. We cannot just directly add them, because expansion
2030 /// might result in multiple MBB's for one BB. As such, the start of the
2031 /// BB might correspond to a different MBB than the end.
2032 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2033  const TerminatorInst *TI = LLVMBB->getTerminator();
2034 
2035  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2036  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2037 
2038  // Check successor nodes' PHI nodes that expect a constant to be available
2039  // from this block.
2040  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2041  const BasicBlock *SuccBB = TI->getSuccessor(succ);
2042  if (!isa<PHINode>(SuccBB->begin()))
2043  continue;
2044  MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2045 
2046  // If this terminator has multiple identical successors (common for
2047  // switches), only handle each succ once.
2048  if (!SuccsHandled.insert(SuccMBB).second)
2049  continue;
2050 
2051  MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2052 
2053  // At this point we know that there is a 1-1 correspondence between LLVM PHI
2054  // nodes and Machine PHI nodes, but the incoming operands have not been
2055  // emitted yet.
2056  for (BasicBlock::const_iterator I = SuccBB->begin();
2057  const auto *PN = dyn_cast<PHINode>(I); ++I) {
2058 
2059  // Ignore dead phi's.
2060  if (PN->use_empty())
2061  continue;
2062 
2063  // Only handle legal types. Two interesting things to note here. First,
2064  // by bailing out early, we may leave behind some dead instructions,
2065  // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2066  // own moves. Second, this check is necessary because FastISel doesn't
2067  // use CreateRegs to create registers, so it always creates
2068  // exactly one register for each non-void instruction.
2069  EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
2070  if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2071  // Handle integer promotions, though, because they're common and easy.
2072  if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2073     FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2074     return false;
2075  }
2076  }
2077 
2078  const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
2079 
2080  // Set the DebugLoc for the copy. Prefer the location of the operand
2081  // if there is one; use the location of the PHI otherwise.
2082  DbgLoc = PN->getDebugLoc();
2083  if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2084  DbgLoc = Inst->getDebugLoc();
2085 
2086  unsigned Reg = getRegForValue(PHIOp);
2087  if (!Reg) {
2088    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2089    return false;
2090  }
2091  FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2092  DbgLoc = DebugLoc();
2093  }
2094  }
2095 
2096  return true;
2097 }
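Editorial aside: the comment before handlePHINodesInSuccessorBlocks explains why PHI operands are recorded rather than written immediately. A minimal sketch of that record-then-patch pattern with hypothetical types (not the real FunctionLoweringInfo):

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct MachinePhiStub {}; // stand-in for a machine PHI instruction

    std::vector<std::pair<MachinePhiStub *, unsigned>> PHINodesToUpdate;

    // Remember the incoming vreg now; the operand is patched in later, once
    // the final machine-block layout of the IR block is known.
    void rememberPhiInput(MachinePhiStub *Phi, unsigned IncomingVReg) {
      PHINodesToUpdate.emplace_back(Phi, IncomingVReg);
    }

    // On a bail-out, drop the entries added for this block, mirroring the
    // resize-to-original-count calls in the function above.
    void rollbackPhiUpdates(std::size_t OrigCount) {
      PHINodesToUpdate.resize(OrigCount);
    }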
2098 
2099 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2100  assert(LI->hasOneUse() &&
2101  "tryToFoldLoad expected a LoadInst with a single use");
2102  // We know that the load has a single use, but don't know what it is. If it
2103  // isn't one of the folded instructions, then we can't succeed here. Handle
2104  // this by scanning the single-use users of the load until we get to FoldInst.
2105  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2106 
2107  const Instruction *TheUser = LI->user_back();
2108  while (TheUser != FoldInst && // Scan up until we find FoldInst.
2109  // Stay in the right block.
2110  TheUser->getParent() == FoldInst->getParent() &&
2111  --MaxUsers) { // Don't scan too far.
2112  // If there are multiple or no uses of this instruction, then bail out.
2113  if (!TheUser->hasOneUse())
2114  return false;
2115 
2116  TheUser = TheUser->user_back();
2117  }
2118 
2119  // If we didn't find the fold instruction, then we failed to collapse the
2120  // sequence.
2121  if (TheUser != FoldInst)
2122  return false;
2123 
2124  // Don't try to fold volatile loads. Target has to deal with alignment
2125  // constraints.
2126  if (LI->isVolatile())
2127  return false;
2128 
2129  // Figure out which vreg this is going into. If there is no assigned vreg yet
2130  // then there actually was no reference to it. Perhaps the load is referenced
2131  // by a dead instruction.
2132  unsigned LoadReg = getRegForValue(LI);
2133  if (!LoadReg)
2134  return false;
2135 
2136  // We can't fold if this vreg has no uses or more than one use. Multiple uses
2137  // may mean that the instruction got lowered to multiple MIs, or the use of
2138  // the loaded value ended up being multiple operands of the result.
2139  if (!MRI.hasOneUse(LoadReg))
2140  return false;
2141 
2142  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2143  MachineInstr *User = RI->getParent();
2144 
2145  // Set the insertion point properly. Folding the load can cause generation of
2146  // other random instructions (like sign extends) for addressing modes; make
2147  // sure they get inserted in a logical place before the new instruction.
2148  FuncInfo.InsertPt = User;
2149  FuncInfo.MBB = User->getParent();
2150 
2151  // Ask the target to try folding the load.
2152  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2153 }
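Editorial aside: the scan in tryToFoldLoad follows a chain of single-use instructions for at most a few steps. A standalone sketch of that bounded walk, using a hypothetical Node type rather than llvm::Instruction:

    struct Node {
      Node *OnlyUser = nullptr; // null when there are zero or many users
      int BlockId = 0;
    };

    // Follow the unique-user chain from the load; give up if it fans out,
    // leaves the block, or runs past the small scan budget.
    bool reachesFoldTarget(const Node *Load, const Node *FoldInst) {
      unsigned MaxUsers = 6; // same bound as the listing
      const Node *Cur = Load->OnlyUser;
      while (Cur && Cur != FoldInst && Cur->BlockId == FoldInst->BlockId &&
             --MaxUsers) {
        if (!Cur->OnlyUser)
          return false;
        Cur = Cur->OnlyUser;
      }
      return Cur == FoldInst;
    }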
2154 
2155 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2156  // Must be an add.
2157  if (!isa<AddOperator>(Add))
2158  return false;
2159  // Type size needs to match.
2160  if (DL.getTypeSizeInBits(GEP->getType()) !=
2161  DL.getTypeSizeInBits(Add->getType()))
2162  return false;
2163  // Must be in the same basic block.
2164  if (isa<Instruction>(Add) &&
2165  FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2166  return false;
2167  // Must have a constant operand.
2168  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2169 }
2170 
2171 MachineMemOperand *
2172 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2173  const Value *Ptr;
2174  Type *ValTy;
2175  unsigned Alignment;
2176  MachineMemOperand::Flags Flags;
2177  bool IsVolatile;
2178 
2179  if (const auto *LI = dyn_cast<LoadInst>(I)) {
2180  Alignment = LI->getAlignment();
2181  IsVolatile = LI->isVolatile();
2182  Flags = MachineMemOperand::MOLoad;
2183  Ptr = LI->getPointerOperand();
2184  ValTy = LI->getType();
2185  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2186  Alignment = SI->getAlignment();
2187  IsVolatile = SI->isVolatile();
2188  Flags = MachineMemOperand::MOStore;
2189  Ptr = SI->getPointerOperand();
2190  ValTy = SI->getValueOperand()->getType();
2191  } else
2192  return nullptr;
2193 
2194  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2195  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
2196  bool IsDereferenceable =
2197      I->getMetadata(LLVMContext::MD_dereferenceable) != nullptr;
2198  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2199 
2200  AAMDNodes AAInfo;
2201  I->getAAMetadata(AAInfo);
2202 
2203  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
2204  Alignment = DL.getABITypeAlignment(ValTy);
2205 
2206  unsigned Size = DL.getTypeStoreSize(ValTy);
2207 
2208  if (IsVolatile)
2209   Flags |= MachineMemOperand::MOVolatile;
2210  if (IsNonTemporal)
2211   Flags |= MachineMemOperand::MONonTemporal;
2212  if (IsDereferenceable)
2213   Flags |= MachineMemOperand::MODereferenceable;
2214  if (IsInvariant)
2215   Flags |= MachineMemOperand::MOInvariant;
2216 
2217  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2218  Alignment, AAInfo, Ranges);
2219 }
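Editorial aside: the flag computation above starts from the access kind and ORs in one bit per piece of IR metadata that was present. A plain-C++ sketch with a hypothetical flag enum (not the real MachineMemOperand::Flags):

    #include <cstdint>

    enum MemFlag : uint32_t {
      MOLoad = 1u << 0, MOStore = 1u << 1, MOVolatile = 1u << 2,
      MONonTemporal = 1u << 3, MODereferenceable = 1u << 4,
      MOInvariant = 1u << 5,
    };

    uint32_t buildMemFlags(bool IsStore, bool IsVolatile, bool IsNonTemporal,
                           bool IsDereferenceable, bool IsInvariant) {
      uint32_t Flags = IsStore ? MOStore : MOLoad; // base access kind
      if (IsVolatile)
        Flags |= MOVolatile;
      if (IsNonTemporal)
        Flags |= MONonTemporal;
      if (IsDereferenceable)
        Flags |= MODereferenceable;
      if (IsInvariant)
        Flags |= MOInvariant;
      return Flags;
    }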
2220 
2221 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2222  // If both operands are the same, then try to optimize or fold the cmp.
2223  CmpInst::Predicate Predicate = CI->getPredicate();
2224  if (CI->getOperand(0) != CI->getOperand(1))
2225  return Predicate;
2226 
2227  switch (Predicate) {
2228  default: llvm_unreachable("Invalid predicate!");
2229  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2230  case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2231  case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2232  case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2233  case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2234  case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2235  case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2236  case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2237  case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2238  case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2239  case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2240  case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2241  case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2242  case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2243  case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2244  case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2245 
2246  case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2247  case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2248  case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2249  case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2250  case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2251  case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2252  case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2253  case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2254  case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2255  case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2256  }
2257 
2258  return Predicate;
2259 }
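Editorial aside: the table above relies on a value compared with itself having a fixed answer except where NaNs are involved. A small runnable check of the key identities (plain C++, not LLVM API):

    #include <cassert>
    #include <cmath>

    int main() {
      double X = 1.5, Nan = std::nan("");
      // FCMP_OEQ x,x folds to FCMP_ORD: true exactly when x is not NaN.
      assert((X == X) == !std::isnan(X));
      assert((Nan == Nan) == !std::isnan(Nan));
      // FCMP_UNE x,x folds to FCMP_UNO: true exactly when x is NaN.
      assert((X != X) == std::isnan(X));
      // Integer predicates fold to constants: eq -> true, slt -> false.
      int I = 42;
      assert(I == I && !(I < I));
      return 0;
    }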
void setHasStackMap(bool s=true)
uint64_t CallInst * C
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:544
unsigned fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, unsigned Op2, bool Op2IsKill)
Emit a MachineInstr with three register operands and a result register in the given register class...
Definition: FastISel.cpp:1872
void setByValAlign(unsigned A)
const MachineInstrBuilder & add(const MachineOperand &MO) const
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:109
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:850
unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero...
Definition: FastISel.cpp:2022
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
bool hasLocalLinkage() const
Definition: GlobalValue.h:416
This instruction extracts a struct member or array element value from an aggregate value...
MachineConstantPool & MCP
Definition: FastISel.h:208
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1042
unsigned arg_size() const
Definition: CallSite.h:216
static const Value * getFNegArgument(const Value *BinOp)
bool hasDebugInfo() const
Returns true if valid debug info is present.
CallingConv::ID getCallingConv() const
Get the calling convention of the call.
Definition: CallSite.h:309
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
virtual unsigned fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:475
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
BasicBlock * getSuccessor(unsigned idx) const
Return the specified successor.
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
ImmutableCallSite * CS
Definition: FastISel.h:90
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:498
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:398
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:562
constexpr char IsVolatile[]
Key for Kernel::Arg::Metadata::mIsVolatile.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
unsigned getReg() const
getReg - Returns the register number.
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:613
This class represents a function call, abstracting a target machine&#39;s calling convention.
This file contains the declarations for metadata subclasses.
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:298
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:253
gep_type_iterator gep_type_end(const User *GEP)
unsigned less or equal
Definition: InstrTypes.h:886
unsigned less than
Definition: InstrTypes.h:885
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:866
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2172
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:697
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:876
void addSuccessorWithoutProb(MachineBasicBlock *Succ)
Add Succ as a successor of this MachineBasicBlock.
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1723
BasicBlock * getSuccessor(unsigned i) const
arg_iterator arg_end()
Definition: Function.h:604
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
STATISTIC(NumFunctions, "Total number of functions")
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:862
MachineModuleInfo & getMMI() const
SmallVector< unsigned, 4 > InRegs
Definition: FastISel.h:99
unsigned getCallFrameDestroyOpcode() const
An instruction for reading from memory.
Definition: Instructions.h:164
Hexagon Common GEP
bool CanLowerReturn
CanLowerReturn - true iff the function&#39;s return value can be lowered to registers.
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1736
CallingConv::ID getCallingConv() const
getCallingConv/setCallingConv - Get or set the calling convention of this function call...
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF)
Emit the floating-point constant +0.0 in a register using target- specific logic. ...
Definition: FastISel.h:482
void setPhysRegsDeadExcept(ArrayRef< unsigned > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
void computeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo &MMI)
Determine if any floating-point values are being passed to this variadic function, and set the MachineModuleInfo&#39;s usesVAFloatArgument flag if so.
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1717
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:130
static Constant * getNullValue(Type *Ty)
Constructor to create a &#39;0&#39; constant of arbitrary type.
Definition: Constants.cpp:207
DIExpression * getExpression() const
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:252
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1383
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:1922
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
Definition: APFloat.h:1069
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:871
MachineFunction * MF
Definition: FastISel.h:205
DenseMap< const Value *, unsigned > LocalValueMap
Definition: FastISel.h:203
unsigned fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:1900
ArrayRef< unsigned > getIndices() const
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:238
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:870
unsigned fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:1966
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2221
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:217
A description of a memory reference used in the backend.
void setHasPatchPoint(bool s=true)
unsigned getNumArgOperands() const
Return the number of call arguments.
TargetLoweringBase::ArgListTy ArgListTy
Definition: FastISel.h:70
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
Shift and rotation operations.
Definition: ISDOpcodes.h:378
Class to represent struct types.
Definition: DerivedTypes.h:201
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
unsigned fastEmitInst_i(unsigned MachineInstrOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:1992
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2155
DenseMap< const Value *, unsigned > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
IterTy arg_end() const
Definition: CallSite.h:549
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
unsigned fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill)
Emit a MachineInstr with one register operand and a result register in the given register class...
Definition: FastISel.cpp:1827
MachineInstr * EmitStartPt
The top most instruction in the current block that is allowed for emitting local variables.
Definition: FastISel.h:227
Reg
All possible values of the reg field in the ModR/M byte.
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:867
This file contains the simple types necessary to represent the attributes associated with functions a...
InstrTy * getInstruction() const
Definition: CallSite.h:89
The memory access is dereferenceable (i.e., doesn&#39;t trap).
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
void setByValSize(unsigned S)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const TargetMachine & TM
Definition: FastISel.h:210
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:633
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1127
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1282
unsigned getSizeInBits() const
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
Context object for machine code objects.
Definition: MCContext.h:59
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
Class to represent function types.
Definition: DerivedTypes.h:103
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
#define F(x, y, z)
Definition: MD5.cpp:55
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:98
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:1801
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:454
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr), and append generated machine instructions to the current block.
Definition: FastISel.cpp:1584
ArchType getArch() const
getArch - Get the parsed architecture type of this triple.
Definition: Triple.h:280
DILocalVariable * getVariable() const
Definition: IntrinsicInst.h:93
unsigned getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value...
Definition: FastISel.cpp:196
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
unsigned fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:1818
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:190
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:121
bool hasTrivialKill(const Value *V)
Test whether the given value has exactly one use.
Definition: FastISel.cpp:163
void setOrigAlign(unsigned A)
MachineInstr * getLastLocalValue()
Return the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:234
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:85
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
Value * getOperand(unsigned i) const
Definition: User.h:154
Class to represent pointers.
Definition: DerivedTypes.h:467
unsigned getKillRegState(bool B)
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:497
unsigned lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:309
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:131
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:229
MCContext & getContext() const
void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes...
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
The memory access is volatile.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
Definition: DataLayout.cpp:702
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void getAAMetadata(AAMDNodes &N, bool Merge=false) const
Fills the AAMDNodes structure with AA metadata from this instruction.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:142
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
virtual ~FastISel()
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:54
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, ImmutableCallSite &Call)
Definition: FastISel.h:105
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Machine Value Type.
bool hasName() const
Definition: Value.h:251
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
Simple binary floating point operators.
Definition: ISDOpcodes.h:259
Conditional or Unconditional Branch instruction.
Value * getAddress() const
Definition: IntrinsicInst.h:91
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
This is an important base class in LLVM.
Definition: Constant.h:42
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:376
Value * getValue() const
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:96
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:264
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:372
const MCPhysReg * ImplicitDefs
Definition: MCInstrDesc.h:173
size_t size() const
Definition: BasicBlock.h:262
MachineFrameInfo & MFI
Definition: FastISel.h:207
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1725
bool SkipTargetIndependentISel
Definition: FastISel.h:216
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:426
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:161
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We&#39;re checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2099
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:136
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:860
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned char TargetFlags=0)
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:727
const Triple & getTargetTriple() const
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:869
DIExpression * getExpression() const
Definition: IntrinsicInst.h:97
arg_iterator arg_begin()
Definition: Function.h:595
uint64_t getOffset() const
The memory access is non-temporal.
Class to represent integer types.
Definition: DerivedTypes.h:40
bool selectXRayCustomEvent(const CallInst *II)
Definition: FastISel.cpp:864
const TargetRegisterInfo & TRI
Definition: FastISel.h:214
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:877
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:194
Extended Value Type.
Definition: ValueTypes.h:34
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:527
bool selectFNeg(const User *I)
Emit an FNeg operation.
Definition: FastISel.cpp:1501
This class contains a discriminated union of information about pointers in memory operands...
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:875
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:95
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call...
Definition: FastISel.cpp:885
const TargetInstrInfo & TII
Definition: FastISel.h:212
MachineBasicBlock * MBB
MBB - The current block.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:472
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
signed greater than
Definition: InstrTypes.h:887
MachineInstr * LastLocalValue
The position of the last instruction for materializing constants for use in the current block...
Definition: FastISel.h:222
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:638
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge&#39;s probability, relative to other out-edges of the Src.
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:362
The memory access writes data.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:51
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:864
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:240
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1730
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:423
#define E
Definition: LargeTest.cpp:27
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1745
DenseMap< unsigned, unsigned > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
IterTy arg_begin() const
Definition: CallSite.h:545
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:874
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:63
Provides information about what library functions are available for the current target.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:27
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtains the branch weight and adds TrueMBB and FalseMBB to ...
Definition: FastISel.cpp:1482
const TargetLibraryInfo * LibInfo
Definition: FastISel.h:215
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:682
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:556
signed less than
Definition: InstrTypes.h:889
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:642
const size_t N
reg_iterator reg_begin(unsigned RegNo) const
unsigned fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
Emit a MachineInstr with two register operands and a result register in the given register class...
Definition: FastISel.cpp:1848
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:560
void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:320
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:225
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:167
void startNewBlock()
Set the current block to which generated machine instructions will be appended, and clear the local C...
Definition: FastISel.cpp:124
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
signed less or equal
Definition: InstrTypes.h:890
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1315
Target - Wrapper for Target specific information.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1740
SmallVector< unsigned, 16 > OutRegs
Definition: FastISel.h:97
const DataLayout & DL
Definition: FastISel.h:211
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:407
BranchProbabilityInfo * BPI
This file defines the FastISel class.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:241
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:444
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
bool use_empty(unsigned RegNo) const
use_empty - Return true if there are no instructions using the specified register.
bool isTailCall() const
DebugLoc DbgLoc
Definition: FastISel.h:209
bool selectCall(const User *Call)
Definition: FastISel.cpp:1081
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
Flags
Flags values. These may be or&#39;d together.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:139
The memory access reads data.
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:532
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:389
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:405
static std::vector< std::string > Flags
Definition: FlagsTest.cpp:8
Function * getCalledFunction() const
Return the function called, or null if this is an indirect function invocation.
Representation of each machine instruction.
Definition: MachineInstr.h:59
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:934
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering...
Definition: FastISel.cpp:1719
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:726
bool selectExtractValue(const User *I)
Definition: FastISel.cpp:1544
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:280
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:361
MachineRegisterInfo & MRI
Definition: FastISel.h:206
bool hasOneUse(unsigned RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:515
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:121
unsigned greater or equal
Definition: InstrTypes.h:884
This represents the llvm.dbg.value instruction.
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:907
static bool isFNeg(const Value *V, bool IgnoreZeroSign=false)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
StringRef getName() const
Return a constant reference to the value&#39;s name.
Definition: Value.cpp:218
TargetOptions Options
Definition: TargetMachine.h:95
Establish a view to a call site for examination.
Definition: CallSite.h:687
static MachineOperand CreateImm(int64_t Val)
#define I(x, y, z)
Definition: MD5.cpp:58
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
The memory access always returns the same value (or traps).
virtual unsigned fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:478
iterator end()
Definition: DenseMap.h:73
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:193
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:868
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool isUnconditional() const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block...
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:872
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:311
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const
Return the number of registers that this ValueType will eventually require.
Type * getType() const
Return the type of the instruction that generated this call site.
Definition: CallSite.h:261
const TargetLowering & TLI
Definition: FastISel.h:213
bool isReg() const
isReg - Tests if this is a MO_Register operand.
unsigned createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:1797
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:183
unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1754
unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:2008
MachineBasicBlock::iterator InsertPt
Definition: FastISel.h:312
void GetReturnInfo(Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags...
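A minimal sketch of how GetReturnInfo feeds TargetLowering::CanLowerReturn (see that entry below), loosely modelled on a call-lowering check; the surrounding objects are assumed to come from the caller, the function name returnFitsInRegisters is hypothetical, and header paths vary slightly across LLVM versions.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/Target/TargetLowering.h" // "llvm/CodeGen/TargetLowering.h" on newer trees

// Hypothetical helper: can values of RetTy be returned in registers under
// the given calling convention?
bool returnFitsInRegisters(llvm::Type *RetTy, llvm::AttributeList Attrs,
                           llvm::CallingConv::ID CC, bool IsVarArg,
                           llvm::MachineFunction &MF,
                           const llvm::TargetLowering &TLI,
                           const llvm::DataLayout &DL,
                           llvm::LLVMContext &Ctx) {
  llvm::SmallVector<llvm::ISD::OutputArg, 4> Outs;
  // Expand the IR return type into the EVTs and flags the backend will see.
  llvm::GetReturnInfo(RetTy, Attrs, Outs, TLI, DL);
  // Ask the target whether those pieces fit in the return registers.
  return TLI.CanLowerReturn(CC, MF, IsVarArg, Outs, Ctx);
}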
user_iterator user_begin()
Definition: Value.h:371
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1702
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual bool fastLowerArguments()
This method is called by target-independent code to do target-specific argument lowering.
Definition: FastISel.cpp:1715
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:863
LLVM Value Representation.
Definition: Value.h:73
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:873
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:388
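The two DataLayout entries in this index (StructLayout::getElementOffset near the top and getTypeStoreSize here) are often used together; the short sketch below, with a hypothetical helper name, shows the pattern.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"

// Hypothetical helper: byte offset of field FieldIdx within STy, plus the
// number of bytes a store of the whole struct may touch (via StoreSize).
uint64_t fieldOffsetAndStoreSize(const llvm::DataLayout &DL,
                                 llvm::StructType *STy, unsigned FieldIdx,
                                 uint64_t &StoreSize) {
  const llvm::StructLayout *SL = DL.getStructLayout(STy);
  StoreSize = DL.getTypeStoreSize(STy);
  return SL->getElementOffset(FieldIdx);
}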
FunctionType * getFunctionType() const
Definition: CallSite.h:317
static const Function * getParent(const Value *V)
#define DEBUG(X)
Definition: Debug.h:118
DILocalVariable * getVariable() const
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:109
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
bool hasOneUse() const
Return true if there is exactly one user of this value.
Definition: Value.h:408
unsigned greater than
Definition: InstrTypes.h:883
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
virtual bool fastSelectInstruction(const Instruction *I)=0
This method is called by target-independent code when the normal FastISel process fails to select an ...
unsigned fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
Definition: FastISel.cpp:1947
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty...
Definition: Type.cpp:98
Conversion operators.
Definition: ISDOpcodes.h:441
const TerminatorInst * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:120
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:204
const Value * stripInBoundsConstantOffsets() const
Strip off pointer casts and all-constant inbounds GEPs.
Definition: Value.cpp:535
void setIsDebug(bool Val=true)
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:450
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:126
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:865
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition: Analysis.cpp:37
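A small worked example of ComputeLinearIndex under an assumed nested struct shape (the function name and types are illustrative only): for {i32, {i32, i32}}, the member reached by indices {1, 1} is the third scalar in the flattened layout, so the linear index should be 2.

#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

// Illustrative only: builds {i32, {i32, i32}} and linearizes index path {1, 1}.
unsigned linearIndexExample(llvm::LLVMContext &Ctx) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  llvm::StructType *Inner = llvm::StructType::get(I32, I32);
  llvm::StructType *Outer = llvm::StructType::get(I32, Inner);
  unsigned Indices[] = {1, 1};
  // Flattened, Outer is {i32, i32, i32}; {1, 1} names the last scalar, so
  // the expected result is 2.
  return llvm::ComputeLinearIndex(Outer, Indices, Indices + 2);
}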
void fastEmitBranch(MachineBasicBlock *MBB, const DebugLoc &DL)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor...
Definition: FastISel.cpp:1462
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
This represents the llvm.dbg.declare instruction.
Definition: IntrinsicInst.h:89
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
Definition: ISDOpcodes.h:560
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
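A tiny sketch tying EVT::getIntegerVT to EVT::isSimple (documented a few entries above); the helper name is made up for illustration.

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"

// Illustrative only: i32 maps to a simple MVT, while an odd width such as
// i17 produces an extended EVT.
bool isSimpleIntegerVT(llvm::LLVMContext &Ctx, unsigned BitWidth) {
  llvm::EVT VT = llvm::EVT::getIntegerVT(Ctx, BitWidth);
  return VT.isSimple();
}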
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
static MachineOperand CreateFI(int Idx)
bool use_empty() const
Definition: Value.h:322
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:537
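A one-line illustration of Log2_64 in the spirit of strength-reducing a multiply by a power-of-two constant into a shift; the helper name is hypothetical.

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

// Hypothetical helper: shift amount equivalent to multiplying by Scale.
unsigned shiftAmountForScale(uint64_t Scale) {
  assert(llvm::isPowerOf2_64(Scale) && "expected a power-of-two scale");
  return llvm::Log2_64(Scale); // e.g. Scale == 8 yields 3
}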
Type * getElementType() const
Definition: DerivedTypes.h:486
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
Definition: Attributes.cpp:868
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:862
signed greater or equal
Definition: InstrTypes.h:888
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
This class contains meta information specific to a module.
This file describes how to lower LLVM code to machine code.
const BasicBlock * getParent() const
Definition: Instruction.h:66
unsigned InitializeRegForValue(const Value *V)
gep_type_iterator gep_type_begin(const User *GEP)
std::pair< unsigned, bool > getRegForGEPIndex(const Value *V)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:339