1 //===- FastISel.cpp - Implementation of the FastISel class ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the FastISel class.
11 //
12 // "Fast" instruction selection is designed to emit very poor code quickly.
13 // Also, it is not designed to be able to do much lowering, so most illegal
14 // types (e.g. i64 on 32-bit targets) and operations are not supported. It is
15 // also not intended to be able to do much optimization, except in a few cases
16 // where doing optimizations reduces overall compile time. For example, folding
17 // constants into immediate fields is often done, because it's cheap and it
18 // reduces the number of instructions later phases have to examine.
19 //
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
24 //
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
32 // in -O0 compiles.
33 //
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
39 //
40 //===----------------------------------------------------------------------===//
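//
// As a concrete illustration of the constant folding mentioned above (a
// sketch, not part of the original header): given IR such as
//
//   %sum = add i32 %x, 42
//
// fast-isel tries to select a single register/immediate instruction through
// the fastEmit_ri hooks (e.g. "addl $42, %reg" on x86) instead of first
// materializing 42 into its own virtual register, which keeps the instruction
// count low for the later machine passes.
//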
41 
42 #include "llvm/CodeGen/FastISel.h"
43 #include "llvm/ADT/APFloat.h"
44 #include "llvm/ADT/APSInt.h"
45 #include "llvm/ADT/DenseMap.h"
46 #include "llvm/ADT/Optional.h"
47 #include "llvm/ADT/SmallPtrSet.h"
48 #include "llvm/ADT/SmallString.h"
49 #include "llvm/ADT/SmallVector.h"
50 #include "llvm/ADT/Statistic.h"
53 #include "llvm/CodeGen/Analysis.h"
65 #include "llvm/CodeGen/StackMaps.h"
67 #include "llvm/IR/Argument.h"
68 #include "llvm/IR/Attributes.h"
69 #include "llvm/IR/BasicBlock.h"
70 #include "llvm/IR/CallSite.h"
71 #include "llvm/IR/CallingConv.h"
72 #include "llvm/IR/Constant.h"
73 #include "llvm/IR/Constants.h"
74 #include "llvm/IR/DataLayout.h"
75 #include "llvm/IR/DebugInfo.h"
76 #include "llvm/IR/DebugLoc.h"
77 #include "llvm/IR/DerivedTypes.h"
78 #include "llvm/IR/Function.h"
80 #include "llvm/IR/GlobalValue.h"
81 #include "llvm/IR/InlineAsm.h"
82 #include "llvm/IR/InstrTypes.h"
83 #include "llvm/IR/Instruction.h"
84 #include "llvm/IR/Instructions.h"
85 #include "llvm/IR/IntrinsicInst.h"
86 #include "llvm/IR/LLVMContext.h"
87 #include "llvm/IR/Mangler.h"
88 #include "llvm/IR/Metadata.h"
89 #include "llvm/IR/Operator.h"
90 #include "llvm/IR/Type.h"
91 #include "llvm/IR/User.h"
92 #include "llvm/IR/Value.h"
93 #include "llvm/MC/MCContext.h"
94 #include "llvm/MC/MCInstrDesc.h"
95 #include "llvm/MC/MCRegisterInfo.h"
96 #include "llvm/Support/Casting.h"
97 #include "llvm/Support/Debug.h"
106 #include <algorithm>
107 #include <cassert>
108 #include <cstdint>
109 #include <iterator>
110 #include <utility>
111 
112 using namespace llvm;
113 
114 #define DEBUG_TYPE "isel"
115 
116 STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
117  "target-independent selector");
118 STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
119  "target-specific selector");
120 STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
121 
122 /// Set the current block to which generated machine instructions will be
123 /// appended, and clear the local CSE map.
124 void FastISel::startNewBlock() {
125  LocalValueMap.clear();
126 
127  // Instructions are appended to FuncInfo.MBB. If the basic block already
128  // contains labels or copies, use the last instruction as the last local
129  // value.
130  EmitStartPt = nullptr;
131  if (!FuncInfo.MBB->empty())
132  EmitStartPt = &FuncInfo.MBB->back();
133  LastLocalValue = EmitStartPt;
134 }
135 
136 bool FastISel::lowerArguments() {
137  if (!FuncInfo.CanLowerReturn)
138  // Fall back to SDISel argument lowering code to deal with sret pointer
139  // parameter.
140  return false;
141 
142  if (!fastLowerArguments())
143  return false;
144 
145  // Enter arguments into ValueMap for uses in non-entry BBs.
146  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
147  E = FuncInfo.Fn->arg_end();
148  I != E; ++I) {
149  DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
150  assert(VI != LocalValueMap.end() && "Missed an argument?");
151  FuncInfo.ValueMap[&*I] = VI->second;
152  }
153  return true;
154 }
155 
156 void FastISel::flushLocalValueMap() {
157  LocalValueMap.clear();
158  LastLocalValue = EmitStartPt;
159  recomputeInsertPt();
160  SavedInsertPt = FuncInfo.InsertPt;
161 }
162 
163 bool FastISel::hasTrivialKill(const Value *V) {
164  // Don't consider constants or arguments to have trivial kills.
165  const Instruction *I = dyn_cast<Instruction>(V);
166  if (!I)
167  return false;
168 
169  // No-op casts are trivially coalesced by fast-isel.
170  if (const auto *Cast = dyn_cast<CastInst>(I))
171  if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
172  !hasTrivialKill(Cast->getOperand(0)))
173  return false;
174 
175  // Even if the value has only one use in the LLVM IR, it is possible that
176  // FastISel might fold the use into another instruction, in which case there is
177  // more than one use at the Machine Instruction level.
178  unsigned Reg = lookUpRegForValue(V);
179  if (Reg && !MRI.use_empty(Reg))
180  return false;
181 
182  // GEPs with all zero indices are trivially coalesced by fast-isel.
183  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
184  if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
185  return false;
186 
187  // Only instructions with a single use in the same basic block are considered
188  // to have trivial kills.
189  return I->hasOneUse() &&
190  !(I->getOpcode() == Instruction::BitCast ||
191  I->getOpcode() == Instruction::PtrToInt ||
192  I->getOpcode() == Instruction::IntToPtr) &&
193  cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
194 }
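//
// Illustrative example (a sketch, not from the original source): in
//
//   %v = load i32, i32* %p
//   %s = add i32 %v, 1        ; only use of %v, same basic block
//
// %v has a trivial kill at the add, so the add may mark its use of %v's vreg
// as killed. If %v fed a second instruction, or its single use lived in
// another basic block, hasTrivialKill would return false.
//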
195 
196 unsigned FastISel::getRegForValue(const Value *V) {
197  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
198  // Don't handle non-simple values in FastISel.
199  if (!RealVT.isSimple())
200  return 0;
201 
202  // Ignore illegal types. We must do this before looking up the value
203  // in ValueMap because Arguments are given virtual registers regardless
204  // of whether FastISel can handle them.
205  MVT VT = RealVT.getSimpleVT();
206  if (!TLI.isTypeLegal(VT)) {
207  // Handle integer promotions, though, because they're common and easy.
208  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
209  VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
210  else
211  return 0;
212  }
213 
214  // Look up the value to see if we already have a register for it.
215  unsigned Reg = lookUpRegForValue(V);
216  if (Reg)
217  return Reg;
218 
219  // In bottom-up mode, just create the virtual register which will be used
220  // to hold the value. It will be materialized later.
221  if (isa<Instruction>(V) &&
222  (!isa<AllocaInst>(V) ||
223  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
224  return FuncInfo.InitializeRegForValue(V);
225 
226  SavePoint SaveInsertPt = enterLocalValueArea();
227 
228  // Materialize the value in a register. Emit any instructions in the
229  // local value area.
230  Reg = materializeRegForValue(V, VT);
231 
232  leaveLocalValueArea(SaveInsertPt);
233 
234  return Reg;
235 }
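//
// Example of the promotion path above (a sketch, assuming a target where i8 is
// not a legal register type): for
//
//   %b = add i8 %x, %y
//
// getRegForValue asks TLI.getTypeToTransformTo for the promoted type (e.g.
// i32) and hands back a register of that wider type; the upper bits are then
// only meaningful once an explicit zero or sign extension is emitted.
//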
236 
237 unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
238  unsigned Reg = 0;
239  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
240  if (CI->getValue().getActiveBits() <= 64)
241  Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
242  } else if (isa<AllocaInst>(V))
243  Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
244  else if (isa<ConstantPointerNull>(V))
245  // Translate this as an integer zero so that it can be
246  // local-CSE'd with actual integer zeros.
247  Reg = getRegForValue(
249  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
250  if (CF->isNullValue())
251  Reg = fastMaterializeFloatZero(CF);
252  else
253  // Try to emit the constant directly.
254  Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
255 
256  if (!Reg) {
257  // Try to emit the constant by using an integer constant with a cast.
258  const APFloat &Flt = CF->getValueAPF();
259  EVT IntVT = TLI.getPointerTy(DL);
260  uint32_t IntBitWidth = IntVT.getSizeInBits();
261  APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
262  bool isExact;
263  (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
264  if (isExact) {
265  unsigned IntegerReg =
267  if (IntegerReg != 0)
268  Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
269  /*Kill=*/false);
270  }
271  }
272  } else if (const auto *Op = dyn_cast<Operator>(V)) {
273  if (!selectOperator(Op, Op->getOpcode()))
274  if (!isa<Instruction>(Op) ||
275  !fastSelectInstruction(cast<Instruction>(Op)))
276  return 0;
277  Reg = lookUpRegForValue(Op);
278  } else if (isa<UndefValue>(V)) {
281  TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
282  }
283  return Reg;
284 }
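//
// Sketch of the floating-point fallback above (not from the original source):
// a constant such as the 4.0 in
//
//   %f = fadd double %x, 4.0
//
// that the target cannot emit directly may still be materialized by loading
// the integer 4 (ISD::Constant) and converting it with ISD::SINT_TO_FP,
// because 4.0 round-trips to the integer 4 exactly (isExact above).
//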
285 
286 /// Helper for getRegForValue. This function is called when the value isn't
287 /// already available in a register and must be materialized with new
288 /// instructions.
289 unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
290  unsigned Reg = 0;
291  // Give the target-specific code a try first.
292  if (isa<Constant>(V))
293  Reg = fastMaterializeConstant(cast<Constant>(V));
294 
295  // If target-specific code couldn't or didn't want to handle the value, then
296  // give target-independent code a try.
297  if (!Reg)
298  Reg = materializeConstant(V, VT);
299 
300  // Don't cache constant materializations in the general ValueMap.
301  // To do so would require tracking what uses they dominate.
302  if (Reg) {
303  LocalValueMap[V] = Reg;
305  }
306  return Reg;
307 }
308 
309 unsigned FastISel::lookUpRegForValue(const Value *V) {
310  // Look up the value to see if we already have a register for it. We
311  // cache values defined by Instructions across blocks, and other values
312  // only locally. This is because Instructions already have the SSA
313  // def-dominates-use requirement enforced.
315  if (I != FuncInfo.ValueMap.end())
316  return I->second;
317  return LocalValueMap[V];
318 }
319 
320 void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
321  if (!isa<Instruction>(I)) {
322  LocalValueMap[I] = Reg;
323  return;
324  }
325 
326  unsigned &AssignedReg = FuncInfo.ValueMap[I];
327  if (AssignedReg == 0)
328  // Use the new register.
329  AssignedReg = Reg;
330  else if (Reg != AssignedReg) {
331  // Arrange for uses of AssignedReg to be replaced by uses of Reg.
332  for (unsigned i = 0; i < NumRegs; i++)
333  FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
334 
335  AssignedReg = Reg;
336  }
337 }
338 
339 std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
340  unsigned IdxN = getRegForValue(Idx);
341  if (IdxN == 0)
342  // Unhandled operand. Halt "fast" selection and bail.
343  return std::pair<unsigned, bool>(0, false);
344 
345  bool IdxNIsKill = hasTrivialKill(Idx);
346 
347  // If the index is smaller or larger than intptr_t, truncate or extend it.
348  MVT PtrVT = TLI.getPointerTy(DL);
349  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
350  if (IdxVT.bitsLT(PtrVT)) {
351  IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
352  IdxNIsKill);
353  IdxNIsKill = true;
354  } else if (IdxVT.bitsGT(PtrVT)) {
355  IdxN =
356  fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
357  IdxNIsKill = true;
358  }
359  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
360 }
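//
// Illustrative example (a sketch): on a 64-bit target, the i32 index in
//
//   %p = getelementptr i32, i32* %base, i32 %idx
//
// has IdxVT (i32) smaller than the pointer type (i64), so the index register
// is widened with ISD::SIGN_EXTEND before it is scaled and added; an i128
// index would instead be narrowed with ISD::TRUNCATE.
//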
361 
363  if (getLastLocalValue()) {
365  FuncInfo.MBB = FuncInfo.InsertPt->getParent();
366  ++FuncInfo.InsertPt;
367  } else
369 
370  // Now skip past any EH_LABELs, which must remain at the beginning.
371  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
372  FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
373  ++FuncInfo.InsertPt;
374 }
375 
378  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
379  "Invalid iterator!");
380  while (I != E) {
381  MachineInstr *Dead = &*I;
382  ++I;
383  Dead->eraseFromParent();
384  ++NumFastIselDead;
385  }
387 }
388 
391  DebugLoc OldDL = DbgLoc;
393  DbgLoc = DebugLoc();
394  SavePoint SP = {OldInsertPt, OldDL};
395  return SP;
396 }
397 
399  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
400  LastLocalValue = &*std::prev(FuncInfo.InsertPt);
401 
402  // Restore the previous insert position.
403  FuncInfo.InsertPt = OldInsertPt.InsertPt;
404  DbgLoc = OldInsertPt.DL;
405 }
406 
407 bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
408  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
409  if (VT == MVT::Other || !VT.isSimple())
410  // Unhandled type. Halt "fast" selection and bail.
411  return false;
412 
413  // We only handle legal types. For example, on x86-32 the instruction
414  // selector contains all of the 64-bit instructions from x86-64,
415  // under the assumption that i64 won't be used if the target doesn't
416  // support it.
417  if (!TLI.isTypeLegal(VT)) {
418  // MVT::i1 is special. Allow AND, OR, or XOR because they
419  // don't require additional zeroing, which makes them easy.
420  if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
421  ISDOpcode == ISD::XOR))
422  VT = TLI.getTypeToTransformTo(I->getContext(), VT);
423  else
424  return false;
425  }
426 
427  // Check if the first operand is a constant, and handle it as "ri". At -O0,
428  // we don't have anything that canonicalizes operand order.
429  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
430  if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
431  unsigned Op1 = getRegForValue(I->getOperand(1));
432  if (!Op1)
433  return false;
434  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
435 
436  unsigned ResultReg =
437  fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
438  CI->getZExtValue(), VT.getSimpleVT());
439  if (!ResultReg)
440  return false;
441 
442  // We successfully emitted code for the given LLVM Instruction.
443  updateValueMap(I, ResultReg);
444  return true;
445  }
446 
447  unsigned Op0 = getRegForValue(I->getOperand(0));
448  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
449  return false;
450  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
451 
452  // Check if the second operand is a constant and handle it appropriately.
453  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
454  uint64_t Imm = CI->getSExtValue();
455 
456  // Transform "sdiv exact X, 8" -> "sra X, 3".
457  if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
458  cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
459  Imm = Log2_64(Imm);
460  ISDOpcode = ISD::SRA;
461  }
462 
463  // Transform "urem x, pow2" -> "and x, pow2-1".
464  if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
465  isPowerOf2_64(Imm)) {
466  --Imm;
467  ISDOpcode = ISD::AND;
468  }
469 
470  unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
471  Op0IsKill, Imm, VT.getSimpleVT());
472  if (!ResultReg)
473  return false;
474 
475  // We successfully emitted code for the given LLVM Instruction.
476  updateValueMap(I, ResultReg);
477  return true;
478  }
479 
480  unsigned Op1 = getRegForValue(I->getOperand(1));
481  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
482  return false;
483  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
484 
485  // Now we have both operands in registers. Emit the instruction.
486  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
487  ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
488  if (!ResultReg)
489  // Target-specific code wasn't able to find a machine opcode for
490  // the given ISD opcode and type. Halt "fast" selection and bail.
491  return false;
492 
493  // We successfully emitted code for the given LLVM Instruction.
494  updateValueMap(I, ResultReg);
495  return true;
496 }
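//
// The two strength reductions above, shown on IR (a sketch):
//
//   %q = sdiv exact i32 %x, 8    ; becomes  sra %x, 3
//   %r = urem i32 %x, 16         ; becomes  and %x, 15
//
// Both rewrites require the immediate to be a power of two, and the sdiv case
// additionally requires the 'exact' flag so that the division has no
// remainder and an arithmetic shift gives the same result.
//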
497 
499  unsigned N = getRegForValue(I->getOperand(0));
500  if (!N) // Unhandled operand. Halt "fast" selection and bail.
501  return false;
502  bool NIsKill = hasTrivialKill(I->getOperand(0));
503 
504  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
505  // into a single N = N + TotalOffset.
506  uint64_t TotalOffs = 0;
507  // FIXME: What's a good SWAG number for MaxOffs?
508  uint64_t MaxOffs = 2048;
509  MVT VT = TLI.getPointerTy(DL);
510  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
511  GTI != E; ++GTI) {
512  const Value *Idx = GTI.getOperand();
513  if (StructType *StTy = GTI.getStructTypeOrNull()) {
514  uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
515  if (Field) {
516  // N = N + Offset
517  TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
518  if (TotalOffs >= MaxOffs) {
519  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
520  if (!N) // Unhandled operand. Halt "fast" selection and bail.
521  return false;
522  NIsKill = true;
523  TotalOffs = 0;
524  }
525  }
526  } else {
527  Type *Ty = GTI.getIndexedType();
528 
529  // If this is a constant subscript, handle it quickly.
530  if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
531  if (CI->isZero())
532  continue;
533  // N = N + Offset
534  uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
535  TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
536  if (TotalOffs >= MaxOffs) {
537  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
538  if (!N) // Unhandled operand. Halt "fast" selection and bail.
539  return false;
540  NIsKill = true;
541  TotalOffs = 0;
542  }
543  continue;
544  }
545  if (TotalOffs) {
546  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
547  if (!N) // Unhandled operand. Halt "fast" selection and bail.
548  return false;
549  NIsKill = true;
550  TotalOffs = 0;
551  }
552 
553  // N = N + Idx * ElementSize;
554  uint64_t ElementSize = DL.getTypeAllocSize(Ty);
555  std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
556  unsigned IdxN = Pair.first;
557  bool IdxNIsKill = Pair.second;
558  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
559  return false;
560 
561  if (ElementSize != 1) {
562  IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
563  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
564  return false;
565  IdxNIsKill = true;
566  }
567  N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
568  if (!N) // Unhandled operand. Halt "fast" selection and bail.
569  return false;
570  }
571  }
572  if (TotalOffs) {
573  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
574  if (!N) // Unhandled operand. Halt "fast" selection and bail.
575  return false;
576  }
577 
578  // We successfully emitted code for the given LLVM Instruction.
579  updateValueMap(I, N);
580  return true;
581 }
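//
// Worked example of the offset coalescing above (a sketch, assuming the usual
// struct layout): for
//
//   %p = getelementptr {i32, i32}, {i32, i32}* %s, i32 0, i32 1
//
// the leading zero index is skipped and the struct field contributes a
// constant 4-byte offset, so the whole GEP folds into a single
// N = N + TotalOffs addition emitted at the end instead of one add per index.
//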
582 
583 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
584  const CallInst *CI, unsigned StartIdx) {
585  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
586  Value *Val = CI->getArgOperand(i);
587  // Check for constants and encode them with a StackMaps::ConstantOp prefix.
588  if (const auto *C = dyn_cast<ConstantInt>(Val)) {
589  Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
590  Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
591  } else if (isa<ConstantPointerNull>(Val)) {
592  Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
594  } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
595  // Values coming from a stack location also require a special encoding,
596  // but that is added later on by the target specific frame index
597  // elimination implementation.
598  auto SI = FuncInfo.StaticAllocaMap.find(AI);
599  if (SI != FuncInfo.StaticAllocaMap.end())
600  Ops.push_back(MachineOperand::CreateFI(SI->second));
601  else
602  return false;
603  } else {
604  unsigned Reg = getRegForValue(Val);
605  if (!Reg)
606  return false;
607  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
608  }
609  }
610  return true;
611 }
612 
614  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
615  // [live variables...])
617  "Stackmap cannot return a value.");
618 
619  // The stackmap intrinsic only records the live variables (the arguments
620  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
621  // intrinsic, this won't be lowered to a function call. This means we don't
622  // have to worry about calling conventions and target-specific lowering code.
623  // Instead we perform the call lowering right here.
624  //
625  // CALLSEQ_START(0, 0...)
626  // STACKMAP(id, nbytes, ...)
627  // CALLSEQ_END(0, 0)
628  //
630 
631  // Add the <id> and <numBytes> constants.
632  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
633  "Expected a constant integer.");
634  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
635  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
636 
637  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
638  "Expected a constant integer.");
639  const auto *NumBytes =
640  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
641  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
642 
643  // Push live variables for the stack map (skipping the first two arguments
644  // <id> and <numBytes>).
645  if (!addStackMapLiveVars(Ops, I, 2))
646  return false;
647 
648  // We are not adding any register mask info here, because the stackmap doesn't
649  // clobber anything.
650 
651  // Add scratch registers as implicit def and early clobber.
653  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
654  for (unsigned i = 0; ScratchRegs[i]; ++i)
655  Ops.push_back(MachineOperand::CreateReg(
656  ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
657  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
658 
659  // Issue CALLSEQ_START
660  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
661  auto Builder =
662  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
663  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
664  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
665  Builder.addImm(0);
666 
667  // Issue STACKMAP.
669  TII.get(TargetOpcode::STACKMAP));
670  for (auto const &MO : Ops)
671  MIB.add(MO);
672 
673  // Issue CALLSEQ_END
674  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
675  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
676  .addImm(0)
677  .addImm(0);
678 
679  // Inform the Frame Information that we have a stackmap in this function.
681 
682  return true;
683 }
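//
// Example call handled by the code above (a sketch):
//
//   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 12345, i32 8,
//                                                         i32 %a, i64 %b)
//
// The id (12345) and the shadow byte count (8) become immediates on the
// STACKMAP pseudo, %a and %b are recorded as live values, and the whole thing
// is bracketed by CALLSEQ_START/CALLSEQ_END without ever becoming a real call.
//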
684 
685 /// \brief Lower an argument list according to the target calling convention.
686 ///
687 /// This is a helper for lowering intrinsics that follow a target calling
688 /// convention or require stack pointer adjustment. Only a subset of the
689 /// intrinsic's operands need to participate in the calling convention.
690 bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
691  unsigned NumArgs, const Value *Callee,
692  bool ForceRetVoidTy, CallLoweringInfo &CLI) {
693  ArgListTy Args;
694  Args.reserve(NumArgs);
695 
696  // Populate the argument list.
697  ImmutableCallSite CS(CI);
698  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
699  Value *V = CI->getOperand(ArgI);
700 
701  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
702 
703  ArgListEntry Entry;
704  Entry.Val = V;
705  Entry.Ty = V->getType();
706  Entry.setAttributes(&CS, ArgIdx);
707  Args.push_back(Entry);
708  }
709 
710  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
711  : CI->getType();
712  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
713 
714  return lowerCallTo(CLI);
715 }
716 
718  const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
719  StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
720  SmallString<32> MangledName;
721  Mangler::getNameWithPrefix(MangledName, Target, DL);
722  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
723  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
724 }
725 
727  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
728  // i32 <numBytes>,
729  // i8* <target>,
730  // i32 <numArgs>,
731  // [Args...],
732  // [live variables...])
734  bool IsAnyRegCC = CC == CallingConv::AnyReg;
735  bool HasDef = !I->getType()->isVoidTy();
737 
738  // Get the real number of arguments participating in the call <numArgs>
739  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
740  "Expected a constant integer.");
741  const auto *NumArgsVal =
742  cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
743  unsigned NumArgs = NumArgsVal->getZExtValue();
744 
745  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
746  // This includes all meta-operands up to but not including CC.
747  unsigned NumMetaOpers = PatchPointOpers::CCPos;
748  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
749  "Not enough arguments provided to the patchpoint intrinsic");
750 
751  // For AnyRegCC the arguments are lowered later on manually.
752  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
753  CallLoweringInfo CLI;
754  CLI.setIsPatchPoint();
755  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
756  return false;
757 
758  assert(CLI.Call && "No call instruction specified.");
759 
761 
762  // Add an explicit result reg if we use the anyreg calling convention.
763  if (IsAnyRegCC && HasDef) {
764  assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
766  CLI.NumResultRegs = 1;
767  Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
768  }
769 
770  // Add the <id> and <numBytes> constants.
771  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
772  "Expected a constant integer.");
773  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
774  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
775 
776  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
777  "Expected a constant integer.");
778  const auto *NumBytes =
779  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
780  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
781 
782  // Add the call target.
783  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
784  uint64_t CalleeConstAddr =
785  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
786  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
787  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
788  if (C->getOpcode() == Instruction::IntToPtr) {
789  uint64_t CalleeConstAddr =
790  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
791  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
792  } else
793  llvm_unreachable("Unsupported ConstantExpr.");
794  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
796  } else if (isa<ConstantPointerNull>(Callee))
798  else
799  llvm_unreachable("Unsupported callee address.");
800 
801  // Adjust <numArgs> to account for any arguments that have been passed on
802  // the stack instead.
803  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
804  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
805 
806  // Add the calling convention
807  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
808 
809  // Add the arguments we omitted previously. The register allocator should
810  // place these in any free register.
811  if (IsAnyRegCC) {
812  for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
813  unsigned Reg = getRegForValue(I->getArgOperand(i));
814  if (!Reg)
815  return false;
816  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
817  }
818  }
819 
820  // Push the arguments from the call instruction.
821  for (auto Reg : CLI.OutRegs)
822  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
823 
824  // Push live variables for the stack map.
825  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
826  return false;
827 
828  // Push the register mask info.
831 
832  // Add scratch registers as implicit def and early clobber.
833  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
834  for (unsigned i = 0; ScratchRegs[i]; ++i)
836  ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
837  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
838 
839  // Add implicit defs (return values).
840  for (auto Reg : CLI.InRegs)
841  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
842  /*IsImpl=*/true));
843 
844  // Insert the patchpoint instruction before the call generated by the target.
846  TII.get(TargetOpcode::PATCHPOINT));
847 
848  for (auto &MO : Ops)
849  MIB.add(MO);
850 
851  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
852 
853  // Delete the original call instruction.
854  CLI.Call->eraseFromParent();
855 
856  // Inform the Frame Information that we have a patchpoint in this function.
858 
859  if (CLI.NumResultRegs)
861  return true;
862 }
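//
// Example patchpoint handled above (a sketch):
//
//   %r = call i64 (i64, i32, i8*, i32, ...)
//        @llvm.experimental.patchpoint.i64(i64 7, i32 15, i8* %target, i32 2,
//                                          i64 %x, i64 %y, i64 %live)
//
// Here id=7, numBytes=15, the two operands after <numArgs> (%x, %y) are
// lowered as real call arguments, and %live is only recorded in the stack map.
// Under the anyregcc convention the call arguments are instead handed to the
// register allocator and encoded like live values.
//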
863 
865  const auto &Triple = TM.getTargetTriple();
867  return true; // don't do anything to this instruction.
870  /*IsDef=*/false));
872  /*IsDef=*/false));
873  MachineInstrBuilder MIB =
875  TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
876  for (auto &MO : Ops)
877  MIB.add(MO);
878  // Insert the Patchable Event Call instruction, that gets lowered properly.
879  return true;
880 }
881 
882 
883 /// Returns an AttributeList representing the attributes applied to the return
884 /// value of the given call.
887  if (CLI.RetSExt)
888  Attrs.push_back(Attribute::SExt);
889  if (CLI.RetZExt)
890  Attrs.push_back(Attribute::ZExt);
891  if (CLI.IsInReg)
892  Attrs.push_back(Attribute::InReg);
893 
895  Attrs);
896 }
897 
898 bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
899  unsigned NumArgs) {
900  MCContext &Ctx = MF->getContext();
901  SmallString<32> MangledName;
902  Mangler::getNameWithPrefix(MangledName, SymName, DL);
903  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
904  return lowerCallTo(CI, Sym, NumArgs);
905 }
906 
908  unsigned NumArgs) {
909  ImmutableCallSite CS(CI);
910 
911  FunctionType *FTy = CS.getFunctionType();
912  Type *RetTy = CS.getType();
913 
914  ArgListTy Args;
915  Args.reserve(NumArgs);
916 
917  // Populate the argument list.
918  // Attributes for args start at offset 1, after the return attribute.
919  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
920  Value *V = CI->getOperand(ArgI);
921 
922  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
923 
924  ArgListEntry Entry;
925  Entry.Val = V;
926  Entry.Ty = V->getType();
927  Entry.setAttributes(&CS, ArgI);
928  Args.push_back(Entry);
929  }
931 
932  CallLoweringInfo CLI;
933  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
934 
935  return lowerCallTo(CLI);
936 }
937 
939  // Handle the incoming return values from the call.
940  CLI.clearIns();
941  SmallVector<EVT, 4> RetTys;
942  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
943 
945  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
946 
947  bool CanLowerReturn = TLI.CanLowerReturn(
948  CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
949 
950  // FIXME: sret demotion isn't supported yet - bail out.
951  if (!CanLowerReturn)
952  return false;
953 
954  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
955  EVT VT = RetTys[I];
956  MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
957  unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
958  for (unsigned i = 0; i != NumRegs; ++i) {
959  ISD::InputArg MyFlags;
960  MyFlags.VT = RegisterVT;
961  MyFlags.ArgVT = VT;
962  MyFlags.Used = CLI.IsReturnValueUsed;
963  if (CLI.RetSExt)
964  MyFlags.Flags.setSExt();
965  if (CLI.RetZExt)
966  MyFlags.Flags.setZExt();
967  if (CLI.IsInReg)
968  MyFlags.Flags.setInReg();
969  CLI.Ins.push_back(MyFlags);
970  }
971  }
972 
973  // Handle all of the outgoing arguments.
974  CLI.clearOuts();
975  for (auto &Arg : CLI.getArgs()) {
976  Type *FinalType = Arg.Ty;
977  if (Arg.IsByVal)
978  FinalType = cast<PointerType>(Arg.Ty)->getElementType();
979  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
980  FinalType, CLI.CallConv, CLI.IsVarArg);
981 
983  if (Arg.IsZExt)
984  Flags.setZExt();
985  if (Arg.IsSExt)
986  Flags.setSExt();
987  if (Arg.IsInReg)
988  Flags.setInReg();
989  if (Arg.IsSRet)
990  Flags.setSRet();
991  if (Arg.IsSwiftSelf)
992  Flags.setSwiftSelf();
993  if (Arg.IsSwiftError)
994  Flags.setSwiftError();
995  if (Arg.IsByVal)
996  Flags.setByVal();
997  if (Arg.IsInAlloca) {
998  Flags.setInAlloca();
999  // Set the byval flag for CCAssignFn callbacks that don't know about
1000  // inalloca. This way we can know how many bytes we should've allocated
1001  // and how many bytes a callee cleanup function will pop. If we port
1002  // inalloca to more targets, we'll have to add custom inalloca handling in
1003  // the various CC lowering callbacks.
1004  Flags.setByVal();
1005  }
1006  if (Arg.IsByVal || Arg.IsInAlloca) {
1007  PointerType *Ty = cast<PointerType>(Arg.Ty);
1008  Type *ElementTy = Ty->getElementType();
1009  unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
1010  // For ByVal, the alignment should come from the frontend. The backend will
1011  // guess if this info is not there, but there are cases it cannot get right.
1012  unsigned FrameAlign = Arg.Alignment;
1013  if (!FrameAlign)
1014  FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
1015  Flags.setByValSize(FrameSize);
1016  Flags.setByValAlign(FrameAlign);
1017  }
1018  if (Arg.IsNest)
1019  Flags.setNest();
1020  if (NeedsRegBlock)
1021  Flags.setInConsecutiveRegs();
1022  unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
1023  Flags.setOrigAlign(OriginalAlignment);
1024 
1025  CLI.OutVals.push_back(Arg.Val);
1026  CLI.OutFlags.push_back(Flags);
1027  }
1028 
1029  if (!fastLowerCall(CLI))
1030  return false;
1031 
1032  // Set all unused physreg defs as dead.
1033  assert(CLI.Call && "No call instruction specified.");
1034  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
1035 
1036  if (CLI.NumResultRegs && CLI.CS)
1038 
1039  return true;
1040 }
1041 
1043  ImmutableCallSite CS(CI);
1044 
1045  FunctionType *FuncTy = CS.getFunctionType();
1046  Type *RetTy = CS.getType();
1047 
1048  ArgListTy Args;
1049  ArgListEntry Entry;
1050  Args.reserve(CS.arg_size());
1051 
1052  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1053  i != e; ++i) {
1054  Value *V = *i;
1055 
1056  // Skip empty types
1057  if (V->getType()->isEmptyTy())
1058  continue;
1059 
1060  Entry.Val = V;
1061  Entry.Ty = V->getType();
1062 
1063  // Skip the first return-type Attribute to get to params.
1064  Entry.setAttributes(&CS, i - CS.arg_begin());
1065  Args.push_back(Entry);
1066  }
1067 
1068  // Check if target-independent constraints permit a tail call here.
1069  // Target-dependent constraints are checked within fastLowerCall.
1070  bool IsTailCall = CI->isTailCall();
1071  if (IsTailCall && !isInTailCallPosition(CS, TM))
1072  IsTailCall = false;
1073 
1074  CallLoweringInfo CLI;
1075  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
1076  .setTailCall(IsTailCall);
1077 
1078  return lowerCallTo(CLI);
1079 }
1080 
1082  const CallInst *Call = cast<CallInst>(I);
1083 
1084  // Handle simple inline asms.
1085  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
1086  // If the inline asm has side effects, then make sure that no local value
1087  // lives across by flushing the local value map.
1088  if (IA->hasSideEffects())
1089  flushLocalValueMap();
1090 
1091  // Don't attempt to handle constraints.
1092  if (!IA->getConstraintString().empty())
1093  return false;
1094 
1095  unsigned ExtraInfo = 0;
1096  if (IA->hasSideEffects())
1097  ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1098  if (IA->isAlignStack())
1099  ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1100 
1103  .addExternalSymbol(IA->getAsmString().c_str())
1104  .addImm(ExtraInfo);
1105  return true;
1106  }
1107 
1108  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
1109  computeUsesVAFloatArgument(*Call, MMI);
1110 
1111  // Handle intrinsic function calls.
1112  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1113  return selectIntrinsicCall(II);
1114 
1115  // Usually, it does not make sense to initialize a value,
1116  // make an unrelated function call and use the value, because
1117  // it tends to be spilled on the stack. So, we move the pointer
1118  // to the last local value to the beginning of the block, so that
1119  // all the values which have already been materialized
1120  // appear after the call. It also makes sense to skip intrinsics
1121  // since they tend to be inlined.
1122  flushLocalValueMap();
1123 
1124  return lowerCall(Call);
1125 }
1126 
1128  switch (II->getIntrinsicID()) {
1129  default:
1130  break;
1131  // At -O0 we don't care about the lifetime intrinsics.
1132  case Intrinsic::lifetime_start:
1133  case Intrinsic::lifetime_end:
1134  // The donothing intrinsic does, well, nothing.
1135  case Intrinsic::donothing:
1136  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1137  case Intrinsic::assume:
1138  return true;
1139  case Intrinsic::dbg_declare: {
1140  const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1141  assert(DI->getVariable() && "Missing variable");
1142  if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1143  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1144  return true;
1145  }
1146 
1147  const Value *Address = DI->getAddress();
1148  if (!Address || isa<UndefValue>(Address)) {
1149  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1150  return true;
1151  }
1152 
1153  // Byval arguments with frame indices were already handled after argument
1154  // lowering and before isel.
1155  const auto *Arg =
1157  if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
1158  return true;
1159 
1161  if (unsigned Reg = lookUpRegForValue(Address))
1162  Op = MachineOperand::CreateReg(Reg, false);
1163 
1164  // If we have a VLA that has a "use" in a metadata node that's then used
1165  // here but it has no other uses, then we have a problem. E.g.,
1166  //
1167  // int foo (const int *x) {
1168  // char a[*x];
1169  // return 0;
1170  // }
1171  //
1172  // If we assign 'a' a vreg and fast isel later on has to use the selection
1173  // DAG isel, it will want to copy the value to the vreg. However, there are
1174  // no uses, which goes counter to what selection DAG isel expects.
1175  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1176  (!isa<AllocaInst>(Address) ||
1177  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1179  false);
1180 
1181  if (Op) {
1183  "Expected inlined-at fields to agree");
1184  if (Op->isReg()) {
1185  Op->setIsDebug(true);
1186  // A dbg.declare describes the address of a source variable, so lower it
1187  // into an indirect DBG_VALUE.
1189  TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
1190  Op->getReg(), DI->getVariable(), DI->getExpression());
1191  } else
1193  TII.get(TargetOpcode::DBG_VALUE))
1194  .add(*Op)
1195  .addImm(0)
1196  .addMetadata(DI->getVariable())
1197  .addMetadata(DI->getExpression());
1198  } else {
1199  // We can't yet handle anything else here because it would require
1200  // generating code, thus altering codegen because of debug info.
1201  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1202  }
1203  return true;
1204  }
1205  case Intrinsic::dbg_value: {
1206  // This form of DBG_VALUE is target-independent.
1207  const DbgValueInst *DI = cast<DbgValueInst>(II);
1208  const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1209  const Value *V = DI->getValue();
1211  "Expected inlined-at fields to agree");
1212  if (!V) {
1213  // Currently the optimizer can produce this; insert an undef to
1214  // help debugging. Probably the optimizer should not do this.
1215  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
1216  DI->getVariable(), DI->getExpression());
1217  } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1218  if (CI->getBitWidth() > 64)
1220  .addCImm(CI)
1221  .addImm(0U)
1222  .addMetadata(DI->getVariable())
1223  .addMetadata(DI->getExpression());
1224  else
1226  .addImm(CI->getZExtValue())
1227  .addImm(0U)
1228  .addMetadata(DI->getVariable())
1229  .addMetadata(DI->getExpression());
1230  } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1232  .addFPImm(CF)
1233  .addImm(0U)
1234  .addMetadata(DI->getVariable())
1235  .addMetadata(DI->getExpression());
1236  } else if (unsigned Reg = lookUpRegForValue(V)) {
1237  // FIXME: This does not handle register-indirect values at offset 0.
1238  bool IsIndirect = false;
1239  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1240  DI->getVariable(), DI->getExpression());
1241  } else {
1242  // We can't yet handle anything else here because it would require
1243  // generating code, thus altering codegen because of debug info.
1244  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1245  }
1246  return true;
1247  }
1248  case Intrinsic::objectsize: {
1249  ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
1250  unsigned long long Res = CI->isZero() ? -1ULL : 0;
1251  Constant *ResCI = ConstantInt::get(II->getType(), Res);
1252  unsigned ResultReg = getRegForValue(ResCI);
1253  if (!ResultReg)
1254  return false;
1255  updateValueMap(II, ResultReg);
1256  return true;
1257  }
1258  case Intrinsic::invariant_group_barrier:
1259  case Intrinsic::expect: {
1260  unsigned ResultReg = getRegForValue(II->getArgOperand(0));
1261  if (!ResultReg)
1262  return false;
1263  updateValueMap(II, ResultReg);
1264  return true;
1265  }
1266  case Intrinsic::experimental_stackmap:
1267  return selectStackmap(II);
1268  case Intrinsic::experimental_patchpoint_void:
1269  case Intrinsic::experimental_patchpoint_i64:
1270  return selectPatchpoint(II);
1271 
1272  case Intrinsic::xray_customevent:
1273  return selectXRayCustomEvent(II);
1274  }
1275 
1276  return fastLowerIntrinsicCall(II);
1277 }
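//
// Example intrinsic handled above (a sketch; metadata operands abbreviated):
//
//   call void @llvm.dbg.value(metadata i32 %x, metadata !var, metadata !expr)
//
// becomes a target-independent DBG_VALUE machine instruction referencing %x's
// vreg (or a CImm/Imm/FPImm operand for constant values), while the lifetime,
// donothing, and assume intrinsics are simply dropped at -O0.
//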
1278 
1279 bool FastISel::selectCast(const User *I, unsigned Opcode) {
1280  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1281  EVT DstVT = TLI.getValueType(DL, I->getType());
1282 
1283  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1284  !DstVT.isSimple())
1285  // Unhandled type. Halt "fast" selection and bail.
1286  return false;
1287 
1288  // Check if the destination type is legal.
1289  if (!TLI.isTypeLegal(DstVT))
1290  return false;
1291 
1292  // Check if the source operand is legal.
1293  if (!TLI.isTypeLegal(SrcVT))
1294  return false;
1295 
1296  unsigned InputReg = getRegForValue(I->getOperand(0));
1297  if (!InputReg)
1298  // Unhandled operand. Halt "fast" selection and bail.
1299  return false;
1300 
1301  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
1302 
1303  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1304  Opcode, InputReg, InputRegIsKill);
1305  if (!ResultReg)
1306  return false;
1307 
1308  updateValueMap(I, ResultReg);
1309  return true;
1310 }
1311 
1313  // If the bitcast doesn't change the type, just use the operand value.
1314  if (I->getType() == I->getOperand(0)->getType()) {
1315  unsigned Reg = getRegForValue(I->getOperand(0));
1316  if (!Reg)
1317  return false;
1318  updateValueMap(I, Reg);
1319  return true;
1320  }
1321 
1322  // Bitcasts of other values become reg-reg copies or BITCAST operators.
1323  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1324  EVT DstEVT = TLI.getValueType(DL, I->getType());
1325  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1326  !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1327  // Unhandled type. Halt "fast" selection and bail.
1328  return false;
1329 
1330  MVT SrcVT = SrcEVT.getSimpleVT();
1331  MVT DstVT = DstEVT.getSimpleVT();
1332  unsigned Op0 = getRegForValue(I->getOperand(0));
1333  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1334  return false;
1335  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1336 
1337  // First, try to perform the bitcast by inserting a reg-reg copy.
1338  unsigned ResultReg = 0;
1339  if (SrcVT == DstVT) {
1340  const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1341  const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1342  // Don't attempt a cross-class copy. It will likely fail.
1343  if (SrcClass == DstClass) {
1344  ResultReg = createResultReg(DstClass);
1346  TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1347  }
1348  }
1349 
1350  // If the reg-reg copy failed, select a BITCAST opcode.
1351  if (!ResultReg)
1352  ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1353 
1354  if (!ResultReg)
1355  return false;
1356 
1357  updateValueMap(I, ResultReg);
1358  return true;
1359 }
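//
// Illustrative sketch of the two paths above:
//
//   %q = bitcast i8* %p to i32*    ; same MVT and register class -> plain COPY
//   %d = bitcast i64 %x to double  ; different MVTs -> needs a target
//                                  ; ISD::BITCAST pattern (e.g. a GPR->FPR move)
//
// If neither path produces a register, the instruction is left to SelectionDAG.
//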
1360 
1361 // Remove local value instructions starting from the instruction after
1362 // SavedLastLocalValue to the current function insert point.
1363 void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1364 {
1365  MachineInstr *CurLastLocalValue = getLastLocalValue();
1366  if (CurLastLocalValue != SavedLastLocalValue) {
1367  // Find the first local value instruction to be deleted.
1368  // This is the instruction after SavedLastLocalValue if it is non-NULL.
1369  // Otherwise it's the first instruction in the block.
1370  MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1371  if (SavedLastLocalValue)
1372  ++FirstDeadInst;
1373  else
1374  FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1375  setLastLocalValue(SavedLastLocalValue);
1376  removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1377  }
1378 }
1379 
1381  MachineInstr *SavedLastLocalValue = getLastLocalValue();
1382  // Just before the terminator instruction, insert instructions to
1383  // feed PHI nodes in successor blocks.
1384  if (isa<TerminatorInst>(I)) {
1385  if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1386  // PHI node handling may have generated local value instructions,
1387  // even though it failed to handle all PHI nodes.
1388  // We remove these instructions because SelectionDAGISel will generate
1389  // them again.
1390  removeDeadLocalValueCode(SavedLastLocalValue);
1391  return false;
1392  }
1393  }
1394 
1395  // FastISel does not handle any operand bundles except OB_funclet.
1397  for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
1398  if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1399  return false;
1400 
1401  DbgLoc = I->getDebugLoc();
1402 
1403  SavedInsertPt = FuncInfo.InsertPt;
1404 
1405  if (const auto *Call = dyn_cast<CallInst>(I)) {
1406  const Function *F = Call->getCalledFunction();
1407  LibFunc Func;
1408 
1409  // As a special case, don't handle calls to builtin library functions that
1410  // may be translated directly to target instructions.
1411  if (F && !F->hasLocalLinkage() && F->hasName() &&
1412  LibInfo->getLibFunc(F->getName(), Func) &&
1414  return false;
1415 
1416  // Don't handle Intrinsic::trap if a trap function is specified.
1417  if (F && F->getIntrinsicID() == Intrinsic::trap &&
1418  Call->hasFnAttr("trap-func-name"))
1419  return false;
1420  }
1421 
1422  // First, try doing target-independent selection.
1424  if (selectOperator(I, I->getOpcode())) {
1425  ++NumFastIselSuccessIndependent;
1426  DbgLoc = DebugLoc();
1427  return true;
1428  }
1429  // Remove dead code.
1431  if (SavedInsertPt != FuncInfo.InsertPt)
1432  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1433  SavedInsertPt = FuncInfo.InsertPt;
1434  }
1435  // Next, try calling the target to attempt to handle the instruction.
1436  if (fastSelectInstruction(I)) {
1437  ++NumFastIselSuccessTarget;
1438  DbgLoc = DebugLoc();
1439  return true;
1440  }
1441  // Remove dead code.
1443  if (SavedInsertPt != FuncInfo.InsertPt)
1444  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1445 
1446  DbgLoc = DebugLoc();
1447  // Undo phi node updates, because they will be added again by SelectionDAG.
1448  if (isa<TerminatorInst>(I)) {
1449  // PHI node handling may have generated local value instructions.
1450  // We remove them because SelectionDAGISel will generate them again.
1451  removeDeadLocalValueCode(SavedLastLocalValue);
1453  }
1454  return false;
1455 }
1456 
1457 /// Emit an unconditional branch to the given block, unless it is the immediate
1458 /// (fall-through) successor, and update the CFG.
1460  const DebugLoc &DbgLoc) {
1461  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
1462  FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1463  // For more accurate line information, if this is the only instruction
1464  // in the block, then emit it; otherwise we have the unconditional
1465  // fall-through case, which needs no instructions.
1466  } else {
1467  // The unconditional branch case.
1468  TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1469  SmallVector<MachineOperand, 0>(), DbgLoc);
1470  }
1471  if (FuncInfo.BPI) {
1473  FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
1475  } else
1477 }
1478 
1480  MachineBasicBlock *TrueMBB,
1481  MachineBasicBlock *FalseMBB) {
1482  // Add TrueMBB as a successor unless it is equal to the FalseMBB: this can
1483  // happen in degenerate IR, and MachineIR forbids having a block twice in the
1484  // successor/predecessor lists.
1485  if (TrueMBB != FalseMBB) {
1486  if (FuncInfo.BPI) {
1487  auto BranchProbability =
1488  FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1490  } else
1492  }
1493 
1494  fastEmitBranch(FalseMBB, DbgLoc);
1495 }
1496 
1497 /// Emit an FNeg operation.
1499  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
1500  if (!OpReg)
1501  return false;
1502  bool OpRegIsKill = hasTrivialKill(I);
1503 
1504  // If the target has ISD::FNEG, use it.
1505  EVT VT = TLI.getValueType(DL, I->getType());
1506  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1507  OpReg, OpRegIsKill);
1508  if (ResultReg) {
1509  updateValueMap(I, ResultReg);
1510  return true;
1511  }
1512 
1513  // Bitcast the value to integer, twiddle the sign bit with xor,
1514  // and then bitcast it back to floating-point.
1515  if (VT.getSizeInBits() > 64)
1516  return false;
1517  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1518  if (!TLI.isTypeLegal(IntVT))
1519  return false;
1520 
1521  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1522  ISD::BITCAST, OpReg, OpRegIsKill);
1523  if (!IntReg)
1524  return false;
1525 
1526  unsigned IntResultReg = fastEmit_ri_(
1527  IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1528  UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1529  if (!IntResultReg)
1530  return false;
1531 
1532  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1533  IntResultReg, /*IsKill=*/true);
1534  if (!ResultReg)
1535  return false;
1536 
1537  updateValueMap(I, ResultReg);
1538  return true;
1539 }
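//
// Worked sketch of the integer fallback above for an f64 operand:
//
//   bits   = bitcast f64 -> i64
//   bits  ^= 0x8000000000000000    ; UINT64_C(1) << 63 flips only the sign bit
//   result = bitcast i64 -> f64
//
// This negates any value, including -0.0 and NaNs, without a dedicated FNEG
// instruction, provided the same-width integer type is legal on the target.
//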
1540 
1542  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1543  if (!EVI)
1544  return false;
1545 
1546  // Make sure we only try to handle extracts with a legal result. But also
1547  // allow i1 because it's easy.
1548  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1549  if (!RealVT.isSimple())
1550  return false;
1551  MVT VT = RealVT.getSimpleVT();
1552  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1553  return false;
1554 
1555  const Value *Op0 = EVI->getOperand(0);
1556  Type *AggTy = Op0->getType();
1557 
1558  // Get the base result register.
1559  unsigned ResultReg;
1561  if (I != FuncInfo.ValueMap.end())
1562  ResultReg = I->second;
1563  else if (isa<Instruction>(Op0))
1564  ResultReg = FuncInfo.InitializeRegForValue(Op0);
1565  else
1566  return false; // fast-isel can't handle aggregate constants at the moment
1567 
1568  // Get the actual result register, which is an offset from the base register.
1569  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1570 
1571  SmallVector<EVT, 4> AggValueVTs;
1572  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1573 
1574  for (unsigned i = 0; i < VTIndex; i++)
1575  ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1576 
1577  updateValueMap(EVI, ResultReg);
1578  return true;
1579 }
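//
// Example of the register-offset computation above (a sketch): for
//
//   %agg = call { i32, i64 } @f()
//   %hi  = extractvalue { i32, i64 } %agg, 1
//
// the aggregate lives in consecutive virtual registers starting at %agg's base
// register; ComputeLinearIndex returns 1, and the loop skips the registers
// used by the i32 member (one on a typical 64-bit target) to reach the vreg
// holding the i64 member.
//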
1580 
1581 bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1582  switch (Opcode) {
1583  case Instruction::Add:
1584  return selectBinaryOp(I, ISD::ADD);
1585  case Instruction::FAdd:
1586  return selectBinaryOp(I, ISD::FADD);
1587  case Instruction::Sub:
1588  return selectBinaryOp(I, ISD::SUB);
1589  case Instruction::FSub:
1590  // FNeg is currently represented in LLVM IR as a special case of FSub.
1591  if (BinaryOperator::isFNeg(I))
1592  return selectFNeg(I);
1593  return selectBinaryOp(I, ISD::FSUB);
1594  case Instruction::Mul:
1595  return selectBinaryOp(I, ISD::MUL);
1596  case Instruction::FMul:
1597  return selectBinaryOp(I, ISD::FMUL);
1598  case Instruction::SDiv:
1599  return selectBinaryOp(I, ISD::SDIV);
1600  case Instruction::UDiv:
1601  return selectBinaryOp(I, ISD::UDIV);
1602  case Instruction::FDiv:
1603  return selectBinaryOp(I, ISD::FDIV);
1604  case Instruction::SRem:
1605  return selectBinaryOp(I, ISD::SREM);
1606  case Instruction::URem:
1607  return selectBinaryOp(I, ISD::UREM);
1608  case Instruction::FRem:
1609  return selectBinaryOp(I, ISD::FREM);
1610  case Instruction::Shl:
1611  return selectBinaryOp(I, ISD::SHL);
1612  case Instruction::LShr:
1613  return selectBinaryOp(I, ISD::SRL);
1614  case Instruction::AShr:
1615  return selectBinaryOp(I, ISD::SRA);
1616  case Instruction::And:
1617  return selectBinaryOp(I, ISD::AND);
1618  case Instruction::Or:
1619  return selectBinaryOp(I, ISD::OR);
1620  case Instruction::Xor:
1621  return selectBinaryOp(I, ISD::XOR);
1622 
1623  case Instruction::GetElementPtr:
1624  return selectGetElementPtr(I);
1625 
1626  case Instruction::Br: {
1627  const BranchInst *BI = cast<BranchInst>(I);
1628 
1629  if (BI->isUnconditional()) {
1630  const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1631  MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1632  fastEmitBranch(MSucc, BI->getDebugLoc());
1633  return true;
1634  }
1635 
1636  // Conditional branches are not handled yet.
1637  // Halt "fast" selection and bail.
1638  return false;
1639  }
1640 
1641  case Instruction::Unreachable:
1643  return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1644  else
1645  return true;
1646 
1647  case Instruction::Alloca:
1648  // FunctionLowering has the static-sized case covered.
1649  if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1650  return true;
1651 
1652  // Dynamic-sized alloca is not handled yet.
1653  return false;
1654 
1655  case Instruction::Call:
1656  return selectCall(I);
1657 
1658  case Instruction::BitCast:
1659  return selectBitCast(I);
1660 
1661  case Instruction::FPToSI:
1662  return selectCast(I, ISD::FP_TO_SINT);
1663  case Instruction::ZExt:
1664  return selectCast(I, ISD::ZERO_EXTEND);
1665  case Instruction::SExt:
1666  return selectCast(I, ISD::SIGN_EXTEND);
1667  case Instruction::Trunc:
1668  return selectCast(I, ISD::TRUNCATE);
1669  case Instruction::SIToFP:
1670  return selectCast(I, ISD::SINT_TO_FP);
1671 
1672  case Instruction::IntToPtr: // Deliberate fall-through.
1673  case Instruction::PtrToInt: {
1674  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1675  EVT DstVT = TLI.getValueType(DL, I->getType());
1676  if (DstVT.bitsGT(SrcVT))
1677  return selectCast(I, ISD::ZERO_EXTEND);
1678  if (DstVT.bitsLT(SrcVT))
1679  return selectCast(I, ISD::TRUNCATE);
1680  unsigned Reg = getRegForValue(I->getOperand(0));
1681  if (!Reg)
1682  return false;
1683  updateValueMap(I, Reg);
1684  return true;
1685  }
1686 
1687  case Instruction::ExtractValue:
1688  return selectExtractValue(I);
1689 
1690  case Instruction::PHI:
1691  llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1692 
1693  default:
1694  // Unhandled instruction. Halt "fast" selection and bail.
1695  return false;
1696  }
1697 }
1698 
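// Worked example for the IntToPtr/PtrToInt case in selectOperator above
// (hypothetical IR, assuming 64-bit pointers): for "%i = ptrtoint i8* %p to i32"
// the source type is i64 and the destination is i32, so DstVT.bitsLT(SrcVT)
// holds and the cast is selected as ISD::TRUNCATE. The widening direction is
// selected as ISD::ZERO_EXTEND, and an equal-width cast emits no instruction
// at all: the operand's register is simply reused via updateValueMap.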
1699 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1700  const TargetLibraryInfo *LibInfo,
1701  bool SkipTargetIndependentISel)
1702  : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1703  MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1704  TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1705  TII(*MF->getSubtarget().getInstrInfo()),
1706  TLI(*MF->getSubtarget().getTargetLowering()),
1707  TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1708  SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1709 
1710 FastISel::~FastISel() = default;
1711 
1712 bool FastISel::fastLowerArguments() { return false; }
1713 
1714 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1715 
1716 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1717  return false;
1718 }
1719 
1720 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1721 
1722 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
1723  bool /*Op0IsKill*/) {
1724  return 0;
1725 }
1726 
1727 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1728  bool /*Op0IsKill*/, unsigned /*Op1*/,
1729  bool /*Op1IsKill*/) {
1730  return 0;
1731 }
1732 
1733 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1734  return 0;
1735 }
1736 
1737 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1738  const ConstantFP * /*FPImm*/) {
1739  return 0;
1740 }
1741 
1742 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1743  bool /*Op0IsKill*/, uint64_t /*Imm*/) {
1744  return 0;
1745 }
1746 
1747 /// This method is a wrapper of fastEmit_ri. It first tries to emit an
1748 /// instruction with an immediate operand using fastEmit_ri.
1749 /// If that fails, it materializes the immediate into a register and tries
1750 /// fastEmit_rr instead.
1751 unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1752  bool Op0IsKill, uint64_t Imm, MVT ImmType) {
1753  // If this is a multiply by a power of two, emit this as a shift left.
1754  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1755  Opcode = ISD::SHL;
1756  Imm = Log2_64(Imm);
1757  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1758  // div x, 8 -> srl x, 3
1759  Opcode = ISD::SRL;
1760  Imm = Log2_64(Imm);
1761  }
1762 
1763  // Horrible hack (to be removed), check to make sure shift amounts are
1764  // in-range.
1765  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1766  Imm >= VT.getSizeInBits())
1767  return 0;
1768 
1769  // First check if immediate type is legal. If not, we can't use the ri form.
1770  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1771  if (ResultReg)
1772  return ResultReg;
1773  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1774  bool IsImmKill = true;
1775  if (!MaterialReg) {
1776  // This is a bit ugly/slow, but failing here means falling out of
1777  // fast-isel, which would be very slow.
1778  IntegerType *ITy =
1779  IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1780  MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1781  if (!MaterialReg)
1782  return 0;
1783  // FIXME: If the materialized register here has no uses yet then this
1784  // will be the first use and we should be able to mark it as killed.
1785  // However, the local value area for materialising constant expressions
1786  // grows down, not up, which means that any constant expressions we generate
1787  // later which also use 'Imm' could be after this instruction and therefore
1788  // after this kill.
1789  IsImmKill = false;
1790  }
1791  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
1792 }
1793 
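// Worked example for fastEmit_ri_ above (hypothetical operands): a request for
// ISD::MUL with Imm == 8 is rewritten to ISD::SHL with Imm == Log2_64(8) == 3,
// and ISD::UDIV by 8 becomes ISD::SRL by 3. If the target's fastEmit_ri hook
// rejects the immediate form, the constant is materialized into a register
// (through fastEmit_i, or getRegForValue on a ConstantInt as a fallback) and
// the operation is retried in register-register form through fastEmit_rr.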
1794 unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
1795  return MRI.createVirtualRegister(RC);
1796 }
1797 
1798 unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
1799  unsigned OpNum) {
1800  if (TargetRegisterInfo::isVirtualRegister(Op)) {
1801  const TargetRegisterClass *RegClass =
1802  TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1803  if (!MRI.constrainRegClass(Op, RegClass)) {
1804  // If it's not legal to COPY between the register classes, something
1805  // has gone very wrong before we got here.
1806  unsigned NewOp = createResultReg(RegClass);
1807  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1808  TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1809  return NewOp;
1810  }
1811  }
1812  return Op;
1813 }
1814 
1815 unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1816  const TargetRegisterClass *RC) {
1817  unsigned ResultReg = createResultReg(RC);
1818  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1819 
1820  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
1821  return ResultReg;
1822 }
1823 
1824 unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1825  const TargetRegisterClass *RC, unsigned Op0,
1826  bool Op0IsKill) {
1827  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1828 
1829  unsigned ResultReg = createResultReg(RC);
1830  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1831 
1832  if (II.getNumDefs() >= 1)
1833  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1834  .addReg(Op0, getKillRegState(Op0IsKill));
1835  else {
1836  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1837  .addReg(Op0, getKillRegState(Op0IsKill));
1838  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1839  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1840  }
1841 
1842  return ResultReg;
1843 }
1844 
1845 unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1846  const TargetRegisterClass *RC, unsigned Op0,
1847  bool Op0IsKill, unsigned Op1,
1848  bool Op1IsKill) {
1849  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1850 
1851  unsigned ResultReg = createResultReg(RC);
1852  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1853  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1854 
1855  if (II.getNumDefs() >= 1)
1856  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1857  .addReg(Op0, getKillRegState(Op0IsKill))
1858  .addReg(Op1, getKillRegState(Op1IsKill));
1859  else {
1860  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1861  .addReg(Op0, getKillRegState(Op0IsKill))
1862  .addReg(Op1, getKillRegState(Op1IsKill));
1863  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1864  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1865  }
1866  return ResultReg;
1867 }
1868 
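// Note on the fastEmitInst_* helpers above and below: when the MCInstrDesc
// declares at least one explicit def, the virtual ResultReg is passed straight
// to BuildMI as the destination. Otherwise the result is assumed to appear in
// the instruction's first implicit def (II.ImplicitDefs[0]), and a separate
// TargetOpcode::COPY is emitted to move that physical register into ResultReg
// so later code can refer to the value uniformly through a virtual register.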
1869 unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1870  const TargetRegisterClass *RC, unsigned Op0,
1871  bool Op0IsKill, unsigned Op1,
1872  bool Op1IsKill, unsigned Op2,
1873  bool Op2IsKill) {
1874  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1875 
1876  unsigned ResultReg = createResultReg(RC);
1877  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1878  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1879  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1880 
1881  if (II.getNumDefs() >= 1)
1882  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1883  .addReg(Op0, getKillRegState(Op0IsKill))
1884  .addReg(Op1, getKillRegState(Op1IsKill))
1885  .addReg(Op2, getKillRegState(Op2IsKill));
1886  else {
1887  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1888  .addReg(Op0, getKillRegState(Op0IsKill))
1889  .addReg(Op1, getKillRegState(Op1IsKill))
1890  .addReg(Op2, getKillRegState(Op2IsKill));
1891  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1892  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1893  }
1894  return ResultReg;
1895 }
1896 
1897 unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
1898  const TargetRegisterClass *RC, unsigned Op0,
1899  bool Op0IsKill, uint64_t Imm) {
1900  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1901 
1902  unsigned ResultReg = createResultReg(RC);
1903  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1904 
1905  if (II.getNumDefs() >= 1)
1906  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1907  .addReg(Op0, getKillRegState(Op0IsKill))
1908  .addImm(Imm);
1909  else {
1910  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1911  .addReg(Op0, getKillRegState(Op0IsKill))
1912  .addImm(Imm);
1913  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1914  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1915  }
1916  return ResultReg;
1917 }
1918 
1919 unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
1920  const TargetRegisterClass *RC, unsigned Op0,
1921  bool Op0IsKill, uint64_t Imm1,
1922  uint64_t Imm2) {
1923  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1924 
1925  unsigned ResultReg = createResultReg(RC);
1926  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1927 
1928  if (II.getNumDefs() >= 1)
1929  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1930  .addReg(Op0, getKillRegState(Op0IsKill))
1931  .addImm(Imm1)
1932  .addImm(Imm2);
1933  else {
1934  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1935  .addReg(Op0, getKillRegState(Op0IsKill))
1936  .addImm(Imm1)
1937  .addImm(Imm2);
1938  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1939  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1940  }
1941  return ResultReg;
1942 }
1943 
1944 unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
1945  const TargetRegisterClass *RC,
1946  const ConstantFP *FPImm) {
1947  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1948 
1949  unsigned ResultReg = createResultReg(RC);
1950 
1951  if (II.getNumDefs() >= 1)
1952  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1953  .addFPImm(FPImm);
1954  else {
1955  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1956  .addFPImm(FPImm);
1957  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1958  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1959  }
1960  return ResultReg;
1961 }
1962 
1963 unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
1964  const TargetRegisterClass *RC, unsigned Op0,
1965  bool Op0IsKill, unsigned Op1,
1966  bool Op1IsKill, uint64_t Imm) {
1967  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1968 
1969  unsigned ResultReg = createResultReg(RC);
1970  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1971  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1972 
1973  if (II.getNumDefs() >= 1)
1974  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1975  .addReg(Op0, getKillRegState(Op0IsKill))
1976  .addReg(Op1, getKillRegState(Op1IsKill))
1977  .addImm(Imm);
1978  else {
1979  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1980  .addReg(Op0, getKillRegState(Op0IsKill))
1981  .addReg(Op1, getKillRegState(Op1IsKill))
1982  .addImm(Imm);
1983  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1984  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1985  }
1986  return ResultReg;
1987 }
1988 
1989 unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
1990  const TargetRegisterClass *RC, uint64_t Imm) {
1991  unsigned ResultReg = createResultReg(RC);
1992  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1993 
1994  if (II.getNumDefs() >= 1)
1995  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1996  .addImm(Imm);
1997  else {
1998  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
1999  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2000  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2001  }
2002  return ResultReg;
2003 }
2004 
2005 unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
2006  bool Op0IsKill, uint32_t Idx) {
2007  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2008  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
2009  "Cannot yet extract from physregs");
2010  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2011  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2012  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
2013  ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
2014  return ResultReg;
2015 }
2016 
2017 /// Emit MachineInstrs to compute the value of Op with all but the least
2018 /// significant bit set to zero.
2019 unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
2020  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
2021 }
2022 
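// Worked example for fastEmitZExtFromI1 above (hypothetical register): for an
// i1 value living in the low bit of a wider register, the helper emits the
// equivalent of "and Reg, 1" through fastEmit_ri, clearing every bit except
// the least significant one so the register holds a clean zero-extended bool.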
2023 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2024 /// Emit code to ensure constants are copied into registers when needed.
2025 /// Remember the virtual registers that need to be added to the Machine PHI
2026 /// nodes as input. We cannot just directly add them, because expansion
2027 /// might result in multiple MBB's for one BB. As such, the start of the
2028 /// BB might correspond to a different MBB than the end.
2029 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2030  const TerminatorInst *TI = LLVMBB->getTerminator();
2031 
2032  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2033  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2034 
2035  // Check successor nodes' PHI nodes that expect a constant to be available
2036  // from this block.
2037  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2038  const BasicBlock *SuccBB = TI->getSuccessor(succ);
2039  if (!isa<PHINode>(SuccBB->begin()))
2040  continue;
2041  MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2042 
2043  // If this terminator has multiple identical successors (common for
2044  // switches), only handle each succ once.
2045  if (!SuccsHandled.insert(SuccMBB).second)
2046  continue;
2047 
2048  MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2049 
2050  // At this point we know that there is a 1-1 correspondence between LLVM PHI
2051  // nodes and Machine PHI nodes, but the incoming operands have not been
2052  // emitted yet.
2053  for (BasicBlock::const_iterator I = SuccBB->begin();
2054  const auto *PN = dyn_cast<PHINode>(I); ++I) {
2055 
2056  // Ignore dead phi's.
2057  if (PN->use_empty())
2058  continue;
2059 
2060  // Only handle legal types. Two interesting things to note here. First,
2061  // by bailing out early, we may leave behind some dead instructions,
2062  // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2063  // own moves. Second, this check is necessary because FastISel doesn't
2064  // use CreateRegs to create registers, so it always creates
2065  // exactly one register for each non-void instruction.
2066  EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
2067  if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2068  // Handle integer promotions, though, because they're common and easy.
2069  if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2070  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2071  return false;
2072  }
2073  }
2074 
2075  const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
2076 
2077  // Set the DebugLoc for the copy. Prefer the location of the operand
2078  // if there is one; use the location of the PHI otherwise.
2079  DbgLoc = PN->getDebugLoc();
2080  if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2081  DbgLoc = Inst->getDebugLoc();
2082 
2083  unsigned Reg = getRegForValue(PHIOp);
2084  if (!Reg) {
2085  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2086  return false;
2087  }
2088  FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2089  DbgLoc = DebugLoc();
2090  }
2091  }
2092 
2093  return true;
2094 }
2095 
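// Worked example for handlePHINodesInSuccessorBlocks above (hypothetical IR):
//   entry:
//     br label %next
//   next:
//     %v = phi i32 [ 7, %entry ], [ %x, %other ]
// When selecting the terminator of %entry, the incoming constant 7 must be
// available in a register, so getRegForValue is called here and the
// (machine PHI, vreg) pair is queued in FuncInfo.PHINodesToUpdate; the PHI
// operand lists themselves are filled in later, after all blocks are emitted.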
2096 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2097  assert(LI->hasOneUse() &&
2098  "tryToFoldLoad expected a LoadInst with a single use");
2099  // We know that the load has a single use, but don't know what it is. If it
2100  // isn't one of the folded instructions, then we can't succeed here. Handle
2101  // this by scanning the single-use users of the load until we get to FoldInst.
2102  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2103 
2104  const Instruction *TheUser = LI->user_back();
2105  while (TheUser != FoldInst && // Scan up until we find FoldInst.
2106  // Stay in the right block.
2107  TheUser->getParent() == FoldInst->getParent() &&
2108  --MaxUsers) { // Don't scan too far.
2109  // If there are multiple or no uses of this instruction, then bail out.
2110  if (!TheUser->hasOneUse())
2111  return false;
2112 
2113  TheUser = TheUser->user_back();
2114  }
2115 
2116  // If we didn't find the fold instruction, then we failed to collapse the
2117  // sequence.
2118  if (TheUser != FoldInst)
2119  return false;
2120 
2121  // Don't try to fold volatile loads. Target has to deal with alignment
2122  // constraints.
2123  if (LI->isVolatile())
2124  return false;
2125 
2126  // Figure out which vreg this is going into. If there is no assigned vreg yet
2127  // then there actually was no reference to it. Perhaps the load is referenced
2128  // by a dead instruction.
2129  unsigned LoadReg = getRegForValue(LI);
2130  if (!LoadReg)
2131  return false;
2132 
2133  // We can't fold if this vreg has no uses or more than one use. Multiple uses
2134  // may mean that the instruction got lowered to multiple MIs, or the use of
2135  // the loaded value ended up being multiple operands of the result.
2136  if (!MRI.hasOneUse(LoadReg))
2137  return false;
2138 
2139  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2140  MachineInstr *User = RI->getParent();
2141 
2142  // Set the insertion point properly. Folding the load can cause generation of
2143  // other random instructions (like sign extends) for addressing modes; make
2144  // sure they get inserted in a logical place before the new instruction.
2145  FuncInfo.InsertPt = User;
2146  FuncInfo.MBB = User->getParent();
2147 
2148  // Ask the target to try folding the load.
2149  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2150 }
2151 
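// Worked example for tryToFoldLoad above (hypothetical IR):
//   %v = load i32, i32* %p
//   %c = icmp eq i32 %v, 0
// The load's only user chain leads to FoldInst (%c) within the same block,
// the load's vreg has exactly one machine use, and the load is not volatile,
// so tryToFoldLoadIntoMI gives the target a chance to select the compare as a
// memory-operand form instead of emitting a separate load instruction.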
2152 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2153  // Must be an add.
2154  if (!isa<AddOperator>(Add))
2155  return false;
2156  // Type size needs to match.
2157  if (DL.getTypeSizeInBits(GEP->getType()) !=
2158  DL.getTypeSizeInBits(Add->getType()))
2159  return false;
2160  // Must be in the same basic block.
2161  if (isa<Instruction>(Add) &&
2162  FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2163  return false;
2164  // Must have a constant operand.
2165  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2166 }
2167 
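// Worked example for canFoldAddIntoGEP above (hypothetical IR, assuming
// 64-bit pointers):
//   %sum = add i64 %idx, 16
//   %p   = getelementptr i8, i8* %base, i64 %sum
// The add lives in the same machine basic block, its width matches the GEP's
// pointer width, and its second operand is a ConstantInt, so the constant 16
// can be folded into the GEP's addressing computation rather than being
// selected as a separate add instruction.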
2168 MachineMemOperand *
2169 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2170  const Value *Ptr;
2171  Type *ValTy;
2172  unsigned Alignment;
2173  MachineMemOperand::Flags Flags;
2174  bool IsVolatile;
2175 
2176  if (const auto *LI = dyn_cast<LoadInst>(I)) {
2177  Alignment = LI->getAlignment();
2178  IsVolatile = LI->isVolatile();
2179  Flags = MachineMemOperand::MOLoad;
2180  Ptr = LI->getPointerOperand();
2181  ValTy = LI->getType();
2182  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2183  Alignment = SI->getAlignment();
2184  IsVolatile = SI->isVolatile();
2185  Flags = MachineMemOperand::MOStore;
2186  Ptr = SI->getPointerOperand();
2187  ValTy = SI->getValueOperand()->getType();
2188  } else
2189  return nullptr;
2190 
2191  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2192  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
2193  bool IsDereferenceable =
2194  I->getMetadata(LLVMContext::MD_dereferenceable) != nullptr;
2195  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2196 
2197  AAMDNodes AAInfo;
2198  I->getAAMetadata(AAInfo);
2199 
2200  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
2201  Alignment = DL.getABITypeAlignment(ValTy);
2202 
2203  unsigned Size = DL.getTypeStoreSize(ValTy);
2204 
2205  if (IsVolatile)
2206  Flags |= MachineMemOperand::MOVolatile;
2207  if (IsNonTemporal)
2208  Flags |= MachineMemOperand::MONonTemporal;
2209  if (IsDereferenceable)
2210  Flags |= MachineMemOperand::MODereferenceable;
2211  if (IsInvariant)
2212  Flags |= MachineMemOperand::MOInvariant;
2213 
2214  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2215  Alignment, AAInfo, Ranges);
2216 }
2217 
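// Note on createMachineMemOperandFor above: the MachineMemOperand flags are
// derived from the IR instruction itself, MOLoad or MOStore from the
// instruction kind, MOVolatile from isVolatile(), and MONonTemporal,
// MODereferenceable and MOInvariant from the corresponding !nontemporal,
// !dereferenceable and !invariant.load metadata, while a zero alignment is
// replaced by the type's ABI alignment from the DataLayout.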
2218 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2219  // If both operands are the same, then try to optimize or fold the cmp.
2220  CmpInst::Predicate Predicate = CI->getPredicate();
2221  if (CI->getOperand(0) != CI->getOperand(1))
2222  return Predicate;
2223 
2224  switch (Predicate) {
2225  default: llvm_unreachable("Invalid predicate!");
2226  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2227  case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2228  case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2229  case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2230  case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2231  case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2232  case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2233  case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2234  case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2235  case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2236  case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2237  case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2238  case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2239  case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2240  case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2241  case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2242 
2243  case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2244  case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2245  case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2246  case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2247  case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2248  case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2249  case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2250  case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2251  case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2252  case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2253  }
2254 
2255  return Predicate;
2256 }
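// Worked example for optimizeCmpPredicate above (hypothetical IR): for
//   %c = fcmp oeq float %x, %x
// both operands are identical, so the predicate is rewritten to FCMP_ORD and
// the compare is true exactly when %x is not a NaN. Likewise "fcmp une %x, %x"
// becomes FCMP_UNO, and self-compares with integer predicates fold to the
// constant FCMP_TRUE or FCMP_FALSE outcomes listed in the table above.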
void setHasStackMap(bool s=true)
uint64_t CallInst * C
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:545
unsigned fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, unsigned Op2, bool Op2IsKill)
Emit a MachineInstr with three register operands and a result register in the given register class...
Definition: FastISel.cpp:1869
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
void setByValAlign(unsigned A)
const MachineInstrBuilder & add(const MachineOperand &MO) const
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:109
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:850
unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero...
Definition: FastISel.cpp:2019
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
bool hasLocalLinkage() const
Definition: GlobalValue.h:416
This instruction extracts a struct member or array element value from an aggregate value...
MachineConstantPool & MCP
Definition: FastISel.h:208
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1042
unsigned arg_size() const
Definition: CallSite.h:219
static const Value * getFNegArgument(const Value *BinOp)
bool hasDebugInfo() const
Returns true if valid debug info is present.
CallingConv::ID getCallingConv() const
Get the calling convention of the call.
Definition: CallSite.h:312
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
struct fuzzer::@309 Flags
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
virtual unsigned fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:475
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:42
BasicBlock * getSuccessor(unsigned idx) const
Return the specified successor.
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
ImmutableCallSite * CS
Definition: FastISel.h:90
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:498
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:398
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:562
constexpr char IsVolatile[]
Key for Kernel::Arg::Metadata::mIsVolatile.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
unsigned getReg() const
getReg - Returns the register number.
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:613
This class represents a function call, abstracting a target machine&#39;s calling convention.
This file contains the declarations for metadata subclasses.
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:298
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:253
gep_type_iterator gep_type_end(const User *GEP)
unsigned less or equal
Definition: InstrTypes.h:886
unsigned less than
Definition: InstrTypes.h:885
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:866
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2169
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:697
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:876
void addSuccessorWithoutProb(MachineBasicBlock *Succ)
Add Succ as a successor of this MachineBasicBlock.
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1720
BasicBlock * getSuccessor(unsigned i) const
arg_iterator arg_end()
Definition: Function.h:612
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
STATISTIC(NumFunctions, "Total number of functions")
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:862
F(f)
MachineModuleInfo & getMMI() const
SmallVector< unsigned, 4 > InRegs
Definition: FastISel.h:99
unsigned getCallFrameDestroyOpcode() const
An instruction for reading from memory.
Definition: Instructions.h:164
Hexagon Common GEP
bool CanLowerReturn
CanLowerReturn - true iff the function&#39;s return value can be lowered to registers.
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1733
CallingConv::ID getCallingConv() const
getCallingConv/setCallingConv - Get or set the calling convention of this function call...
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF)
Emit the floating-point constant +0.0 in a register using target- specific logic. ...
Definition: FastISel.h:482
void setPhysRegsDeadExcept(ArrayRef< unsigned > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
void computeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo &MMI)
Determine if any floating-point values are being passed to this variadic function, and set the MachineModuleInfo&#39;s usesVAFloatArgument flag if so.
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1714
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:130
static Constant * getNullValue(Type *Ty)
Constructor to create a &#39;0&#39; constant of arbitrary type.
Definition: Constants.cpp:207
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:252
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1380
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:1919
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
Definition: APFloat.h:1069
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:871
MachineFunction * MF
Definition: FastISel.h:205
DenseMap< const Value *, unsigned > LocalValueMap
Definition: FastISel.h:203
unsigned fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:1897
ArrayRef< unsigned > getIndices() const
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:238
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:870
unsigned fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:1963
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2218
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:217
A description of a memory reference used in the backend.
void setHasPatchPoint(bool s=true)
unsigned getNumArgOperands() const
Return the number of call arguments.
TargetLoweringBase::ArgListTy ArgListTy
Definition: FastISel.h:70
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
Shift and rotation operations.
Definition: ISDOpcodes.h:379
Class to represent struct types.
Definition: DerivedTypes.h:201
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
unsigned fastEmitInst_i(unsigned MachineInstrOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:1989
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2152
DenseMap< const Value *, unsigned > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
IterTy arg_end() const
Definition: CallSite.h:557
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
unsigned fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill)
Emit a MachineInstr with one register operand and a result register in the given register class...
Definition: FastISel.cpp:1824
MachineInstr * EmitStartPt
The top most instruction in the current block that is allowed for emitting local variables.
Definition: FastISel.h:227
Reg
All possible values of the reg field in the ModR/M byte.
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:867
This file contains the simple types necessary to represent the attributes associated with functions a...
InstrTy * getInstruction() const
Definition: CallSite.h:92
The memory access is dereferenceable (i.e., doesn&#39;t trap).
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
void setByValSize(unsigned S)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const TargetMachine & TM
Definition: FastISel.h:210
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:634
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1127
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1279
unsigned getSizeInBits() const
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
Context object for machine code objects.
Definition: MCContext.h:59
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
Class to represent function types.
Definition: DerivedTypes.h:103
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:98
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:1798
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:455
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr), and append generated machine instructions to the current block.
Definition: FastISel.cpp:1581
ArchType getArch() const
getArch - Get the parsed architecture type of this triple.
Definition: Triple.h:283
unsigned getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value...
Definition: FastISel.cpp:196
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
unsigned fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:1815
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:194
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
bool hasTrivialKill(const Value *V)
Test whether the given value has exactly one use.
Definition: FastISel.cpp:163
void setOrigAlign(unsigned A)
amdgpu Simplify well known AMD library false Value * Callee
MachineInstr * getLastLocalValue()
Return the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:234
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:85
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
Value * getOperand(unsigned i) const
Definition: User.h:154
Class to represent pointers.
Definition: DerivedTypes.h:467
unsigned getKillRegState(bool B)
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:498
unsigned lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:309
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:146
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:229
MCContext & getContext() const
void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes...
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
The memory access is volatile.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
Definition: DataLayout.cpp:702
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void getAAMetadata(AAMDNodes &N, bool Merge=false) const
Fills the AAMDNodes structure with AA metadata from this instruction.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:150
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
virtual ~FastISel()
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:54
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, ImmutableCallSite &Call)
Definition: FastISel.h:105
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Machine Value Type.
bool hasName() const
Definition: Value.h:251
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
Simple binary floating point operators.
Definition: ISDOpcodes.h:259
Conditional or Unconditional Branch instruction.
Value * getAddress() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:376
Value * getValue() const
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:96
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:264
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:363
const MCPhysReg * ImplicitDefs
Definition: MCInstrDesc.h:173
size_t size() const
Definition: BasicBlock.h:262
MachineFrameInfo & MFI
Definition: FastISel.h:207
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1722
bool SkipTargetIndependentISel
Definition: FastISel.h:216
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:426
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:161
DILocalVariable * getVariable() const
Definition: IntrinsicInst.h:80
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We&#39;re checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2096
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:136
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:860
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned char TargetFlags=0)
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:734
const Triple & getTargetTriple() const
DIExpression * getExpression() const
Definition: IntrinsicInst.h:84
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:869
arg_iterator arg_begin()
Definition: Function.h:603
The memory access is non-temporal.
Class to represent integer types.
Definition: DerivedTypes.h:40
bool selectXRayCustomEvent(const CallInst *II)
Definition: FastISel.cpp:864
const TargetRegisterInfo & TRI
Definition: FastISel.h:214
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:877
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:194
Extended Value Type.
Definition: ValueTypes.h:34
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:527
bool selectFNeg(const User *I)
Emit an FNeg operation.
Definition: FastISel.cpp:1498
This class contains a discriminated union of information about pointers in memory operands...
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:875
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:95
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call...
Definition: FastISel.cpp:885
const TargetInstrInfo & TII
Definition: FastISel.h:212
MachineBasicBlock * MBB
MBB - The current block.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:472
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
signed greater than
Definition: InstrTypes.h:887
MachineInstr * LastLocalValue
The position of the last instruction for materializing constants for use in the current block...
Definition: FastISel.h:222
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:639
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge&#39;s probability, relative to other out-edges of the Src.
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:362
The memory access writes data.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:51
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:864
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:240
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1727
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:410
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1742
DenseMap< unsigned, unsigned > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
IterTy arg_begin() const
Definition: CallSite.h:553
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:874
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:63
Provides information about what library functions are available for the current target.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:27
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtains the branch weight and adds TrueMBB and FalseMBB to ...
Definition: FastISel.cpp:1479
const TargetLibraryInfo * LibInfo
Definition: FastISel.h:215
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:682
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:564
signed less than
Definition: InstrTypes.h:889
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:642
reg_iterator reg_begin(unsigned RegNo) const
unsigned fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
Emit a MachineInstr with two register operands and a result register in the given register class...
Definition: FastISel.cpp:1845
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:560
void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:320
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:225
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:175
void startNewBlock()
Set the current block to which generated machine instructions will be appended, and clear the local C...
Definition: FastISel.cpp:124
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
signed less or equal
Definition: InstrTypes.h:890
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1312
Target - Wrapper for Target specific information.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1737
SmallVector< unsigned, 16 > OutRegs
Definition: FastISel.h:97
const DataLayout & DL
Definition: FastISel.h:211
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:407
BranchProbabilityInfo * BPI
This file defines the FastISel class.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:241
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:445
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
bool use_empty(unsigned RegNo) const
use_empty - Return true if there are no instructions using the specified register.
bool isTailCall() const
DebugLoc DbgLoc
Definition: FastISel.h:209
bool selectCall(const User *Call)
Definition: FastISel.cpp:1081
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
Flags
Flags values. These may be or&#39;d together.
amdgpu Simplify well known AMD library false Value Value * Arg
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:139
The memory access reads data.
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:532
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:389
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:405
Function * getCalledFunction() const
Return the function called, or null if this is an indirect function invocation.
Representation of each machine instruction.
Definition: MachineInstr.h:59
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:934
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering...
Definition: FastISel.cpp:1716
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:726
bool selectExtractValue(const User *I)
Definition: FastISel.cpp:1541
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:284
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:362
MachineRegisterInfo & MRI
Definition: FastISel.h:206
bool hasOneUse(unsigned RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:515
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:121
unsigned greater or equal
Definition: InstrTypes.h:884
This represents the llvm.dbg.value instruction.
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:907
static bool isFNeg(const Value *V, bool IgnoreZeroSign=false)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
StringRef getName() const
Return a constant reference to the value&#39;s name.
Definition: Value.cpp:218
TargetOptions Options
Definition: TargetMachine.h:96
Establish a view to a call site for examination.
Definition: CallSite.h:695
static MachineOperand CreateImm(int64_t Val)
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
The memory access always returns the same value (or traps).
virtual unsigned fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:478
iterator end()
Definition: DenseMap.h:79
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:193
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:868
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool isUnconditional() const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block...
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:872
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:311
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const
Return the number of registers that this ValueType will eventually require.
Type * getType() const
Return the type of the instruction that generated this call site.
Definition: CallSite.h:264
const TargetLowering & TLI
Definition: FastISel.h:213
bool isReg() const
isReg - Tests if this is a MO_Register operand.
unsigned createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:1794
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:183
unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1751
unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:2005
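A sketch of extracting the low 32 bits of a 64-bit value; the sub-register index is target-specific, and X86::sub_32bit is used here only as an illustration:

unsigned Lo32 =
    fastEmitInst_extractsubreg(MVT::i32, SrcReg, /*Op0IsKill=*/true, X86::sub_32bit);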
MachineBasicBlock::iterator InsertPt
Definition: FastISel.h:312
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef())
void GetReturnInfo(Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags...
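A sketch of the typical call, computing how a function's return type splits into legal output values (F is assumed to be the Function being selected):

SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);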
user_iterator user_begin()
Definition: Value.h:371
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1699
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual bool fastLowerArguments()
This method is called by target-independent code to do target-specific argument lowering.
Definition: FastISel.cpp:1712
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:863
LLVM Value Representation.
Definition: Value.h:73
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:873
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:388
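A one-line sketch, assuming I is a StoreInst whose stored value's size is needed:

uint64_t StoreBytes = DL.getTypeStoreSize(I->getValueOperand()->getType());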
FunctionType * getFunctionType() const
Definition: CallSite.h:320
static const Function * getParent(const Value *V)
#define DEBUG(X)
Definition: Debug.h:118
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:109
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
bool hasOneUse() const
Return true if there is exactly one user of this value.
Definition: Value.h:408
unsigned greater than
Definition: InstrTypes.h:883
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
unsigned TrapUnreachable
Emit a target-specific trap instruction for 'unreachable' IR instructions.
virtual bool fastSelectInstruction(const Instruction *I) = 0
This method is called by target-independent code when the normal FastISel process fails to select an ...
unsigned fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
Definition: FastISel.cpp:1944
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty...
Definition: Type.cpp:98
Conversion operators.
Definition: ISDOpcodes.h:442
const TerminatorInst * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:120
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:204
const Value * stripInBoundsConstantOffsets() const
Strip off pointer casts and all-constant inbounds GEPs.
Definition: Value.cpp:535
void setIsDebug(bool Val=true)
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:451
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:126
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:865
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition: Analysis.cpp:37
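A sketch of the common use with an extractvalue instruction (EVI assumed to be an ExtractValueInst*), yielding the flattened scalar index of the selected member:

unsigned LinearIdx = ComputeLinearIndex(EVI->getAggregateOperand()->getType(),
                                        EVI->idx_begin(), EVI->idx_end());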
void fastEmitBranch(MachineBasicBlock *MBB, const DebugLoc &DL)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor...
Definition: FastISel.cpp:1459
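A sketch of lowering an unconditional IR branch with this helper (BI assumed to be an unconditional BranchInst*):

MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
fastEmitBranch(TargetMBB, BI->getDebugLoc());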
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
This represents the llvm.dbg.declare instruction.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
Definition: ISDOpcodes.h:561
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
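A sketch combining the two EVT helpers above; Ctx is assumed to be the current LLVMContext, and the promotion target of i1 varies by target:

EVT SmallVT = EVT::getIntegerVT(Ctx, 1);
EVT LegalVT = TLI.getTypeToTransformTo(Ctx, SmallVT); // e.g. i8 or i32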
static MachineOperand CreateFI(int Idx)
bool use_empty() const
Definition: Value.h:322
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:537
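A tiny worked example, e.g. turning a power-of-two scale into a shift amount:

uint64_t Scale = 8;
unsigned ShAmt = Log2_64(Scale); // ShAmt == 3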
Type * getElementType() const
Definition: DerivedTypes.h:486
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
Definition: Attributes.cpp:870
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:862
signed greater or equal
Definition: InstrTypes.h:888
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
This class contains meta information specific to a module.
This file describes how to lower LLVM code to machine code.
const BasicBlock * getParent() const
Definition: Instruction.h:66
unsigned InitializeRegForValue(const Value *V)
gep_type_iterator gep_type_begin(const User *GEP)
std::pair< unsigned, bool > getRegForGEPIndex(const Value *V)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:339
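A sketch of consuming the returned pair when lowering a GEP index (Idx assumed to be the index Value*):

std::pair<unsigned, bool> IdxReg = getRegForGEPIndex(Idx);
unsigned Reg = IdxReg.first;   // virtual register holding the extended index
bool IsKill = IdxReg.second;   // whether this use kills the register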