LLVM 4.0.0
FastISel.cpp
1 //===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the FastISel class.
11 //
12 // "Fast" instruction selection is designed to emit very poor code quickly.
13 // Also, it is not designed to be able to do much lowering, so most illegal
14 // types (e.g. i64 on 32-bit targets) and operations are not supported. It is
15 // also not intended to be able to do much optimization, except in a few cases
16 // where doing optimizations reduces overall compile time. For example, folding
17 // constants into immediate fields is often done, because it's cheap and it
18 // reduces the number of instructions later phases have to examine.
19 //
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
24 //
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
32 // in -O0 compiles.
33 //
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
39 //
40 //===----------------------------------------------------------------------===//
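// In a typical -O0 pipeline (a sketch of the surrounding machinery, for
// orientation): SelectionDAGISel obtains a FastISel instance from the target
// via TargetLowering::createFastISel(FuncInfo, LibInfo) and calls
// selectInstruction() on each IR instruction, falling back to the full
// SelectionDAG path whenever that returns false.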
41 
42 #include "llvm/ADT/Optional.h"
43 #include "llvm/ADT/Statistic.h"
45 #include "llvm/Analysis/Loads.h"
47 #include "llvm/CodeGen/Analysis.h"
48 #include "llvm/CodeGen/FastISel.h"
54 #include "llvm/CodeGen/StackMaps.h"
55 #include "llvm/IR/DataLayout.h"
56 #include "llvm/IR/DebugInfo.h"
57 #include "llvm/IR/Function.h"
59 #include "llvm/IR/GlobalVariable.h"
60 #include "llvm/IR/Instructions.h"
61 #include "llvm/IR/IntrinsicInst.h"
62 #include "llvm/IR/Mangler.h"
63 #include "llvm/IR/Operator.h"
64 #include "llvm/Support/Debug.h"
71 using namespace llvm;
72 
73 #define DEBUG_TYPE "isel"
74 
75 STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
76  "target-independent selector");
77 STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
78  "target-specific selector");
79 STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
80 
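/// Copy the sext/zext/inreg/sret/byval/... parameter attributes of argument
/// AttrIdx of call site CS into this ArgListEntry.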
81 void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
82                                            unsigned AttrIdx) {
83  IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
84  IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
85  IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
86  IsSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
87  IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
88  IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
89  IsInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
90  IsReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
91  IsSwiftSelf = CS->paramHasAttr(AttrIdx, Attribute::SwiftSelf);
92  IsSwiftError = CS->paramHasAttr(AttrIdx, Attribute::SwiftError);
93  Alignment = CS->getParamAlignment(AttrIdx);
94 }
95 
96 /// Set the current block to which generated machine instructions will be
97 /// appended, and clear the local CSE map.
98 void FastISel::startNewBlock() {
99   LocalValueMap.clear();
100 
101  // Instructions are appended to FuncInfo.MBB. If the basic block already
102  // contains labels or copies, use the last instruction as the last local
103  // value.
104  EmitStartPt = nullptr;
105  if (!FuncInfo.MBB->empty())
106     EmitStartPt = &FuncInfo.MBB->back();
107   LastLocalValue = EmitStartPt;
108 }
109 
110 bool FastISel::lowerArguments() {
111   if (!FuncInfo.CanLowerReturn)
112     // Fallback to SDISel argument lowering code to deal with sret pointer
113  // parameter.
114  return false;
115 
116  if (!fastLowerArguments())
117  return false;
118 
119  // Enter arguments into ValueMap for uses in non-entry BBs.
120   for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
121                                     E = FuncInfo.Fn->arg_end();
122  I != E; ++I) {
123     DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
124     assert(VI != LocalValueMap.end() && "Missed an argument?");
125  FuncInfo.ValueMap[&*I] = VI->second;
126  }
127  return true;
128 }
129 
130 void FastISel::flushLocalValueMap() {
131   LocalValueMap.clear();
132   LastLocalValue = EmitStartPt;
133   recomputeInsertPt();
134   SavedInsertPt = FuncInfo.InsertPt;
135 }
136 
137 bool FastISel::hasTrivialKill(const Value *V) {
138   // Don't consider constants or arguments to have trivial kills.
139  const Instruction *I = dyn_cast<Instruction>(V);
140  if (!I)
141  return false;
142 
143  // No-op casts are trivially coalesced by fast-isel.
144  if (const auto *Cast = dyn_cast<CastInst>(I))
145  if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
146  !hasTrivialKill(Cast->getOperand(0)))
147  return false;
148 
149   // Even though the value might have only one use in the LLVM IR, it is
150   // possible that FastISel might fold the use into another instruction and
151   // now there is more than one use at the Machine Instruction level.
152  unsigned Reg = lookUpRegForValue(V);
153  if (Reg && !MRI.use_empty(Reg))
154  return false;
155 
156  // GEPs with all zero indices are trivially coalesced by fast-isel.
157  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
158  if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
159  return false;
160 
161  // Only instructions with a single use in the same basic block are considered
162  // to have trivial kills.
163  return I->hasOneUse() &&
164  !(I->getOpcode() == Instruction::BitCast ||
165  I->getOpcode() == Instruction::PtrToInt ||
166  I->getOpcode() == Instruction::IntToPtr) &&
167  cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
168 }
169 
170 unsigned FastISel::getRegForValue(const Value *V) {
171  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
172  // Don't handle non-simple values in FastISel.
173  if (!RealVT.isSimple())
174  return 0;
175 
176  // Ignore illegal types. We must do this before looking up the value
177  // in ValueMap because Arguments are given virtual registers regardless
178  // of whether FastISel can handle them.
179  MVT VT = RealVT.getSimpleVT();
180  if (!TLI.isTypeLegal(VT)) {
181  // Handle integer promotions, though, because they're common and easy.
182  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
183  VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
184  else
185  return 0;
186  }
187 
188  // Look up the value to see if we already have a register for it.
189  unsigned Reg = lookUpRegForValue(V);
190  if (Reg)
191  return Reg;
192 
193  // In bottom-up mode, just create the virtual register which will be used
194  // to hold the value. It will be materialized later.
195  if (isa<Instruction>(V) &&
196  (!isa<AllocaInst>(V) ||
197  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
198     return FuncInfo.InitializeRegForValue(V);
199 
200  SavePoint SaveInsertPt = enterLocalValueArea();
201 
202  // Materialize the value in a register. Emit any instructions in the
203  // local value area.
204  Reg = materializeRegForValue(V, VT);
205 
206  leaveLocalValueArea(SaveInsertPt);
207 
208  return Reg;
209 }
210 
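/// Emit target-independent code to materialize V (integer and FP constants,
/// null pointers, static allocas, constant expressions and undef) into a
/// register of type VT; returns 0 if no strategy applies.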
211 unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
212  unsigned Reg = 0;
213  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
214  if (CI->getValue().getActiveBits() <= 64)
215  Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
216  } else if (isa<AllocaInst>(V))
217  Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
218  else if (isa<ConstantPointerNull>(V))
219  // Translate this as an integer zero so that it can be
220  // local-CSE'd with actual integer zeros.
221     Reg = getRegForValue(
222         Constant::getNullValue(DL.getIntPtrType(V->getContext())));
223  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
224  if (CF->isNullValue())
225  Reg = fastMaterializeFloatZero(CF);
226  else
227  // Try to emit the constant directly.
228  Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
229 
230  if (!Reg) {
231  // Try to emit the constant by using an integer constant with a cast.
232  const APFloat &Flt = CF->getValueAPF();
233  EVT IntVT = TLI.getPointerTy(DL);
234 
235  uint64_t x[2];
236  uint32_t IntBitWidth = IntVT.getSizeInBits();
237  bool isExact;
238  (void)Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
239  APFloat::rmTowardZero, &isExact);
240  if (isExact) {
241  APInt IntVal(IntBitWidth, x);
242 
243         unsigned IntegerReg =
244             getRegForValue(ConstantInt::get(V->getContext(), IntVal));
245  if (IntegerReg != 0)
246  Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
247  /*Kill=*/false);
248  }
249  }
250  } else if (const auto *Op = dyn_cast<Operator>(V)) {
251  if (!selectOperator(Op, Op->getOpcode()))
252  if (!isa<Instruction>(Op) ||
253  !fastSelectInstruction(cast<Instruction>(Op)))
254  return 0;
255  Reg = lookUpRegForValue(Op);
256  } else if (isa<UndefValue>(V)) {
257     Reg = createResultReg(TLI.getRegClassFor(VT));
258     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
259             TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
260  }
261  return Reg;
262 }
263 
264 /// Helper for getRegForValue. This function is called when the value isn't
265 /// already available in a register and must be materialized with new
266 /// instructions.
267 unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
268  unsigned Reg = 0;
269  // Give the target-specific code a try first.
270  if (isa<Constant>(V))
271  Reg = fastMaterializeConstant(cast<Constant>(V));
272 
273  // If target-specific code couldn't or didn't want to handle the value, then
274  // give target-independent code a try.
275  if (!Reg)
276  Reg = materializeConstant(V, VT);
277 
278  // Don't cache constant materializations in the general ValueMap.
279  // To do so would require tracking what uses they dominate.
280  if (Reg) {
281  LocalValueMap[V] = Reg;
282     LastLocalValue = MRI.getVRegDef(Reg);
283   }
284  return Reg;
285 }
286 
287 unsigned FastISel::lookUpRegForValue(const Value *V) {
288  // Look up the value to see if we already have a register for it. We
289  // cache values defined by Instructions across blocks, and other values
290  // only locally. This is because Instructions already have the SSA
291  // def-dominates-use requirement enforced.
292   DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
293   if (I != FuncInfo.ValueMap.end())
294  return I->second;
295  return LocalValueMap[V];
296 }
297 
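/// Record that IR value I now lives in Reg (and, for aggregates, in the
/// NumRegs consecutive registers starting at Reg), registering fixups if a
/// different register had already been assigned.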
298 void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
299  if (!isa<Instruction>(I)) {
300  LocalValueMap[I] = Reg;
301  return;
302  }
303 
304  unsigned &AssignedReg = FuncInfo.ValueMap[I];
305  if (AssignedReg == 0)
306  // Use the new register.
307  AssignedReg = Reg;
308  else if (Reg != AssignedReg) {
309  // Arrange for uses of AssignedReg to be replaced by uses of Reg.
310  for (unsigned i = 0; i < NumRegs; i++)
311  FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
312 
313  AssignedReg = Reg;
314  }
315 }
316 
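/// Materialize a GEP index operand into a register of pointer width,
/// sign-extending or truncating as needed; returns the register and whether
/// its last use is a kill.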
317 std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
318  unsigned IdxN = getRegForValue(Idx);
319  if (IdxN == 0)
320  // Unhandled operand. Halt "fast" selection and bail.
321  return std::pair<unsigned, bool>(0, false);
322 
323  bool IdxNIsKill = hasTrivialKill(Idx);
324 
325  // If the index is smaller or larger than intptr_t, truncate or extend it.
326  MVT PtrVT = TLI.getPointerTy(DL);
327  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
328  if (IdxVT.bitsLT(PtrVT)) {
329  IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
330  IdxNIsKill);
331  IdxNIsKill = true;
332  } else if (IdxVT.bitsGT(PtrVT)) {
333  IdxN =
334  fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
335  IdxNIsKill = true;
336  }
337  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
338 }
339 
340 void FastISel::recomputeInsertPt() {
341   if (getLastLocalValue()) {
342     FuncInfo.InsertPt = getLastLocalValue();
343     FuncInfo.MBB = FuncInfo.InsertPt->getParent();
344  ++FuncInfo.InsertPt;
345  } else
346     FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
347 
348  // Now skip past any EH_LABELs, which must remain at the beginning.
349  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
350  FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
351  ++FuncInfo.InsertPt;
352 }
353 
354 void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
355                               MachineBasicBlock::iterator E) {
356   assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
357  "Invalid iterator!");
358  while (I != E) {
359  MachineInstr *Dead = &*I;
360  ++I;
361  Dead->eraseFromParent();
362  ++NumFastIselDead;
363  }
364   recomputeInsertPt();
365 }
366 
367 FastISel::SavePoint FastISel::enterLocalValueArea() {
368   MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
369   DebugLoc OldDL = DbgLoc;
370   recomputeInsertPt();
371   DbgLoc = DebugLoc();
372  SavePoint SP = {OldInsertPt, OldDL};
373  return SP;
374 }
375 
376 void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
377   if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
378  LastLocalValue = &*std::prev(FuncInfo.InsertPt);
379 
380  // Restore the previous insert position.
381  FuncInfo.InsertPt = OldInsertPt.InsertPt;
382  DbgLoc = OldInsertPt.DL;
383 }
384 
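/// Select a binary IR operator by mapping it to the given ISD opcode,
/// preferring the register/immediate form when one operand is a constant.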
385 bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
386  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
387  if (VT == MVT::Other || !VT.isSimple())
388  // Unhandled type. Halt "fast" selection and bail.
389  return false;
390 
391  // We only handle legal types. For example, on x86-32 the instruction
392  // selector contains all of the 64-bit instructions from x86-64,
393  // under the assumption that i64 won't be used if the target doesn't
394  // support it.
395  if (!TLI.isTypeLegal(VT)) {
396  // MVT::i1 is special. Allow AND, OR, or XOR because they
397  // don't require additional zeroing, which makes them easy.
398  if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
399  ISDOpcode == ISD::XOR))
400  VT = TLI.getTypeToTransformTo(I->getContext(), VT);
401  else
402  return false;
403  }
404 
405  // Check if the first operand is a constant, and handle it as "ri". At -O0,
406  // we don't have anything that canonicalizes operand order.
407  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
408  if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
409  unsigned Op1 = getRegForValue(I->getOperand(1));
410  if (!Op1)
411  return false;
412  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
413 
414  unsigned ResultReg =
415  fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
416  CI->getZExtValue(), VT.getSimpleVT());
417  if (!ResultReg)
418  return false;
419 
420  // We successfully emitted code for the given LLVM Instruction.
421  updateValueMap(I, ResultReg);
422  return true;
423  }
424 
425  unsigned Op0 = getRegForValue(I->getOperand(0));
426  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
427  return false;
428  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
429 
430  // Check if the second operand is a constant and handle it appropriately.
431  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
432  uint64_t Imm = CI->getSExtValue();
433 
434  // Transform "sdiv exact X, 8" -> "sra X, 3".
435  if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
436  cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
437  Imm = Log2_64(Imm);
438  ISDOpcode = ISD::SRA;
439  }
440 
441  // Transform "urem x, pow2" -> "and x, pow2-1".
442  if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
443  isPowerOf2_64(Imm)) {
444  --Imm;
445  ISDOpcode = ISD::AND;
446  }
447 
448  unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
449  Op0IsKill, Imm, VT.getSimpleVT());
450  if (!ResultReg)
451  return false;
452 
453  // We successfully emitted code for the given LLVM Instruction.
454  updateValueMap(I, ResultReg);
455  return true;
456  }
457 
458  unsigned Op1 = getRegForValue(I->getOperand(1));
459  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
460  return false;
461  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
462 
463  // Now we have both operands in registers. Emit the instruction.
464  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
465  ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
466  if (!ResultReg)
467  // Target-specific code wasn't able to find a machine opcode for
468  // the given ISD opcode and type. Halt "fast" selection and bail.
469  return false;
470 
471  // We successfully emitted code for the given LLVM Instruction.
472  updateValueMap(I, ResultReg);
473  return true;
474 }
475 
476 bool FastISel::selectGetElementPtr(const User *I) {
477   unsigned N = getRegForValue(I->getOperand(0));
478  if (!N) // Unhandled operand. Halt "fast" selection and bail.
479  return false;
480  bool NIsKill = hasTrivialKill(I->getOperand(0));
481 
482  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
483  // into a single N = N + TotalOffset.
484  uint64_t TotalOffs = 0;
485  // FIXME: What's a good SWAG number for MaxOffs?
486  uint64_t MaxOffs = 2048;
487  MVT VT = TLI.getPointerTy(DL);
488  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
489  GTI != E; ++GTI) {
490  const Value *Idx = GTI.getOperand();
491  if (StructType *StTy = GTI.getStructTypeOrNull()) {
492  uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
493  if (Field) {
494  // N = N + Offset
495  TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
496  if (TotalOffs >= MaxOffs) {
497  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
498  if (!N) // Unhandled operand. Halt "fast" selection and bail.
499  return false;
500  NIsKill = true;
501  TotalOffs = 0;
502  }
503  }
504  } else {
505  Type *Ty = GTI.getIndexedType();
506 
507  // If this is a constant subscript, handle it quickly.
508  if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
509  if (CI->isZero())
510  continue;
511  // N = N + Offset
512  uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
513  TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
514  if (TotalOffs >= MaxOffs) {
515  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
516  if (!N) // Unhandled operand. Halt "fast" selection and bail.
517  return false;
518  NIsKill = true;
519  TotalOffs = 0;
520  }
521  continue;
522  }
523  if (TotalOffs) {
524  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
525  if (!N) // Unhandled operand. Halt "fast" selection and bail.
526  return false;
527  NIsKill = true;
528  TotalOffs = 0;
529  }
530 
531  // N = N + Idx * ElementSize;
532  uint64_t ElementSize = DL.getTypeAllocSize(Ty);
533  std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
534  unsigned IdxN = Pair.first;
535  bool IdxNIsKill = Pair.second;
536  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
537  return false;
538 
539  if (ElementSize != 1) {
540  IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
541  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
542  return false;
543  IdxNIsKill = true;
544  }
545  N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
546  if (!N) // Unhandled operand. Halt "fast" selection and bail.
547  return false;
548  }
549  }
550  if (TotalOffs) {
551  N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
552  if (!N) // Unhandled operand. Halt "fast" selection and bail.
553  return false;
554  }
555 
556  // We successfully emitted code for the given LLVM Instruction.
557  updateValueMap(I, N);
558  return true;
559 }
560 
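/// Append the live-variable operands of a stackmap/patchpoint call, starting
/// at argument StartIdx, encoding constants, frame indices and registers as
/// MachineOperands.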
561 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
562  const CallInst *CI, unsigned StartIdx) {
563  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
564  Value *Val = CI->getArgOperand(i);
565  // Check for constants and encode them with a StackMaps::ConstantOp prefix.
566  if (const auto *C = dyn_cast<ConstantInt>(Val)) {
567       Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
568       Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
569  } else if (isa<ConstantPointerNull>(Val)) {
570       Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
571       Ops.push_back(MachineOperand::CreateImm(0));
572     } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
573  // Values coming from a stack location also require a special encoding,
574  // but that is added later on by the target specific frame index
575  // elimination implementation.
576  auto SI = FuncInfo.StaticAllocaMap.find(AI);
577  if (SI != FuncInfo.StaticAllocaMap.end())
578  Ops.push_back(MachineOperand::CreateFI(SI->second));
579  else
580  return false;
581  } else {
582  unsigned Reg = getRegForValue(Val);
583  if (!Reg)
584  return false;
585  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
586  }
587  }
588  return true;
589 }
590 
591 bool FastISel::selectStackmap(const CallInst *I) {
592   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
593  // [live variables...])
594   assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
595          "Stackmap cannot return a value.");
596 
597  // The stackmap intrinsic only records the live variables (the arguments
598  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
599  // intrinsic, this won't be lowered to a function call. This means we don't
600  // have to worry about calling conventions and target-specific lowering code.
601  // Instead we perform the call lowering right here.
602  //
603  // CALLSEQ_START(0...)
604  // STACKMAP(id, nbytes, ...)
605  // CALLSEQ_END(0, 0)
606  //
607   SmallVector<MachineOperand, 32> Ops;
608 
609  // Add the <id> and <numBytes> constants.
610  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
611  "Expected a constant integer.");
612  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
613  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
614 
615  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
616  "Expected a constant integer.");
617  const auto *NumBytes =
618  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
619  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
620 
621  // Push live variables for the stack map (skipping the first two arguments
622  // <id> and <numBytes>).
623  if (!addStackMapLiveVars(Ops, I, 2))
624  return false;
625 
626  // We are not adding any register mask info here, because the stackmap doesn't
627  // clobber anything.
628 
629  // Add scratch registers as implicit def and early clobber.
630   CallingConv::ID CC = I->getCallingConv();
631   const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
632  for (unsigned i = 0; ScratchRegs[i]; ++i)
633  Ops.push_back(MachineOperand::CreateReg(
634  ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
635  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
636 
637  // Issue CALLSEQ_START
638  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
639  auto Builder =
640  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
641  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
642  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
643  Builder.addImm(0);
644 
645  // Issue STACKMAP.
646   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
647                                     TII.get(TargetOpcode::STACKMAP));
648  for (auto const &MO : Ops)
649  MIB.addOperand(MO);
650 
651  // Issue CALLSEQ_END
652  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
653  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
654  .addImm(0)
655  .addImm(0);
656 
657  // Inform the Frame Information that we have a stackmap in this function.
658   FuncInfo.MF->getFrameInfo().setHasStackMap(true);
659 
660  return true;
661 }
662 
663 /// \brief Lower an argument list according to the target calling convention.
664 ///
665 /// This is a helper for lowering intrinsics that follow a target calling
666 /// convention or require stack pointer adjustment. Only a subset of the
667 /// intrinsic's operands need to participate in the calling convention.
668 bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
669  unsigned NumArgs, const Value *Callee,
670  bool ForceRetVoidTy, CallLoweringInfo &CLI) {
671  ArgListTy Args;
672  Args.reserve(NumArgs);
673 
674  // Populate the argument list.
675  // Attributes for args start at offset 1, after the return attribute.
676  ImmutableCallSite CS(CI);
677  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
678  ArgI != ArgE; ++ArgI) {
679  Value *V = CI->getOperand(ArgI);
680 
681  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
682 
683  ArgListEntry Entry;
684  Entry.Val = V;
685  Entry.Ty = V->getType();
686  Entry.setAttributes(&CS, AttrI);
687  Args.push_back(Entry);
688  }
689 
690  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
691  : CI->getType();
692  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
693 
694  return lowerCallTo(CLI);
695 }
696 
697 FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
698     const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
699  StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
700  SmallString<32> MangledName;
701  Mangler::getNameWithPrefix(MangledName, Target, DL);
702  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
703  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
704 }
705 
706 bool FastISel::selectPatchpoint(const CallInst *I) {
707   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
708  // i32 <numBytes>,
709  // i8* <target>,
710  // i32 <numArgs>,
711  // [Args...],
712  // [live variables...])
713   CallingConv::ID CC = I->getCallingConv();
714   bool IsAnyRegCC = CC == CallingConv::AnyReg;
715  bool HasDef = !I->getType()->isVoidTy();
716   Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
717 
718  // Get the real number of arguments participating in the call <numArgs>
719  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
720  "Expected a constant integer.");
721  const auto *NumArgsVal =
722  cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
723  unsigned NumArgs = NumArgsVal->getZExtValue();
724 
725  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
726  // This includes all meta-operands up to but not including CC.
727  unsigned NumMetaOpers = PatchPointOpers::CCPos;
728  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
729  "Not enough arguments provided to the patchpoint intrinsic");
730 
731  // For AnyRegCC the arguments are lowered later on manually.
732  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
733  CallLoweringInfo CLI;
734  CLI.setIsPatchPoint();
735  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
736  return false;
737 
738  assert(CLI.Call && "No call instruction specified.");
739 
740   SmallVector<MachineOperand, 32> Ops;
741 
742  // Add an explicit result reg if we use the anyreg calling convention.
743  if (IsAnyRegCC && HasDef) {
744  assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
745     CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
746     CLI.NumResultRegs = 1;
747  Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
748  }
749 
750  // Add the <id> and <numBytes> constants.
751  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
752  "Expected a constant integer.");
753  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
754  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
755 
756  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
757  "Expected a constant integer.");
758  const auto *NumBytes =
759  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
760  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
761 
762  // Add the call target.
763  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
764  uint64_t CalleeConstAddr =
765  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
766  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
767  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
768  if (C->getOpcode() == Instruction::IntToPtr) {
769  uint64_t CalleeConstAddr =
770  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
771  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
772  } else
773  llvm_unreachable("Unsupported ConstantExpr.");
774  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
775     Ops.push_back(MachineOperand::CreateGA(GV, 0));
776   } else if (isa<ConstantPointerNull>(Callee))
777     Ops.push_back(MachineOperand::CreateImm(0));
778   else
779  llvm_unreachable("Unsupported callee address.");
780 
781  // Adjust <numArgs> to account for any arguments that have been passed on
782  // the stack instead.
783  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
784  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
785 
786  // Add the calling convention
787  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
788 
789  // Add the arguments we omitted previously. The register allocator should
790  // place these in any free register.
791  if (IsAnyRegCC) {
792  for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
793  unsigned Reg = getRegForValue(I->getArgOperand(i));
794  if (!Reg)
795  return false;
796  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
797  }
798  }
799 
800  // Push the arguments from the call instruction.
801  for (auto Reg : CLI.OutRegs)
802  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
803 
804  // Push live variables for the stack map.
805  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
806  return false;
807 
808  // Push the register mask info.
809   Ops.push_back(MachineOperand::CreateRegMask(
810       TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
811 
812  // Add scratch registers as implicit def and early clobber.
813  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
814  for (unsigned i = 0; ScratchRegs[i]; ++i)
815     Ops.push_back(MachineOperand::CreateReg(
816         ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
817  /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
818 
819  // Add implicit defs (return values).
820  for (auto Reg : CLI.InRegs)
821  Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
822  /*IsImpl=*/true));
823 
824  // Insert the patchpoint instruction before the call generated by the target.
825   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
826                                     TII.get(TargetOpcode::PATCHPOINT));
827 
828  for (auto &MO : Ops)
829  MIB.addOperand(MO);
830 
831  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
832 
833  // Delete the original call instruction.
834  CLI.Call->eraseFromParent();
835 
836  // Inform the Frame Information that we have a patchpoint in this function.
837   FuncInfo.MF->getFrameInfo().setHasPatchPoint(true);
838 
839  if (CLI.NumResultRegs)
840     updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
841   return true;
842 }
843 
844 /// Returns an AttributeSet representing the attributes applied to the return
845 /// value of the given call.
846 static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
847   SmallVector<Attribute::AttrKind, 2> Attrs;
848   if (CLI.RetSExt)
849  Attrs.push_back(Attribute::SExt);
850  if (CLI.RetZExt)
851  Attrs.push_back(Attribute::ZExt);
852  if (CLI.IsInReg)
853  Attrs.push_back(Attribute::InReg);
854 
855  return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
856  Attrs);
857 }
858 
859 bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
860  unsigned NumArgs) {
861  MCContext &Ctx = MF->getContext();
862  SmallString<32> MangledName;
863  Mangler::getNameWithPrefix(MangledName, SymName, DL);
864  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
865  return lowerCallTo(CI, Sym, NumArgs);
866 }
867 
868 bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
869                            unsigned NumArgs) {
870  ImmutableCallSite CS(CI);
871 
872  FunctionType *FTy = CS.getFunctionType();
873  Type *RetTy = CS.getType();
874 
875  ArgListTy Args;
876  Args.reserve(NumArgs);
877 
878  // Populate the argument list.
879  // Attributes for args start at offset 1, after the return attribute.
880  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
881  Value *V = CI->getOperand(ArgI);
882 
883  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
884 
885  ArgListEntry Entry;
886  Entry.Val = V;
887  Entry.Ty = V->getType();
888  Entry.setAttributes(&CS, ArgI + 1);
889  Args.push_back(Entry);
890  }
891 
892  CallLoweringInfo CLI;
893  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
894 
895  return lowerCallTo(CLI);
896 }
897 
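/// Target-independent call lowering: compute the return-value and argument
/// flags for CLI, then hand the actual selection off to the target's
/// fastLowerCall implementation.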
898 bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
899   // Handle the incoming return values from the call.
900  CLI.clearIns();
901  SmallVector<EVT, 4> RetTys;
902  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
903 
904   SmallVector<ISD::OutputArg, 4> Outs;
905   GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
906 
907  bool CanLowerReturn = TLI.CanLowerReturn(
908  CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
909 
910  // FIXME: sret demotion isn't supported yet - bail out.
911  if (!CanLowerReturn)
912  return false;
913 
914  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
915  EVT VT = RetTys[I];
916  MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
917  unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
918  for (unsigned i = 0; i != NumRegs; ++i) {
919  ISD::InputArg MyFlags;
920  MyFlags.VT = RegisterVT;
921  MyFlags.ArgVT = VT;
922  MyFlags.Used = CLI.IsReturnValueUsed;
923  if (CLI.RetSExt)
924  MyFlags.Flags.setSExt();
925  if (CLI.RetZExt)
926  MyFlags.Flags.setZExt();
927  if (CLI.IsInReg)
928  MyFlags.Flags.setInReg();
929  CLI.Ins.push_back(MyFlags);
930  }
931  }
932 
933  // Handle all of the outgoing arguments.
934  CLI.clearOuts();
935  for (auto &Arg : CLI.getArgs()) {
936  Type *FinalType = Arg.Ty;
937  if (Arg.IsByVal)
938  FinalType = cast<PointerType>(Arg.Ty)->getElementType();
939  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
940  FinalType, CLI.CallConv, CLI.IsVarArg);
941 
942     ISD::ArgFlagsTy Flags;
943     if (Arg.IsZExt)
944  Flags.setZExt();
945  if (Arg.IsSExt)
946  Flags.setSExt();
947  if (Arg.IsInReg)
948  Flags.setInReg();
949  if (Arg.IsSRet)
950  Flags.setSRet();
951  if (Arg.IsSwiftSelf)
952  Flags.setSwiftSelf();
953  if (Arg.IsSwiftError)
954  Flags.setSwiftError();
955  if (Arg.IsByVal)
956  Flags.setByVal();
957  if (Arg.IsInAlloca) {
958  Flags.setInAlloca();
959  // Set the byval flag for CCAssignFn callbacks that don't know about
960  // inalloca. This way we can know how many bytes we should've allocated
961  // and how many bytes a callee cleanup function will pop. If we port
962  // inalloca to more targets, we'll have to add custom inalloca handling in
963  // the various CC lowering callbacks.
964  Flags.setByVal();
965  }
966  if (Arg.IsByVal || Arg.IsInAlloca) {
967  PointerType *Ty = cast<PointerType>(Arg.Ty);
968  Type *ElementTy = Ty->getElementType();
969  unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
970  // For ByVal, alignment should come from FE. BE will guess if this info is
971  // not there, but there are cases it cannot get right.
972  unsigned FrameAlign = Arg.Alignment;
973  if (!FrameAlign)
974  FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
975  Flags.setByValSize(FrameSize);
976  Flags.setByValAlign(FrameAlign);
977  }
978  if (Arg.IsNest)
979  Flags.setNest();
980  if (NeedsRegBlock)
981  Flags.setInConsecutiveRegs();
982  unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
983  Flags.setOrigAlign(OriginalAlignment);
984 
985  CLI.OutVals.push_back(Arg.Val);
986  CLI.OutFlags.push_back(Flags);
987  }
988 
989  if (!fastLowerCall(CLI))
990  return false;
991 
992  // Set all unused physreg defs as dead.
993  assert(CLI.Call && "No call instruction specified.");
994   CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
995 
996  if (CLI.NumResultRegs && CLI.CS)
997     updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);
998 
999  return true;
1000 }
1001 
1002 bool FastISel::lowerCall(const CallInst *CI) {
1003   ImmutableCallSite CS(CI);
1004 
1005  FunctionType *FuncTy = CS.getFunctionType();
1006  Type *RetTy = CS.getType();
1007 
1008  ArgListTy Args;
1009  ArgListEntry Entry;
1010  Args.reserve(CS.arg_size());
1011 
1012  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1013  i != e; ++i) {
1014  Value *V = *i;
1015 
1016  // Skip empty types
1017  if (V->getType()->isEmptyTy())
1018  continue;
1019 
1020  Entry.Val = V;
1021  Entry.Ty = V->getType();
1022 
1023  // Skip the first return-type Attribute to get to params.
1024  Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
1025  Args.push_back(Entry);
1026  }
1027 
1028  // Check if target-independent constraints permit a tail call here.
1029  // Target-dependent constraints are checked within fastLowerCall.
1030  bool IsTailCall = CI->isTailCall();
1031  if (IsTailCall && !isInTailCallPosition(CS, TM))
1032  IsTailCall = false;
1033 
1034  CallLoweringInfo CLI;
1035  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
1036  .setTailCall(IsTailCall);
1037 
1038  return lowerCallTo(CLI);
1039 }
1040 
1041 bool FastISel::selectCall(const User *I) {
1042   const CallInst *Call = cast<CallInst>(I);
1043 
1044  // Handle simple inline asms.
1045  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
1046  // If the inline asm has side effects, then make sure that no local value
1047  // lives across by flushing the local value map.
1048  if (IA->hasSideEffects())
1049  flushLocalValueMap();
1050 
1051  // Don't attempt to handle constraints.
1052  if (!IA->getConstraintString().empty())
1053  return false;
1054 
1055  unsigned ExtraInfo = 0;
1056  if (IA->hasSideEffects())
1057  ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1058  if (IA->isAlignStack())
1059  ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1060 
1061     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1062             TII.get(TargetOpcode::INLINEASM))
1063         .addExternalSymbol(IA->getAsmString().c_str())
1064  .addImm(ExtraInfo);
1065  return true;
1066  }
1067 
1068  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
1069  computeUsesVAFloatArgument(*Call, MMI);
1070 
1071  // Handle intrinsic function calls.
1072  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1073  return selectIntrinsicCall(II);
1074 
1075  // Usually, it does not make sense to initialize a value,
1076  // make an unrelated function call and use the value, because
1077  // it tends to be spilled on the stack. So, we move the pointer
1078  // to the last local value to the beginning of the block, so that
1079  // all the values which have already been materialized,
1080  // appear after the call. It also makes sense to skip intrinsics
1081  // since they tend to be inlined.
1082  flushLocalValueMap();
1083 
1084  return lowerCall(Call);
1085 }
1086 
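/// Handle the intrinsics that have target-independent lowerings (lifetime
/// markers, donothing/assume, debug info, objectsize, expect,
/// stackmap/patchpoint); everything else is forwarded to
/// fastLowerIntrinsicCall.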
1087 bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
1088   switch (II->getIntrinsicID()) {
1089  default:
1090  break;
1091  // At -O0 we don't care about the lifetime intrinsics.
1092  case Intrinsic::lifetime_start:
1093  case Intrinsic::lifetime_end:
1094  // The donothing intrinsic does, well, nothing.
1095  case Intrinsic::donothing:
1096  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1097  case Intrinsic::assume:
1098  return true;
1099  case Intrinsic::dbg_declare: {
1100  const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1101  assert(DI->getVariable() && "Missing variable");
1102  if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1103  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1104  return true;
1105  }
1106 
1107  const Value *Address = DI->getAddress();
1108  if (!Address || isa<UndefValue>(Address)) {
1109  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1110  return true;
1111  }
1112 
1113  unsigned Offset = 0;
1114     Optional<MachineOperand> Op;
1115     if (const auto *Arg = dyn_cast<Argument>(Address))
1116  // Some arguments' frame index is recorded during argument lowering.
1117  Offset = FuncInfo.getArgumentFrameIndex(Arg);
1118  if (Offset)
1119  Op = MachineOperand::CreateFI(Offset);
1120  if (!Op)
1121  if (unsigned Reg = lookUpRegForValue(Address))
1122  Op = MachineOperand::CreateReg(Reg, false);
1123 
1124  // If we have a VLA that has a "use" in a metadata node that's then used
1125  // here but it has no other uses, then we have a problem. E.g.,
1126  //
1127  // int foo (const int *x) {
1128  // char a[*x];
1129  // return 0;
1130  // }
1131  //
1132  // If we assign 'a' a vreg and fast isel later on has to use the selection
1133  // DAG isel, it will want to copy the value to the vreg. However, there are
1134  // no uses, which goes counter to what selection DAG isel expects.
1135  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1136  (!isa<AllocaInst>(Address) ||
1137  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1138       Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
1139                                      false);
1140 
1141  if (Op) {
1142       assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1143              "Expected inlined-at fields to agree");
1144  if (Op->isReg()) {
1145  Op->setIsDebug(true);
1146         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1147                 TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
1148  DI->getVariable(), DI->getExpression());
1149  } else
1150         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1151                 TII.get(TargetOpcode::DBG_VALUE))
1152  .addOperand(*Op)
1153  .addImm(0)
1154  .addMetadata(DI->getVariable())
1155  .addMetadata(DI->getExpression());
1156  } else {
1157  // We can't yet handle anything else here because it would require
1158  // generating code, thus altering codegen because of debug info.
1159  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1160  }
1161  return true;
1162  }
1163  case Intrinsic::dbg_value: {
1164  // This form of DBG_VALUE is target-independent.
1165  const DbgValueInst *DI = cast<DbgValueInst>(II);
1166  const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1167  const Value *V = DI->getValue();
1168     assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1169            "Expected inlined-at fields to agree");
1170  if (!V) {
1171  // Currently the optimizer can produce this; insert an undef to
1172  // help debugging. Probably the optimizer should not do this.
1173       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1174           .addReg(0U)
1175  .addImm(DI->getOffset())
1176  .addMetadata(DI->getVariable())
1177  .addMetadata(DI->getExpression());
1178  } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1179  if (CI->getBitWidth() > 64)
1180         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1181             .addCImm(CI)
1182  .addImm(DI->getOffset())
1183  .addMetadata(DI->getVariable())
1184  .addMetadata(DI->getExpression());
1185  else
1186         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1187             .addImm(CI->getZExtValue())
1188  .addImm(DI->getOffset())
1189  .addMetadata(DI->getVariable())
1190  .addMetadata(DI->getExpression());
1191  } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1192       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1193           .addFPImm(CF)
1194  .addImm(DI->getOffset())
1195  .addMetadata(DI->getVariable())
1196  .addMetadata(DI->getExpression());
1197  } else if (unsigned Reg = lookUpRegForValue(V)) {
1198  // FIXME: This does not handle register-indirect values at offset 0.
1199  bool IsIndirect = DI->getOffset() != 0;
1200  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1201  DI->getOffset(), DI->getVariable(), DI->getExpression());
1202  } else {
1203  // We can't yet handle anything else here because it would require
1204  // generating code, thus altering codegen because of debug info.
1205  DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1206  }
1207  return true;
1208  }
1209  case Intrinsic::objectsize: {
1210  ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
1211  unsigned long long Res = CI->isZero() ? -1ULL : 0;
1212  Constant *ResCI = ConstantInt::get(II->getType(), Res);
1213  unsigned ResultReg = getRegForValue(ResCI);
1214  if (!ResultReg)
1215  return false;
1216  updateValueMap(II, ResultReg);
1217  return true;
1218  }
1219  case Intrinsic::invariant_group_barrier:
1220  case Intrinsic::expect: {
1221  unsigned ResultReg = getRegForValue(II->getArgOperand(0));
1222  if (!ResultReg)
1223  return false;
1224  updateValueMap(II, ResultReg);
1225  return true;
1226  }
1227  case Intrinsic::experimental_stackmap:
1228  return selectStackmap(II);
1229  case Intrinsic::experimental_patchpoint_void:
1230  case Intrinsic::experimental_patchpoint_i64:
1231  return selectPatchpoint(II);
1232  }
1233 
1234  return fastLowerIntrinsicCall(II);
1235 }
1236 
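/// Select a simple cast (integer extension/truncation or int<->FP conversion)
/// when both the source and destination types are legal for the target.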
1237 bool FastISel::selectCast(const User *I, unsigned Opcode) {
1238  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1239  EVT DstVT = TLI.getValueType(DL, I->getType());
1240 
1241  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1242  !DstVT.isSimple())
1243  // Unhandled type. Halt "fast" selection and bail.
1244  return false;
1245 
1246  // Check if the destination type is legal.
1247  if (!TLI.isTypeLegal(DstVT))
1248  return false;
1249 
1250  // Check if the source operand is legal.
1251  if (!TLI.isTypeLegal(SrcVT))
1252  return false;
1253 
1254  unsigned InputReg = getRegForValue(I->getOperand(0));
1255  if (!InputReg)
1256  // Unhandled operand. Halt "fast" selection and bail.
1257  return false;
1258 
1259  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
1260 
1261  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1262  Opcode, InputReg, InputRegIsKill);
1263  if (!ResultReg)
1264  return false;
1265 
1266  updateValueMap(I, ResultReg);
1267  return true;
1268 }
1269 
1270 bool FastISel::selectBitCast(const User *I) {
1271   // If the bitcast doesn't change the type, just use the operand value.
1272  if (I->getType() == I->getOperand(0)->getType()) {
1273  unsigned Reg = getRegForValue(I->getOperand(0));
1274  if (!Reg)
1275  return false;
1276  updateValueMap(I, Reg);
1277  return true;
1278  }
1279 
1280  // Bitcasts of other values become reg-reg copies or BITCAST operators.
1281  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1282  EVT DstEVT = TLI.getValueType(DL, I->getType());
1283  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1284  !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1285  // Unhandled type. Halt "fast" selection and bail.
1286  return false;
1287 
1288  MVT SrcVT = SrcEVT.getSimpleVT();
1289  MVT DstVT = DstEVT.getSimpleVT();
1290  unsigned Op0 = getRegForValue(I->getOperand(0));
1291  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1292  return false;
1293  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1294 
1295  // First, try to perform the bitcast by inserting a reg-reg copy.
1296  unsigned ResultReg = 0;
1297  if (SrcVT == DstVT) {
1298  const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1299  const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1300  // Don't attempt a cross-class copy. It will likely fail.
1301  if (SrcClass == DstClass) {
1302  ResultReg = createResultReg(DstClass);
1303       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1304               TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1305  }
1306  }
1307 
1308  // If the reg-reg copy failed, select a BITCAST opcode.
1309  if (!ResultReg)
1310  ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1311 
1312  if (!ResultReg)
1313  return false;
1314 
1315  updateValueMap(I, ResultReg);
1316  return true;
1317 }
1318 
1319 // Remove local value instructions starting from the instruction after
1320 // SavedLastLocalValue to the current function insert point.
1321 void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1322 {
1323  MachineInstr *CurLastLocalValue = getLastLocalValue();
1324  if (CurLastLocalValue != SavedLastLocalValue) {
1325  // Find the first local value instruction to be deleted.
1326  // This is the instruction after SavedLastLocalValue if it is non-NULL.
1327  // Otherwise it's the first instruction in the block.
1328  MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1329  if (SavedLastLocalValue)
1330  ++FirstDeadInst;
1331  else
1332  FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1333  setLastLocalValue(SavedLastLocalValue);
1334  removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1335  }
1336 }
1337 
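/// Entry point used for each IR instruction: try target-independent selection
/// first, then the target-specific hook, removing any dead instructions that
/// were emitted along a failed path.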
1338 bool FastISel::selectInstruction(const Instruction *I) {
1339   MachineInstr *SavedLastLocalValue = getLastLocalValue();
1340  // Just before the terminator instruction, insert instructions to
1341  // feed PHI nodes in successor blocks.
1342  if (isa<TerminatorInst>(I)) {
1343  if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1344  // PHI node handling may have generated local value instructions,
1345  // even though it failed to handle all PHI nodes.
1346  // We remove these instructions because SelectionDAGISel will generate
1347  // them again.
1348  removeDeadLocalValueCode(SavedLastLocalValue);
1349  return false;
1350  }
1351  }
1352 
1353  // FastISel does not handle any operand bundles except OB_funclet.
1354   if (ImmutableCallSite CS = ImmutableCallSite(I))
1355     for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
1356  if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1357  return false;
1358 
1359  DbgLoc = I->getDebugLoc();
1360 
1361  SavedInsertPt = FuncInfo.InsertPt;
1362 
1363  if (const auto *Call = dyn_cast<CallInst>(I)) {
1364  const Function *F = Call->getCalledFunction();
1365     LibFunc::Func Func;
1366 
1367  // As a special case, don't handle calls to builtin library functions that
1368  // may be translated directly to target instructions.
1369  if (F && !F->hasLocalLinkage() && F->hasName() &&
1370  LibInfo->getLibFunc(F->getName(), Func) &&
1371         LibInfo->hasOptimizedCodeGen(Func))
1372       return false;
1373 
1374  // Don't handle Intrinsic::trap if a trap function is specified.
1375  if (F && F->getIntrinsicID() == Intrinsic::trap &&
1376  Call->hasFnAttr("trap-func-name"))
1377  return false;
1378  }
1379 
1380  // First, try doing target-independent selection.
1381   if (!SkipTargetIndependentISel) {
1382     if (selectOperator(I, I->getOpcode())) {
1383  ++NumFastIselSuccessIndependent;
1384  DbgLoc = DebugLoc();
1385  return true;
1386  }
1387  // Remove dead code.
1388     recomputeInsertPt();
1389     if (SavedInsertPt != FuncInfo.InsertPt)
1390  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1391  SavedInsertPt = FuncInfo.InsertPt;
1392  }
1393  // Next, try calling the target to attempt to handle the instruction.
1394  if (fastSelectInstruction(I)) {
1395  ++NumFastIselSuccessTarget;
1396  DbgLoc = DebugLoc();
1397  return true;
1398  }
1399  // Remove dead code.
1400   recomputeInsertPt();
1401   if (SavedInsertPt != FuncInfo.InsertPt)
1402  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1403 
1404  DbgLoc = DebugLoc();
1405  // Undo phi node updates, because they will be added again by SelectionDAG.
1406  if (isa<TerminatorInst>(I)) {
1407  // PHI node handling may have generated local value instructions.
1408  // We remove them because SelectionDAGISel will generate them again.
1409  removeDeadLocalValueCode(SavedLastLocalValue);
1410     FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
1411   }
1412  return false;
1413 }
1414 
1415 /// Emit an unconditional branch to the given block, unless it is the immediate
1416 /// (fall-through) successor, and update the CFG.
1417 void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
1418                               const DebugLoc &DbgLoc) {
1419  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
1420  FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1421  // For more accurate line information if this is the only instruction
1422  // in the block then emit it, otherwise we have the unconditional
1423  // fall-through case, which needs no instructions.
1424  } else {
1425  // The unconditional branch case.
1426  TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1427  SmallVector<MachineOperand, 0>(), DbgLoc);
1428  }
1429  if (FuncInfo.BPI) {
1430     auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
1431         FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
1432     FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
1433   } else
1434     FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
1435 }
1436 
1437 void FastISel::finishCondBranch(const BasicBlock *BranchBB,
1438                                 MachineBasicBlock *TrueMBB,
1439  MachineBasicBlock *FalseMBB) {
1440  // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
1441  // happen in degenerate IR and MachineIR forbids to have a block twice in the
1442  // successor/predecessor lists.
1443  if (TrueMBB != FalseMBB) {
1444  if (FuncInfo.BPI) {
1445  auto BranchProbability =
1446  FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1447       FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
1448     } else
1449       FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
1450   }
1451 
1452  fastEmitBranch(FalseMBB, DbgLoc);
1453 }
1454 
1455 /// Emit an FNeg operation.
1456 bool FastISel::selectFNeg(const User *I) {
1457   unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
1458  if (!OpReg)
1459  return false;
1460  bool OpRegIsKill = hasTrivialKill(I);
1461 
1462  // If the target has ISD::FNEG, use it.
1463  EVT VT = TLI.getValueType(DL, I->getType());
1464  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1465  OpReg, OpRegIsKill);
1466  if (ResultReg) {
1467  updateValueMap(I, ResultReg);
1468  return true;
1469  }
1470 
1471  // Bitcast the value to integer, twiddle the sign bit with xor,
1472  // and then bitcast it back to floating-point.
1473  if (VT.getSizeInBits() > 64)
1474  return false;
1475  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1476  if (!TLI.isTypeLegal(IntVT))
1477  return false;
1478 
1479  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1480  ISD::BITCAST, OpReg, OpRegIsKill);
1481  if (!IntReg)
1482  return false;
1483 
1484  unsigned IntResultReg = fastEmit_ri_(
1485  IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1486  UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1487  if (!IntResultReg)
1488  return false;
1489 
1490  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1491  IntResultReg, /*IsKill=*/true);
1492  if (!ResultReg)
1493  return false;
1494 
1495  updateValueMap(I, ResultReg);
1496  return true;
1497 }
1498 
1499 bool FastISel::selectExtractValue(const User *U) {
1500   const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1501  if (!EVI)
1502  return false;
1503 
1504  // Make sure we only try to handle extracts with a legal result. But also
1505  // allow i1 because it's easy.
1506  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1507  if (!RealVT.isSimple())
1508  return false;
1509  MVT VT = RealVT.getSimpleVT();
1510  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1511  return false;
1512 
1513  const Value *Op0 = EVI->getOperand(0);
1514  Type *AggTy = Op0->getType();
1515 
1516  // Get the base result register.
1517  unsigned ResultReg;
1518   DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
1519   if (I != FuncInfo.ValueMap.end())
1520  ResultReg = I->second;
1521  else if (isa<Instruction>(Op0))
1522  ResultReg = FuncInfo.InitializeRegForValue(Op0);
1523  else
1524  return false; // fast-isel can't handle aggregate constants at the moment
1525 
1526  // Get the actual result register, which is an offset from the base register.
1527  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1528 
1529  SmallVector<EVT, 4> AggValueVTs;
1530  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1531 
1532  for (unsigned i = 0; i < VTIndex; i++)
1533  ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1534 
1535  updateValueMap(EVI, ResultReg);
1536  return true;
1537 }
1538 
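/// Dispatch an IR instruction (or constant expression) to the matching
/// select* helper based on its opcode.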
1539 bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1540  switch (Opcode) {
1541  case Instruction::Add:
1542  return selectBinaryOp(I, ISD::ADD);
1543  case Instruction::FAdd:
1544  return selectBinaryOp(I, ISD::FADD);
1545  case Instruction::Sub:
1546  return selectBinaryOp(I, ISD::SUB);
1547  case Instruction::FSub:
1548  // FNeg is currently represented in LLVM IR as a special case of FSub.
1549  if (BinaryOperator::isFNeg(I))
1550  return selectFNeg(I);
1551  return selectBinaryOp(I, ISD::FSUB);
1552  case Instruction::Mul:
1553  return selectBinaryOp(I, ISD::MUL);
1554  case Instruction::FMul:
1555  return selectBinaryOp(I, ISD::FMUL);
1556  case Instruction::SDiv:
1557  return selectBinaryOp(I, ISD::SDIV);
1558  case Instruction::UDiv:
1559  return selectBinaryOp(I, ISD::UDIV);
1560  case Instruction::FDiv:
1561  return selectBinaryOp(I, ISD::FDIV);
1562  case Instruction::SRem:
1563  return selectBinaryOp(I, ISD::SREM);
1564  case Instruction::URem:
1565  return selectBinaryOp(I, ISD::UREM);
1566  case Instruction::FRem:
1567  return selectBinaryOp(I, ISD::FREM);
1568  case Instruction::Shl:
1569  return selectBinaryOp(I, ISD::SHL);
1570  case Instruction::LShr:
1571  return selectBinaryOp(I, ISD::SRL);
1572  case Instruction::AShr:
1573  return selectBinaryOp(I, ISD::SRA);
1574  case Instruction::And:
1575  return selectBinaryOp(I, ISD::AND);
1576  case Instruction::Or:
1577  return selectBinaryOp(I, ISD::OR);
1578  case Instruction::Xor:
1579  return selectBinaryOp(I, ISD::XOR);
1580 
1581  case Instruction::GetElementPtr:
1582  return selectGetElementPtr(I);
1583 
1584  case Instruction::Br: {
1585  const BranchInst *BI = cast<BranchInst>(I);
1586 
1587  if (BI->isUnconditional()) {
1588  const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1589  MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1590  fastEmitBranch(MSucc, BI->getDebugLoc());
1591  return true;
1592  }
1593 
1594   // Conditional branches are not handled yet.
1595  // Halt "fast" selection and bail.
1596  return false;
1597  }
1598 
1599  case Instruction::Unreachable:
1600     if (TM.Options.TrapUnreachable)
1601       return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1602  else
1603  return true;
1604 
1605  case Instruction::Alloca:
1606  // FunctionLowering has the static-sized case covered.
1607  if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1608  return true;
1609 
1610  // Dynamic-sized alloca is not handled yet.
1611  return false;
1612 
1613  case Instruction::Call:
1614  return selectCall(I);
1615 
1616  case Instruction::BitCast:
1617  return selectBitCast(I);
1618 
1619  case Instruction::FPToSI:
1620  return selectCast(I, ISD::FP_TO_SINT);
1621  case Instruction::ZExt:
1622  return selectCast(I, ISD::ZERO_EXTEND);
1623  case Instruction::SExt:
1624  return selectCast(I, ISD::SIGN_EXTEND);
1625  case Instruction::Trunc:
1626  return selectCast(I, ISD::TRUNCATE);
1627  case Instruction::SIToFP:
1628  return selectCast(I, ISD::SINT_TO_FP);
1629 
1630  case Instruction::IntToPtr: // Deliberate fall-through.
1631  case Instruction::PtrToInt: {
1632  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1633  EVT DstVT = TLI.getValueType(DL, I->getType());
1634  if (DstVT.bitsGT(SrcVT))
1635  return selectCast(I, ISD::ZERO_EXTEND);
1636  if (DstVT.bitsLT(SrcVT))
1637  return selectCast(I, ISD::TRUNCATE);
1638  unsigned Reg = getRegForValue(I->getOperand(0));
1639  if (!Reg)
1640  return false;
1641  updateValueMap(I, Reg);
1642  return true;
1643  }
1644 
1645  case Instruction::ExtractValue:
1646  return selectExtractValue(I);
1647 
1648  case Instruction::PHI:
1649  llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1650 
1651  default:
1652  // Unhandled instruction. Halt "fast" selection and bail.
1653  return false;
1654  }
1655 }
1656 
1657 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1658                    const TargetLibraryInfo *LibInfo,
1659                    bool SkipTargetIndependentISel)
1660     : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1661  MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1662  TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1663  TII(*MF->getSubtarget().getInstrInfo()),
1664  TLI(*MF->getSubtarget().getTargetLowering()),
1665  TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1666  SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1667 
1668 FastISel::~FastISel() {}
1669 
1670 bool FastISel::fastLowerArguments() { return false; }
1671 
1672 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1673 
1674 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1675  return false;
1676 }
1677 
1678 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1679 
1680 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
1681  bool /*Op0IsKill*/) {
1682  return 0;
1683 }
1684 
1685 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1686  bool /*Op0IsKill*/, unsigned /*Op1*/,
1687  bool /*Op1IsKill*/) {
1688  return 0;
1689 }
1690 
1691 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1692  return 0;
1693 }
1694 
1695 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1696  const ConstantFP * /*FPImm*/) {
1697  return 0;
1698 }
1699 
1700 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1701  bool /*Op0IsKill*/, uint64_t /*Imm*/) {
1702  return 0;
1703 }
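
These fastLower*/fastEmit_* bodies are deliberately empty defaults: returning false or 0 tells the target-independent selector that the target did not handle the request, so it tries another strategy or falls back to SelectionDAG. The sketch below is an editorial illustration, not part of FastISel.cpp, of how a hypothetical backend subclass would plug into these hooks; the class name and its trivial behaviour are assumptions, only the overridden signatures come from this file.

// Hypothetical target FastISel subclass (illustration only).
#include "llvm/CodeGen/FastISel.h"
using namespace llvm;

class MyTargetFastISel final : public FastISel {
public:
  MyTargetFastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo)
      : FastISel(FuncInfo, LibInfo) {}

  // Required entry point: select one IR instruction or report failure.
  bool fastSelectInstruction(const Instruction *I) override {
    return false; // "not handled" -> caller falls back to SelectionDAG
  }

  // Optional reg+imm hook; returning 0 makes fastEmit_ri_ materialize the
  // immediate into a register and retry via fastEmit_rr.
  unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
                       bool Op0IsKill, uint64_t Imm) override {
    return 0;
  }
};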
1704 
1705 /// This method is a wrapper of fastEmit_ri. It first tries to emit an
1706 /// instruction with an immediate operand using fastEmit_ri.
1707 /// If that fails, it materializes the immediate into a register and tries
1708 /// fastEmit_rr instead.
1709 unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1710  bool Op0IsKill, uint64_t Imm, MVT ImmType) {
1711  // If this is a multiply by a power of two, emit this as a shift left.
1712  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1713  Opcode = ISD::SHL;
1714  Imm = Log2_64(Imm);
1715  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1716  // div x, 8 -> srl x, 3
1717  Opcode = ISD::SRL;
1718  Imm = Log2_64(Imm);
1719  }
1720 
1721  // Horrible hack (to be removed), check to make sure shift amounts are
1722  // in-range.
1723  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1724  Imm >= VT.getSizeInBits())
1725  return 0;
1726 
1727  // First check if immediate type is legal. If not, we can't use the ri form.
1728  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1729  if (ResultReg)
1730  return ResultReg;
1731  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1732  bool IsImmKill = true;
1733  if (!MaterialReg) {
1734  // This is a bit ugly/slow, but failing here means falling out of
1735  // fast-isel, which would be very slow.
1736  IntegerType *ITy =
1737  IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1738  MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1739  if (!MaterialReg)
1740  return 0;
1741  // FIXME: If the materialized register here has no uses yet then this
1742  // will be the first use and we should be able to mark it as killed.
1743  // However, the local value area for materialising constant expressions
1744  // grows down, not up, which means that any constant expressions we generate
1745  // later which also use 'Imm' could be after this instruction and therefore
1746  // after this kill.
1747  IsImmKill = false;
1748  }
1749  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
1750 }
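
Before trying the register+immediate form, fastEmit_ri_ strength-reduces a multiply or unsigned divide by a power of two into a shift, and rejects out-of-range shift amounts. A standalone sketch of that arithmetic identity follows; it is an illustration only, and isPowerOf2/log2u64 are local stand-ins for LLVM's isPowerOf2_64/Log2_64.

// Standalone check of the strength reduction applied by fastEmit_ri_
// (illustration only; not LLVM code).
#include <cassert>
#include <cstdint>

static bool isPowerOf2(uint64_t V) { return V && !(V & (V - 1)); }
static unsigned log2u64(uint64_t V) {
  unsigned L = 0;
  while (V >>= 1) ++L;
  return L;
}

int main() {
  uint64_t X = 1234567;
  // mul x, 2^k  ==  shl x, k
  assert(X * 8 == X << log2u64(8));
  // udiv x, 2^k ==  lshr x, k   ("div x, 8 -> srl x, 3")
  assert(X / 8 == X >> log2u64(8));
  // Shift amounts must stay below the value's bit width, mirroring the
  // in-range check in fastEmit_ri_.
  assert(isPowerOf2(8) && log2u64(8) < 64);
  return 0;
}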
1751 
1752 unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
1753  return MRI.createVirtualRegister(RC);
1754 }
1755 
1756 unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
1757  unsigned OpNum) {
1758  if (TargetRegisterInfo::isVirtualRegister(Op)) {
1759  const TargetRegisterClass *RegClass =
1760  TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1761  if (!MRI.constrainRegClass(Op, RegClass)) {
1762  // If it's not legal to COPY between the register classes, something
1763  // has gone very wrong before we got here.
1764  unsigned NewOp = createResultReg(RegClass);
1765  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1766  TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1767  return NewOp;
1768  }
1769  }
1770  return Op;
1771 }
1772 
1773 unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1774  const TargetRegisterClass *RC) {
1775  unsigned ResultReg = createResultReg(RC);
1776  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1777 
1778  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
1779  return ResultReg;
1780 }
1781 
1782 unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1783  const TargetRegisterClass *RC, unsigned Op0,
1784  bool Op0IsKill) {
1785  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1786 
1787  unsigned ResultReg = createResultReg(RC);
1788  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1789 
1790  if (II.getNumDefs() >= 1)
1791  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1792  .addReg(Op0, getKillRegState(Op0IsKill));
1793  else {
1794  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1795  .addReg(Op0, getKillRegState(Op0IsKill));
1796  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1797  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1798  }
1799 
1800  return ResultReg;
1801 }
1802 
1803 unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1804  const TargetRegisterClass *RC, unsigned Op0,
1805  bool Op0IsKill, unsigned Op1,
1806  bool Op1IsKill) {
1807  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1808 
1809  unsigned ResultReg = createResultReg(RC);
1810  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1811  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1812 
1813  if (II.getNumDefs() >= 1)
1814  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1815  .addReg(Op0, getKillRegState(Op0IsKill))
1816  .addReg(Op1, getKillRegState(Op1IsKill));
1817  else {
1818  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1819  .addReg(Op0, getKillRegState(Op0IsKill))
1820  .addReg(Op1, getKillRegState(Op1IsKill));
1821  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1822  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1823  }
1824  return ResultReg;
1825 }
1826 
1827 unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1828  const TargetRegisterClass *RC, unsigned Op0,
1829  bool Op0IsKill, unsigned Op1,
1830  bool Op1IsKill, unsigned Op2,
1831  bool Op2IsKill) {
1832  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1833 
1834  unsigned ResultReg = createResultReg(RC);
1835  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1836  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1837  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1838 
1839  if (II.getNumDefs() >= 1)
1840  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1841  .addReg(Op0, getKillRegState(Op0IsKill))
1842  .addReg(Op1, getKillRegState(Op1IsKill))
1843  .addReg(Op2, getKillRegState(Op2IsKill));
1844  else {
1845  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1846  .addReg(Op0, getKillRegState(Op0IsKill))
1847  .addReg(Op1, getKillRegState(Op1IsKill))
1848  .addReg(Op2, getKillRegState(Op2IsKill));
1849  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1850  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1851  }
1852  return ResultReg;
1853 }
1854 
1855 unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
1856  const TargetRegisterClass *RC, unsigned Op0,
1857  bool Op0IsKill, uint64_t Imm) {
1858  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1859 
1860  unsigned ResultReg = createResultReg(RC);
1861  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1862 
1863  if (II.getNumDefs() >= 1)
1864  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1865  .addReg(Op0, getKillRegState(Op0IsKill))
1866  .addImm(Imm);
1867  else {
1868  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1869  .addReg(Op0, getKillRegState(Op0IsKill))
1870  .addImm(Imm);
1871  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1872  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1873  }
1874  return ResultReg;
1875 }
1876 
1877 unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
1878  const TargetRegisterClass *RC, unsigned Op0,
1879  bool Op0IsKill, uint64_t Imm1,
1880  uint64_t Imm2) {
1881  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1882 
1883  unsigned ResultReg = createResultReg(RC);
1884  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1885 
1886  if (II.getNumDefs() >= 1)
1887  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1888  .addReg(Op0, getKillRegState(Op0IsKill))
1889  .addImm(Imm1)
1890  .addImm(Imm2);
1891  else {
1892  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1893  .addReg(Op0, getKillRegState(Op0IsKill))
1894  .addImm(Imm1)
1895  .addImm(Imm2);
1896  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1897  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1898  }
1899  return ResultReg;
1900 }
1901 
1902 unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
1903  const TargetRegisterClass *RC,
1904  const ConstantFP *FPImm) {
1905  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1906 
1907  unsigned ResultReg = createResultReg(RC);
1908 
1909  if (II.getNumDefs() >= 1)
1910  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1911  .addFPImm(FPImm);
1912  else {
1913  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1914  .addFPImm(FPImm);
1915  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1916  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1917  }
1918  return ResultReg;
1919 }
1920 
1921 unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
1922  const TargetRegisterClass *RC, unsigned Op0,
1923  bool Op0IsKill, unsigned Op1,
1924  bool Op1IsKill, uint64_t Imm) {
1925  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1926 
1927  unsigned ResultReg = createResultReg(RC);
1928  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1929  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1930 
1931  if (II.getNumDefs() >= 1)
1932  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1933  .addReg(Op0, getKillRegState(Op0IsKill))
1934  .addReg(Op1, getKillRegState(Op1IsKill))
1935  .addImm(Imm);
1936  else {
1937  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1938  .addReg(Op0, getKillRegState(Op0IsKill))
1939  .addReg(Op1, getKillRegState(Op1IsKill))
1940  .addImm(Imm);
1941  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1942  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1943  }
1944  return ResultReg;
1945 }
1946 
1947 unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
1948  const TargetRegisterClass *RC, uint64_t Imm) {
1949  unsigned ResultReg = createResultReg(RC);
1950  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1951 
1952  if (II.getNumDefs() >= 1)
1953  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1954  .addImm(Imm);
1955  else {
1956  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
1957  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1958  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1959  }
1960  return ResultReg;
1961 }
1962 
1963 unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
1964  bool Op0IsKill, uint32_t Idx) {
1965  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
1967  "Cannot yet extract from physregs");
1968  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
1970  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
1971  ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
1972  return ResultReg;
1973 }
1974 
1975 /// Emit MachineInstrs to compute the value of Op with all but the least
1976 /// significant bit set to zero.
1977 unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
1978  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
1979 }
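
fastEmitZExtFromI1 relies on the identity that AND-ing with 1 clears every bit except bit 0, which is exactly the zero-extension of an i1 whose containing register may hold junk in its upper bits. A trivial standalone check of that identity, as an illustration only:

// Standalone check of the AND-with-1 identity behind fastEmitZExtFromI1
// (illustration only).
#include <cassert>
#include <cstdint>

int main() {
  uint32_t TrueWithJunk = 0xDEADBEEFu;  // bit 0 set, upper bits arbitrary
  uint32_t FalseWithJunk = 0xDEADBEEEu; // bit 0 clear
  assert((TrueWithJunk & 1u) == 1u);
  assert((FalseWithJunk & 1u) == 0u);
  return 0;
}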
1980 
1981 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
1982 /// Emit code to ensure constants are copied into registers when needed.
1983 /// Remember the virtual registers that need to be added to the Machine PHI
1984 /// nodes as input. We cannot just directly add them, because expansion
1985 /// might result in multiple MBB's for one BB. As such, the start of the
1986 /// BB might correspond to a different MBB than the end.
1987 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
1988  const TerminatorInst *TI = LLVMBB->getTerminator();
1989 
1990  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
1991  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
1992 
1993  // Check successor nodes' PHI nodes that expect a constant to be available
1994  // from this block.
1995  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
1996  const BasicBlock *SuccBB = TI->getSuccessor(succ);
1997  if (!isa<PHINode>(SuccBB->begin()))
1998  continue;
1999  MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2000 
2001  // If this terminator has multiple identical successors (common for
2002  // switches), only handle each succ once.
2003  if (!SuccsHandled.insert(SuccMBB).second)
2004  continue;
2005 
2006  MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2007 
2008  // At this point we know that there is a 1-1 correspondence between LLVM PHI
2009  // nodes and Machine PHI nodes, but the incoming operands have not been
2010  // emitted yet.
2011  for (BasicBlock::const_iterator I = SuccBB->begin();
2012  const auto *PN = dyn_cast<PHINode>(I); ++I) {
2013 
2014  // Ignore dead phi's.
2015  if (PN->use_empty())
2016  continue;
2017 
2018  // Only handle legal types. Two interesting things to note here. First,
2019  // by bailing out early, we may leave behind some dead instructions,
2020  // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2021  // own moves. Second, this check is necessary because FastISel doesn't
2022  // use CreateRegs to create registers, so it always creates
2023  // exactly one register for each non-void instruction.
2024  EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
2025  if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2026  // Handle integer promotions, though, because they're common and easy.
2027  if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2028  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2029  return false;
2030  }
2031  }
2032 
2033  const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
2034 
2035  // Set the DebugLoc for the copy. Prefer the location of the operand
2036  // if there is one; use the location of the PHI otherwise.
2037  DbgLoc = PN->getDebugLoc();
2038  if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2039  DbgLoc = Inst->getDebugLoc();
2040 
2041  unsigned Reg = getRegForValue(PHIOp);
2042  if (!Reg) {
2043  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2044  return false;
2045  }
2046  FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2047  DbgLoc = DebugLoc();
2048  }
2049  }
2050 
2051  return true;
2052 }
2053 
2054 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2055  assert(LI->hasOneUse() &&
2056  "tryToFoldLoad expected a LoadInst with a single use");
2057  // We know that the load has a single use, but don't know what it is. If it
2058  // isn't one of the folded instructions, then we can't succeed here. Handle
2059  // this by scanning the single-use users of the load until we get to FoldInst.
2060  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2061 
2062  const Instruction *TheUser = LI->user_back();
2063  while (TheUser != FoldInst && // Scan up until we find FoldInst.
2064  // Stay in the right block.
2065  TheUser->getParent() == FoldInst->getParent() &&
2066  --MaxUsers) { // Don't scan too far.
2067  // If there are multiple or no uses of this instruction, then bail out.
2068  if (!TheUser->hasOneUse())
2069  return false;
2070 
2071  TheUser = TheUser->user_back();
2072  }
2073 
2074  // If we didn't find the fold instruction, then we failed to collapse the
2075  // sequence.
2076  if (TheUser != FoldInst)
2077  return false;
2078 
2079  // Don't try to fold volatile loads. Target has to deal with alignment
2080  // constraints.
2081  if (LI->isVolatile())
2082  return false;
2083 
2084  // Figure out which vreg this is going into. If there is no assigned vreg yet
2085  // then there actually was no reference to it. Perhaps the load is referenced
2086  // by a dead instruction.
2087  unsigned LoadReg = getRegForValue(LI);
2088  if (!LoadReg)
2089  return false;
2090 
2091  // We can't fold if this vreg has no uses or more than one use. Multiple uses
2092  // may mean that the instruction got lowered to multiple MIs, or the use of
2093  // the loaded value ended up being multiple operands of the result.
2094  if (!MRI.hasOneUse(LoadReg))
2095  return false;
2096 
2097  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2098  MachineInstr *User = RI->getParent();
2099 
2100  // Set the insertion point properly. Folding the load can cause generation of
2101  // other random instructions (like sign extends) for addressing modes; make
2102  // sure they get inserted in a logical place before the new instruction.
2103  FuncInfo.InsertPt = User;
2104  FuncInfo.MBB = User->getParent();
2105 
2106  // Ask the target to try folding the load.
2107  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2108 }
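
tryToFoldLoad walks the chain of single-use instructions from the load toward FoldInst, staying in one basic block and taking at most MaxUsers steps. Below is a standalone sketch of that bounded single-use walk over a hypothetical Node type; the names are assumptions standing in for Instruction and its use lists, not LLVM API.

// Standalone sketch of the bounded single-use chain walk in tryToFoldLoad
// (illustration only; Node is a hypothetical stand-in for Instruction).
#include <cassert>
#include <vector>

struct Node {
  std::vector<Node *> Users;
  int Block = 0;
  Node *soleUser() const { return Users.size() == 1 ? Users[0] : nullptr; }
};

// Returns true if following single uses from Start (staying in Target's
// block and taking at most MaxUsers steps) reaches Target.
static bool reachesViaSingleUses(const Node *Start, const Node *Target,
                                 unsigned MaxUsers = 6) {
  const Node *Cur = Start->soleUser();
  while (Cur && Cur != Target && Cur->Block == Target->Block && --MaxUsers)
    Cur = Cur->soleUser();
  return Cur == Target;
}

int main() {
  Node Load, Ext, Use;
  Load.Users = {&Ext};
  Ext.Users = {&Use};
  assert(reachesViaSingleUses(&Load, &Use));  // load -> ext -> use
  assert(!reachesViaSingleUses(&Load, &Load)); // never walks back to the load
  return 0;
}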
2109 
2110 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2111  // Must be an add.
2112  if (!isa<AddOperator>(Add))
2113  return false;
2114  // Type size needs to match.
2115  if (DL.getTypeSizeInBits(GEP->getType()) !=
2116  DL.getTypeSizeInBits(Add->getType()))
2117  return false;
2118  // Must be in the same basic block.
2119  if (isa<Instruction>(Add) &&
2120  FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2121  return false;
2122  // Must have a constant operand.
2123  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2124 }
2125 
2126 MachineMemOperand *
2127 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2128  const Value *Ptr;
2129  Type *ValTy;
2130  unsigned Alignment;
2131  MachineMemOperand::Flags Flags;
2132  bool IsVolatile;
2133 
2134  if (const auto *LI = dyn_cast<LoadInst>(I)) {
2135  Alignment = LI->getAlignment();
2136  IsVolatile = LI->isVolatile();
2137  Flags = MachineMemOperand::MOLoad;
2138  Ptr = LI->getPointerOperand();
2139  ValTy = LI->getType();
2140  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2141  Alignment = SI->getAlignment();
2142  IsVolatile = SI->isVolatile();
2143  Flags = MachineMemOperand::MOStore;
2144  Ptr = SI->getPointerOperand();
2145  ValTy = SI->getValueOperand()->getType();
2146  } else
2147  return nullptr;
2148 
2149  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2150  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
2151  bool IsDereferenceable =
2152  I->getMetadata(LLVMContext::MD_dereferenceable) != nullptr;
2153  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2154 
2155  AAMDNodes AAInfo;
2156  I->getAAMetadata(AAInfo);
2157 
2158  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
2159  Alignment = DL.getABITypeAlignment(ValTy);
2160 
2161  unsigned Size = DL.getTypeStoreSize(ValTy);
2162 
2163  if (IsVolatile)
2164  Flags |= MachineMemOperand::MOVolatile;
2165  if (IsNonTemporal)
2166  Flags |= MachineMemOperand::MONonTemporal;
2167  if (IsDereferenceable)
2168  Flags |= MachineMemOperand::MODereferenceable;
2169  if (IsInvariant)
2170  Flags |= MachineMemOperand::MOInvariant;
2171 
2172  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2173  Alignment, AAInfo, Ranges);
2174 }
2175 
2176 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2177  // If both operands are the same, then try to optimize or fold the cmp.
2178  CmpInst::Predicate Predicate = CI->getPredicate();
2179  if (CI->getOperand(0) != CI->getOperand(1))
2180  return Predicate;
2181 
2182  switch (Predicate) {
2183  default: llvm_unreachable("Invalid predicate!");
2184  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2185  case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2186  case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2187  case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2188  case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2189  case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2190  case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2191  case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2192  case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2193  case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2194  case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2195  case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2196  case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2197  case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2198  case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2199  case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2200 
2201  case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2202  case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2203  case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2204  case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2205  case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2206  case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2207  case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2208  case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2209  case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2210  case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2211  }
2212 
2213  return Predicate;
2214 }
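
When both operands of a compare are the same value, each predicate collapses as encoded above: ordered float predicates become a NaN check (FCMP_ORD/FCMP_UNO) or a constant, and integer predicates become always-true or always-false. A small standalone check of a few rows of that table, as an illustration only:

// Standalone check of the "same operand" compare folds used by
// optimizeCmpPredicate (illustration only).
#include <cassert>
#include <cmath>

int main() {
  double X = 1.5;
  double NaN = std::nan("");

  // FCMP_OEQ x, x is true exactly when x is ordered (not NaN) -> FCMP_ORD.
  assert((X == X) == !std::isnan(X));
  assert((NaN == NaN) == !std::isnan(NaN)); // both false

  // FCMP_UNE x, x is true exactly when x is unordered (NaN) -> FCMP_UNO.
  assert((NaN != NaN) == std::isnan(NaN)); // both true
  assert((X != X) == std::isnan(X));       // both false

  // ICMP_EQ x, x is always true; ICMP_SLT x, x is always false.
  int I = 42;
  assert(I == I);
  assert(!(I < I));
  return 0;
}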
void setHasStackMap(bool s=true)
IterTy arg_end() const
Definition: CallSite.h:532
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:500
unsigned fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, unsigned Op2, bool Op2IsKill)
Emit a MachineInstr with three register operands and a result register in the given register class...
Definition: FastISel.cpp:1827
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
void setByValAlign(unsigned A)
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:102
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:870
std::vector< ArgListEntry > ArgListTy
Definition: FastISel.h:58
unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero...
Definition: FastISel.cpp:1977
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
This instruction extracts a struct member or array element value from an aggregate value...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:226
MachineConstantPool & MCP
Definition: FastISel.h:201
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1002
static const Value * getFNegArgument(const Value *BinOp)
bool hasName() const
Definition: Value.h:236
STATISTIC(NumFunctions,"Total number of functions")
size_t i
virtual unsigned fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:468
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const
Return the number of registers that this ValueType will eventually require.
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:39
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:51
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2176
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:216
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
ImmutableCallSite * CS
Definition: FastISel.h:78
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:476
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:376
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:591
This class represents a function call, abstracting a target machine's calling convention.
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:289
void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes...
Definition: FastISel.cpp:81
gep_type_iterator gep_type_end(const User *GEP)
unsigned less or equal
Definition: InstrTypes.h:906
unsigned less than
Definition: InstrTypes.h:905
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:886
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2127
unsigned getSizeInBits() const
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:896
void addSuccessorWithoutProb(MachineBasicBlock *Succ)
Add Succ as a successor of this MachineBasicBlock.
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1678
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.cpp:238
arg_iterator arg_end()
Definition: Function.h:559
A debug info location.
Definition: DebugLoc.h:34
Metadata node.
Definition: Metadata.h:830
SmallVector< unsigned, 4 > InRegs
Definition: FastISel.h:89
An instruction for reading from memory.
Definition: Instructions.h:164
Hexagon Common GEP
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
Type * getElementType() const
Definition: DerivedTypes.h:462
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1691
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF)
Emit the floating-point constant +0.0 in a register using target- specific logic. ...
Definition: FastISel.h:475
void setPhysRegsDeadExcept(ArrayRef< unsigned > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
uint64_t getOffset() const
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
Definition: ValueTypes.h:212
unsigned arg_size() const
Definition: CallSite.h:211
void computeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo &MMI)
Determine if any floating-point values are being passed to this variadic function, and set the MachineModuleInfo's usesVAFloatArgument flag if so.
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1672
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:195
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:191
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:228
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1338
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
unsigned fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:1877
ArrayRef< unsigned > getIndices() const
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:891
Value * getAddress() const
Definition: IntrinsicInst.h:91
MachineFunction * MF
Definition: FastISel.h:198
DenseMap< const Value *, unsigned > LocalValueMap
Definition: FastISel.h:196
unsigned fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:1855
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:229
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:890
unsigned fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:1921
bool isUnconditional() const
A description of a memory reference used in the backend.
unsigned getCallFrameDestroyOpcode() const
void setHasPatchPoint(bool s=true)
struct fuzzer::@269 Flags
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:566
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
Shift and rotation operations.
Definition: ISDOpcodes.h:344
Class to represent struct types.
Definition: DerivedTypes.h:199
A Use represents the edge between a Value definition and its users.
Definition: Use.h:56
bool hasOptimizedCodeGen(LibFunc::Func F) const
Tests if the function is both available and a candidate for optimized code generation.
unsigned fastEmitInst_i(unsigned MachineInstrOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:1947
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2110
DenseMap< const Value *, unsigned > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:32
bool isReg() const
isReg - Tests if this is a MO_Register operand.
unsigned getNumArgOperands() const
Return the number of call arguments.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
unsigned fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill)
Emit a MachineInstr with one register operand and a result register in the given register class...
Definition: FastISel.cpp:1782
MachineInstr * EmitStartPt
The top most instruction in the current block that is allowed for emitting local variables.
Definition: FastISel.h:220
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
Reg
All possible values of the reg field in the ModR/M byte.
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:887
The memory access is dereferenceable (i.e., doesn't trap).
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
bool hasDebugInfo() const
Returns true if valid debug info is present.
void setByValSize(unsigned S)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const TargetMachine & TM
Definition: FastISel.h:203
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:589
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1087
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1237
Context object for machine code objects.
Definition: MCContext.h:51
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
Class to represent function types.
Definition: DerivedTypes.h:102
#define F(x, y, z)
Definition: MD5.cpp:51
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:88
unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:1756
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:410
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr), and append generated machine instructions to the current block.
Definition: FastISel.cpp:1539
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
void GetReturnInfo(Type *ReturnType, AttributeSet attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags...
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
unsigned getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value...
Definition: FastISel.cpp:170
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
BasicBlock * getSuccessor(unsigned i) const
unsigned fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:1773
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
bool hasTrivialKill(const Value *V)
Test whether the given value has exactly one use.
Definition: FastISel.cpp:137
MCContext & getContext() const
void setOrigAlign(unsigned A)
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
MachineInstr * getLastLocalValue()
Return the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:225
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
Class to represent pointers.
Definition: DerivedTypes.h:443
unsigned getKillRegState(bool B)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:453
unsigned lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:287
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:131
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:517
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
Definition: InstrTypes.h:74
The memory access is volatile.
bool getLibFunc(StringRef funcName, LibFunc::Func &F) const
Searches for a particular function name.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
FunctionType * getFunctionType() const
Definition: CallSite.h:315
virtual ~FastISel()
Definition: FastISel.cpp:1668
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:52
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, ImmutableCallSite &Call)
Definition: FastISel.h:98
MVT - Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:51
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
BasicBlock * getSuccessor(unsigned idx) const
Return the specified successor.
Definition: InstrTypes.h:79
DIExpression * getExpression() const
Definition: IntrinsicInst.h:97
Simple binary floating point operators.
Definition: ISDOpcodes.h:246
Conditional or Unconditional Branch instruction.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
This is an important base class in LLVM.
Definition: Constant.h:42
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:354
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:86
APInt Or(const APInt &LHS, const APInt &RHS)
Bitwise OR function for APInt.
Definition: APInt.h:1947
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:368
APInt Xor(const APInt &LHS, const APInt &RHS)
Bitwise XOR function for APInt.
Definition: APInt.h:1952
const MCPhysReg * ImplicitDefs
Definition: MCInstrDesc.h:173
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
MachineFrameInfo & MFI
Definition: FastISel.h:200
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1680
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:259
bool SkipTargetIndependentISel
Definition: FastISel.h:209
constexpr bool isPowerOf2_64(uint64_t Value)
isPowerOf2_64 - This function returns true if the argument is a power of two 0 (64 bit edition...
Definition: MathExtras.h:405
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:154
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We're checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2054
uint16_t getParamAlignment(uint16_t i) const
Extract the alignment for a call or parameter (0=unknown).
Definition: CallSite.h:383
uint32_t Offset
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:110
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:880
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned char TargetFlags=0)
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:676
Value * getOperand(unsigned i) const
Definition: User.h:145
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:889
arg_iterator arg_begin()
Definition: Function.h:550
The memory access is non-temporal.
Class to represent integer types.
Definition: DerivedTypes.h:39
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:960
opStatus convertToInteger(integerPart *Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
Definition: APFloat.h:986
const TargetRegisterInfo & TRI
Definition: FastISel.h:207
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:897
static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeSet representing the attributes applied to the return value of the given call...
Definition: FastISel.cpp:846
EVT - Extended Value Type.
Definition: ValueTypes.h:31
Type * getType() const
getType - Return the type of the instruction that generated this call site
Definition: CallSite.h:258
virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:654
bool selectFNeg(const User *I)
Emit an FNeg operation.
Definition: FastISel.cpp:1456
This class contains a discriminated union of information about pointers in memory operands...
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:895
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:85
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
const TargetInstrInfo & TII
Definition: FastISel.h:205
MachineBasicBlock * MBB
MBB - The current block.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
signed greater than
Definition: InstrTypes.h:907
MachineInstr * LastLocalValue
The position of the last instruction for materializing constants for use in the current block...
Definition: FastISel.h:215
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:594
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:340
The memory access writes data.
bool bitsGT(EVT VT) const
bitsGT - Return true if this has more bits than VT.
Definition: ValueTypes.h:200
IterTy arg_begin() const
Definition: CallSite.h:528
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
Definition: DataLayout.cpp:709
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:884
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:689
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:234
DIExpression * getExpression() const
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1685
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:425
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
This is the shared class of boolean and integer constants.
Definition: Constants.h:88
InstrTy * getInstruction() const
Definition: CallSite.h:93
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:408
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1700
DenseMap< unsigned, unsigned > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:894
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:843
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:230
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:59
Provides information about what library functions are available for the current target.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:175
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:27
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtains the branch weight and adds TrueMBB and FalseMBB to ...
Definition: FastISel.cpp:1437
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:218
const TargetLibraryInfo * LibInfo
Definition: FastISel.h:208
signed less than
Definition: InstrTypes.h:909
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:625
bool hasOneUse(unsigned RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
Value * stripPointerCasts()
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:490
unsigned fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill)
Emit a MachineInstr with two register operands and a result register in the given register class...
Definition: FastISel.cpp:1803
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:558
Function * getCalledFunction() const
Return the function called, or null if this is an indirect function invocation.
void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:298
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:198
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:146
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
void startNewBlock()
Set the current block to which generated machine instructions will be appended, and clear the local C...
Definition: FastISel.cpp:98
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
signed less or equal
Definition: InstrTypes.h:910
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1270
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SynchronizationScope SynchScope=CrossThread, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Target - Wrapper for Target specific information.
Class for arbitrary precision integers.
Definition: APInt.h:77
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type...
Definition: FastISel.cpp:1695
SmallVector< unsigned, 16 > OutRegs
Definition: FastISel.h:87
const DataLayout & DL
Definition: FastISel.h:204
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:385
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
BranchProbabilityInfo * BPI
This file defines the FastISel class.
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:400
Value * getValue() const
DebugLoc DbgLoc
Definition: FastISel.h:202
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool selectCall(const User *Call)
Definition: FastISel.cpp:1041
APInt And(const APInt &LHS, const APInt &RHS)
Bitwise AND function for APInt.
Definition: APInt.h:1942
Flags
Flags values. These may be or'd together.
The memory access reads data.
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:367
Representation of each machine instruction.
Definition: MachineInstr.h:52
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering...
Definition: FastISel.cpp:1674
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:706
bool selectExtractValue(const User *I)
Definition: FastISel.cpp:1499
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:333
MachineRegisterInfo & MRI
Definition: FastISel.h:199
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:114
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:108
unsigned greater or equal
Definition: InstrTypes.h:904
This represents the llvm.dbg.value instruction.
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:868
static bool isFNeg(const Value *V, bool IgnoreZeroSign=false)
void getAAMetadata(AAMDNodes &N, bool Merge=false) const
Fills the AAMDNodes structure with AA metadata from this instruction.
ImmutableCallSite - establish a view to a call site for examination.
Definition: CallSite.h:665
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
Definition: ValueTypes.h:256
static MachineOperand CreateImm(int64_t Val)
#define I(x, y, z)
Definition: MD5.cpp:54
#define N
TerminatorInst * getTerminator()
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:124
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:135
bool hasOneUse() const
Return true if there is exactly one user of this value.
Definition: Value.h:383
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool paramHasAttr(unsigned i, Attribute::AttrKind Kind) const
Return true if the call or the callee has the given attribute.
Definition: CallSite.h:359
The memory access always returns the same value (or traps).
virtual unsigned fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:471
iterator end()
Definition: DenseMap.h:69
bool isTailCall() const
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:888
iterator find(const KeyT &Val)
Definition: DenseMap.h:127
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:287
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
size_t size() const
Definition: BasicBlock.h:238
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:391
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block...
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:892
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
getEVT - Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:309
const TargetLowering & TLI
Definition: FastISel.h:206
unsigned createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:1752
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool hasLocalLinkage() const
Definition: GlobalValue.h:415
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:176
unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1709
unsigned getReg() const
getReg - Returns the register number.
bool use_empty() const
Definition: Value.h:299
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:1963
MachineBasicBlock::iterator InsertPt
Definition: FastISel.h:303
DILocalVariable * getVariable() const
Definition: IntrinsicInst.h:93
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
user_iterator user_begin()
Definition: Value.h:346
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1657
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:107
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty...
Definition: Type.cpp:91
virtual bool fastLowerArguments()
This method is called by target-independent code to do target- specific argument lowering.
Definition: FastISel.cpp:1670
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:883
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
LLVM Value Representation.
Definition: Value.h:71
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:893
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:111
DILocalVariable * getVariable() const
static const Function * getParent(const Value *V)
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:533
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
#define DEBUG(X)
Definition: Debug.h:100
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
ICMP_UGT: unsigned greater than
Definition: InstrTypes.h:903
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:47
MachineModuleInfo & getMMI() const
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
reg_iterator reg_begin(unsigned RegNo) const
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
virtual bool fastSelectInstruction(const Instruction *I)=0
This method is called by target-independent code when the normal FastISel process fails to select an ...
CallingConv::ID getCallingConv() const
getCallingConv/setCallingConv - Get or set the calling convention of this function call...
unsigned fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
Definition: FastISel.cpp:1902
bool isValid() const
Check for null.
Conversion operators (the ISD group that includes SIGN_EXTEND, ZERO_EXTEND, ANY_EXTEND, and TRUNCATE).
Definition: ISDOpcodes.h:397
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:197
void setIsDebug(bool Val=true)
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:406
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
FCMP_OGE (bit pattern 0 0 1 1): True if ordered and greater than or equal
Definition: InstrTypes.h:885
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
void fastEmitBranch(MachineBasicBlock *MBB, const DebugLoc &DL)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor...
Definition: FastISel.cpp:1417
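A sketch of how a selector might use this for an unconditional IR branch (BI stands for the BranchInst being selected; not a verbatim excerpt from this file):
  // Look up the machine block for the sole successor and branch to it;
  // fastEmitBranch emits nothing if that block is the fall-through successor.
  if (BI->isUnconditional()) {
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
    fastEmitBranch(SuccMBB, BI->getDebugLoc());
    return true;
  }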
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
This represents the llvm.dbg.declare instruction.
Definition: IntrinsicInst.h:89
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
Definition: ISDOpcodes.h:516
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
getIntegerVT - Returns the EVT that represents an integer with the given number of bits...
Definition: ValueTypes.h:61
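For example (a sketch; I is assumed to be the instruction being selected), an arbitrary-width integer EVT can be built and checked for target support:
  // Build an i64 EVT and only proceed if it maps to a simple machine value
  // type; fast instruction selection punts on extended (non-simple) types.
  EVT VT = EVT::getIntegerVT(I->getContext(), 64);
  if (!VT.isSimple())
    return false;
  MVT SimpleVT = VT.getSimpleVT();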
static MachineOperand CreateFI(int Idx)
unsigned Log2_64(uint64_t Value)
Log2_64 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
Definition: MathExtras.h:519
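A typical use during selection (sketch only; C is an assumed constant operand) is strength-reducing a multiply by a power of two into a shift amount:
  // If C is a power of two, a left shift by Log2_64(C) replaces the multiply.
  if (isPowerOf2_64(C)) {
    unsigned ShiftAmt = Log2_64(C);   // e.g. C == 16 gives ShiftAmt == 4
    // ... emit a shift instead of a multiply ...
  }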
const BasicBlock * getParent() const
Definition: Instruction.h:62
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:226
FCMP_FALSE (bit pattern 0 0 0 0): Always false (always folded)
Definition: InstrTypes.h:882
ICMP_SGE: signed greater or equal
Definition: InstrTypes.h:908
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
This class contains meta information specific to a module.
This file describes how to lower LLVM code to machine code.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
bool use_empty(unsigned RegNo) const
use_empty - Return true if there are no instructions using the specified register.
unsigned InitializeRegForValue(const Value *V)
gep_type_iterator gep_type_begin(const User *GEP)
std::pair< unsigned, bool > getRegForGEPIndex(const Value *V)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:317
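A sketch of the typical consumption pattern during GEP lowering (Idx stands for a GEP index operand; names are illustrative):
  // Materialize the index and note whether this use kills the register;
  // a zero register means the value could not be selected.
  std::pair<unsigned, bool> IdxPair = getRegForGEPIndex(Idx);
  unsigned IdxReg = IdxPair.first;
  bool IdxIsKill = IdxPair.second;
  if (!IdxReg)
    return false;   // fall back to the SelectionDAG selector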