LLVM 19.0.0git
FastISel.cpp
Go to the documentation of this file.
1//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the implementation of the FastISel class.
10//
11// "Fast" instruction selection is designed to emit very poor code quickly.
12// Also, it is not designed to be able to do much lowering, so most illegal
13// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
14// also not intended to be able to do much optimization, except in a few cases
15// where doing optimizations reduces overall compile time. For example, folding
16// constants into immediate fields is often done, because it's cheap and it
17// reduces the number of instructions later phases have to examine.
18//
19// "Fast" instruction selection is able to fail gracefully and transfer
20// control to the SelectionDAG selector for operations that it doesn't
21// support. In many cases, this allows us to avoid duplicating a lot of
22// the complicated lowering logic that SelectionDAG currently has.
23//
24// The intended use for "fast" instruction selection is "-O0" mode
25// compilation, where the quality of the generated code is irrelevant when
26// weighed against the speed at which the code can be generated. Also,
27// at -O0, the LLVM optimizers are not running, and this makes the
28// compile time of codegen a much higher portion of the overall compile
29// time. Despite its limitations, "fast" instruction selection is able to
30// handle enough code on its own to provide noticeable overall speedups
31// in -O0 compiles.
32//
33// Basic operations are supported in a target-independent way, by reading
34// the same instruction descriptions that the SelectionDAG selector reads,
35// and identifying simple arithmetic operations that can be directly selected
36// from simple operators. More complicated operations currently require
37// target-specific code.
38//
39//===----------------------------------------------------------------------===//
40
42#include "llvm/ADT/APFloat.h"
43#include "llvm/ADT/APSInt.h"
44#include "llvm/ADT/DenseMap.h"
48#include "llvm/ADT/Statistic.h"
68#include "llvm/IR/Argument.h"
69#include "llvm/IR/Attributes.h"
70#include "llvm/IR/BasicBlock.h"
71#include "llvm/IR/CallingConv.h"
72#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
74#include "llvm/IR/DataLayout.h"
75#include "llvm/IR/DebugLoc.h"
78#include "llvm/IR/Function.h"
80#include "llvm/IR/GlobalValue.h"
81#include "llvm/IR/InlineAsm.h"
82#include "llvm/IR/InstrTypes.h"
83#include "llvm/IR/Instruction.h"
86#include "llvm/IR/LLVMContext.h"
87#include "llvm/IR/Mangler.h"
88#include "llvm/IR/Metadata.h"
89#include "llvm/IR/Operator.h"
91#include "llvm/IR/Type.h"
92#include "llvm/IR/User.h"
93#include "llvm/IR/Value.h"
94#include "llvm/MC/MCContext.h"
95#include "llvm/MC/MCInstrDesc.h"
97#include "llvm/Support/Debug.h"
103#include <algorithm>
104#include <cassert>
105#include <cstdint>
106#include <iterator>
107#include <optional>
108#include <utility>
109
110using namespace llvm;
111using namespace PatternMatch;
112
113#define DEBUG_TYPE "isel"
114
115STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
116 "target-independent selector");
117STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
118 "target-specific selector");
119STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
120
121/// Set the current block to which generated machine instructions will be
122/// appended.
// NOTE(review): doxygen listing — original lines 123 (the function header,
// presumably `void FastISel::startNewBlock()` — TODO confirm) and 132-133
// (the body of the `if` below) are absent from this excerpt; recover them
// from upstream FastISel.cpp before compiling.
124 assert(LocalValueMap.empty() &&
125 "local values should be cleared after finishing a BB");
126
127 // Instructions are appended to FuncInfo.MBB. If the basic block already
128 // contains labels or copies, use the last instruction as the last local
129 // value.
130 EmitStartPt = nullptr;
131 if (!FuncInfo.MBB->empty())
134}
135
136void FastISel::finishBasicBlock() { flushLocalValueMap(); }
137
// Lower the function's formal arguments, preferring the target's fast path.
// NOTE(review): doxygen listing — original lines 138-139 (the function
// header, presumably `bool FastISel::lowerArguments()`, plus the condition
// guarding the early bail-out below) and lines 148, 151 (the loop header
// start and the LocalValueMap lookup producing `VI`) are absent from this
// excerpt; recover them from upstream before compiling.
140 // Fallback to SDISel argument lowering code to deal with sret pointer
141 // parameter.
142 return false;
143
144 if (!fastLowerArguments())
145 return false;
146
147 // Enter arguments into ValueMap for uses in non-entry BBs.
149 E = FuncInfo.Fn->arg_end();
150 I != E; ++I) {
152 assert(VI != LocalValueMap.end() && "Missed an argument?");
153 FuncInfo.ValueMap[&*I] = VI->second;
154 }
155 return true;
156}
157
158/// Return the defined register if this instruction defines exactly one
159/// virtual register and uses no other virtual registers. Otherwise return 0.
// NOTE(review): original line 160 (the function header; the body reads a
// `MachineInstr &MI` parameter) is missing from this doxygen excerpt.
161 Register RegDef;
162 for (const MachineOperand &MO : MI.operands()) {
// Non-register operands (immediates, MBBs, ...) are irrelevant here.
163 if (!MO.isReg())
164 continue;
165 if (MO.isDef()) {
// A second def disqualifies the instruction: return the null Register.
166 if (RegDef)
167 return Register();
168 RegDef = MO.getReg();
169 } else if (MO.getReg().isVirtual()) {
170 // This is another use of a vreg. Don't delete it.
171 return Register();
172 }
173 }
174 return RegDef;
175}
176
177static bool isRegUsedByPhiNodes(Register DefReg,
178 FunctionLoweringInfo &FuncInfo) {
179 for (auto &P : FuncInfo.PHINodesToUpdate)
180 if (P.second == DefReg)
181 return true;
182 return false;
183}
184
185void FastISel::flushLocalValueMap() {
// NOTE(review): doxygen listing — several original lines are absent from
// this excerpt (188, 190, 193-194, 196, 198, 207, 225, 233-234), including
// the iterator initializations for FirstNonValue and the reverse range, the
// EmitStartPt adjustment, and part of the epilogue; recover them from
// upstream before compiling.
186 // If FastISel bails out, it could leave local value instructions behind
187 // that aren't used for anything. Detect and erase those.
189 // Save the first instruction after local values, for later.
191 ++FirstNonValue;
192
195 : FuncInfo.MBB->rend();
197 for (MachineInstr &LocalMI :
199 Register DefReg = findLocalRegDef(LocalMI);
200 if (!DefReg)
201 continue;
// Registers with pending fixups may still acquire uses — keep them.
202 if (FuncInfo.RegsWithFixups.count(DefReg))
203 continue;
204 bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
205 if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
206 if (EmitStartPt == &LocalMI)
208 LLVM_DEBUG(dbgs() << "removing dead local value materialization"
209 << LocalMI);
210 LocalMI.eraseFromParent();
211 }
212 }
213
214 if (FirstNonValue != FuncInfo.MBB->end()) {
215 // See if there are any local value instructions left. If so, we want to
216 // make sure the first one has a debug location; if it doesn't, use the
217 // first non-value instruction's debug location.
218
219 // If EmitStartPt is non-null, this block had copies at the top before
220 // FastISel started doing anything; it points to the last one, so the
221 // first local value instruction is the one after EmitStartPt.
222 // If EmitStartPt is null, the first local value instruction is at the
223 // top of the block.
224 MachineBasicBlock::iterator FirstLocalValue =
226 : FuncInfo.MBB->begin();
227 if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
228 FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
229 }
230 }
231
232 LocalValueMap.clear();
235 SavedInsertPt = FuncInfo.InsertPt;
236}
237
// Return (creating if needed) the virtual register holding IR value V.
// NOTE(review): doxygen listing — original lines 238 (the function header,
// presumably `Register FastISel::getRegForValue(const Value *V)` — TODO
// confirm), 257 (the lookup initializing `Reg`) and 266 (the body of the
// `if` below) are absent from this excerpt.
239 EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
240 // Don't handle non-simple values in FastISel.
241 if (!RealVT.isSimple())
242 return Register();
243
244 // Ignore illegal types. We must do this before looking up the value
245 // in ValueMap because Arguments are given virtual registers regardless
246 // of whether FastISel can handle them.
247 MVT VT = RealVT.getSimpleVT();
248 if (!TLI.isTypeLegal(VT)) {
249 // Handle integer promotions, though, because they're common and easy.
250 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
251 VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
252 else
253 return Register();
254 }
255
256 // Look up the value to see if we already have a register for it.
258 if (Reg)
259 return Reg;
260
261 // In bottom-up mode, just create the virtual register which will be used
262 // to hold the value. It will be materialized later.
263 if (isa<Instruction>(V) &&
264 (!isa<AllocaInst>(V) ||
265 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
267
268 SavePoint SaveInsertPt = enterLocalValueArea();
269
270 // Materialize the value in a register. Emit any instructions in the
271 // local value area.
272 Reg = materializeRegForValue(V, VT);
273
274 leaveLocalValueArea(SaveInsertPt);
275
276 return Reg;
277}
278
// Target-independent materialization of a constant-like value into a
// register: integer immediates, alloca addresses, null pointers (as integer
// zero), FP constants (directly or via an exact int-to-FP cast), general
// operators, and undef.
// NOTE(review): doxygen listing — original lines 290, 319 and 321-322 are
// absent from this excerpt (parts of the null-pointer, Operator and
// UndefValue paths); recover them from upstream before compiling.
279Register FastISel::materializeConstant(const Value *V, MVT VT) {
280 Register Reg;
281 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
282 if (CI->getValue().getActiveBits() <= 64)
283 Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
284 } else if (isa<AllocaInst>(V))
285 Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
286 else if (isa<ConstantPointerNull>(V))
287 // Translate this as an integer zero so that it can be
288 // local-CSE'd with actual integer zeros.
289 Reg =
291 else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
292 if (CF->isNullValue())
293 Reg = fastMaterializeFloatZero(CF);
294 else
295 // Try to emit the constant directly.
296 Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
297
298 if (!Reg) {
299 // Try to emit the constant by using an integer constant with a cast.
300 const APFloat &Flt = CF->getValueAPF();
301 EVT IntVT = TLI.getPointerTy(DL);
302 uint32_t IntBitWidth = IntVT.getSizeInBits();
303 APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
304 bool isExact;
305 (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
// Only an exact conversion may be used; a rounded value would be wrong.
306 if (isExact) {
307 Register IntegerReg =
308 getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
309 if (IntegerReg)
310 Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
311 IntegerReg);
312 }
313 }
314 } else if (const auto *Op = dyn_cast<Operator>(V)) {
315 if (!selectOperator(Op, Op->getOpcode()))
316 if (!isa<Instruction>(Op) ||
317 !fastSelectInstruction(cast<Instruction>(Op)))
318 return 0;
320 } else if (isa<UndefValue>(V)) {
323 TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
324 }
325 return Reg;
326}
327
328/// Helper for getRegForValue. This function is called when the value isn't
329/// already available in a register and must be materialized with new
330/// instructions.
331Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
// NOTE(review): doxygen listing — original lines 332 (presumably the
// declaration of `Reg` — TODO confirm) and 345-346 (the body of the `if`
// below) are absent from this excerpt.
333 // Give the target-specific code a try first.
334 if (isa<Constant>(V))
335 Reg = fastMaterializeConstant(cast<Constant>(V));
336
337 // If target-specific code couldn't or didn't want to handle the value, then
338 // give target-independent code a try.
339 if (!Reg)
340 Reg = materializeConstant(V, VT);
341
342 // Don't cache constant materializations in the general ValueMap.
343 // To do so would require tracking what uses they dominate.
344 if (Reg) {
347 }
348 return Reg;
349}
350
// Return the register already assigned to V, or the null Register if none.
// NOTE(review): doxygen listing — original lines 351 (the function header)
// and 356 (presumably the FuncInfo.ValueMap find() producing iterator `I` —
// TODO confirm) are absent from this excerpt.
352 // Look up the value to see if we already have a register for it. We
353 // cache values defined by Instructions across blocks, and other values
354 // only locally. This is because Instructions already have the SSA
355 // def-dominates-use requirement enforced.
357 if (I != FuncInfo.ValueMap.end())
358 return I->second;
359 return LocalValueMap[V];
360}
361
// Record that IR value I lives in Reg (NumRegs consecutive registers).
// Non-Instruction values are cached only locally; Instruction results go
// through FuncInfo.ValueMap so later blocks can reuse them.
362void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
363 if (!isa<Instruction>(I)) {
364 LocalValueMap[I] = Reg;
365 return;
366 }
367
368 Register &AssignedReg = FuncInfo.ValueMap[I];
369 if (!AssignedReg)
370 // Use the new register.
371 AssignedReg = Reg;
372 else if (Reg != AssignedReg) {
// NOTE(review): original line 376 is absent from this doxygen excerpt
// (presumably the RegsWithFixups bookkeeping paired with the fixup below —
// TODO confirm against upstream).
373 // Arrange for uses of AssignedReg to be replaced by uses of Reg.
374 for (unsigned i = 0; i < NumRegs; i++) {
375 FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
377 }
378
379 AssignedReg = Reg;
380 }
381}
382
// Place a GEP index value into a register of pointer width, sign-extending
// or truncating as required.
// NOTE(review): doxygen listing — original lines 383-384 (the function
// header and the initial computation of `IdxN`, presumably via
// getRegForValue(Idx) — TODO confirm) are absent from this excerpt.
385 if (!IdxN)
386 // Unhandled operand. Halt "fast" selection and bail.
387 return Register();
388
389 // If the index is smaller or larger than intptr_t, truncate or extend it.
390 MVT PtrVT = TLI.getPointerTy(DL);
391 EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
392 if (IdxVT.bitsLT(PtrVT)) {
393 IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
394 } else if (IdxVT.bitsGT(PtrVT)) {
395 IdxN =
396 fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
397 }
398 return IdxN;
399}
400
// Reposition the insertion point just past the local-value area.
// NOTE(review): doxygen listing — original lines 401 (the function header),
// 403, 405 and 407 (both branch bodies setting the insert point) are absent
// from this excerpt; recover them from upstream before compiling.
402 if (getLastLocalValue()) {
404 FuncInfo.MBB = FuncInfo.InsertPt->getParent();
406 } else
408}
409
// Erase the machine instructions in [I, E), repairing the bookkeeping
// pointers (SavedInsertPt, EmitStartPt, LastLocalValue) that may reference
// an instruction being erased.
// NOTE(review): doxygen listing — original lines 410-411 (the function
// header over the iterator pair) and 427 are absent from this excerpt.
412 assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
413 "Invalid iterator!");
414 while (I != E) {
415 if (SavedInsertPt == I)
416 SavedInsertPt = E;
417 if (EmitStartPt == I)
418 EmitStartPt = E.isValid() ? &*E : nullptr;
419 if (LastLocalValue == I)
420 LastLocalValue = E.isValid() ? &*E : nullptr;
421
// Advance the iterator before erasing so it remains valid.
422 MachineInstr *Dead = &*I;
423 ++I;
424 Dead->eraseFromParent();
425 ++NumFastIselDead;
426 }
428}
429
// Move the insertion point into the local-value area, returning the prior
// insert point so leaveLocalValueArea() can restore it.
// NOTE(review): doxygen listing — original lines 430 (the function header)
// and 432 (presumably the statement that actually repositions the insert
// point — TODO confirm) are absent from this excerpt.
431 SavePoint OldInsertPt = FuncInfo.InsertPt;
433 return OldInsertPt;
434}
435
// Restore the insert point saved by enterLocalValueArea(), remembering the
// last instruction emitted into the local-value area.
// NOTE(review): doxygen listing — original lines 436-437 (the function
// header and the guard around the statement below) are absent from this
// excerpt.
438 LastLocalValue = &*std::prev(FuncInfo.InsertPt);
439
440 // Restore the previous insert position.
441 FuncInfo.InsertPt = OldInsertPt;
442}
443
444bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
445 EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
446 if (VT == MVT::Other || !VT.isSimple())
447 // Unhandled type. Halt "fast" selection and bail.
448 return false;
449
450 // We only handle legal types. For example, on x86-32 the instruction
451 // selector contains all of the 64-bit instructions from x86-64,
452 // under the assumption that i64 won't be used if the target doesn't
453 // support it.
454 if (!TLI.isTypeLegal(VT)) {
455 // MVT::i1 is special. Allow AND, OR, or XOR because they
456 // don't require additional zeroing, which makes them easy.
457 if (VT == MVT::i1 && ISD::isBitwiseLogicOp(ISDOpcode))
458 VT = TLI.getTypeToTransformTo(I->getContext(), VT);
459 else
460 return false;
461 }
462
463 // Check if the first operand is a constant, and handle it as "ri". At -O0,
464 // we don't have anything that canonicalizes operand order.
465 if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
466 if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
467 Register Op1 = getRegForValue(I->getOperand(1));
468 if (!Op1)
469 return false;
470
471 Register ResultReg =
472 fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
473 VT.getSimpleVT());
474 if (!ResultReg)
475 return false;
476
477 // We successfully emitted code for the given LLVM Instruction.
478 updateValueMap(I, ResultReg);
479 return true;
480 }
481
482 Register Op0 = getRegForValue(I->getOperand(0));
483 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
484 return false;
485
486 // Check if the second operand is a constant and handle it appropriately.
487 if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
488 uint64_t Imm = CI->getSExtValue();
489
490 // Transform "sdiv exact X, 8" -> "sra X, 3".
491 if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
492 cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
493 Imm = Log2_64(Imm);
494 ISDOpcode = ISD::SRA;
495 }
496
497 // Transform "urem x, pow2" -> "and x, pow2-1".
498 if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
499 isPowerOf2_64(Imm)) {
500 --Imm;
501 ISDOpcode = ISD::AND;
502 }
503
504 Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
505 VT.getSimpleVT());
506 if (!ResultReg)
507 return false;
508
509 // We successfully emitted code for the given LLVM Instruction.
510 updateValueMap(I, ResultReg);
511 return true;
512 }
513
514 Register Op1 = getRegForValue(I->getOperand(1));
515 if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
516 return false;
517
518 // Now we have both operands in registers. Emit the instruction.
519 Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
520 ISDOpcode, Op0, Op1);
521 if (!ResultReg)
522 // Target-specific code wasn't able to find a machine opcode for
523 // the given ISD opcode and type. Halt "fast" selection and bail.
524 return false;
525
526 // We successfully emitted code for the given LLVM Instruction.
527 updateValueMap(I, ResultReg);
528 return true;
529}
530
// Select a (scalar) getelementptr by folding constant offsets into a running
// total and emitting ADD/MUL only when necessary.
// NOTE(review): doxygen listing — original lines 531 (the function header),
// 547 (the gep_type iterator initialization for the loop below), 587 (the
// computation of `IdxN` for a non-constant index, presumably via
// getRegForGEPIndex — TODO confirm) and 608 (presumably updateValueMap) are
// absent from this excerpt.
532 Register N = getRegForValue(I->getOperand(0));
533 if (!N) // Unhandled operand. Halt "fast" selection and bail.
534 return false;
535
536 // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
537 // and bail.
538 if (isa<VectorType>(I->getType()))
539 return false;
540
541 // Keep a running tab of the total offset to coalesce multiple N = N + Offset
542 // into a single N = N + TotalOffset.
543 uint64_t TotalOffs = 0;
544 // FIXME: What's a good SWAG number for MaxOffs?
545 uint64_t MaxOffs = 2048;
546 MVT VT = TLI.getPointerTy(DL);
548 GTI != E; ++GTI) {
549 const Value *Idx = GTI.getOperand();
550 if (StructType *StTy = GTI.getStructTypeOrNull()) {
551 uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
552 if (Field) {
553 // N = N + Offset
554 TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
555 if (TotalOffs >= MaxOffs) {
556 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
557 if (!N) // Unhandled operand. Halt "fast" selection and bail.
558 return false;
559 TotalOffs = 0;
560 }
561 }
562 } else {
563 // If this is a constant subscript, handle it quickly.
564 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
565 if (CI->isZero())
566 continue;
567 // N = N + Offset
568 uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
569 TotalOffs += GTI.getSequentialElementStride(DL) * IdxN;
570 if (TotalOffs >= MaxOffs) {
571 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
572 if (!N) // Unhandled operand. Halt "fast" selection and bail.
573 return false;
574 TotalOffs = 0;
575 }
576 continue;
577 }
// Flush the accumulated constant offset before a variable index.
578 if (TotalOffs) {
579 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
580 if (!N) // Unhandled operand. Halt "fast" selection and bail.
581 return false;
582 TotalOffs = 0;
583 }
584
585 // N = N + Idx * ElementSize;
586 uint64_t ElementSize = GTI.getSequentialElementStride(DL);
588 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
589 return false;
590
591 if (ElementSize != 1) {
592 IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
593 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
594 return false;
595 }
596 N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
597 if (!N) // Unhandled operand. Halt "fast" selection and bail.
598 return false;
599 }
600 }
601 if (TotalOffs) {
602 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
603 if (!N) // Unhandled operand. Halt "fast" selection and bail.
604 return false;
605 }
606
607 // We successfully emitted code for the given LLVM Instruction.
609 return true;
610}
611
// Encode the live-variable operands of a stackmap/patchpoint call, starting
// at argument StartIdx, into Ops. Returns false if an operand cannot be
// lowered.
// NOTE(review): doxygen listing — original lines 622, 629 and 633 are absent
// from this excerpt (parts of the null-pointer, frame-index and
// register-operand paths); recover them from upstream before compiling.
612bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
613 const CallInst *CI, unsigned StartIdx) {
614 for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
615 Value *Val = CI->getArgOperand(i);
616 // Check for constants and encode them with a StackMaps::ConstantOp prefix.
617 if (const auto *C = dyn_cast<ConstantInt>(Val)) {
618 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
619 Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
620 } else if (isa<ConstantPointerNull>(Val)) {
621 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
623 } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
624 // Values coming from a stack location also require a special encoding,
625 // but that is added later on by the target specific frame index
626 // elimination implementation.
627 auto SI = FuncInfo.StaticAllocaMap.find(AI);
628 if (SI != FuncInfo.StaticAllocaMap.end())
630 else
631 return false;
632 } else {
634 if (!Reg)
635 return false;
636 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
637 }
638 }
639 return true;
640}
641
// Lower the llvm.experimental.stackmap intrinsic directly (no call lowering
// needed): CALLSEQ_START, STACKMAP, CALLSEQ_END.
// NOTE(review): doxygen listing — original lines 642 (the function header),
// 658 (presumably the declaration of `Ops`), 684, 697, 704 and 709 (parts of
// the scratch-register push, the STACKMAP/CALLSEQ_END BuildMI calls and the
// frame-info notification) are absent from this excerpt.
643 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
644 // [live variables...])
645 assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
646 "Stackmap cannot return a value.");
647
648 // The stackmap intrinsic only records the live variables (the arguments
649 // passed to it) and emits NOPS (if requested). Unlike the patchpoint
650 // intrinsic, this won't be lowered to a function call. This means we don't
651 // have to worry about calling conventions and target-specific lowering code.
652 // Instead we perform the call lowering right here.
653 //
654 // CALLSEQ_START(0, 0...)
655 // STACKMAP(id, nbytes, ...)
656 // CALLSEQ_END(0, 0)
657 //
659
660 // Add the <id> and <numBytes> constants.
661 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
662 "Expected a constant integer.");
663 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
664 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
665
666 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
667 "Expected a constant integer.");
668 const auto *NumBytes =
669 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
670 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
671
672 // Push live variables for the stack map (skipping the first two arguments
673 // <id> and <numBytes>).
674 if (!addStackMapLiveVars(Ops, I, 2))
675 return false;
676
677 // We are not adding any register mask info here, because the stackmap doesn't
678 // clobber anything.
679
680 // Add scratch registers as implicit def and early clobber.
681 CallingConv::ID CC = I->getCallingConv();
682 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
683 for (unsigned i = 0; ScratchRegs[i]; ++i)
685 ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
686 /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
687
688 // Issue CALLSEQ_START
689 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
690 auto Builder =
691 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
692 const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
693 for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
694 Builder.addImm(0);
695
696 // Issue STACKMAP.
698 TII.get(TargetOpcode::STACKMAP));
699 for (auto const &MO : Ops)
700 MIB.add(MO);
701
702 // Issue CALLSEQ_END
703 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
705 .addImm(0)
706 .addImm(0);
707
708 // Inform the Frame Information that we have a stackmap in this function.
710
711 return true;
712}
713
714/// Lower an argument list according to the target calling convention.
715///
716/// This is a helper for lowering intrinsics that follow a target calling
717/// convention or require stack pointer adjustment. Only a subset of the
718/// intrinsic's operands need to participate in the calling convention.
719bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
720 unsigned NumArgs, const Value *Callee,
721 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
722 ArgListTy Args;
723 Args.reserve(NumArgs);
724
725 // Populate the argument list.
726 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
727 Value *V = CI->getOperand(ArgI);
728
729 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
730
731 ArgListEntry Entry;
732 Entry.Val = V;
733 Entry.Ty = V->getType();
734 Entry.setAttributes(CI, ArgI);
735 Args.push_back(Entry);
736 }
737
738 Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
739 : CI->getType();
740 CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
741
742 return lowerCallTo(CLI);
743}
744
// Configure the callee from a symbol name: mangle the name per the data
// layout, create/look up the MCSymbol, and defer to the MCSymbol overload.
// NOTE(review): the first line of this definition (original line 745,
// presumably the `FastISel::CallLoweringInfo &...::setCallee(` header line)
// is absent from this doxygen excerpt.
746 const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
747 StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
748 SmallString<32> MangledName;
749 Mangler::getNameWithPrefix(MangledName, Target, DL);
750 MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
751 return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
752}
753
// Lower the llvm.experimental.patchpoint intrinsic: lower the underlying
// call, then replace it with a PATCHPOINT pseudo carrying id/nbytes/target/
// cc, the register arguments, and the stack-map live variables.
// NOTE(review): doxygen listing — several original lines are absent from
// this excerpt (754 function header, 781, 788, 793, 823, 825, 857-858, 863,
// 873, 879, 885 and 888), including the declarations of `CLI`/`Ops`/`MIB`,
// the GlobalValue/null callee pushes, the register-mask push and the result
// register update; recover them from upstream before compiling.
755 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
756 // i32 <numBytes>,
757 // i8* <target>,
758 // i32 <numArgs>,
759 // [Args...],
760 // [live variables...])
761 CallingConv::ID CC = I->getCallingConv();
762 bool IsAnyRegCC = CC == CallingConv::AnyReg;
763 bool HasDef = !I->getType()->isVoidTy();
764 Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
765
766 // Get the real number of arguments participating in the call <numArgs>
767 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
768 "Expected a constant integer.");
769 const auto *NumArgsVal =
770 cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
771 unsigned NumArgs = NumArgsVal->getZExtValue();
772
773 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
774 // This includes all meta-operands up to but not including CC.
775 unsigned NumMetaOpers = PatchPointOpers::CCPos;
776 assert(I->arg_size() >= NumMetaOpers + NumArgs &&
777 "Not enough arguments provided to the patchpoint intrinsic");
778
779 // For AnyRegCC the arguments are lowered later on manually.
780 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
782 CLI.setIsPatchPoint();
783 if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
784 return false;
785
786 assert(CLI.Call && "No call instruction specified.");
787
789
790 // Add an explicit result reg if we use the anyreg calling convention.
791 if (IsAnyRegCC && HasDef) {
792 assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
794 CLI.NumResultRegs = 1;
795 Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
796 }
797
798 // Add the <id> and <numBytes> constants.
799 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
800 "Expected a constant integer.");
801 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
802 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
803
804 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
805 "Expected a constant integer.");
806 const auto *NumBytes =
807 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
808 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
809
810 // Add the call target.
811 if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
812 uint64_t CalleeConstAddr =
813 cast<ConstantInt>(C->getOperand(0))->getZExtValue();
814 Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
815 } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
816 if (C->getOpcode() == Instruction::IntToPtr) {
817 uint64_t CalleeConstAddr =
818 cast<ConstantInt>(C->getOperand(0))->getZExtValue();
819 Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
820 } else
821 llvm_unreachable("Unsupported ConstantExpr.");
822 } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
824 } else if (isa<ConstantPointerNull>(Callee))
826 else
827 llvm_unreachable("Unsupported callee address.");
828
829 // Adjust <numArgs> to account for any arguments that have been passed on
830 // the stack instead.
831 unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
832 Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
833
834 // Add the calling convention
835 Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
836
837 // Add the arguments we omitted previously. The register allocator should
838 // place these in any free register.
839 if (IsAnyRegCC) {
840 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
841 Register Reg = getRegForValue(I->getArgOperand(i));
842 if (!Reg)
843 return false;
844 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
845 }
846 }
847
848 // Push the arguments from the call instruction.
849 for (auto Reg : CLI.OutRegs)
850 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
851
852 // Push live variables for the stack map.
853 if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
854 return false;
855
856 // Push the register mask info.
859
860 // Add scratch registers as implicit def and early clobber.
861 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
862 for (unsigned i = 0; ScratchRegs[i]; ++i)
864 ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
865 /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
866
867 // Add implicit defs (return values).
868 for (auto Reg : CLI.InRegs)
869 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
870 /*isImp=*/true));
871
872 // Insert the patchpoint instruction before the call generated by the target.
874 TII.get(TargetOpcode::PATCHPOINT));
875
876 for (auto &MO : Ops)
877 MIB.add(MO);
878
880
881 // Delete the original call instruction.
882 CLI.Call->eraseFromParent();
883
884 // Inform the Frame Information that we have a patchpoint in this function.
886
887 if (CLI.NumResultRegs)
889 return true;
890}
891
// Lower the XRay custom-event intrinsic into a PATCHABLE_EVENT_CALL pseudo.
// NOTE(review): doxygen listing — several original lines are absent from
// this excerpt (892 function header, 894 the triple check guarding the
// early return, 896-897 and 899 the operand pushes completed by the
// `/*isDef=*/false));` fragments below, and 901-902 the BuildMI producing
// `MIB`); recover them from upstream before compiling.
893 const auto &Triple = TM.getTargetTriple();
895 return true; // don't do anything to this instruction.
898 /*isDef=*/false));
900 /*isDef=*/false));
903 TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
904 for (auto &MO : Ops)
905 MIB.add(MO);
906
907 // Insert the Patchable Event Call instruction, that gets lowered properly.
908 return true;
909}
910
// Lower the XRay typed-event intrinsic into a PATCHABLE_TYPED_EVENT_CALL
// pseudo.
// NOTE(review): doxygen listing — several original lines are absent from
// this excerpt (911 function header, 913 the triple check guarding the
// early return, 916/918/920 the operand pushes completed by the
// `/*isDef=*/false));` fragments below, and 922-923 the BuildMI producing
// `MIB`); recover them from upstream before compiling.
912 const auto &Triple = TM.getTargetTriple();
914 return true; // don't do anything to this instruction.
917 /*isDef=*/false));
919 /*isDef=*/false));
921 /*isDef=*/false));
924 TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
925 for (auto &MO : Ops)
926 MIB.add(MO);
927
928 // Insert the Patchable Typed Event Call instruction, that gets lowered properly.
929 return true;
930}
931
932/// Returns an AttributeList representing the attributes applied to the return
933/// value of the given call.
// NOTE(review): doxygen listing — original lines 934-935 (the function
// header and, presumably, the declaration of the `Attrs` vector — TODO
// confirm) and 943 (the start of the AttributeList::get call completed by
// the fragment below) are absent from this excerpt.
936 if (CLI.RetSExt)
937 Attrs.push_back(Attribute::SExt);
938 if (CLI.RetZExt)
939 Attrs.push_back(Attribute::ZExt);
940 if (CLI.IsInReg)
941 Attrs.push_back(Attribute::InReg);
942
944 Attrs);
945}
946
947bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
948 unsigned NumArgs) {
949 MCContext &Ctx = MF->getContext();
950 SmallString<32> MangledName;
951 Mangler::getNameWithPrefix(MangledName, SymName, DL);
952 MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
953 return lowerCallTo(CI, Sym, NumArgs);
954}
955
// Lower a call to an MCSymbol callee: build the argument list from the first
// NumArgs operands, configure a CallLoweringInfo, and lower it.
// NOTE(review): doxygen listing — original lines 956 (the first line of the
// function header, presumably `bool FastISel::lowerCallTo(const CallInst
// *CI, MCSymbol *Symbol,` — TODO confirm), 977 and 979 (presumably the
// declaration of `CLI`) are absent from this excerpt.
957 unsigned NumArgs) {
958 FunctionType *FTy = CI->getFunctionType();
959 Type *RetTy = CI->getType();
960
961 ArgListTy Args;
962 Args.reserve(NumArgs);
963
964 // Populate the argument list.
965 // Attributes for args start at offset 1, after the return attribute.
966 for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
967 Value *V = CI->getOperand(ArgI);
968
969 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
970
971 ArgListEntry Entry;
972 Entry.Val = V;
973 Entry.Ty = V->getType();
974 Entry.setAttributes(CI, ArgI);
975 Args.push_back(Entry);
976 }
978
980 CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);
981
982 return lowerCallTo(CLI);
983}
984
// Target-independent portion of call lowering: build the ISD::InputArg list
// describing the returned register values, build an ISD::ArgFlagsTy for each
// outgoing argument, then hand the populated CallLoweringInfo to the
// target's fastLowerCall hook.
// NOTE(review): the opening signature line and a few interior lines (the
// 'Outs' declaration before GetReturnInfo, the NeedsRegBlock computation at
// line 1025, and lines 1092/1095 after fastLowerCall) are elided in this
// listing — verify against upstream FastISel.cpp.
986 // Handle the incoming return values from the call.
987 CLI.clearIns();
988 SmallVector<EVT, 4> RetTys;
989 ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
990
992 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
993
994 bool CanLowerReturn = TLI.CanLowerReturn(
995 CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
996
997 // FIXME: sret demotion isn't supported yet - bail out.
998 if (!CanLowerReturn)
999 return false;
1000
// One InputArg per physical register holding part of the return value.
1001 for (EVT VT : RetTys) {
1002 MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
1003 unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
1004 for (unsigned i = 0; i != NumRegs; ++i) {
1005 ISD::InputArg MyFlags;
1006 MyFlags.VT = RegisterVT;
1007 MyFlags.ArgVT = VT;
1008 MyFlags.Used = CLI.IsReturnValueUsed;
1009 if (CLI.RetSExt)
1010 MyFlags.Flags.setSExt();
1011 if (CLI.RetZExt)
1012 MyFlags.Flags.setZExt();
1013 if (CLI.IsInReg)
1014 MyFlags.Flags.setInReg();
1015 CLI.Ins.push_back(MyFlags);
1016 }
1017 }
1018
1019 // Handle all of the outgoing arguments.
1020 CLI.clearOuts();
1021 for (auto &Arg : CLI.getArgs()) {
1022 Type *FinalType = Arg.Ty;
1023 if (Arg.IsByVal)
1024 FinalType = Arg.IndirectType;
// NOTE(review): the start of the NeedsRegBlock computation (line 1025) is
// elided here; only its trailing argument list survives below.
1026 FinalType, CLI.CallConv, CLI.IsVarArg, DL);
1027
// Translate per-argument attributes into ISD flags.
1028 ISD::ArgFlagsTy Flags;
1029 if (Arg.IsZExt)
1030 Flags.setZExt();
1031 if (Arg.IsSExt)
1032 Flags.setSExt();
1033 if (Arg.IsInReg)
1034 Flags.setInReg();
1035 if (Arg.IsSRet)
1036 Flags.setSRet();
1037 if (Arg.IsSwiftSelf)
1038 Flags.setSwiftSelf();
1039 if (Arg.IsSwiftAsync)
1040 Flags.setSwiftAsync();
1041 if (Arg.IsSwiftError)
1042 Flags.setSwiftError();
1043 if (Arg.IsCFGuardTarget)
1044 Flags.setCFGuardTarget();
1045 if (Arg.IsByVal)
1046 Flags.setByVal();
1047 if (Arg.IsInAlloca) {
1048 Flags.setInAlloca();
1049 // Set the byval flag for CCAssignFn callbacks that don't know about
1050 // inalloca. This way we can know how many bytes we should've allocated
1051 // and how many bytes a callee cleanup function will pop. If we port
1052 // inalloca to more targets, we'll have to add custom inalloca handling in
1053 // the various CC lowering callbacks.
1054 Flags.setByVal();
1055 }
1056 if (Arg.IsPreallocated) {
1057 Flags.setPreallocated();
1058 // Set the byval flag for CCAssignFn callbacks that don't know about
1059 // preallocated. This way we can know how many bytes we should've
1060 // allocated and how many bytes a callee cleanup function will pop. If we
1061 // port preallocated to more targets, we'll have to add custom
1062 // preallocated handling in the various CC lowering callbacks.
1063 Flags.setByVal();
1064 }
1065 MaybeAlign MemAlign = Arg.Alignment;
1066 if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
1067 unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);
1068
1069 // For ByVal, alignment should come from FE. BE will guess if this info
1070 // is not there, but there are cases it cannot get right.
1071 if (!MemAlign)
1072 MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
1073 Flags.setByValSize(FrameSize);
1074 } else if (!MemAlign) {
1075 MemAlign = DL.getABITypeAlign(Arg.Ty);
1076 }
1077 Flags.setMemAlign(*MemAlign);
1078 if (Arg.IsNest)
1079 Flags.setNest();
1080 if (NeedsRegBlock)
1081 Flags.setInConsecutiveRegs();
1082 Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
1083 CLI.OutVals.push_back(Arg.Val);
1084 CLI.OutFlags.push_back(Flags);
1085 }
1086
// Defer the actual instruction emission to the target hook.
1087 if (!fastLowerCall(CLI))
1088 return false;
1089
1090 // Set all unused physreg defs as dead.
1091 assert(CLI.Call && "No call instruction specified.");
// NOTE(review): lines 1092 and 1095 are elided here; presumably they mark
// unused defs dead and update the value map for the result — confirm
// against upstream.
1093
1094 if (CLI.NumResultRegs && CLI.CB)
1096
1097 // Set labels for heapallocsite call.
1098 if (CLI.CB)
1099 if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
1100 CLI.Call->setHeapAllocMarker(*MF, MD);
1101
1102 return true;
1103}
1104
// Lower an ordinary (non-intrinsic) call instruction: collect the argument
// list with attributes, decide tail-call eligibility, and dispatch to
// lowerCallTo(CallLoweringInfo&).
// NOTE(review): the opening signature line (original line 1105) is elided
// in this listing.
1106 FunctionType *FuncTy = CI->getFunctionType();
1107 Type *RetTy = CI->getType();
1108
1109 ArgListTy Args;
1110 ArgListEntry Entry;
1111 Args.reserve(CI->arg_size());
1112
1113 for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
1114 Value *V = *i;
1115
1116 // Skip empty types
1117 if (V->getType()->isEmptyTy())
1118 continue;
1119
1120 Entry.Val = V;
1121 Entry.Ty = V->getType();
1122
1123 // Skip the first return-type Attribute to get to params.
1124 Entry.setAttributes(CI, i - CI->arg_begin());
1125 Args.push_back(Entry);
1126 }
1127
1128 // Check if target-independent constraints permit a tail call here.
1129 // Target-dependent constraints are checked within fastLowerCall.
1130 bool IsTailCall = CI->isTailCall();
1131 if (IsTailCall && !isInTailCallPosition(*CI, TM))
1132 IsTailCall = false;
// "disable-tail-calls" suppresses ordinary tail calls but must not
// override a musttail marker.
1133 if (IsTailCall && !CI->isMustTailCall() &&
1134 MF->getFunction().getFnAttribute("disable-tail-calls").getValueAsBool())
1135 IsTailCall = false;
1136
1137 CallLoweringInfo CLI;
1138 CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
1139 .setTailCall(IsTailCall);
1140
// Emit any "dontcall" attribute diagnostics for this call site.
1141 diagnoseDontCall(*CI);
1142
1143 return lowerCallTo(CLI);
1144}
1145
// Select a call instruction: constraint-free inline asm is emitted directly
// as an INLINEASM machine instruction, intrinsics are routed to
// selectIntrinsicCall, and everything else goes through lowerCall.
// NOTE(review): the opening signature line (original line 1146) and two
// interior lines (1157: HasSideEffects flag, 1164: the BuildMI start of the
// INLINEASM emission) are elided in this listing.
1147 const CallInst *Call = cast<CallInst>(I);
1148
1149 // Handle simple inline asms.
1150 if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
1151 // Don't attempt to handle constraints.
1152 if (!IA->getConstraintString().empty())
1153 return false;
1154
1155 unsigned ExtraInfo = 0;
1156 if (IA->hasSideEffects())
1158 if (IA->isAlignStack())
1159 ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1160 if (Call->isConvergent())
1161 ExtraInfo |= InlineAsm::Extra_IsConvergent;
1162 ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
1163
1165 TII.get(TargetOpcode::INLINEASM));
1166 MIB.addExternalSymbol(IA->getAsmString().c_str());
1167 MIB.addImm(ExtraInfo);
1168
// Preserve source-location metadata so asm diagnostics can point back
// at the original IR.
1169 const MDNode *SrcLoc = Call->getMetadata("srcloc");
1170 if (SrcLoc)
1171 MIB.addMetadata(SrcLoc);
1172
1173 return true;
1174 }
1175
1176 // Handle intrinsic function calls.
1177 if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1178 return selectIntrinsicCall(II);
1179
1180 return lowerCall(Call);
1181}
1182
// Lower the non-instruction debug records (DPLabel / DPValue) attached to
// an instruction. Records are visited in reverse because fast-isel emits
// code walking the block backwards.
// NOTE(review): the opening signature line (original line 1183) and a few
// interior lines (1193, the DPV-type dispatch at 1215-1216, 1220) are
// elided in this listing — verify against upstream FastISel.cpp.
1184 if (!II->hasDbgRecords())
1185 return;
1186
1187 // Clear any metadata.
1188 MIMD = MIMetadata();
1189
1190 // Reverse order of debug records, because fast-isel walks through backwards.
1191 for (DbgRecord &DR : llvm::reverse(II->getDbgRecordRange())) {
1192 flushLocalValueMap();
1194
// Labels become DBG_LABEL pseudo-instructions (when debug info is on).
1195 if (DPLabel *DPL = dyn_cast<DPLabel>(&DR)) {
1196 assert(DPL->getLabel() && "Missing label");
1197 if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1198 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DPL << "\n");
1199 continue;
1200 }
1201
1202 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DPL->getDebugLoc(),
1203 TII.get(TargetOpcode::DBG_LABEL))
1204 .addMetadata(DPL->getLabel());
1205 continue;
1206 }
1207
1208 DPValue &DPV = cast<DPValue>(DR);
1209
// Variadic (arg-list) locations are not handled; V stays null, which
// signals "no location" to the lowering helpers.
1210 Value *V = nullptr;
1211 if (!DPV.hasArgList())
1212 V = DPV.getVariableLocationOp(0);
1213
1214 bool Res = false;
// NOTE(review): the branch condition distinguishing dbg.value-style from
// dbg.declare-style records (lines 1215-1216) is elided here.
1217 Res = lowerDbgValue(V, DPV.getExpression(), DPV.getVariable(),
1218 DPV.getDebugLoc());
1219 } else {
1221 if (FuncInfo.PreprocessedDPVDeclares.contains(&DPV))
1222 continue;
1223 Res = lowerDbgDeclare(V, DPV.getExpression(), DPV.getVariable(),
1224 DPV.getDebugLoc());
1225 }
1226
1227 if (!Res)
1228 LLVM_DEBUG(dbgs() << "Dropping debug-info for " << DPV << "\n";);
1229 }
1230}
1231
// Lower a dbg.value-style location into a target-independent DBG_VALUE (or
// DBG_INSTR_REF when instruction referencing is enabled). Handles: no/undef
// location, integer and FP constants, entry-value swift-async arguments,
// static allocas (as frame indices), and values already in a vreg.
// NOTE(review): the first line of the signature (original line 1232) and a
// few interior lines (the BuildMI heads at 1247/1253/1261, and
// 1303/1308/1310 in the DBG_INSTR_REF path) are elided in this listing.
1233 DILocalVariable *Var, const DebugLoc &DL) {
1234 // This form of DBG_VALUE is target-independent.
1235 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1236 if (!V || isa<UndefValue>(V)) {
1237 // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
1238 // undef DBG_VALUE to terminate any prior location.
1239 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false, 0U, Var, Expr);
1240 return true;
1241 }
1242 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1243 // See if there's an expression to constant-fold.
1244 if (Expr)
1245 std::tie(Expr, CI) = Expr->constantFold(CI);
// Wide integers must be emitted as CImm; <=64-bit fits in a plain Imm.
1246 if (CI->getBitWidth() > 64)
1248 .addCImm(CI)
1249 .addImm(0U)
1250 .addMetadata(Var)
1251 .addMetadata(Expr);
1252 else
1254 .addImm(CI->getZExtValue())
1255 .addImm(0U)
1256 .addMetadata(Var)
1257 .addMetadata(Expr);
1258 return true;
1259 }
1260 if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1262 .addFPImm(CF)
1263 .addImm(0U)
1264 .addMetadata(Var)
1265 .addMetadata(Expr);
1266 return true;
1267 }
1268 if (const auto *Arg = dyn_cast<Argument>(V);
1269 Arg && Expr && Expr->isEntryValue()) {
1270 // As per the Verifier, this case is only valid for swift async Args.
1271 assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
1272
// Entry values must name a physical register: find the livein physreg
// that corresponds to this argument's vreg.
1273 Register Reg = getRegForValue(Arg);
1274 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
1275 if (Reg == VirtReg || Reg == PhysReg) {
1276 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false /*IsIndirect*/,
1277 PhysReg, Var, Expr);
1278 return true;
1279 }
1280
1281 LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
1282 "couldn't find a physical register\n");
1283 return false;
1284 }
1285 if (auto SI = FuncInfo.StaticAllocaMap.find(dyn_cast<AllocaInst>(V));
1286 SI != FuncInfo.StaticAllocaMap.end()) {
// Static allocas are described by their frame index, not a register.
1287 MachineOperand FrameIndexOp = MachineOperand::CreateFI(SI->second);
1288 bool IsIndirect = false;
1289 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, FrameIndexOp,
1290 Var, Expr);
1291 return true;
1292 }
1293 if (Register Reg = lookUpRegForValue(V)) {
1294 // FIXME: This does not handle register-indirect values at offset 0.
1295 if (!FuncInfo.MF->useDebugInstrRef()) {
1296 bool IsIndirect = false;
1297 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, Reg, Var,
1298 Expr);
1299 return true;
1300 }
1301 // If using instruction referencing, produce this as a DBG_INSTR_REF,
1302 // to be later patched up by finalizeDebugInstrRefs.
1304 /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
1305 /* isKill */ false, /* isDead */ false,
1306 /* isUndef */ false, /* isEarlyClobber */ false,
1307 /* SubReg */ 0, /* isDebug */ true)});
1309 auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
1311 TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, MOs,
1312 Var, NewExpr);
1313 return true;
1314 }
// No known location for this value — caller logs and drops the record.
1315 return false;
1316}
1317
// Lower a dbg.declare-style record: the address of a source variable is
// turned into an indirect DBG_VALUE, or a DBG_INSTR_REF with an appended
// DW_OP_deref when instruction referencing is in use.
// NOTE(review): the first line of the signature (original line 1318) and a
// few interior lines (the register lookup guard at 1326, the fallback Op
// creation head at 1343, and the BuildMI heads at 1353/1356/1364) are
// elided in this listing.
1320 if (!Address || isa<UndefValue>(Address)) {
1321 LLVM_DEBUG(dbgs() << "Dropping debug info (bad/undef address)\n");
1322 return false;
1323 }
1324
1325 std::optional<MachineOperand> Op;
// NOTE(review): an elided line above guards this with a successful
// register lookup for Address.
1327 Op = MachineOperand::CreateReg(Reg, false);
1328
1329 // If we have a VLA that has a "use" in a metadata node that's then used
1330 // here but it has no other uses, then we have a problem. E.g.,
1331 //
1332 // int foo (const int *x) {
1333 // char a[*x];
1334 // return 0;
1335 // }
1336 //
1337 // If we assign 'a' a vreg and fast isel later on has to use the selection
1338 // DAG isel, it will want to copy the value to the vreg. However, there are
1339 // no uses, which goes counter to what selection DAG isel expects.
1340 if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1341 (!isa<AllocaInst>(Address) ||
1342 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1344 false);
1345
1346 if (Op) {
1348 "Expected inlined-at fields to agree");
1349 if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
1350 // If using instruction referencing, produce this as a DBG_INSTR_REF,
1351 // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
1352 // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
1354 {dwarf::DW_OP_LLVM_arg, 0, dwarf::DW_OP_deref});
1355 auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
1357 TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, *Op,
1358 Var, NewExpr);
1359 return true;
1360 }
1361
1362 // A dbg.declare describes the address of a source variable, so lower it
1363 // into an indirect DBG_VALUE.
1365 TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op, Var,
1366 Expr);
1367 return true;
1368 }
1369
1370 // We can't yet handle anything else here because it would require
1371 // generating code, thus altering codegen because of debug info.
1372 LLVM_DEBUG(
1373 dbgs() << "Dropping debug info (no materialized reg for address)\n");
1374 return false;
1375}
1376
// Select an intrinsic call. No-op intrinsics (lifetime markers, assume,
// donothing, ...) are trivially "selected"; debug intrinsics are lowered to
// their DBG_* pseudo-instructions; a handful of value-passthrough
// intrinsics reuse their operand's register; everything else falls through
// to the target's fastLowerIntrinsicCall hook.
// NOTE(review): the opening signature line (original line 1377) and a few
// interior lines (1406 lowerDbgDeclare call head, the dbg_assign
// fallthrough at 1418, the inlined-at assertion head at 1429, and the
// BuildMI head at 1445) are elided in this listing.
1378 switch (II->getIntrinsicID()) {
1379 default:
1380 break;
1381 // At -O0 we don't care about the lifetime intrinsics.
1382 case Intrinsic::lifetime_start:
1383 case Intrinsic::lifetime_end:
1384 // The donothing intrinsic does, well, nothing.
1385 case Intrinsic::donothing:
1386 // Neither does the sideeffect intrinsic.
1387 case Intrinsic::sideeffect:
1388 // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1389 case Intrinsic::assume:
1390 // Neither does the llvm.experimental.noalias.scope.decl intrinsic
1391 case Intrinsic::experimental_noalias_scope_decl:
1392 return true;
1393 case Intrinsic::dbg_declare: {
1394 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1395 assert(DI->getVariable() && "Missing variable");
1396 if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1397 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1398 << " (!hasDebugInfo)\n");
1399 return true;
1400 }
1401
// Declares already handled during argument lowering are skipped here.
1402 if (FuncInfo.PreprocessedDbgDeclares.contains(DI))
1403 return true;
1404
1405 const Value *Address = DI->getAddress();
1407 MIMD.getDL()))
1408 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI);
1409
1410 return true;
1411 }
1412 case Intrinsic::dbg_assign:
1413 // A dbg.assign is a dbg.value with more information, typically produced
1414 // during optimisation. If one reaches fastisel then something odd has
1415 // happened (such as an optimised function being always-inlined into an
1416 // optnone function). We will not be using the extra information in the
1417 // dbg.assign in that case, just use its dbg.value fields.
1419 case Intrinsic::dbg_value: {
1420 // This form of DBG_VALUE is target-independent.
1421 const DbgValueInst *DI = cast<DbgValueInst>(II);
1422 const Value *V = DI->getValue();
1423 DIExpression *Expr = DI->getExpression();
1424 DILocalVariable *Var = DI->getVariable();
1425 if (DI->hasArgList())
1426 // Signal that we don't have a location for this.
1427 V = nullptr;
1428
1430 "Expected inlined-at fields to agree");
1431
1432 if (!lowerDbgValue(V, Expr, Var, MIMD.getDL()))
1433 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1434
1435 return true;
1436 }
1437 case Intrinsic::dbg_label: {
1438 const DbgLabelInst *DI = cast<DbgLabelInst>(II);
1439 assert(DI->getLabel() && "Missing label");
1440 if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1441 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1442 return true;
1443 }
1444
1446 TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
1447 return true;
1448 }
1449 case Intrinsic::objectsize:
1450 llvm_unreachable("llvm.objectsize.* should have been lowered already");
1451
1452 case Intrinsic::is_constant:
1453 llvm_unreachable("llvm.is.constant.* should have been lowered already");
1454
// These intrinsics are semantically identity functions at this point, so
// the result simply aliases the operand's register.
1455 case Intrinsic::launder_invariant_group:
1456 case Intrinsic::strip_invariant_group:
1457 case Intrinsic::expect: {
1458 Register ResultReg = getRegForValue(II->getArgOperand(0));
1459 if (!ResultReg)
1460 return false;
1461 updateValueMap(II, ResultReg);
1462 return true;
1463 }
1464 case Intrinsic::experimental_stackmap:
1465 return selectStackmap(II);
1466 case Intrinsic::experimental_patchpoint_void:
1467 case Intrinsic::experimental_patchpoint_i64:
1468 return selectPatchpoint(II);
1469
1470 case Intrinsic::xray_customevent:
1471 return selectXRayCustomEvent(II);
1472 case Intrinsic::xray_typedevent:
1473 return selectXRayTypedEvent(II);
1474 }
1475
// Unhandled intrinsic: give the target a chance.
1476 return fastLowerIntrinsicCall(II);
1477}
1478
1479bool FastISel::selectCast(const User *I, unsigned Opcode) {
1480 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1481 EVT DstVT = TLI.getValueType(DL, I->getType());
1482
1483 if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1484 !DstVT.isSimple())
1485 // Unhandled type. Halt "fast" selection and bail.
1486 return false;
1487
1488 // Check if the destination type is legal.
1489 if (!TLI.isTypeLegal(DstVT))
1490 return false;
1491
1492 // Check if the source operand is legal.
1493 if (!TLI.isTypeLegal(SrcVT))
1494 return false;
1495
1496 Register InputReg = getRegForValue(I->getOperand(0));
1497 if (!InputReg)
1498 // Unhandled operand. Halt "fast" selection and bail.
1499 return false;
1500
1501 Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1502 Opcode, InputReg);
1503 if (!ResultReg)
1504 return false;
1505
1506 updateValueMap(I, ResultReg);
1507 return true;
1508}
1509
// Select a bitcast: when source and destination map to the same MVT the
// operand register is reused directly; otherwise an ISD::BITCAST node is
// emitted.
// NOTE(review): the opening signature line (original line 1510) is elided
// in this listing.
1511 EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1512 EVT DstEVT = TLI.getValueType(DL, I->getType());
1513 if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1514 !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1515 // Unhandled type. Halt "fast" selection and bail.
1516 return false;
1517
1518 MVT SrcVT = SrcEVT.getSimpleVT();
1519 MVT DstVT = DstEVT.getSimpleVT();
1520 Register Op0 = getRegForValue(I->getOperand(0));
1521 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1522 return false;
1523
1524 // If the bitcast doesn't change the type, just use the operand value.
1525 if (SrcVT == DstVT) {
1526 updateValueMap(I, Op0);
1527 return true;
1528 }
1529
1530 // Otherwise, select a BITCAST opcode.
1531 Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
1532 if (!ResultReg)
1533 return false;
1534
1535 updateValueMap(I, ResultReg);
1536 return true;
1537}
1538
// Select a freeze instruction by copying the operand into a fresh register
// of the appropriate class; the copy serves as the frozen value.
// NOTE(review): the opening signature line (original line 1539) and the
// BuildMI head at line 1553 are elided in this listing.
1540 Register Reg = getRegForValue(I->getOperand(0));
1541 if (!Reg)
1542 // Unhandled operand.
1543 return false;
1544
1545 EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
1546 if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
1547 // Unhandled type, bail out.
1548 return false;
1549
1550 MVT Ty = ETy.getSimpleVT();
1551 const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
1552 Register ResultReg = createResultReg(TyRegClass);
1554 TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
1555
1556 updateValueMap(I, ResultReg);
1557 return true;
1558}
1559
1560// Remove local value instructions starting from the instruction after
1561// SavedLastLocalValue to the current function insert point.
1562void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1563{
1564 MachineInstr *CurLastLocalValue = getLastLocalValue();
1565 if (CurLastLocalValue != SavedLastLocalValue) {
1566 // Find the first local value instruction to be deleted.
1567 // This is the instruction after SavedLastLocalValue if it is non-NULL.
1568 // Otherwise it's the first instruction in the block.
1569 MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1570 if (SavedLastLocalValue)
1571 ++FirstDeadInst;
1572 else
1573 FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1574 setLastLocalValue(SavedLastLocalValue);
1575 removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1576 }
1577}
1578
// Main per-instruction driver: flush local values, handle successor PHIs
// for terminators, reject unsupported operand bundles and special calls,
// then try target-independent selection followed by the target hook. On
// failure, all partially emitted code is removed so SelectionDAG can redo
// the instruction cleanly.
// NOTE(review): the opening signature line (original line 1579) and a few
// interior lines (part of the LibFunc condition at 1617, and the
// scope/brace lines 1627/1634/1646/1656) are elided in this listing.
1580 // Flush the local value map before starting each instruction.
1581 // This improves locality and debugging, and can reduce spills.
1582 // Reuse of values across IR instructions is relatively uncommon.
1583 flushLocalValueMap();
1584
1585 MachineInstr *SavedLastLocalValue = getLastLocalValue();
1586 // Just before the terminator instruction, insert instructions to
1587 // feed PHI nodes in successor blocks.
1588 if (I->isTerminator()) {
1589 if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1590 // PHI node handling may have generated local value instructions,
1591 // even though it failed to handle all PHI nodes.
1592 // We remove these instructions because SelectionDAGISel will generate
1593 // them again.
1594 removeDeadLocalValueCode(SavedLastLocalValue)\u003b
1595 return false;
1596 }
1597 }
1598
1599 // FastISel does not handle any operand bundles except OB_funclet.
1600 if (auto *Call = dyn_cast<CallBase>(I))
1601 for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
1602 if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1603 return false;
1604
1605 MIMD = MIMetadata(*I);
1606
1607 SavedInsertPt = FuncInfo.InsertPt;
1608
1609 if (const auto *Call = dyn_cast<CallInst>(I)) {
1610 const Function *F = Call->getCalledFunction();
1611 LibFunc Func;
1612
1613 // As a special case, don't handle calls to builtin library functions that
1614 // may be translated directly to target instructions.
1615 if (F && !F->hasLocalLinkage() && F->hasName() &&
1616 LibInfo->getLibFunc(F->getName(), Func) &&
1618 return false;
1619
1620 // Don't handle Intrinsic::trap if a trap function is specified.
1621 if (F && F->getIntrinsicID() == Intrinsic::trap &&
1622 Call->hasFnAttr("trap-func-name"))
1623 return false;
1624 }
1625
1626 // First, try doing target-independent selection.
1628 if (selectOperator(I, I->getOpcode())) {
1629 ++NumFastIselSuccessIndependent;
1630 MIMD = {};
1631 return true;
1632 }
1633 // Remove dead code.
1635 if (SavedInsertPt != FuncInfo.InsertPt)
1636 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1637 SavedInsertPt = FuncInfo.InsertPt;
1638 }
1639 // Next, try calling the target to attempt to handle the instruction.
1640 if (fastSelectInstruction(I)) {
1641 ++NumFastIselSuccessTarget;
1642 MIMD = {};
1643 return true;
1644 }
1645 // Remove dead code.
1647 if (SavedInsertPt != FuncInfo.InsertPt)
1648 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1649
1650 MIMD = {};
1651 // Undo phi node updates, because they will be added again by SelectionDAG.
1652 if (I->isTerminator()) {
1653 // PHI node handling may have generated local value instructions.
1654 // We remove them because SelectionDAGISel will generate them again.
1655 removeDeadLocalValueCode(SavedLastLocalValue);
1657 }
1658 return false;
1659}
1660
1661/// Emit an unconditional branch to the given block, unless it is the immediate
1662/// (fall-through) successor, and update the CFG.
// NOTE(review): the opening signature line (1663), the first half of the
// fall-through condition (1665), the branch-emission continuation (1673),
// and the successor/probability bookkeeping lines (1676-1678, 1680) are
// elided in this listing — verify against upstream FastISel.cpp.
1664 const DebugLoc &DbgLoc) {
1666 FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1667 // For more accurate line information if this is the only non-debug
1668 // instruction in the block then emit it, otherwise we have the
1669 // unconditional fall-through case, which needs no instructions.
1670 } else {
1671 // The unconditional branch case.
1672 TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1674 }
// Record the successor edge, with a branch probability when BPI exists.
1675 if (FuncInfo.BPI) {
1679 } else
1681}
1682
// Finish lowering a conditional branch: register TrueMBB as a successor
// (with its edge probability when available) and emit the fall-through /
// unconditional branch to FalseMBB.
// NOTE(review): the first line of the signature (1683) and the
// addSuccessor calls at lines 1693/1695 are elided in this listing.
1684 MachineBasicBlock *TrueMBB,
1685 MachineBasicBlock *FalseMBB) {
1686 // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
1687 // happen in degenerate IR and MachineIR forbids to have a block twice in the
1688 // successor/predecessor lists.
1689 if (TrueMBB != FalseMBB) {
1690 if (FuncInfo.BPI) {
1691 auto BranchProbability =
1692 FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1694 } else
1696 }
1697
1698 fastEmitBranch(FalseMBB, MIMD.getDL());
1699}
1700
1701/// Emit an FNeg operation.
1702bool FastISel::selectFNeg(const User *I, const Value *In) {
1703 Register OpReg = getRegForValue(In);
1704 if (!OpReg)
1705 return false;
1706
1707 // If the target has ISD::FNEG, use it.
1708 EVT VT = TLI.getValueType(DL, I->getType());
1709 Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1710 OpReg);
1711 if (ResultReg) {
1712 updateValueMap(I, ResultReg);
1713 return true;
1714 }
1715
1716 // Bitcast the value to integer, twiddle the sign bit with xor,
1717 // and then bitcast it back to floating-point.
1718 if (VT.getSizeInBits() > 64)
1719 return false;
1720 EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1721 if (!TLI.isTypeLegal(IntVT))
1722 return false;
1723
1724 Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1725 ISD::BITCAST, OpReg);
1726 if (!IntReg)
1727 return false;
1728
1729 Register IntResultReg = fastEmit_ri_(
1730 IntVT.getSimpleVT(), ISD::XOR, IntReg,
1731 UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1732 if (!IntResultReg)
1733 return false;
1734
1735 ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1736 IntResultReg);
1737 if (!ResultReg)
1738 return false;
1739
1740 updateValueMap(I, ResultReg);
1741 return true;
1742}
1743
// Select an extractvalue by arithmetic on register numbers: the aggregate's
// value lives in a contiguous run of virtual registers, so the result is
// the base register plus the number of registers occupied by all fields
// preceding the extracted index.
// NOTE(review): the opening signature line (original line 1744) and the
// ValueMap lookup head at line 1763 are elided in this listing.
1745 const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1746 if (!EVI)
1747 return false;
1748
1749 // Make sure we only try to handle extracts with a legal result. But also
1750 // allow i1 because it's easy.
1751 EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1752 if (!RealVT.isSimple())
1753 return false;
1754 MVT VT = RealVT.getSimpleVT();
1755 if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1756 return false;
1757
1758 const Value *Op0 = EVI->getOperand(0);
1759 Type *AggTy = Op0->getType();
1760
1761 // Get the base result register.
1762 unsigned ResultReg;
1764 if (I != FuncInfo.ValueMap.end())
1765 ResultReg = I->second;
1766 else if (isa<Instruction>(Op0))
1767 ResultReg = FuncInfo.InitializeRegForValue(Op0);
1768 else
1769 return false; // fast-isel can't handle aggregate constants at the moment
1770
1771 // Get the actual result register, which is an offset from the base register.
1772 unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1773
1774 SmallVector<EVT, 4> AggValueVTs;
1775 ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1776
// Skip over the registers used by every field preceding the one extracted.
1777 for (unsigned i = 0; i < VTIndex; i++)
1778 ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1779
1780 updateValueMap(EVI, ResultReg);
1781 return true;
1782}
1783
// Target-independent dispatch over IR opcodes: binary operators map to
// their ISD counterparts; casts, branches, calls, allocas, and a few
// special cases are handled inline. Returns false for anything fast-isel
// cannot select, bailing the instruction to SelectionDAG.
// NOTE(review): the guard line at original 1845 inside the Unreachable case
// is elided in this listing (presumably a TM.Options trap-on-unreachable
// check — verify against upstream FastISel.cpp).
1784bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1785 switch (Opcode) {
1786 case Instruction::Add:
1787 return selectBinaryOp(I, ISD::ADD);
1788 case Instruction::FAdd:
1789 return selectBinaryOp(I, ISD::FADD);
1790 case Instruction::Sub:
1791 return selectBinaryOp(I, ISD::SUB);
1792 case Instruction::FSub:
1793 return selectBinaryOp(I, ISD::FSUB);
1794 case Instruction::Mul:
1795 return selectBinaryOp(I, ISD::MUL);
1796 case Instruction::FMul:
1797 return selectBinaryOp(I, ISD::FMUL);
1798 case Instruction::SDiv:
1799 return selectBinaryOp(I, ISD::SDIV);
1800 case Instruction::UDiv:
1801 return selectBinaryOp(I, ISD::UDIV);
1802 case Instruction::FDiv:
1803 return selectBinaryOp(I, ISD::FDIV);
1804 case Instruction::SRem:
1805 return selectBinaryOp(I, ISD::SREM);
1806 case Instruction::URem:
1807 return selectBinaryOp(I, ISD::UREM);
1808 case Instruction::FRem:
1809 return selectBinaryOp(I, ISD::FREM);
1810 case Instruction::Shl:
1811 return selectBinaryOp(I, ISD::SHL);
1812 case Instruction::LShr:
1813 return selectBinaryOp(I, ISD::SRL);
1814 case Instruction::AShr:
1815 return selectBinaryOp(I, ISD::SRA);
1816 case Instruction::And:
1817 return selectBinaryOp(I, ISD::AND);
1818 case Instruction::Or:
1819 return selectBinaryOp(I, ISD::OR);
1820 case Instruction::Xor:
1821 return selectBinaryOp(I, ISD::XOR);
1822
1823 case Instruction::FNeg:
1824 return selectFNeg(I, I->getOperand(0));
1825
1826 case Instruction::GetElementPtr:
1827 return selectGetElementPtr(I);
1828
1829 case Instruction::Br: {
1830 const BranchInst *BI = cast<BranchInst>(I);
1831
1832 if (BI->isUnconditional()) {
1833 const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1834 MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1835 fastEmitBranch(MSucc, BI->getDebugLoc());
1836 return true;
1837 }
1838
1839 // Conditional branches are not handed yet.
1840 // Halt "fast" selection and bail.
1841 return false;
1842 }
1843
1844 case Instruction::Unreachable:
// NOTE(review): an elided condition at line 1845 selects between the two
// branches below (emit a TRAP or do nothing).
1846 return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1847 else
1848 return true;
1849
1850 case Instruction::Alloca:
1851 // FunctionLowering has the static-sized case covered.
1852 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1853 return true;
1854
1855 // Dynamic-sized alloca is not handled yet.
1856 return false;
1857
1858 case Instruction::Call:
1859 // On AIX, normal call lowering uses the DAG-ISEL path currently so that the
1860 // callee of the direct function call instruction will be mapped to the
1861 // symbol for the function's entry point, which is distinct from the
1862 // function descriptor symbol. The latter is the symbol whose XCOFF symbol
1863 // name is the C-linkage name of the source level function.
1864 // But fast isel still has the ability to do selection for intrinsics.
1865 if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
1866 return false;
1867 return selectCall(I);
1868
1869 case Instruction::BitCast:
1870 return selectBitCast(I);
1871
1872 case Instruction::FPToSI:
1873 return selectCast(I, ISD::FP_TO_SINT);
1874 case Instruction::ZExt:
1875 return selectCast(I, ISD::ZERO_EXTEND);
1876 case Instruction::SExt:
1877 return selectCast(I, ISD::SIGN_EXTEND);
1878 case Instruction::Trunc:
1879 return selectCast(I, ISD::TRUNCATE);
1880 case Instruction::SIToFP:
1881 return selectCast(I, ISD::SINT_TO_FP);
1882
1883 case Instruction::IntToPtr: // Deliberate fall-through.
1884 case Instruction::PtrToInt: {
// Pointer<->integer conversions are extensions, truncations, or no-ops
// depending on the relative bit widths.
1885 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1886 EVT DstVT = TLI.getValueType(DL, I->getType());
1887 if (DstVT.bitsGT(SrcVT))
1888 return selectCast(I, ISD::ZERO_EXTEND);
1889 if (DstVT.bitsLT(SrcVT))
1890 return selectCast(I, ISD::TRUNCATE);
1891 Register Reg = getRegForValue(I->getOperand(0));
1892 if (!Reg)
1893 return false;
1894 updateValueMap(I, Reg);
1895 return true;
1896 }
1897
1898 case Instruction::ExtractValue:
1899 return selectExtractValue(I);
1900
1901 case Instruction::Freeze:
1902 return selectFreeze(I);
1903
1904 case Instruction::PHI:
1905 llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1906
1907 default:
1908 // Unhandled instruction. Halt "fast" selection and bail.
1909 return false;
1910 }
1911}
1912
// FastISel constructor: caches per-function and per-subtarget objects
// (register info, frame info, constant pool, instruction/lowering/register
// info, library info) so selection never re-fetches them per instruction.
// NOTE(review): the constructor signature (original lines 1913-1915) and
// the final initializer/body line (1922) are elided in this listing.
1916 : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1917 MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1918 TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1919 TII(*MF->getSubtarget().getInstrInfo()),
1920 TLI(*MF->getSubtarget().getTargetLowering()),
1921 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1923
// Defaulted out-of-line destructor.
1924FastISel::~FastISel() = default;
1925
// Default target hook: targets override this to fast-lower the function's
// formal arguments; returning false falls back to SelectionDAG.
1926bool FastISel::fastLowerArguments() { return false; }
1927
// Default target hook for lowering a call described by CLI; always bails.
1928bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1929
// Default target hook for lowering intrinsic calls; always bails.
// NOTE(review): the signature line (original line 1930,
// fastLowerIntrinsicCall) is elided in this listing.
1931 return false;
1932}
1933
// Default nullary emitter hook: 0 means "no instruction emitted".
1934unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1935
// Default one-register emitter hook: 0 means "no instruction emitted".
1936unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
1937 return 0;
1938}
1939
// Default two-register emitter hook: 0 means "no instruction emitted".
1940unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1941 unsigned /*Op1*/) {
1942 return 0;
1943}
1944
// Default immediate emitter hook: 0 means "no instruction emitted".
1945unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1946 return 0;
1947}
1948
// Default floating-point-immediate emitter hook: 0 means "not emitted".
1949unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1950 const ConstantFP * /*FPImm*/) {
1951 return 0;
1952}
1953
// Default register+immediate emitter hook: 0 means "not emitted".
1954unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1955 uint64_t /*Imm*/) {
1956 return 0;
1957}
1958
1959/// This method is a wrapper of fastEmit_ri. It first tries to emit an
1960/// instruction with an immediate operand using fastEmit_ri.
1961/// If that fails, it materializes the immediate into a register and try
1962/// fastEmit_rr instead.
// NOTE(review): the IntegerType::get(...) half of the ITy initialization
// (original line 1990) is elided in this listing.
1963Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1964 uint64_t Imm, MVT ImmType) {
1965 // If this is a multiply by a power of two, emit this as a shift left.
1966 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1967 Opcode = ISD::SHL;
1968 Imm = Log2_64(Imm);
1969 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1970 // div x, 8 -> srl x, 3
1971 Opcode = ISD::SRL;
1972 Imm = Log2_64(Imm);
1973 }
1974
1975 // Horrible hack (to be removed), check to make sure shift amounts are
1976 // in-range.
1977 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1978 Imm >= VT.getSizeInBits())
1979 return 0;
1980
1981 // First check if immediate type is legal. If not, we can't use the ri form.
1982 Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
1983 if (ResultReg)
1984 return ResultReg;
// ri form failed: materialize the immediate into a register and use rr.
1985 Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1986 if (!MaterialReg) {
1987 // This is a bit ugly/slow, but failing here means falling out of
1988 // fast-isel, which would be very slow.
1989 IntegerType *ITy =
1991 MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1992 if (!MaterialReg)
1993 return 0;
1994 }
1995 return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
1996}
1997
// Allocate a fresh virtual register of the given register class.
// NOTE(review): the signature line (original line 1998, createResultReg)
// is elided in this listing.
1999 return MRI.createVirtualRegister(RC);
2000}
2001
// Constrain a virtual register to the class required by operand OpNum of
// instruction descriptor II; if direct constraining fails, insert a COPY
// into a register of the required class and return the copy instead.
// NOTE(review): the first line of the signature (original line 2002) and
// the BuildMI head at line 2011 are elided in this listing.
2003 unsigned OpNum) {
2004 if (Op.isVirtual()) {
2005 const TargetRegisterClass *RegClass =
2006 TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
2007 if (!MRI.constrainRegClass(Op, RegClass)) {
2008 // If it's not legal to COPY between the register classes, something
2009 // has gone very wrong before we got here.
2010 Register NewOp = createResultReg(RegClass);
2012 TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
2013 return NewOp;
2014 }
2015 }
// Physical registers (and already-compatible vregs) pass through as-is.
2016 return Op;
2017}
2018
2019Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
2020 const TargetRegisterClass *RC) {
2021 Register ResultReg = createResultReg(RC);
2022 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2023
2024 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
2025 return ResultReg;
2026}
2027
2028Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
2029 const TargetRegisterClass *RC, unsigned Op0) {
2030 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2031
2032 Register ResultReg = createResultReg(RC);
2033 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2034
2035 if (II.getNumDefs() >= 1)
2036 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2037 .addReg(Op0);
2038 else {
2040 .addReg(Op0);
2041 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2042 ResultReg)
2043 .addReg(II.implicit_defs()[0]);
2044 }
2045
2046 return ResultReg;
2047}
2048
2049Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2050 const TargetRegisterClass *RC, unsigned Op0,
2051 unsigned Op1) {
2052 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2053
2054 Register ResultReg = createResultReg(RC);
2055 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2056 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2057
2058 if (II.getNumDefs() >= 1)
2059 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2060 .addReg(Op0)
2061 .addReg(Op1);
2062 else {
2064 .addReg(Op0)
2065 .addReg(Op1);
2066 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2067 ResultReg)
2068 .addReg(II.implicit_defs()[0]);
2069 }
2070 return ResultReg;
2071}
2072
2073Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
2074 const TargetRegisterClass *RC, unsigned Op0,
2075 unsigned Op1, unsigned Op2) {
2076 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2077
2078 Register ResultReg = createResultReg(RC);
2079 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2080 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2081 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
2082
2083 if (II.getNumDefs() >= 1)
2084 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2085 .addReg(Op0)
2086 .addReg(Op1)
2087 .addReg(Op2);
2088 else {
2090 .addReg(Op0)
2091 .addReg(Op1)
2092 .addReg(Op2);
2093 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2094 ResultReg)
2095 .addReg(II.implicit_defs()[0]);
2096 }
2097 return ResultReg;
2098}
2099
2100Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
2101 const TargetRegisterClass *RC, unsigned Op0,
2102 uint64_t Imm) {
2103 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2104
2105 Register ResultReg = createResultReg(RC);
2106 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2107
2108 if (II.getNumDefs() >= 1)
2109 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2110 .addReg(Op0)
2111 .addImm(Imm);
2112 else {
2114 .addReg(Op0)
2115 .addImm(Imm);
2116 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2117 ResultReg)
2118 .addReg(II.implicit_defs()[0]);
2119 }
2120 return ResultReg;
2121}
2122
2123Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2124 const TargetRegisterClass *RC, unsigned Op0,
2125 uint64_t Imm1, uint64_t Imm2) {
2126 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2127
2128 Register ResultReg = createResultReg(RC);
2129 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2130
2131 if (II.getNumDefs() >= 1)
2132 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2133 .addReg(Op0)
2134 .addImm(Imm1)
2135 .addImm(Imm2);
2136 else {
2138 .addReg(Op0)
2139 .addImm(Imm1)
2140 .addImm(Imm2);
2141 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2142 ResultReg)
2143 .addReg(II.implicit_defs()[0]);
2144 }
2145 return ResultReg;
2146}
2147
2148Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2149 const TargetRegisterClass *RC,
2150 const ConstantFP *FPImm) {
2151 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2152
2153 Register ResultReg = createResultReg(RC);
2154
2155 if (II.getNumDefs() >= 1)
2156 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2157 .addFPImm(FPImm);
2158 else {
2160 .addFPImm(FPImm);
2161 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2162 ResultReg)
2163 .addReg(II.implicit_defs()[0]);
2164 }
2165 return ResultReg;
2166}
2167
2168Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2169 const TargetRegisterClass *RC, unsigned Op0,
2170 unsigned Op1, uint64_t Imm) {
2171 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2172
2173 Register ResultReg = createResultReg(RC);
2174 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2175 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2176
2177 if (II.getNumDefs() >= 1)
2178 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2179 .addReg(Op0)
2180 .addReg(Op1)
2181 .addImm(Imm);
2182 else {
2184 .addReg(Op0)
2185 .addReg(Op1)
2186 .addImm(Imm);
2187 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2188 ResultReg)
2189 .addReg(II.implicit_defs()[0]);
2190 }
2191 return ResultReg;
2192}
2193
2194Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2195 const TargetRegisterClass *RC, uint64_t Imm) {
2196 Register ResultReg = createResultReg(RC);
2197 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2198
2199 if (II.getNumDefs() >= 1)
2200 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2201 .addImm(Imm);
2202 else {
2204 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2205 ResultReg)
2206 .addReg(II.implicit_defs()[0]);
2207 }
2208 return ResultReg;
2209}
2210
2212 uint32_t Idx) {
2213 Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2215 "Cannot yet extract from physregs");
2216 const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2218 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2219 ResultReg).addReg(Op0, 0, Idx);
2220 return ResultReg;
2221}
2222
2223/// Emit MachineInstrs to compute the value of Op with all but the least
2224/// significant bit set to zero.
2226 return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
2227}
2228
2229/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2230/// Emit code to ensure constants are copied into registers when needed.
2231/// Remember the virtual registers that need to be added to the Machine PHI
2232/// nodes as input. We cannot just directly add them, because expansion
2233/// might result in multiple MBB's for one BB. As such, the start of the
2234/// BB might correspond to a different MBB than the end.
2235bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2238
2239 // Check successor nodes' PHI nodes that expect a constant to be available
2240 // from this block.
2241 for (const BasicBlock *SuccBB : successors(LLVMBB)) {
2242 if (!isa<PHINode>(SuccBB->begin()))
2243 continue;
2244 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2245
2246 // If this terminator has multiple identical successors (common for
2247 // switches), only handle each succ once.
2248 if (!SuccsHandled.insert(SuccMBB).second)
2249 continue;
2250
2252
2253 // At this point we know that there is a 1-1 correspondence between LLVM PHI
2254 // nodes and Machine PHI nodes, but the incoming operands have not been
2255 // emitted yet.
2256 for (const PHINode &PN : SuccBB->phis()) {
2257 // Ignore dead phi's.
2258 if (PN.use_empty())
2259 continue;
2260
2261 // Only handle legal types. Two interesting things to note here. First,
2262 // by bailing out early, we may leave behind some dead instructions,
2263 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2264 // own moves. Second, this check is necessary because FastISel doesn't
2265 // use CreateRegs to create registers, so it always creates
2266 // exactly one register for each non-void instruction.
2267 EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2268 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2269 // Handle integer promotions, though, because they're common and easy.
2270 if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2272 return false;
2273 }
2274 }
2275
2276 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2277
2278 // Set the DebugLoc for the copy. Use the location of the operand if
2279 // there is one; otherwise no location, flushLocalValueMap will fix it.
2280 MIMD = {};
2281 if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2282 MIMD = MIMetadata(*Inst);
2283
2284 Register Reg = getRegForValue(PHIOp);
2285 if (!Reg) {
2287 return false;
2288 }
2289 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2290 MIMD = {};
2291 }
2292 }
2293
2294 return true;
2295}
2296
2297bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2298 assert(LI->hasOneUse() &&
2299 "tryToFoldLoad expected a LoadInst with a single use");
2300 // We know that the load has a single use, but don't know what it is. If it
2301 // isn't one of the folded instructions, then we can't succeed here. Handle
2302 // this by scanning the single-use users of the load until we get to FoldInst.
2303 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2304
2305 const Instruction *TheUser = LI->user_back();
2306 while (TheUser != FoldInst && // Scan up until we find FoldInst.
2307 // Stay in the right block.
2308 TheUser->getParent() == FoldInst->getParent() &&
2309 --MaxUsers) { // Don't scan too far.
2310 // If there are multiple or no uses of this instruction, then bail out.
2311 if (!TheUser->hasOneUse())
2312 return false;
2313
2314 TheUser = TheUser->user_back();
2315 }
2316
2317 // If we didn't find the fold instruction, then we failed to collapse the
2318 // sequence.
2319 if (TheUser != FoldInst)
2320 return false;
2321
2322 // Don't try to fold volatile loads. Target has to deal with alignment
2323 // constraints.
2324 if (LI->isVolatile())
2325 return false;
2326
2327 // Figure out which vreg this is going into. If there is no assigned vreg yet
2328 // then there actually was no reference to it. Perhaps the load is referenced
2329 // by a dead instruction.
2330 Register LoadReg = getRegForValue(LI);
2331 if (!LoadReg)
2332 return false;
2333
2334 // We can't fold if this vreg has no uses or more than one use. Multiple uses
2335 // may mean that the instruction got lowered to multiple MIs, or the use of
2336 // the loaded value ended up being multiple operands of the result.
2337 if (!MRI.hasOneUse(LoadReg))
2338 return false;
2339
2340 // If the register has fixups, there may be additional uses through a
2341 // different alias of the register.
2342 if (FuncInfo.RegsWithFixups.contains(LoadReg))
2343 return false;
2344
2346 MachineInstr *User = RI->getParent();
2347
2348 // Set the insertion point properly. Folding the load can cause generation of
2349 // other random instructions (like sign extends) for addressing modes; make
2350 // sure they get inserted in a logical place before the new instruction.
2352 FuncInfo.MBB = User->getParent();
2353
2354 // Ask the target to try folding the load.
2355 return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2356}
2357
2359 // Must be an add.
2360 if (!isa<AddOperator>(Add))
2361 return false;
2362 // Type size needs to match.
2363 if (DL.getTypeSizeInBits(GEP->getType()) !=
2364 DL.getTypeSizeInBits(Add->getType()))
2365 return false;
2366 // Must be in the same basic block.
2367 if (isa<Instruction>(Add) &&
2368 FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2369 return false;
2370 // Must have a constant operand.
2371 return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2372}
2373
2376 const Value *Ptr;
2377 Type *ValTy;
2378 MaybeAlign Alignment;
2380 bool IsVolatile;
2381
2382 if (const auto *LI = dyn_cast<LoadInst>(I)) {
2383 Alignment = LI->getAlign();
2384 IsVolatile = LI->isVolatile();
2386 Ptr = LI->getPointerOperand();
2387 ValTy = LI->getType();
2388 } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2389 Alignment = SI->getAlign();
2390 IsVolatile = SI->isVolatile();
2392 Ptr = SI->getPointerOperand();
2393 ValTy = SI->getValueOperand()->getType();
2394 } else
2395 return nullptr;
2396
2397 bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
2398 bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
2399 bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
2400 const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2401
2402 AAMDNodes AAInfo = I->getAAMetadata();
2403
2404 if (!Alignment) // Ensure that codegen never sees alignment 0.
2405 Alignment = DL.getABITypeAlign(ValTy);
2406
2407 unsigned Size = DL.getTypeStoreSize(ValTy);
2408
2409 if (IsVolatile)
2411 if (IsNonTemporal)
2413 if (IsDereferenceable)
2415 if (IsInvariant)
2417
2419 *Alignment, AAInfo, Ranges);
2420}
2421
2423 // If both operands are the same, then try to optimize or fold the cmp.
2424 CmpInst::Predicate Predicate = CI->getPredicate();
2425 if (CI->getOperand(0) != CI->getOperand(1))
2426 return Predicate;
2427
2428 switch (Predicate) {
2429 default: llvm_unreachable("Invalid predicate!");
2430 case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2431 case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2432 case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2433 case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2434 case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2435 case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2436 case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2437 case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2438 case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2439 case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2440 case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2441 case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2442 case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2443 case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2444 case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2445 case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2446
2447 case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2448 case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2449 case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2450 case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2451 case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2452 case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2453 case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2454 case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2455 case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2456 case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2457 }
2458
2459 return Predicate;
2460}
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:301
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
static Register findLocalRegDef(MachineInstr &MI)
Return the defined register if this instruction defines exactly one virtual register and uses no othe...
Definition: FastISel.cpp:160
static bool isRegUsedByPhiNodes(Register DefReg, FunctionLoweringInfo &FuncInfo)
Definition: FastISel.cpp:177
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
Definition: FastISel.cpp:934
This file defines the FastISel class.
Hexagon Common GEP
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
#define P(N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isCommutative(Instruction *I)
This file defines the SmallPtrSet class.
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
An arbitrary precision integer that knows its signedness.
Definition: APSInt.h:23
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
bool getValueAsBool() const
Return the attribute's value as a boolean.
Definition: Attributes.cpp:335
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
filter_iterator< BasicBlock::const_iterator, std::function< bool(constInstruction &)> >::difference_type sizeWithoutDebug() const
Return the size of the basic block ignoring debug instructions.
Definition: BasicBlock.cpp:254
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1761
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1623
Value * getCalledOperand() const
Definition: InstrTypes.h:1696
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1648
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1629
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1561
unsigned arg_size() const
Definition: InstrTypes.h:1646
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
bool isMustTailCall() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:955
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:965
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:968
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:982
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:994
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:995
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:971
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:980
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:969
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:970
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:989
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:988
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:992
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:979
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:973
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:976
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:990
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:977
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:972
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:974
@ ICMP_EQ
equal
Definition: InstrTypes.h:986
@ ICMP_NE
not equal
Definition: InstrTypes.h:987
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:993
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:981
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:991
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:978
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:967
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:975
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:1066
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:267
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
std::pair< DIExpression *, const ConstantInt * > constantFold(const ConstantInt *CI)
Try to shorten an expression with an initial constant operand.
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Records a position in IR for a source label (DILabel).
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
LocationType getType() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:720
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Definition: DataLayout.cpp:878
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:865
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:504
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:672
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:472
This represents the llvm.dbg.declare instruction.
Value * getAddress() const
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
A debug info location.
Definition: DebugLoc.h:33
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
MachineRegisterInfo & MRI
Definition: FastISel.h:205
Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:2211
const TargetLibraryInfo * LibInfo
Definition: FastISel.h:214
const DataLayout & DL
Definition: FastISel.h:210
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:531
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:237
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:642
bool selectExtractValue(const User *U)
Definition: FastISel.cpp:1744
DenseMap< const Value *, Register > LocalValueMap
Definition: FastISel.h:202
void fastEmitBranch(MachineBasicBlock *MSucc, const DebugLoc &DbgLoc)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor,...
Definition: FastISel.cpp:1663
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF)
Emit the floating-point constant +0.0 in a register using target- specific logic.
Definition: FastISel.h:480
MachineInstr * EmitStartPt
The top most instruction in the current block that is allowed for emitting local variables.
Definition: FastISel.h:226
bool selectXRayCustomEvent(const CallInst *II)
Definition: FastISel.cpp:892
Register fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:2019
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering.
Definition: FastISel.cpp:1930
virtual bool lowerDbgDeclare(const Value *V, DIExpression *Expr, DILocalVariable *Var, const DebugLoc &DL)
Target-independent lowering of debug information.
Definition: FastISel.cpp:1318
MachineInstr * getLastLocalValue()
Return the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:233
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1105
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1954
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:436
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:956
Register fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0)
Emit a MachineInstr with one register operand and a result register in the given register class.
Definition: FastISel.cpp:2028
void handleDbgInfo(const Instruction *II)
Target-independent lowering of non-instruction debug info associated with this instruction.
Definition: FastISel.cpp:1183
bool selectFreeze(const User *I)
Definition: FastISel.cpp:1539
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1377
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1479
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We're checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2297
Register getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value.
Definition: FastISel.cpp:238
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:410
void startNewBlock()
Set the current block to which generated machine instructions will be appended.
Definition: FastISel.cpp:123
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2375
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:300
Register fastEmitInst_i(unsigned MachineInstOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:2194
MachineFrameInfo & MFI
Definition: FastISel.h:206
MachineFunction * MF
Definition: FastISel.h:204
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2358
virtual bool lowerDbgValue(const Value *V, DIExpression *Expr, DILocalVariable *Var, const DebugLoc &DL)
Target-independent lowering of debug information.
Definition: FastISel.cpp:1232
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1934
TargetLoweringBase::ArgListTy ArgListTy
Definition: FastISel.h:69
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1579
virtual unsigned fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:473
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1928
bool selectXRayTypedEvent(const CallInst *II)
Definition: FastISel.cpp:911
Register fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1)
Emit a MachineInstr with two register operands and a result register in the given register class.
Definition: FastISel.cpp:2049
Register fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:2123
Register createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:1998
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1949
virtual bool fastLowerArguments()
This method is called by target-independent code to do target- specific argument lowering.
Definition: FastISel.cpp:1926
bool selectFNeg(const User *I, const Value *In)
Emit an FNeg operation.
Definition: FastISel.cpp:1702
const TargetInstrInfo & TII
Definition: FastISel.h:211
bool selectCall(const User *I)
Definition: FastISel.cpp:1146
Register lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:351
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2422
void finishBasicBlock()
Flush the local value map.
Definition: FastISel.cpp:136
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:203
Register getRegForGEPIndex(const Value *Idx)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:383
MachineConstantPool & MCP
Definition: FastISel.h:207
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr),...
Definition: FastISel.cpp:1784
bool SkipTargetIndependentISel
Definition: FastISel.h:215
Register fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
Definition: FastISel.cpp:2148
Register constrainOperandRegClass(const MCInstrDesc &II, Register Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:2002
void updateValueMap(const Value *I, Register Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:362
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:444
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1913
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:754
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:401
virtual bool fastSelectInstruction(const Instruction *I)=0
This method is called by target-independent code when the normal FastISel process fails to select an ...
const TargetLowering & TLI
Definition: FastISel.h:212
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, unsigned Op1)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1940
const TargetMachine & TM
Definition: FastISel.h:209
Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1963
Register fastEmitZExtFromI1(MVT VT, unsigned Op0)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero.
Definition: FastISel.cpp:2225
MIMetadata MIMD
Definition: FastISel.h:208
MachineInstr * LastLocalValue
The position of the last instruction for materializing constants for use in the current block.
Definition: FastISel.h:221
Register fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:2168
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:138
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:430
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtains the branch weight and adds TrueMBB and FalseMBB to ...
Definition: FastISel.cpp:1683
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1510
Register fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:2100
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1936
virtual unsigned fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:476
virtual ~FastISel()
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1945
const TargetRegisterInfo & TRI
Definition: FastISel.h:213
Register fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1, unsigned Op2)
Emit a MachineInstr with three register operands and a result register in the given register class.
Definition: FastISel.cpp:2073
TargetLoweringBase::ArgListEntry ArgListEntry
Definition: FastISel.h:68
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
DenseSet< Register > RegsWithFixups
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
SmallPtrSet< const DPValue *, 8 > PreprocessedDPVDeclares
Register InitializeRegForValue(const Value *V)
SmallPtrSet< const DbgDeclareInst *, 8 > PreprocessedDbgDeclares
Collection of dbg.declare instructions handled after argument lowering and before ISel proper.
DenseMap< const Value *, Register > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
MachineBasicBlock * MBB
MBB - The current block.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
DenseMap< Register, Register > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
MachineRegisterInfo * RegInfo
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
Class to represent function types.
Definition: DerivedTypes.h:103
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:695
arg_iterator arg_end()
Definition: Function.h:822
arg_iterator arg_begin()
Definition: Function.h:813
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:342
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
Definition: Instruction.h:83
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:453
const BasicBlock * getParent() const
Definition: Instruction.h:151
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:148
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:358
bool hasDbgRecords() const
Returns true if any DbgRecords are attached to this instruction.
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
An instruction for reading from memory.
Definition: Instructions.h:184
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:230
Context object for machine code objects.
Definition: MCContext.h:76
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:200
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
Definition: MCInstrDesc.h:579
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:40
Metadata node.
Definition: Metadata.h:1067
Set of metadata that should be preserved when using BuildMI().
const DebugLoc & getDL() const
Machine Value Type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
void addSuccessorWithoutProb(MachineBasicBlock *Succ)
Add Succ as a successor of this MachineBasicBlock.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
MachineInstrBundleIterator< MachineInstr > iterator
void setHasPatchPoint(bool s=true)
void setHasStackMap(bool s=true)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineModuleInfo & getMMI() const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
bool hasDebugInfo() const
Returns true if valid debug info is present.
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
reg_iterator reg_begin(Register RegNo) const
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
ArrayRef< std::pair< MCRegister, Register > > liveins() const
const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:119
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:71
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TypeSize getElementOffset(unsigned Idx) const
Definition: DataLayout.h:651
Class to represent struct types.
Definition: DerivedTypes.h:216
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
unsigned getCallFrameDestroyOpcode() const
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
const Triple & getTargetTriple() const
TargetOptions Options
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:361
bool isOSAIX() const
Tests whether the OS is AIX.
Definition: Triple.h:694
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition: Triple.h:895
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:185
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ ConstantFP
Definition: ISDOpcodes.h:77
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:790
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:390
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:903
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:774
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:930
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:705
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:780
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:836
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:680
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1208
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:786
bool isBitwiseLogicOp(unsigned Opcode)
Whether this is bitwise logic opcode.
Definition: ISDOpcodes.h:1415
Reg
All possible values of the reg field in the ModR/M byte.
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition: Dwarf.h:146
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:227
reverse_iterator rend(StringRef path)
Get reverse end iterator over path.
Definition: Path.cpp:307
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:665
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:269
gep_type_iterator gep_type_end(const User *GEP)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:319
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
@ Add
Sum of integers.
DWARFExpression::Operation Op
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:79
gep_type_iterator gep_type_begin(const User *GEP)
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:535
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition: Analysis.cpp:33
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
static constexpr roundingMode rmTowardZero
Definition: APFloat.h:234
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:34
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:136
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:274
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:290
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:358
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:628
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:306
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:95
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:94
SmallVector< Register, 16 > OutRegs
Definition: FastISel.h:96
CallLoweringInfo & setTailCall(bool Value=true)
Definition: FastISel.h:177
SmallVector< Register, 4 > InRegs
Definition: FastISel.h:98
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:182
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, const CallBase &Call)
Definition: FastISel.h:104
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:97
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117