LLVM 17.0.0git
FastISel.cpp
Go to the documentation of this file.
1//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the implementation of the FastISel class.
10//
11// "Fast" instruction selection is designed to emit very poor code quickly.
12// Also, it is not designed to be able to do much lowering, so most illegal
13// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
14// also not intended to be able to do much optimization, except in a few cases
15// where doing optimizations reduces overall compile time. For example, folding
16// constants into immediate fields is often done, because it's cheap and it
17// reduces the number of instructions later phases have to examine.
18//
19// "Fast" instruction selection is able to fail gracefully and transfer
20// control to the SelectionDAG selector for operations that it doesn't
21// support. In many cases, this allows us to avoid duplicating a lot of
22// the complicated lowering logic that SelectionDAG currently has.
23//
24// The intended use for "fast" instruction selection is "-O0" mode
25// compilation, where the quality of the generated code is irrelevant when
26// weighed against the speed at which the code can be generated. Also,
27// at -O0, the LLVM optimizers are not running, and this makes the
28// compile time of codegen a much higher portion of the overall compile
29// time. Despite its limitations, "fast" instruction selection is able to
30// handle enough code on its own to provide noticeable overall speedups
31// in -O0 compiles.
32//
33// Basic operations are supported in a target-independent way, by reading
34// the same instruction descriptions that the SelectionDAG selector reads,
35// and identifying simple arithmetic operations that can be directly selected
36// from simple operators. More complicated operations currently require
37// target-specific code.
38//
39//===----------------------------------------------------------------------===//
40
42#include "llvm/ADT/APFloat.h"
43#include "llvm/ADT/APSInt.h"
44#include "llvm/ADT/DenseMap.h"
48#include "llvm/ADT/Statistic.h"
67#include "llvm/IR/Argument.h"
68#include "llvm/IR/Attributes.h"
69#include "llvm/IR/BasicBlock.h"
70#include "llvm/IR/CallingConv.h"
71#include "llvm/IR/Constant.h"
72#include "llvm/IR/Constants.h"
73#include "llvm/IR/DataLayout.h"
74#include "llvm/IR/DebugLoc.h"
77#include "llvm/IR/Function.h"
79#include "llvm/IR/GlobalValue.h"
80#include "llvm/IR/InlineAsm.h"
81#include "llvm/IR/InstrTypes.h"
82#include "llvm/IR/Instruction.h"
85#include "llvm/IR/LLVMContext.h"
86#include "llvm/IR/Mangler.h"
87#include "llvm/IR/Metadata.h"
88#include "llvm/IR/Operator.h"
90#include "llvm/IR/Type.h"
91#include "llvm/IR/User.h"
92#include "llvm/IR/Value.h"
93#include "llvm/MC/MCContext.h"
94#include "llvm/MC/MCInstrDesc.h"
96#include "llvm/Support/Debug.h"
103#include <algorithm>
104#include <cassert>
105#include <cstdint>
106#include <iterator>
107#include <optional>
108#include <utility>
109
110using namespace llvm;
111using namespace PatternMatch;
112
113#define DEBUG_TYPE "isel"
114
// Counters reported under -stats: they record how instruction selection work
// split between the target-independent and target-specific fast-isel paths,
// and how many dead local-value instructions were cleaned up on bail-out.
115STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
116 "target-independent selector");
117STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
118 "target-specific selector");
119STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
120
121/// Set the current block to which generated machine instructions will be
122/// appended.
// NOTE(review): the embedded numbering jumps 122->124 and 131->134 — the
// function header (FastISel::startNewBlock) and the statements guarded by the
// emptiness check below were dropped by the extraction; verify against
// upstream FastISel.cpp.
124 assert(LocalValueMap.empty() &&
125 "local values should be cleared after finishing a BB");
126
127 // Instructions are appended to FuncInfo.MBB. If the basic block already
128 // contains labels or copies, use the last instruction as the last local
129 // value.
130 EmitStartPt = nullptr;
// Presumably the (missing) lines anchor emission after the existing
// instructions when the block is non-empty — TODO confirm upstream.
131 if (!FuncInfo.MBB->empty())
134}
135
136void FastISel::finishBasicBlock() { flushLocalValueMap(); }
137
140 // Fallback to SDISel argument lowering code to deal with sret pointer
141 // parameter.
142 return false;
143
144 if (!fastLowerArguments())
145 return false;
146
147 // Enter arguments into ValueMap for uses in non-entry BBs.
149 E = FuncInfo.Fn->arg_end();
150 I != E; ++I) {
152 assert(VI != LocalValueMap.end() && "Missed an argument?");
153 FuncInfo.ValueMap[&*I] = VI->second;
154 }
155 return true;
156}
157
158/// Return the defined register if this instruction defines exactly one
159/// virtual register and uses no other virtual registers. Otherwise return 0.
// NOTE(review): the numbering jumps 159->161 — the static function header
// (findLocalRegDef(MachineInstr &MI)) was dropped by the extraction.
161 Register RegDef;
162 for (const MachineOperand &MO : MI.operands()) {
163 if (!MO.isReg())
164 continue;
165 if (MO.isDef()) {
// A second def means this is not a single-def instruction; give up.
166 if (RegDef)
167 return Register();
168 RegDef = MO.getReg();
169 } else if (MO.getReg().isVirtual()) {
170 // This is another use of a vreg. Don't delete it.
171 return Register();
172 }
173 }
174 return RegDef;
175}
176
177static bool isRegUsedByPhiNodes(Register DefReg,
178 FunctionLoweringInfo &FuncInfo) {
179 for (auto &P : FuncInfo.PHINodesToUpdate)
180 if (P.second == DefReg)
181 return true;
182 return false;
183}
184
185void FastISel::flushLocalValueMap() {
186 // If FastISel bails out, it could leave local value instructions behind
187 // that aren't used for anything. Detect and erase those.
// NOTE(review): several lines are missing from this extraction (numbering
// jumps 187->189, 189->191, 192->195, 195->197, 197->199, 206->208, 224->226,
// 232->235) — including the FirstNonValue iterator setup, the reverse-range
// loop header, and the EmitStartPt adjustment inside the erase branch. Verify
// against upstream FastISel.cpp before relying on details here.
189 // Save the first instruction after local values, for later.
191 ++FirstNonValue;
192
195 : FuncInfo.MBB->rend();
197 for (MachineInstr &LocalMI :
// Only instructions that define exactly one vreg and use no other vregs are
// candidates for deletion (see findLocalRegDef).
199 Register DefReg = findLocalRegDef(LocalMI);
200 if (!DefReg)
201 continue;
// Registers with pending fixups may still gain uses; keep them.
202 if (FuncInfo.RegsWithFixups.count(DefReg))
203 continue;
204 bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
205 if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
206 if (EmitStartPt == &LocalMI)
208 LLVM_DEBUG(dbgs() << "removing dead local value materialization"
209 << LocalMI);
210 LocalMI.eraseFromParent();
211 }
212 }
213
214 if (FirstNonValue != FuncInfo.MBB->end()) {
215 // See if there are any local value instructions left. If so, we want to
216 // make sure the first one has a debug location; if it doesn't, use the
217 // first non-value instruction's debug location.
218
219 // If EmitStartPt is non-null, this block had copies at the top before
220 // FastISel started doing anything; it points to the last one, so the
221 // first local value instruction is the one after EmitStartPt.
222 // If EmitStartPt is null, the first local value instruction is at the
223 // top of the block.
224 MachineBasicBlock::iterator FirstLocalValue =
226 : FuncInfo.MBB->begin();
227 if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
228 FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
229 }
230 }
231
232 LocalValueMap.clear();
235 SavedInsertPt = FuncInfo.InsertPt;
236}
237
// Body of FastISel::getRegForValue(const Value *V) — return (creating or
// materializing if needed) the virtual register holding V. NOTE(review): the
// numbering jumps (->239, 250->252, 256->258, 265->267) — the signature, the
// promoted-type handling, the register-map lookup, and the createResultReg
// fast path were dropped by the extraction; verify against upstream.
239 EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
240 // Don't handle non-simple values in FastISel.
241 if (!RealVT.isSimple())
242 return Register();
243
244 // Ignore illegal types. We must do this before looking up the value
245 // in ValueMap because Arguments are given virtual registers regardless
246 // of whether FastISel can handle them.
247 MVT VT = RealVT.getSimpleVT();
248 if (!TLI.isTypeLegal(VT)) {
249 // Handle integer promotions, though, because they're common and easy.
250 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
252 else
253 return Register();
254 }
255
256 // Look up the value to see if we already have a register for it.
258 if (Reg)
259 return Reg;
260
261 // In bottom-up mode, just create the virtual register which will be used
262 // to hold the value. It will be materialized later.
263 if (isa<Instruction>(V) &&
264 (!isa<AllocaInst>(V) ||
265 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
267
// Materialization is emitted in the local-value area, above the current
// insert point (see enterLocalValueArea/leaveLocalValueArea).
268 SavePoint SaveInsertPt = enterLocalValueArea();
269
270 // Materialize the value in a register. Emit any instructions in the
271 // local value area.
272 Reg = materializeRegForValue(V, VT);
273
274 leaveLocalValueArea(SaveInsertPt);
275
276 return Reg;
277}
278
279Register FastISel::materializeConstant(const Value *V, MVT VT) {
// Materialize a constant into a register via target-independent strategies:
// integer immediates, allocas, null pointers (as integer zero), FP constants
// (directly or through an int-to-fp conversion), constant expressions, and
// undef (as IMPLICIT_DEF). Returns 0 on failure. NOTE(review): the numbering
// jumps (289->291, 307->309, 318->320, 320->323) — parts of the null-pointer
// and float-to-int paths and the result-register creation for undef were
// dropped by the extraction; verify against upstream FastISel.cpp.
280 Register Reg;
281 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
// Only immediates that fit in 64 bits can go through fastEmit_i.
282 if (CI->getValue().getActiveBits() <= 64)
283 Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
284 } else if (isa<AllocaInst>(V))
285 Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
286 else if (isa<ConstantPointerNull>(V))
287 // Translate this as an integer zero so that it can be
288 // local-CSE'd with actual integer zeros.
289 Reg =
291 else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
292 if (CF->isNullValue())
293 Reg = fastMaterializeFloatZero(CF);
294 else
295 // Try to emit the constant directly.
296 Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
297
298 if (!Reg) {
299 // Try to emit the constant by using an integer constant with a cast.
300 const APFloat &Flt = CF->getValueAPF();
301 EVT IntVT = TLI.getPointerTy(DL);
302 uint32_t IntBitWidth = IntVT.getSizeInBits();
303 APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
304 bool isExact;
305 (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
// Only take the SINT_TO_FP route when the float converts without rounding.
306 if (isExact) {
307 Register IntegerReg =
309 if (IntegerReg)
310 Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
311 IntegerReg);
312 }
313 }
314 } else if (const auto *Op = dyn_cast<Operator>(V)) {
// Constant expressions: try the generic operator selector first, then the
// target hook; fail if neither can handle it.
315 if (!selectOperator(Op, Op->getOpcode()))
316 if (!isa<Instruction>(Op) ||
317 !fastSelectInstruction(cast<Instruction>(Op)))
318 return 0;
320 } else if (isa<UndefValue>(V)) {
323 TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
324 }
325 return Reg;
326}
327
328/// Helper for getRegForValue. This function is called when the value isn't
329/// already available in a register and must be materialized with new
330/// instructions.
331Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
// NOTE(review): the numbering jumps 331->333 and 345->347 — the declaration
// of Reg and the bookkeeping after caching (likely LastLocalValue update)
// were dropped by the extraction; verify against upstream FastISel.cpp.
333 // Give the target-specific code a try first.
334 if (isa<Constant>(V))
335 Reg = fastMaterializeConstant(cast<Constant>(V));
336
337 // If target-specific code couldn't or didn't want to handle the value, then
338 // give target-independent code a try.
339 if (!Reg)
340 Reg = materializeConstant(V, VT);
341
342 // Don't cache constant materializations in the general ValueMap.
343 // To do so would require tracking what uses they dominate.
344 if (Reg) {
345 LocalValueMap[V] = Reg;
347 }
348 return Reg;
349}
350
// Body of FastISel::lookUpRegForValue(const Value *V) — NOTE(review): the
// signature (351) and the ValueMap iterator declaration (356) were dropped
// by the extraction; verify against upstream FastISel.cpp.
352 // Look up the value to see if we already have a register for it. We
353 // cache values defined by Instructions across blocks, and other values
354 // only locally. This is because Instructions already have the SSA
355 // def-dominates-use requirement enforced.
357 if (I != FuncInfo.ValueMap.end())
358 return I->second;
// Fall back to the block-local map.
359 return LocalValueMap[V];
360}
361
362void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
363 if (!isa<Instruction>(I)) {
364 LocalValueMap[I] = Reg;
365 return;
366 }
367
368 Register &AssignedReg = FuncInfo.ValueMap[I];
369 if (!AssignedReg)
370 // Use the new register.
371 AssignedReg = Reg;
372 else if (Reg != AssignedReg) {
373 // Arrange for uses of AssignedReg to be replaced by uses of Reg.
374 for (unsigned i = 0; i < NumRegs; i++) {
375 FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
376 FuncInfo.RegsWithFixups.insert(Reg + i);
377 }
378
379 AssignedReg = Reg;
380 }
381}
382
// Body of FastISel::getRegForGEPIndex(const Value *Idx) — return a register
// holding the GEP index, adjusted to pointer width. NOTE(review): the
// signature and the initial getRegForValue(Idx) call (383-384) were dropped
// by the extraction; verify against upstream FastISel.cpp.
385 if (!IdxN)
386 // Unhandled operand. Halt "fast" selection and bail.
387 return Register();
388
389 // If the index is smaller or larger than intptr_t, truncate or extend it.
390 MVT PtrVT = TLI.getPointerTy(DL);
391 EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
392 if (IdxVT.bitsLT(PtrVT)) {
// Narrower indices are sign-extended up to pointer width.
393 IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
394 } else if (IdxVT.bitsGT(PtrVT)) {
395 IdxN =
396 fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
397 }
398 return IdxN;
399}
400
// Body of FastISel::recomputeInsertPt() — NOTE(review): most of this function
// (lines 401, 403, 405, 407) was dropped by the extraction; only the branch
// skeleton survives, so verify against upstream FastISel.cpp before reading
// anything into it. It appears to reposition the insert point relative to
// the last local-value instruction when one exists — TODO confirm.
402 if (getLastLocalValue()) {
404 FuncInfo.MBB = FuncInfo.InsertPt->getParent();
406 } else
408}
409
// Body of FastISel::removeDeadCode(I, E) — erase the machine instructions in
// the half-open range [I, E), keeping bookkeeping pointers valid and counting
// the removals in NumFastIselDead. NOTE(review): the signature (410-411) and
// the trailing line after the loop (427) were dropped by the extraction;
// verify against upstream FastISel.cpp.
412 assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
413 "Invalid iterator!");
414 while (I != E) {
// If a saved marker points at an instruction being erased, slide it to the
// end of the erased range so it stays valid.
415 if (SavedInsertPt == I)
416 SavedInsertPt = E;
417 if (EmitStartPt == I)
418 EmitStartPt = E.isValid() ? &*E : nullptr;
419 if (LastLocalValue == I)
420 LastLocalValue = E.isValid() ? &*E : nullptr;
421
// Advance the iterator before erasing so it is never left dangling.
422 MachineInstr *Dead = &*I;
423 ++I;
424 Dead->eraseFromParent();
425 ++NumFastIselDead;
426 }
428}
429
// Body of FastISel::enterLocalValueArea() — save the current insert point so
// it can later be restored by leaveLocalValueArea(). NOTE(review): the
// signature (430) and one intervening line (432) were dropped by the
// extraction; verify against upstream FastISel.cpp.
431 SavePoint OldInsertPt = FuncInfo.InsertPt;
433 return OldInsertPt;
434}
435
// Body of FastISel::leaveLocalValueArea(SavePoint OldInsertPt) —
// NOTE(review): the signature and the guard preceding the std::prev
// dereference (436-437) were dropped by the extraction; verify against
// upstream FastISel.cpp.
// Record the last instruction emitted into the local-value area.
438 LastLocalValue = &*std::prev(FuncInfo.InsertPt);
439
440 // Restore the previous insert position.
441 FuncInfo.InsertPt = OldInsertPt;
442}
443
444bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
445 EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
446 if (VT == MVT::Other || !VT.isSimple())
447 // Unhandled type. Halt "fast" selection and bail.
448 return false;
449
450 // We only handle legal types. For example, on x86-32 the instruction
451 // selector contains all of the 64-bit instructions from x86-64,
452 // under the assumption that i64 won't be used if the target doesn't
453 // support it.
454 if (!TLI.isTypeLegal(VT)) {
455 // MVT::i1 is special. Allow AND, OR, or XOR because they
456 // don't require additional zeroing, which makes them easy.
457 if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
458 ISDOpcode == ISD::XOR))
459 VT = TLI.getTypeToTransformTo(I->getContext(), VT);
460 else
461 return false;
462 }
463
464 // Check if the first operand is a constant, and handle it as "ri". At -O0,
465 // we don't have anything that canonicalizes operand order.
466 if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
467 if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
468 Register Op1 = getRegForValue(I->getOperand(1));
469 if (!Op1)
470 return false;
471
472 Register ResultReg =
473 fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
474 VT.getSimpleVT());
475 if (!ResultReg)
476 return false;
477
478 // We successfully emitted code for the given LLVM Instruction.
479 updateValueMap(I, ResultReg);
480 return true;
481 }
482
483 Register Op0 = getRegForValue(I->getOperand(0));
484 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
485 return false;
486
487 // Check if the second operand is a constant and handle it appropriately.
488 if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
489 uint64_t Imm = CI->getSExtValue();
490
491 // Transform "sdiv exact X, 8" -> "sra X, 3".
492 if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
493 cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
494 Imm = Log2_64(Imm);
495 ISDOpcode = ISD::SRA;
496 }
497
498 // Transform "urem x, pow2" -> "and x, pow2-1".
499 if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
500 isPowerOf2_64(Imm)) {
501 --Imm;
502 ISDOpcode = ISD::AND;
503 }
504
505 Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
506 VT.getSimpleVT());
507 if (!ResultReg)
508 return false;
509
510 // We successfully emitted code for the given LLVM Instruction.
511 updateValueMap(I, ResultReg);
512 return true;
513 }
514
515 Register Op1 = getRegForValue(I->getOperand(1));
516 if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
517 return false;
518
519 // Now we have both operands in registers. Emit the instruction.
520 Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
521 ISDOpcode, Op0, Op1);
522 if (!ResultReg)
523 // Target-specific code wasn't able to find a machine opcode for
524 // the given ISD opcode and type. Halt "fast" selection and bail.
525 return false;
526
527 // We successfully emitted code for the given LLVM Instruction.
528 updateValueMap(I, ResultReg);
529 return true;
530}
531
// Body of FastISel::selectGetElementPtr(const User *I) — lower a scalar GEP
// by accumulating constant offsets into TotalOffs (flushed whenever it
// reaches MaxOffs) and emitting ADD/MUL for variable indices. NOTE(review):
// the signature (532), the gep_type_iterator loop header (548), the
// getRegForGEPIndex call (590), and the final updateValueMap call (611) were
// dropped by the extraction; verify against upstream FastISel.cpp.
533 Register N = getRegForValue(I->getOperand(0));
534 if (!N) // Unhandled operand. Halt "fast" selection and bail.
535 return false;
536
537 // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
538 // and bail.
539 if (isa<VectorType>(I->getType()))
540 return false;
541
542 // Keep a running tab of the total offset to coalesce multiple N = N + Offset
543 // into a single N = N + TotalOffset.
544 uint64_t TotalOffs = 0;
545 // FIXME: What's a good SWAG number for MaxOffs?
546 uint64_t MaxOffs = 2048;
547 MVT VT = TLI.getPointerTy(DL);
549 GTI != E; ++GTI) {
550 const Value *Idx = GTI.getOperand();
551 if (StructType *StTy = GTI.getStructTypeOrNull()) {
// Struct field indices are always constant; add the field's layout offset.
552 uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
553 if (Field) {
554 // N = N + Offset
555 TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
556 if (TotalOffs >= MaxOffs) {
557 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
558 if (!N) // Unhandled operand. Halt "fast" selection and bail.
559 return false;
560 TotalOffs = 0;
561 }
562 }
563 } else {
564 Type *Ty = GTI.getIndexedType();
565
566 // If this is a constant subscript, handle it quickly.
567 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
568 if (CI->isZero())
569 continue;
570 // N = N + Offset
571 uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
572 TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
573 if (TotalOffs >= MaxOffs) {
574 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
575 if (!N) // Unhandled operand. Halt "fast" selection and bail.
576 return false;
577 TotalOffs = 0;
578 }
579 continue;
580 }
// A variable index follows: flush the accumulated constant offset first.
581 if (TotalOffs) {
582 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
583 if (!N) // Unhandled operand. Halt "fast" selection and bail.
584 return false;
585 TotalOffs = 0;
586 }
587
588 // N = N + Idx * ElementSize;
589 uint64_t ElementSize = DL.getTypeAllocSize(Ty);
591 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
592 return false;
593
594 if (ElementSize != 1) {
595 IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
596 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
597 return false;
598 }
599 N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
600 if (!N) // Unhandled operand. Halt "fast" selection and bail.
601 return false;
602 }
603 }
// Flush any remaining constant offset.
604 if (TotalOffs) {
605 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
606 if (!N) // Unhandled operand. Halt "fast" selection and bail.
607 return false;
608 }
609
610 // We successfully emitted code for the given LLVM Instruction.
612 return true;
613}
614
615bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
616 const CallInst *CI, unsigned StartIdx) {
// Encode the live variables of a stackmap/patchpoint call, starting at
// argument StartIdx, as machine operands appended to Ops. Returns false when
// a value cannot be encoded. NOTE(review): lines 625, 632 and 636 (the
// null-pointer immediate, the frame-index operand, and the getRegForValue
// call) were dropped by the extraction; verify against upstream.
617 for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
618 Value *Val = CI->getArgOperand(i);
619 // Check for constants and encode them with a StackMaps::ConstantOp prefix.
620 if (const auto *C = dyn_cast<ConstantInt>(Val)) {
621 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
622 Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
623 } else if (isa<ConstantPointerNull>(Val)) {
624 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
626 } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
627 // Values coming from a stack location also require a special encoding,
628 // but that is added later on by the target specific frame index
629 // elimination implementation.
630 auto SI = FuncInfo.StaticAllocaMap.find(AI);
631 if (SI != FuncInfo.StaticAllocaMap.end())
633 else
634 return false;
635 } else {
// Anything else must be available in (or materializable into) a register.
637 if (!Reg)
638 return false;
639 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
640 }
641 }
642 return true;
643}
644
// Body of FastISel::selectStackmap(const CallInst *I) — lower the stackmap
// intrinsic directly to CALLSEQ_START / STACKMAP / CALLSEQ_END without a
// real call. NOTE(review): the extraction dropped the signature (645), the
// operand-vector declaration (661), and several BuildMI lines (687, 700, 707,
// 712); verify against upstream FastISel.cpp.
646 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
647 // [live variables...])
648 assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
649 "Stackmap cannot return a value.");
650
651 // The stackmap intrinsic only records the live variables (the arguments
652 // passed to it) and emits NOPS (if requested). Unlike the patchpoint
653 // intrinsic, this won't be lowered to a function call. This means we don't
654 // have to worry about calling conventions and target-specific lowering code.
655 // Instead we perform the call lowering right here.
656 //
657 // CALLSEQ_START(0, 0...)
658 // STACKMAP(id, nbytes, ...)
659 // CALLSEQ_END(0, 0)
660 //
662
663 // Add the <id> and <numBytes> constants.
664 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
665 "Expected a constant integer.");
666 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
667 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
668
669 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
670 "Expected a constant integer.");
671 const auto *NumBytes =
672 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
673 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
674
675 // Push live variables for the stack map (skipping the first two arguments
676 // <id> and <numBytes>).
677 if (!addStackMapLiveVars(Ops, I, 2))
678 return false;
679
680 // We are not adding any register mask info here, because the stackmap doesn't
681 // clobber anything.
682
683 // Add scratch registers as implicit def and early clobber.
684 CallingConv::ID CC = I->getCallingConv();
685 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
686 for (unsigned i = 0; ScratchRegs[i]; ++i)
688 ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
689 /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
690
691 // Issue CALLSEQ_START
692 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
693 auto Builder =
694 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
// The frame-setup pseudo takes as many zero immediates as it has operands.
695 const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
696 for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
697 Builder.addImm(0);
698
699 // Issue STACKMAP.
701 TII.get(TargetOpcode::STACKMAP));
702 for (auto const &MO : Ops)
703 MIB.add(MO);
704
705 // Issue CALLSEQ_END
706 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
708 .addImm(0)
709 .addImm(0);
710
711 // Inform the Frame Information that we have a stackmap in this function.
713
714 return true;
715}
716
717/// Lower an argument list according to the target calling convention.
718///
719/// This is a helper for lowering intrinsics that follow a target calling
720/// convention or require stack pointer adjustment. Only a subset of the
721/// intrinsic's operands need to participate in the calling convention.
722bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
723 unsigned NumArgs, const Value *Callee,
724 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
725 ArgListTy Args;
726 Args.reserve(NumArgs);
727
728 // Populate the argument list.
729 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
730 Value *V = CI->getOperand(ArgI);
731
732 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
733
734 ArgListEntry Entry;
735 Entry.Val = V;
736 Entry.Ty = V->getType();
737 Entry.setAttributes(CI, ArgI);
738 Args.push_back(Entry);
739 }
740
741 Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
742 : CI->getType();
743 CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
744
745 return lowerCallTo(CLI);
746}
747
// FastISel::CallLoweringInfo::setCallee (by-name overload) — NOTE(review):
// the declaration line (748) was dropped by the extraction; verify against
// upstream FastISel.cpp.
749 const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
750 StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
// Mangle the target name for this data layout, intern it as an MCSymbol, and
// forward to the symbol-based setCallee overload.
751 SmallString<32> MangledName;
752 Mangler::getNameWithPrefix(MangledName, Target, DL);
753 MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
754 return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
755}
756
// Body of FastISel::selectPatchpoint(const CallInst *I) — lower the
// patchpoint intrinsic: perform the real call lowering, then replace the
// generated call with a PATCHPOINT pseudo carrying id/nbytes/target/args/
// live-vars/register-mask operands. NOTE(review): the extraction dropped the
// signature (757) and numerous lines (784, 791, 796, 826, 828, 860-861, 876,
// 882, 888, 891) including the CLI declaration, the Ops vector, the
// result-register creation, the GlobalValue/null target operands, the
// register-mask push, and the BuildMI call; verify against upstream.
758 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
759 // i32 <numBytes>,
760 // i8* <target>,
761 // i32 <numArgs>,
762 // [Args...],
763 // [live variables...])
764 CallingConv::ID CC = I->getCallingConv();
765 bool IsAnyRegCC = CC == CallingConv::AnyReg;
766 bool HasDef = !I->getType()->isVoidTy();
767 Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
768
769 // Get the real number of arguments participating in the call <numArgs>
770 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
771 "Expected a constant integer.");
772 const auto *NumArgsVal =
773 cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
774 unsigned NumArgs = NumArgsVal->getZExtValue();
775
776 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
777 // This includes all meta-operands up to but not including CC.
778 unsigned NumMetaOpers = PatchPointOpers::CCPos;
779 assert(I->arg_size() >= NumMetaOpers + NumArgs &&
780 "Not enough arguments provided to the patchpoint intrinsic");
781
782 // For AnyRegCC the arguments are lowered later on manually.
783 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
785 CLI.setIsPatchPoint();
786 if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
787 return false;
788
789 assert(CLI.Call && "No call instruction specified.");
790
792
793 // Add an explicit result reg if we use the anyreg calling convention.
794 if (IsAnyRegCC && HasDef) {
795 assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
797 CLI.NumResultRegs = 1;
798 Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
799 }
800
801 // Add the <id> and <numBytes> constants.
802 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
803 "Expected a constant integer.");
804 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
805 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
806
807 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
808 "Expected a constant integer.");
809 const auto *NumBytes =
810 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
811 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
812
813 // Add the call target.
// Constant-address targets are encoded as immediates; symbolic targets (the
// GlobalValue branch, dropped lines) and null take other operand forms.
814 if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
815 uint64_t CalleeConstAddr =
816 cast<ConstantInt>(C->getOperand(0))->getZExtValue();
817 Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
818 } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
819 if (C->getOpcode() == Instruction::IntToPtr) {
820 uint64_t CalleeConstAddr =
821 cast<ConstantInt>(C->getOperand(0))->getZExtValue();
822 Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
823 } else
824 llvm_unreachable("Unsupported ConstantExpr.");
825 } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
827 } else if (isa<ConstantPointerNull>(Callee))
829 else
830 llvm_unreachable("Unsupported callee address.");
831
832 // Adjust <numArgs> to account for any arguments that have been passed on
833 // the stack instead.
834 unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
835 Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
836
837 // Add the calling convention
838 Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
839
840 // Add the arguments we omitted previously. The register allocator should
841 // place these in any free register.
842 if (IsAnyRegCC) {
843 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
844 Register Reg = getRegForValue(I->getArgOperand(i));
845 if (!Reg)
846 return false;
847 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
848 }
849 }
850
851 // Push the arguments from the call instruction.
852 for (auto Reg : CLI.OutRegs)
853 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
854
855 // Push live variables for the stack map.
856 if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
857 return false;
858
859 // Push the register mask info.
862
863 // Add scratch registers as implicit def and early clobber.
864 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
865 for (unsigned i = 0; ScratchRegs[i]; ++i)
867 ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
868 /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
869
870 // Add implicit defs (return values).
871 for (auto Reg : CLI.InRegs)
872 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
873 /*isImp=*/true));
874
875 // Insert the patchpoint instruction before the call generated by the target.
877 TII.get(TargetOpcode::PATCHPOINT));
878
879 for (auto &MO : Ops)
880 MIB.add(MO);
881
883
884 // Delete the original call instruction.
885 CLI.Call->eraseFromParent();
886
887 // Inform the Frame Information that we have a patchpoint in this function.
889
890 if (CLI.NumResultRegs)
892 return true;
893}
894
// Body of FastISel::selectXRayCustomEvent(const CallInst *I) — emit a
// PATCHABLE_EVENT_CALL pseudo carrying the event operands. NOTE(review): the
// extraction dropped the signature and most operand setup (895, 897,
// 899-900, 902, 904-905), including the triple check guarding the early
// return and the operand-register pushes; verify against upstream.
896 const auto &Triple = TM.getTargetTriple();
// Early-out for targets where XRay custom events are unsupported — the
// dropped condition presumably tests the triple; TODO confirm upstream.
898 return true; // don't do anything to this instruction.
901 /*isDef=*/false));
903 /*isDef=*/false));
906 TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
907 for (auto &MO : Ops)
908 MIB.add(MO);
909
910 // Insert the Patchable Event Call instruction, that gets lowered properly.
911 return true;
912}
913
// Body of FastISel::selectXRayTypedEvent(const CallInst *I) — emit a
// PATCHABLE_TYPED_EVENT_CALL pseudo with three operand registers (the
// extraction shows three pushes). NOTE(review): the signature and most
// operand setup lines (914, 916, 918-919, 921, 923, 925-926) were dropped;
// verify against upstream FastISel.cpp.
915 const auto &Triple = TM.getTargetTriple();
// Early-out for targets without XRay typed-event support — the dropped
// condition presumably tests the triple; TODO confirm upstream.
917 return true; // don't do anything to this instruction.
920 /*isDef=*/false));
922 /*isDef=*/false));
924 /*isDef=*/false));
927 TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
928 for (auto &MO : Ops)
929 MIB.add(MO);
930
931 // Insert the Patchable Typed Event Call instruction, that gets lowered properly.
932 return true;
933}
934
935/// Returns an AttributeList representing the attributes applied to the return
936/// value of the given call.
// NOTE(review): the function header of getReturnAttrs and the AttributeList
// construction call (lines 937-938, 946) were dropped by the extraction;
// verify against upstream FastISel.cpp.
// Translate the CallLoweringInfo return flags into IR attribute kinds.
939 if (CLI.RetSExt)
940 Attrs.push_back(Attribute::SExt);
941 if (CLI.RetZExt)
942 Attrs.push_back(Attribute::ZExt);
943 if (CLI.IsInReg)
944 Attrs.push_back(Attribute::InReg);
945
947 Attrs);
948}
949
950bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
951 unsigned NumArgs) {
952 MCContext &Ctx = MF->getContext();
953 SmallString<32> MangledName;
954 Mangler::getNameWithPrefix(MangledName, SymName, DL);
955 MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
956 return lowerCallTo(CI, Sym, NumArgs);
957}
958
// FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol, NumArgs) —
// build an argument list from the call's first NumArgs operands and forward
// to the CallLoweringInfo-based overload. NOTE(review): the first signature
// line (959) and two lines around the CLI construction (980, 982) were
// dropped by the extraction; verify against upstream FastISel.cpp.
960 unsigned NumArgs) {
961 FunctionType *FTy = CI->getFunctionType();
962 Type *RetTy = CI->getType();
963
964 ArgListTy Args;
965 Args.reserve(NumArgs);
966
967 // Populate the argument list.
968 // Attributes for args start at offset 1, after the return attribute.
969 for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
970 Value *V = CI->getOperand(ArgI);
971
972 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
973
974 ArgListEntry Entry;
975 Entry.Val = V;
976 Entry.Ty = V->getType();
977 Entry.setAttributes(CI, ArgI);
978 Args.push_back(Entry);
979 }
981
983 CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);
984
985 return lowerCallTo(CLI);
986}
987
// Core call-lowering worker: computes the ISD::InputArg list for the return
// value(s), the ISD::ArgFlagsTy for every outgoing argument, then hands the
// populated CallLoweringInfo to the target's fastLowerCall hook. Returns
// false (fall back to SelectionDAG) when the return cannot be lowered or the
// target rejects the call.
// NOTE(review): the HTML extraction dropped several source lines in this
// function (988 signature, 994 Outs decl, 1029, 1096, 1099); the surviving
// code is reproduced unchanged.
989 // Handle the incoming return values from the call.
990 CLI.clearIns();
991 SmallVector<EVT, 4> RetTys;
992 ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
993
995 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
996
997 bool CanLowerReturn = TLI.CanLowerReturn(
998 CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
999
1000 // FIXME: sret demotion isn't supported yet - bail out.
1001 if (!CanLowerReturn)
1002 return false;
1003
// One InputArg per register needed for each legal piece of the return type.
1004 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
1005 EVT VT = RetTys[I];
1006 MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
1007 unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
1008 for (unsigned i = 0; i != NumRegs; ++i) {
1009 ISD::InputArg MyFlags;
1010 MyFlags.VT = RegisterVT;
1011 MyFlags.ArgVT = VT;
1012 MyFlags.Used = CLI.IsReturnValueUsed;
1013 if (CLI.RetSExt)
1014 MyFlags.Flags.setSExt();
1015 if (CLI.RetZExt)
1016 MyFlags.Flags.setZExt();
1017 if (CLI.IsInReg)
1018 MyFlags.Flags.setInReg();
1019 CLI.Ins.push_back(MyFlags);
1020 }
1021 }
1022
1023 // Handle all of the outgoing arguments.
1024 CLI.clearOuts();
1025 for (auto &Arg : CLI.getArgs()) {
1026 Type *FinalType = Arg.Ty;
// For byval arguments the ABI decision is made on the pointee type.
1027 if (Arg.IsByVal)
1028 FinalType = Arg.IndirectType;
1030 FinalType, CLI.CallConv, CLI.IsVarArg, DL);
1031
// Translate the IR-level parameter attributes into ISD argument flags.
1032 ISD::ArgFlagsTy Flags;
1033 if (Arg.IsZExt)
1034 Flags.setZExt();
1035 if (Arg.IsSExt)
1036 Flags.setSExt();
1037 if (Arg.IsInReg)
1038 Flags.setInReg();
1039 if (Arg.IsSRet)
1040 Flags.setSRet();
1041 if (Arg.IsSwiftSelf)
1042 Flags.setSwiftSelf();
1043 if (Arg.IsSwiftAsync)
1044 Flags.setSwiftAsync();
1045 if (Arg.IsSwiftError)
1046 Flags.setSwiftError();
1047 if (Arg.IsCFGuardTarget)
1048 Flags.setCFGuardTarget();
1049 if (Arg.IsByVal)
1050 Flags.setByVal();
1051 if (Arg.IsInAlloca) {
1052 Flags.setInAlloca();
1053 // Set the byval flag for CCAssignFn callbacks that don't know about
1054 // inalloca. This way we can know how many bytes we should've allocated
1055 // and how many bytes a callee cleanup function will pop. If we port
1056 // inalloca to more targets, we'll have to add custom inalloca handling in
1057 // the various CC lowering callbacks.
1058 Flags.setByVal();
1059 }
1060 if (Arg.IsPreallocated) {
1061 Flags.setPreallocated();
1062 // Set the byval flag for CCAssignFn callbacks that don't know about
1063 // preallocated. This way we can know how many bytes we should've
1064 // allocated and how many bytes a callee cleanup function will pop. If we
1065 // port preallocated to more targets, we'll have to add custom
1066 // preallocated handling in the various CC lowering callbacks.
1067 Flags.setByVal();
1068 }
1069 MaybeAlign MemAlign = Arg.Alignment;
1070 if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
1071 unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);
1072
1073 // For ByVal, alignment should come from FE. BE will guess if this info
1074 // is not there, but there are cases it cannot get right.
1075 if (!MemAlign)
1076 MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
1077 Flags.setByValSize(FrameSize);
1078 } else if (!MemAlign) {
1079 MemAlign = DL.getABITypeAlign(Arg.Ty);
1080 }
1081 Flags.setMemAlign(*MemAlign);
1082 if (Arg.IsNest)
1083 Flags.setNest();
1084 if (NeedsRegBlock)
1085 Flags.setInConsecutiveRegs();
1086 Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
1087 CLI.OutVals.push_back(Arg.Val);
1088 CLI.OutFlags.push_back(Flags);
1089 }
1090
// Let the target attempt the actual lowering; false means "fall back to
// SelectionDAG" for this call.
1091 if (!fastLowerCall(CLI))
1092 return false;
1093
1094 // Set all unused physreg defs as dead.
1095 assert(CLI.Call && "No call instruction specified.");
1097
1098 if (CLI.NumResultRegs && CLI.CB)
1100
1101 // Set labels for heapallocsite call.
1102 if (CLI.CB)
1103 if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
1104 CLI.Call->setHeapAllocMarker(*MF, MD);
1105
1106 return true;
1107}
1108
// Lower an IR call instruction: build the argument list (skipping empty
// types), decide whether a tail call is permitted, and defer to the
// CallLoweringInfo-based lowerCallTo.
// NOTE(review): the HTML extraction dropped source line 1109 (this
// function's signature); the surviving code is reproduced unchanged.
1110 FunctionType *FuncTy = CI->getFunctionType();
1111 Type *RetTy = CI->getType();
1112
1113 ArgListTy Args;
1114 ArgListEntry Entry;
1115 Args.reserve(CI->arg_size());
1116
1117 for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
1118 Value *V = *i;
1119
1120 // Skip empty types
1121 if (V->getType()->isEmptyTy())
1122 continue;
1123
1124 Entry.Val = V;
1125 Entry.Ty = V->getType();
1126
1127 // Skip the first return-type Attribute to get to params.
1128 Entry.setAttributes(CI, i - CI->arg_begin());
1129 Args.push_back(Entry);
1130 }
1131
1132 // Check if target-independent constraints permit a tail call here.
1133 // Target-dependent constraints are checked within fastLowerCall.
1134 bool IsTailCall = CI->isTailCall();
1135 if (IsTailCall && !isInTailCallPosition(*CI, TM))
1136 IsTailCall = false;
// "disable-tail-calls" may not override a musttail call.
1137 if (IsTailCall && !CI->isMustTailCall() &&
1138 MF->getFunction().getFnAttribute("disable-tail-calls").getValueAsBool())
1139 IsTailCall = false;
1140
1141 CallLoweringInfo CLI;
1142 CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
1143 .setTailCall(IsTailCall);
1144
// Emit any "dontcall" attribute diagnostics for this call site.
1145 diagnoseDontCall(*CI);
1146
1147 return lowerCallTo(CLI);
1148}
1149
// Select a call instruction: handles constraint-free inline asm inline,
// routes intrinsics to selectIntrinsicCall, and everything else to
// lowerCall.
// NOTE(review): the HTML extraction dropped source lines 1150 (signature),
// 1161 and 1168 here; the surviving code is reproduced unchanged.
1151 const CallInst *Call = cast<CallInst>(I);
1152
1153 // Handle simple inline asms.
1154 if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
1155 // Don't attempt to handle constraints.
1156 if (!IA->getConstraintString().empty())
1157 return false;
1158
// Encode the asm's side-effect/align-stack/convergent/dialect bits into
// the INLINEASM instruction's extra-info operand.
1159 unsigned ExtraInfo = 0;
1160 if (IA->hasSideEffects())
1162 if (IA->isAlignStack())
1163 ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1164 if (Call->isConvergent())
1165 ExtraInfo |= InlineAsm::Extra_IsConvergent;
1166 ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
1167
1169 TII.get(TargetOpcode::INLINEASM));
1170 MIB.addExternalSymbol(IA->getAsmString().c_str());
1171 MIB.addImm(ExtraInfo);
1172
// Preserve source-location metadata so asm diagnostics point at the IR.
1173 const MDNode *SrcLoc = Call->getMetadata("srcloc");
1174 if (SrcLoc)
1175 MIB.addMetadata(SrcLoc);
1176
1177 return true;
1178 }
1179
1180 // Handle intrinsic function calls.
1181 if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1182 return selectIntrinsicCall(II);
1183
1184 return lowerCall(Call);
1185}
1186
// Select an intrinsic call. Trivial intrinsics (lifetime markers, assume,
// donothing, ...) are dropped; debug intrinsics are lowered to DBG_VALUE /
// DBG_DECLARE-style machine instructions; the rest fall through to the
// target hook fastLowerIntrinsicCall.
// NOTE(review): the HTML extraction dropped a number of source lines in this
// function (1187 signature, 1227, 1244, 1248, 1254-1257, 1263, 1280, 1293,
// 1299, 1305, 1319, 1324-1326, 1344); the surviving code is reproduced
// unchanged.
1188 switch (II->getIntrinsicID()) {
1189 default:
1190 break;
1191 // At -O0 we don't care about the lifetime intrinsics.
1192 case Intrinsic::lifetime_start:
1193 case Intrinsic::lifetime_end:
1194 // The donothing intrinsic does, well, nothing.
1195 case Intrinsic::donothing:
1196 // Neither does the sideeffect intrinsic.
1197 case Intrinsic::sideeffect:
1198 // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1199 case Intrinsic::assume:
1200 // Neither does the llvm.experimental.noalias.scope.decl intrinsic
1201 case Intrinsic::experimental_noalias_scope_decl:
1202 return true;
1203 case Intrinsic::dbg_declare: {
1204 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1205 assert(DI->getVariable() && "Missing variable");
// Without debug info in the MF, it is safe to simply drop the intrinsic.
1206 if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1207 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1208 << " (!hasDebugInfo)\n");
1209 return true;
1210 }
1211
1212 const Value *Address = DI->getAddress();
1213 if (!Address || isa<UndefValue>(Address)) {
1214 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1215 << " (bad/undef address)\n");
1216 return true;
1217 }
1218
1219 // Byval arguments with frame indices were already handled after argument
1220 // lowering and before isel.
1221 const auto *Arg =
1222 dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
1223 if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
1224 return true;
1225
1226 std::optional<MachineOperand> Op;
1228 Op = MachineOperand::CreateReg(Reg, false);
1229
1230 // If we have a VLA that has a "use" in a metadata node that's then used
1231 // here but it has no other uses, then we have a problem. E.g.,
1232 //
1233 // int foo (const int *x) {
1234 // char a[*x];
1235 // return 0;
1236 // }
1237 //
1238 // If we assign 'a' a vreg and fast isel later on has to use the selection
1239 // DAG isel, it will want to copy the value to the vreg. However, there are
1240 // no uses, which goes counter to what selection DAG isel expects.
1241 if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1242 (!isa<AllocaInst>(Address) ||
1243 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1245 false);
1246
1247 if (Op) {
1249 "Expected inlined-at fields to agree");
1250 if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
1251 // If using instruction referencing, produce this as a DBG_INSTR_REF,
1252 // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
1253 // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
1255 {dwarf::DW_OP_LLVM_arg, 0, dwarf::DW_OP_deref});
1258 TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, *Op,
1259 DI->getVariable(), NewExpr);
1260 } else {
1261 // A dbg.declare describes the address of a source variable, so lower it
1262 // into an indirect DBG_VALUE.
1264 TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op,
1265 DI->getVariable(), DI->getExpression());
1266 }
1267 } else {
1268 // We can't yet handle anything else here because it would require
1269 // generating code, thus altering codegen because of debug info.
1270 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1271 << " (no materialized reg for address)\n");
1272 }
1273 return true;
1274 }
1275 case Intrinsic::dbg_value: {
1276 // This form of DBG_VALUE is target-independent.
1277 const DbgValueInst *DI = cast<DbgValueInst>(II);
1278 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1279 const Value *V = DI->getValue();
1281 "Expected inlined-at fields to agree");
1282 if (!V || isa<UndefValue>(V) || DI->hasArgList()) {
1283 // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
1284 // undef DBG_VALUE to terminate any prior location.
1285 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II, false, 0U,
1286 DI->getVariable(), DI->getExpression());
1287 } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1288 // See if there's an expression to constant-fold.
1289 DIExpression *Expr = DI->getExpression();
1290 if (Expr)
1291 std::tie(Expr, CI) = Expr->constantFold(CI);
// Wide constants must be emitted as CImm; <=64-bit fit in a plain Imm.
1292 if (CI->getBitWidth() > 64)
1294 .addCImm(CI)
1295 .addImm(0U)
1296 .addMetadata(DI->getVariable())
1297 .addMetadata(Expr);
1298 else
1300 .addImm(CI->getZExtValue())
1301 .addImm(0U)
1302 .addMetadata(DI->getVariable())
1303 .addMetadata(Expr);
1304 } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1306 .addFPImm(CF)
1307 .addImm(0U)
1308 .addMetadata(DI->getVariable())
1309 .addMetadata(DI->getExpression());
1310 } else if (Register Reg = lookUpRegForValue(V)) {
1311 // FIXME: This does not handle register-indirect values at offset 0.
1312 if (!FuncInfo.MF->useDebugInstrRef()) {
1313 bool IsIndirect = false;
1314 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II, IsIndirect,
1315 Reg, DI->getVariable(), DI->getExpression());
1316 } else {
1317 // If using instruction referencing, produce this as a DBG_INSTR_REF,
1318 // to be later patched up by finalizeDebugInstrRefs.
1320 /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
1321 /* isKill */ false, /* isDead */ false,
1322 /* isUndef */ false, /* isEarlyClobber */ false,
1323 /* SubReg */ 0, /* isDebug */ true)});
1327 TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, MOs,
1328 DI->getVariable(), NewExpr);
1329 }
1330 } else {
1331 // We don't know how to handle other cases, so we drop.
1332 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1333 }
1334 return true;
1335 }
1336 case Intrinsic::dbg_label: {
1337 const DbgLabelInst *DI = cast<DbgLabelInst>(II);
1338 assert(DI->getLabel() && "Missing label");
1339 if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1340 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1341 return true;
1342 }
1343
1345 TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
1346 return true;
1347 }
1348 case Intrinsic::objectsize:
1349 llvm_unreachable("llvm.objectsize.* should have been lowered already");
1350
1351 case Intrinsic::is_constant:
1352 llvm_unreachable("llvm.is.constant.* should have been lowered already");
1353
// These three are identity operations at the codegen level: reuse the
// operand's register for the intrinsic's result.
1354 case Intrinsic::launder_invariant_group:
1355 case Intrinsic::strip_invariant_group:
1356 case Intrinsic::expect: {
1357 Register ResultReg = getRegForValue(II->getArgOperand(0));
1358 if (!ResultReg)
1359 return false;
1360 updateValueMap(II, ResultReg);
1361 return true;
1362 }
1363 case Intrinsic::experimental_stackmap:
1364 return selectStackmap(II);
1365 case Intrinsic::experimental_patchpoint_void:
1366 case Intrinsic::experimental_patchpoint_i64:
1367 return selectPatchpoint(II);
1368
1369 case Intrinsic::xray_customevent:
1370 return selectXRayCustomEvent(II);
1371 case Intrinsic::xray_typedevent:
1372 return selectXRayTypedEvent(II);
1373 }
1374
// Anything not handled above is offered to the target.
1375 return fastLowerIntrinsicCall(II);
1376}
1377
1378bool FastISel::selectCast(const User *I, unsigned Opcode) {
1379 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1380 EVT DstVT = TLI.getValueType(DL, I->getType());
1381
1382 if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1383 !DstVT.isSimple())
1384 // Unhandled type. Halt "fast" selection and bail.
1385 return false;
1386
1387 // Check if the destination type is legal.
1388 if (!TLI.isTypeLegal(DstVT))
1389 return false;
1390
1391 // Check if the source operand is legal.
1392 if (!TLI.isTypeLegal(SrcVT))
1393 return false;
1394
1395 Register InputReg = getRegForValue(I->getOperand(0));
1396 if (!InputReg)
1397 // Unhandled operand. Halt "fast" selection and bail.
1398 return false;
1399
1400 Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1401 Opcode, InputReg);
1402 if (!ResultReg)
1403 return false;
1404
1405 updateValueMap(I, ResultReg);
1406 return true;
1407}
1408
// Select a bitcast: if source and destination map to the same MVT the
// operand register is reused directly; otherwise a target ISD::BITCAST is
// emitted.
// NOTE(review): the HTML extraction dropped source line 1409 (this
// function's signature); the surviving code is reproduced unchanged.
1410 EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1411 EVT DstEVT = TLI.getValueType(DL, I->getType());
1412 if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1413 !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1414 // Unhandled type. Halt "fast" selection and bail.
1415 return false;
1416
1417 MVT SrcVT = SrcEVT.getSimpleVT();
1418 MVT DstVT = DstEVT.getSimpleVT();
1419 Register Op0 = getRegForValue(I->getOperand(0));
1420 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1421 return false;
1422
1423 // If the bitcast doesn't change the type, just use the operand value.
1424 if (SrcVT == DstVT) {
1425 updateValueMap(I, Op0);
1426 return true;
1427 }
1428
1429 // Otherwise, select a BITCAST opcode.
1430 Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
1431 if (!ResultReg)
1432 return false;
1433
1434 updateValueMap(I, ResultReg);
1435 return true;
1436}
1437
// Select a freeze instruction by copying the operand into a fresh register
// of the operand type's register class (a plain COPY is a correct lowering
// for freeze at -O0).
// NOTE(review): the HTML extraction dropped source lines 1438 (signature)
// and 1452 (start of the BuildMI call); the surviving code is reproduced
// unchanged.
1439 Register Reg = getRegForValue(I->getOperand(0));
1440 if (!Reg)
1441 // Unhandled operand.
1442 return false;
1443
1444 EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
1445 if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
1446 // Unhandled type, bail out.
1447 return false;
1448
1449 MVT Ty = ETy.getSimpleVT();
1450 const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
1451 Register ResultReg = createResultReg(TyRegClass);
1453 TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
1454
1455 updateValueMap(I, ResultReg);
1456 return true;
1457}
1458
1459// Remove local value instructions starting from the instruction after
1460// SavedLastLocalValue to the current function insert point.
1461void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1462{
1463 MachineInstr *CurLastLocalValue = getLastLocalValue();
1464 if (CurLastLocalValue != SavedLastLocalValue) {
1465 // Find the first local value instruction to be deleted.
1466 // This is the instruction after SavedLastLocalValue if it is non-NULL.
1467 // Otherwise it's the first instruction in the block.
1468 MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1469 if (SavedLastLocalValue)
1470 ++FirstDeadInst;
1471 else
1472 FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1473 setLastLocalValue(SavedLastLocalValue);
1474 removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1475 }
1476}
1477
// Top-level per-instruction driver: flushes the local value map, handles PHI
// feeding for terminators, rejects unsupported operand bundles and library
// calls, then tries target-independent selection followed by the target
// hook; on failure it removes any dead code it emitted and returns false so
// SelectionDAG can take over.
// NOTE(review): the HTML extraction dropped source lines 1478 (signature),
// 1516, 1526, 1533, 1545 and 1555 here; the surviving code is reproduced
// unchanged.
1479 // Flush the local value map before starting each instruction.
1480 // This improves locality and debugging, and can reduce spills.
1481 // Reuse of values across IR instructions is relatively uncommon.
1482 flushLocalValueMap();
1483
1484 MachineInstr *SavedLastLocalValue = getLastLocalValue();
1485 // Just before the terminator instruction, insert instructions to
1486 // feed PHI nodes in successor blocks.
1487 if (I->isTerminator()) {
1488 if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1489 // PHI node handling may have generated local value instructions,
1490 // even though it failed to handle all PHI nodes.
1491 // We remove these instructions because SelectionDAGISel will generate
1492 // them again.
1493 removeDeadLocalValueCode(SavedLastLocalValue);
1494 return false;
1495 }
1496 }
1497
1498 // FastISel does not handle any operand bundles except OB_funclet.
1499 if (auto *Call = dyn_cast<CallBase>(I))
1500 for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
1501 if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1502 return false;
1503
// Capture debug location / metadata for everything emitted for I.
1504 MIMD = MIMetadata(*I);
1505
1506 SavedInsertPt = FuncInfo.InsertPt;
1507
1508 if (const auto *Call = dyn_cast<CallInst>(I)) {
1509 const Function *F = Call->getCalledFunction();
1510 LibFunc Func;
1511
1512 // As a special case, don't handle calls to builtin library functions that
1513 // may be translated directly to target instructions.
1514 if (F && !F->hasLocalLinkage() && F->hasName() &&
1515 LibInfo->getLibFunc(F->getName(), Func) &&
1517 return false;
1518
1519 // Don't handle Intrinsic::trap if a trap function is specified.
1520 if (F && F->getIntrinsicID() == Intrinsic::trap &&
1521 Call->hasFnAttr("trap-func-name"))
1522 return false;
1523 }
1524
1525 // First, try doing target-independent selection.
1527 if (selectOperator(I, I->getOpcode())) {
1528 ++NumFastIselSuccessIndependent;
1529 MIMD = {};
1530 return true;
1531 }
1532 // Remove dead code.
1534 if (SavedInsertPt != FuncInfo.InsertPt)
1535 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1536 SavedInsertPt = FuncInfo.InsertPt;
1537 }
1538 // Next, try calling the target to attempt to handle the instruction.
1539 if (fastSelectInstruction(I)) {
1540 ++NumFastIselSuccessTarget;
1541 MIMD = {};
1542 return true;
1543 }
1544 // Remove dead code.
1546 if (SavedInsertPt != FuncInfo.InsertPt)
1547 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1548
1549 MIMD = {};
1550 // Undo phi node updates, because they will be added again by SelectionDAG.
1551 if (I->isTerminator()) {
1552 // PHI node handling may have generated local value instructions.
1553 // We remove them because SelectionDAGISel will generate them again.
1554 removeDeadLocalValueCode(SavedLastLocalValue);
1556 }
1557 return false;
1558}
1559
/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
// NOTE(review): the HTML extraction dropped source lines 1562, 1564, 1572 and
// 1575-1579 here (parts of the signature, the fall-through condition, the
// insertBranch argument list and the successor bookkeeping); the surviving
// code is reproduced unchanged.
1563 const DebugLoc &DbgLoc) {
1565 FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1566 // For more accurate line information if this is the only non-debug
1567 // instruction in the block then emit it, otherwise we have the
1568 // unconditional fall-through case, which needs no instructions.
1569 } else {
1570 // The unconditional branch case.
1571 TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1573 }
// Record the successor edge, with a branch probability when BPI is present.
1574 if (FuncInfo.BPI) {
1578 } else
1580}
1581
// Finish lowering a conditional branch: add TrueMBB as a successor (with a
// probability when BPI is available) and emit the fall-through/branch to
// FalseMBB via fastEmitBranch.
// NOTE(review): the HTML extraction dropped source lines 1582 (start of the
// signature), 1592 and 1594 (the addSuccessor calls); the surviving code is
// reproduced unchanged.
1583 MachineBasicBlock *TrueMBB,
1584 MachineBasicBlock *FalseMBB) {
1585 // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
1586 // happen in degenerate IR and MachineIR forbids to have a block twice in the
1587 // successor/predecessor lists.
1588 if (TrueMBB != FalseMBB) {
1589 if (FuncInfo.BPI) {
1590 auto BranchProbability =
1591 FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1593 } else
1595 }
1596
1597 fastEmitBranch(FalseMBB, MIMD.getDL());
1598}
1599
1600/// Emit an FNeg operation.
1601bool FastISel::selectFNeg(const User *I, const Value *In) {
1602 Register OpReg = getRegForValue(In);
1603 if (!OpReg)
1604 return false;
1605
1606 // If the target has ISD::FNEG, use it.
1607 EVT VT = TLI.getValueType(DL, I->getType());
1608 Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1609 OpReg);
1610 if (ResultReg) {
1611 updateValueMap(I, ResultReg);
1612 return true;
1613 }
1614
1615 // Bitcast the value to integer, twiddle the sign bit with xor,
1616 // and then bitcast it back to floating-point.
1617 if (VT.getSizeInBits() > 64)
1618 return false;
1619 EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1620 if (!TLI.isTypeLegal(IntVT))
1621 return false;
1622
1623 Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1624 ISD::BITCAST, OpReg);
1625 if (!IntReg)
1626 return false;
1627
1628 Register IntResultReg = fastEmit_ri_(
1629 IntVT.getSimpleVT(), ISD::XOR, IntReg,
1630 UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1631 if (!IntResultReg)
1632 return false;
1633
1634 ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1635 IntResultReg);
1636 if (!ResultReg)
1637 return false;
1638
1639 updateValueMap(I, ResultReg);
1640 return true;
1641}
1642
// Select an extractvalue by computing which of the aggregate's result
// registers corresponds to the requested indices — no code is emitted; the
// base register plus the linearized register offset is recorded in the value
// map.
// NOTE(review): the HTML extraction dropped source lines 1643 (signature)
// and 1662 (the ValueMap lookup); the surviving code is reproduced
// unchanged.
1644 const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1645 if (!EVI)
1646 return false;
1647
1648 // Make sure we only try to handle extracts with a legal result. But also
1649 // allow i1 because it's easy.
1650 EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1651 if (!RealVT.isSimple())
1652 return false;
1653 MVT VT = RealVT.getSimpleVT();
1654 if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1655 return false;
1656
1657 const Value *Op0 = EVI->getOperand(0);
1658 Type *AggTy = Op0->getType();
1659
1660 // Get the base result register.
1661 unsigned ResultReg;
1663 if (I != FuncInfo.ValueMap.end())
1664 ResultReg = I->second;
1665 else if (isa<Instruction>(Op0))
1666 ResultReg = FuncInfo.InitializeRegForValue(Op0);
1667 else
1668 return false; // fast-isel can't handle aggregate constants at the moment
1669
1670 // Get the actual result register, which is an offset from the base register.
1671 unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1672
1673 SmallVector<EVT, 4> AggValueVTs;
1674 ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1675
// Skip past the registers used by the aggregate members before VTIndex.
1676 for (unsigned i = 0; i < VTIndex; i++)
1677 ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1678
1679 updateValueMap(EVI, ResultReg);
1680 return true;
1681}
1682
// Target-independent opcode dispatcher: routes each IR opcode to the
// corresponding select* helper or the matching ISD binary/cast opcode.
// Returns false for any opcode fast-isel does not handle so SelectionDAG can
// take over.
// NOTE(review): the HTML extraction dropped source lines 1744-1745 here (the
// body of the Unreachable case); the surviving code is reproduced unchanged.
1683bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1684 switch (Opcode) {
1685 case Instruction::Add:
1686 return selectBinaryOp(I, ISD::ADD);
1687 case Instruction::FAdd:
1688 return selectBinaryOp(I, ISD::FADD);
1689 case Instruction::Sub:
1690 return selectBinaryOp(I, ISD::SUB);
1691 case Instruction::FSub:
1692 return selectBinaryOp(I, ISD::FSUB);
1693 case Instruction::Mul:
1694 return selectBinaryOp(I, ISD::MUL);
1695 case Instruction::FMul:
1696 return selectBinaryOp(I, ISD::FMUL);
1697 case Instruction::SDiv:
1698 return selectBinaryOp(I, ISD::SDIV);
1699 case Instruction::UDiv:
1700 return selectBinaryOp(I, ISD::UDIV);
1701 case Instruction::FDiv:
1702 return selectBinaryOp(I, ISD::FDIV);
1703 case Instruction::SRem:
1704 return selectBinaryOp(I, ISD::SREM);
1705 case Instruction::URem:
1706 return selectBinaryOp(I, ISD::UREM);
1707 case Instruction::FRem:
1708 return selectBinaryOp(I, ISD::FREM);
1709 case Instruction::Shl:
1710 return selectBinaryOp(I, ISD::SHL);
1711 case Instruction::LShr:
1712 return selectBinaryOp(I, ISD::SRL);
1713 case Instruction::AShr:
1714 return selectBinaryOp(I, ISD::SRA);
1715 case Instruction::And:
1716 return selectBinaryOp(I, ISD::AND);
1717 case Instruction::Or:
1718 return selectBinaryOp(I, ISD::OR);
1719 case Instruction::Xor:
1720 return selectBinaryOp(I, ISD::XOR);
1721
1722 case Instruction::FNeg:
1723 return selectFNeg(I, I->getOperand(0));
1724
1725 case Instruction::GetElementPtr:
1726 return selectGetElementPtr(I);
1727
1728 case Instruction::Br: {
1729 const BranchInst *BI = cast<BranchInst>(I);
1730
1731 if (BI->isUnconditional()) {
1732 const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1733 MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1734 fastEmitBranch(MSucc, BI->getDebugLoc());
1735 return true;
1736 }
1737
1738 // Conditional branches are not handed yet.
1739 // Halt "fast" selection and bail.
1740 return false;
1741 }
1742
1743 case Instruction::Unreachable:
1746 else
1747 return true;
1748
1749 case Instruction::Alloca:
1750 // FunctionLowering has the static-sized case covered.
1751 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1752 return true;
1753
1754 // Dynamic-sized alloca is not handled yet.
1755 return false;
1756
1757 case Instruction::Call:
1758 // On AIX, normal call lowering uses the DAG-ISEL path currently so that the
1759 // callee of the direct function call instruction will be mapped to the
1760 // symbol for the function's entry point, which is distinct from the
1761 // function descriptor symbol. The latter is the symbol whose XCOFF symbol
1762 // name is the C-linkage name of the source level function.
1763 // But fast isel still has the ability to do selection for intrinsics.
1764 if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
1765 return false;
1766 return selectCall(I);
1767
1768 case Instruction::BitCast:
1769 return selectBitCast(I);
1770
1771 case Instruction::FPToSI:
1772 return selectCast(I, ISD::FP_TO_SINT);
1773 case Instruction::ZExt:
1774 return selectCast(I, ISD::ZERO_EXTEND);
1775 case Instruction::SExt:
1776 return selectCast(I, ISD::SIGN_EXTEND);
1777 case Instruction::Trunc:
1778 return selectCast(I, ISD::TRUNCATE);
1779 case Instruction::SIToFP:
1780 return selectCast(I, ISD::SINT_TO_FP);
1781
1782 case Instruction::IntToPtr: // Deliberate fall-through.
1783 case Instruction::PtrToInt: {
// Pointer/int conversions are extends, truncates, or no-ops depending on
// the relative bit widths.
1784 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1785 EVT DstVT = TLI.getValueType(DL, I->getType());
1786 if (DstVT.bitsGT(SrcVT))
1787 return selectCast(I, ISD::ZERO_EXTEND);
1788 if (DstVT.bitsLT(SrcVT))
1789 return selectCast(I, ISD::TRUNCATE);
1790 Register Reg = getRegForValue(I->getOperand(0));
1791 if (!Reg)
1792 return false;
1793 updateValueMap(I, Reg);
1794 return true;
1795 }
1796
1797 case Instruction::ExtractValue:
1798 return selectExtractValue(I);
1799
1800 case Instruction::Freeze:
1801 return selectFreeze(I);
1802
1803 case Instruction::PHI:
1804 llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1805
1806 default:
1807 // Unhandled instruction. Halt "fast" selection and bail.
1808 return false;
1809 }
1810}
1811
// FastISel constructor: caches references to the per-function state
// (FunctionLoweringInfo) and the target's instruction/lowering/register info
// from the MachineFunction's subtarget.
// NOTE(review): the HTML extraction dropped source lines 1812-1814 (the
// signature) and 1821 (the last initializer); the surviving initializer list
// is reproduced unchanged.
1815 : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1816 MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1817 TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1818 TII(*MF->getSubtarget().getInstrInfo()),
1819 TLI(*MF->getSubtarget().getTargetLowering()),
1820 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1822
1823FastISel::~FastISel() = default;
1824
// Default target hooks: targets override these to opt into fast argument /
// call / intrinsic lowering; the base implementations decline everything.
1825bool FastISel::fastLowerArguments() { return false; }
1826
1827bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1828
// NOTE(review): the HTML extraction dropped source line 1829 (the
// fastLowerIntrinsicCall signature); its body is reproduced unchanged.
1830 return false;
1831}
1832
// Default fastEmit_* hooks. The tablegen-generated target code overrides
// these; returning 0 means "this target cannot emit that node", which makes
// the generic code fall back to SelectionDAG.
1833unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1834
1835unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
1836 return 0;
1837}
1838
1839unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1840 unsigned /*Op1*/) {
1841 return 0;
1842}
1843
1844unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1845 return 0;
1846}
1847
1848unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1849 const ConstantFP * /*FPImm*/) {
1850 return 0;
1851}
1852
1853unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1854 uint64_t /*Imm*/) {
1855 return 0;
1856}
1857
/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and try
/// fastEmit_rr instead.
// NOTE(review): the HTML extraction dropped source line 1889 (part of the
// IntegerType construction); the surviving code is reproduced unchanged.
1862Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1863 uint64_t Imm, MVT ImmType) {
1864 // If this is a multiply by a power of two, emit this as a shift left.
1865 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1866 Opcode = ISD::SHL;
1867 Imm = Log2_64(Imm);
1868 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1869 // div x, 8 -> srl x, 3
1870 Opcode = ISD::SRL;
1871 Imm = Log2_64(Imm);
1872 }
1873
1874 // Horrible hack (to be removed), check to make sure shift amounts are
1875 // in-range.
1876 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1877 Imm >= VT.getSizeInBits())
1878 return 0;
1879
1880 // First check if immediate type is legal. If not, we can't use the ri form.
1881 Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
1882 if (ResultReg)
1883 return ResultReg;
1884 Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1885 if (!MaterialReg) {
1886 // This is a bit ugly/slow, but failing here means falling out of
1887 // fast-isel, which would be very slow.
1888 IntegerType *ITy =
1890 MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1891 if (!MaterialReg)
1892 return 0;
1893 }
1894 return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
1895}
1896
// Allocate a fresh virtual register of the given class for an instruction
// result.
// NOTE(review): the HTML extraction dropped source line 1897 (this
// function's signature); the body is reproduced unchanged.
1898 return MRI.createVirtualRegister(RC);
1899}
1900
// Constrain a virtual-register operand to the register class the instruction
// description requires for operand OpNum; when the classes are incompatible,
// copy the value into a fresh register of the required class instead.
// NOTE(review): the HTML extraction dropped source lines 1901 (start of the
// signature) and 1910 (start of the BuildMI call); the surviving code is
// reproduced unchanged.
1902 unsigned OpNum) {
1903 if (Op.isVirtual()) {
1904 const TargetRegisterClass *RegClass =
1905 TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1906 if (!MRI.constrainRegClass(Op, RegClass)) {
1907 // If it's not legal to COPY between the register classes, something
1908 // has gone very wrong before we got here.
1909 Register NewOp = createResultReg(RegClass);
1911 TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1912 return NewOp;
1913 }
1914 }
1915 return Op;
1916}
1917
1918Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1919 const TargetRegisterClass *RC) {
1920 Register ResultReg = createResultReg(RC);
1921 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1922
1923 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
1924 return ResultReg;
1925}
1926
// Emit an instruction with one register operand. When the opcode has no
// explicit def, the result is produced in the instruction's first implicit
// def and copied into a virtual register.
// NOTE(review): the HTML extraction dropped source line 1938 (start of the
// no-explicit-def BuildMI call); the surviving code is reproduced unchanged.
1927Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1928 const TargetRegisterClass *RC, unsigned Op0) {
1929 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1930
1931 Register ResultReg = createResultReg(RC);
1932 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1933
1934 if (II.getNumDefs() >= 1)
1935 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
1936 .addReg(Op0);
1937 else {
1939 .addReg(Op0);
1940 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
1941 ResultReg)
1942 .addReg(II.implicit_defs()[0]);
1943 }
1944
1945 return ResultReg;
1946}
1947
// Emit an instruction with two register operands; see fastEmitInst_r for the
// implicit-def handling.
// NOTE(review): the HTML extraction dropped source line 1962 (start of the
// no-explicit-def BuildMI call); the surviving code is reproduced unchanged.
1948Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1949 const TargetRegisterClass *RC, unsigned Op0,
1950 unsigned Op1) {
1951 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1952
1953 Register ResultReg = createResultReg(RC);
1954 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1955 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1956
1957 if (II.getNumDefs() >= 1)
1958 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
1959 .addReg(Op0)
1960 .addReg(Op1);
1961 else {
1963 .addReg(Op0)
1964 .addReg(Op1);
1965 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
1966 ResultReg)
1967 .addReg(II.implicit_defs()[0]);
1968 }
1969 return ResultReg;
1970}
1971
// Emit an instruction with three register operands; see fastEmitInst_r for
// the implicit-def handling.
// NOTE(review): the HTML extraction dropped source line 1988 (start of the
// no-explicit-def BuildMI call); the surviving code is reproduced unchanged.
1972Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1973 const TargetRegisterClass *RC, unsigned Op0,
1974 unsigned Op1, unsigned Op2) {
1975 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1976
1977 Register ResultReg = createResultReg(RC);
1978 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1979 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1980 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1981
1982 if (II.getNumDefs() >= 1)
1983 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
1984 .addReg(Op0)
1985 .addReg(Op1)
1986 .addReg(Op2);
1987 else {
1989 .addReg(Op0)
1990 .addReg(Op1)
1991 .addReg(Op2);
1992 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
1993 ResultReg)
1994 .addReg(II.implicit_defs()[0]);
1995 }
1996 return ResultReg;
1997}
1998
// Emit an instruction with one register and one immediate operand; see
// fastEmitInst_r for the implicit-def handling.
// NOTE(review): the HTML extraction dropped source line 2012 (start of the
// no-explicit-def BuildMI call); the surviving code is reproduced unchanged.
1999Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
2000 const TargetRegisterClass *RC, unsigned Op0,
2001 uint64_t Imm) {
2002 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2003
2004 Register ResultReg = createResultReg(RC);
2005 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2006
2007 if (II.getNumDefs() >= 1)
2008 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2009 .addReg(Op0)
2010 .addImm(Imm);
2011 else {
2013 .addReg(Op0)
2014 .addImm(Imm);
2015 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2016 ResultReg)
2017 .addReg(II.implicit_defs()[0]);
2018 }
2019 return ResultReg;
2020}
2021
2022Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2023 const TargetRegisterClass *RC, unsigned Op0,
2024 uint64_t Imm1, uint64_t Imm2) {
2025 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2026
2027 Register ResultReg = createResultReg(RC);
2028 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2029
2030 if (II.getNumDefs() >= 1)
2031 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2032 .addReg(Op0)
2033 .addImm(Imm1)
2034 .addImm(Imm2);
2035 else {
2037 .addReg(Op0)
2038 .addImm(Imm1)
2039 .addImm(Imm2);
2040 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2041 ResultReg)
2042 .addReg(II.implicit_defs()[0]);
2043 }
2044 return ResultReg;
2045}
2046
2047Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2048 const TargetRegisterClass *RC,
2049 const ConstantFP *FPImm) {
2050 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2051
2052 Register ResultReg = createResultReg(RC);
2053
2054 if (II.getNumDefs() >= 1)
2055 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2056 .addFPImm(FPImm);
2057 else {
2059 .addFPImm(FPImm);
2060 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2061 ResultReg)
2062 .addReg(II.implicit_defs()[0]);
2063 }
2064 return ResultReg;
2065}
2066
2067Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2068 const TargetRegisterClass *RC, unsigned Op0,
2069 unsigned Op1, uint64_t Imm) {
2070 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2071
2072 Register ResultReg = createResultReg(RC);
2073 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2074 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2075
2076 if (II.getNumDefs() >= 1)
2077 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2078 .addReg(Op0)
2079 .addReg(Op1)
2080 .addImm(Imm);
2081 else {
2083 .addReg(Op0)
2084 .addReg(Op1)
2085 .addImm(Imm);
2086 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2087 ResultReg)
2088 .addReg(II.implicit_defs()[0]);
2089 }
2090 return ResultReg;
2091}
2092
2093Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2094 const TargetRegisterClass *RC, uint64_t Imm) {
2095 Register ResultReg = createResultReg(RC);
2096 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2097
2098 if (II.getNumDefs() >= 1)
2099 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2100 .addImm(Imm);
2101 else {
2103 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2104 ResultReg)
2105 .addReg(II.implicit_defs()[0]);
2106 }
2107 return ResultReg;
2108}
2109
2111 uint32_t Idx) {
2112 Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2114 "Cannot yet extract from physregs");
2115 const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2117 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2118 ResultReg).addReg(Op0, 0, Idx);
2119 return ResultReg;
2120}
2121
2122/// Emit MachineInstrs to compute the value of Op with all but the least
2123/// significant bit set to zero.
2125 return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
2126}
2127
2128/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2129/// Emit code to ensure constants are copied into registers when needed.
2130/// Remember the virtual registers that need to be added to the Machine PHI
2131/// nodes as input. We cannot just directly add them, because expansion
2132/// might result in multiple MBB's for one BB. As such, the start of the
2133/// BB might correspond to a different MBB than the end.
2134bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2135 const Instruction *TI = LLVMBB->getTerminator();
2136
2139
2140 // Check successor nodes' PHI nodes that expect a constant to be available
2141 // from this block.
2142 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2143 const BasicBlock *SuccBB = TI->getSuccessor(succ);
2144 if (!isa<PHINode>(SuccBB->begin()))
2145 continue;
2146 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2147
2148 // If this terminator has multiple identical successors (common for
2149 // switches), only handle each succ once.
2150 if (!SuccsHandled.insert(SuccMBB).second)
2151 continue;
2152
2154
2155 // At this point we know that there is a 1-1 correspondence between LLVM PHI
2156 // nodes and Machine PHI nodes, but the incoming operands have not been
2157 // emitted yet.
2158 for (const PHINode &PN : SuccBB->phis()) {
2159 // Ignore dead phi's.
2160 if (PN.use_empty())
2161 continue;
2162
2163 // Only handle legal types. Two interesting things to note here. First,
2164 // by bailing out early, we may leave behind some dead instructions,
2165 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2166 // own moves. Second, this check is necessary because FastISel doesn't
2167 // use CreateRegs to create registers, so it always creates
2168 // exactly one register for each non-void instruction.
2169 EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2170 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2171 // Handle integer promotions, though, because they're common and easy.
2172 if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2174 return false;
2175 }
2176 }
2177
2178 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2179
2180 // Set the DebugLoc for the copy. Use the location of the operand if
2181 // there is one; otherwise no location, flushLocalValueMap will fix it.
2182 MIMD = {};
2183 if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2184 MIMD = MIMetadata(*Inst);
2185
2186 Register Reg = getRegForValue(PHIOp);
2187 if (!Reg) {
2189 return false;
2190 }
2191 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2192 MIMD = {};
2193 }
2194 }
2195
2196 return true;
2197}
2198
2199bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2200 assert(LI->hasOneUse() &&
2201 "tryToFoldLoad expected a LoadInst with a single use");
2202 // We know that the load has a single use, but don't know what it is. If it
2203 // isn't one of the folded instructions, then we can't succeed here. Handle
2204 // this by scanning the single-use users of the load until we get to FoldInst.
2205 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2206
2207 const Instruction *TheUser = LI->user_back();
2208 while (TheUser != FoldInst && // Scan up until we find FoldInst.
2209 // Stay in the right block.
2210 TheUser->getParent() == FoldInst->getParent() &&
2211 --MaxUsers) { // Don't scan too far.
2212 // If there are multiple or no uses of this instruction, then bail out.
2213 if (!TheUser->hasOneUse())
2214 return false;
2215
2216 TheUser = TheUser->user_back();
2217 }
2218
2219 // If we didn't find the fold instruction, then we failed to collapse the
2220 // sequence.
2221 if (TheUser != FoldInst)
2222 return false;
2223
2224 // Don't try to fold volatile loads. Target has to deal with alignment
2225 // constraints.
2226 if (LI->isVolatile())
2227 return false;
2228
2229 // Figure out which vreg this is going into. If there is no assigned vreg yet
2230 // then there actually was no reference to it. Perhaps the load is referenced
2231 // by a dead instruction.
2232 Register LoadReg = getRegForValue(LI);
2233 if (!LoadReg)
2234 return false;
2235
2236 // We can't fold if this vreg has no uses or more than one use. Multiple uses
2237 // may mean that the instruction got lowered to multiple MIs, or the use of
2238 // the loaded value ended up being multiple operands of the result.
2239 if (!MRI.hasOneUse(LoadReg))
2240 return false;
2241
2242 // If the register has fixups, there may be additional uses through a
2243 // different alias of the register.
2244 if (FuncInfo.RegsWithFixups.contains(LoadReg))
2245 return false;
2246
2248 MachineInstr *User = RI->getParent();
2249
2250 // Set the insertion point properly. Folding the load can cause generation of
2251 // other random instructions (like sign extends) for addressing modes; make
2252 // sure they get inserted in a logical place before the new instruction.
2254 FuncInfo.MBB = User->getParent();
2255
2256 // Ask the target to try folding the load.
2257 return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2258}
2259
2261 // Must be an add.
2262 if (!isa<AddOperator>(Add))
2263 return false;
2264 // Type size needs to match.
2265 if (DL.getTypeSizeInBits(GEP->getType()) !=
2266 DL.getTypeSizeInBits(Add->getType()))
2267 return false;
2268 // Must be in the same basic block.
2269 if (isa<Instruction>(Add) &&
2270 FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2271 return false;
2272 // Must have a constant operand.
2273 return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2274}
2275
2278 const Value *Ptr;
2279 Type *ValTy;
2280 MaybeAlign Alignment;
2282 bool IsVolatile;
2283
2284 if (const auto *LI = dyn_cast<LoadInst>(I)) {
2285 Alignment = LI->getAlign();
2286 IsVolatile = LI->isVolatile();
2288 Ptr = LI->getPointerOperand();
2289 ValTy = LI->getType();
2290 } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2291 Alignment = SI->getAlign();
2292 IsVolatile = SI->isVolatile();
2294 Ptr = SI->getPointerOperand();
2295 ValTy = SI->getValueOperand()->getType();
2296 } else
2297 return nullptr;
2298
2299 bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
2300 bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
2301 bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
2302 const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2303
2304 AAMDNodes AAInfo = I->getAAMetadata();
2305
2306 if (!Alignment) // Ensure that codegen never sees alignment 0.
2307 Alignment = DL.getABITypeAlign(ValTy);
2308
2309 unsigned Size = DL.getTypeStoreSize(ValTy);
2310
2311 if (IsVolatile)
2313 if (IsNonTemporal)
2315 if (IsDereferenceable)
2317 if (IsInvariant)
2319
2321 *Alignment, AAInfo, Ranges);
2322}
2323
2325 // If both operands are the same, then try to optimize or fold the cmp.
2326 CmpInst::Predicate Predicate = CI->getPredicate();
2327 if (CI->getOperand(0) != CI->getOperand(1))
2328 return Predicate;
2329
2330 switch (Predicate) {
2331 default: llvm_unreachable("Invalid predicate!");
2332 case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2333 case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2334 case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2335 case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2336 case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2337 case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2338 case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2339 case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2340 case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2341 case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2342 case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2343 case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2344 case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2345 case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2346 case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2347 case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2348
2349 case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2350 case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2351 case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2352 case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2353 case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2354 case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2355 case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2356 case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2357 case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2358 case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2359 }
2360
2361 return Predicate;
2362}
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
amdgpu Simplify well known AMD library false FunctionCallee Callee
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
assume Assume Builder
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
uint64_t Size
static Register findLocalRegDef(MachineInstr &MI)
Return the defined register if this instruction defines exactly one virtual register and uses no othe...
Definition: FastISel.cpp:160
static bool isRegUsedByPhiNodes(Register DefReg, FunctionLoweringInfo &FuncInfo)
Definition: FastISel.cpp:177
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
Definition: FastISel.cpp:937
This file defines the FastISel class.
Hexagon Common GEP
IRTranslator LLVM IR MI
static M68kRelType getType(unsigned Kind, MCSymbolRefExpr::VariantKind &Modifier, bool &IsPCRel)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
#define P(N)
@ SI
@ VI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isCommutative(Instruction *I)
This file defines the SmallPtrSet class.
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This file describes how to lower LLVM code to machine code.
An arbitrary precision integer that knows its signedness.
Definition: APSInt.h:23
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
bool getValueAsBool() const
Return the attribute's value as a boolean.
Definition: Attributes.cpp:298
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:314
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:372
filter_iterator< BasicBlock::const_iterator, std::function< bool(constInstruction &)> >::difference_type sizeWithoutDebug() const
Return the size of the basic block ignoring debug instructions.
Definition: BasicBlock.cpp:123
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:127
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1465
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1326
Value * getCalledOperand() const
Definition: InstrTypes.h:1399
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1351
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1332
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1264
unsigned arg_size() const
Definition: InstrTypes.h:1349
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
bool isMustTailCall() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:708
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:718
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:721
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:735
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:747
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:748
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:724
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:733
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:722
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:723
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:742
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:741
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:745
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:732
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:726
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:729
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:743
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:730
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:725
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:727
@ ICMP_EQ
equal
Definition: InstrTypes.h:739
@ ICMP_NE
not equal
Definition: InstrTypes.h:740
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:746
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:734
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:744
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:731
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:720
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:728
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:808
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:256
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:887
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:356
DWARF expression.
std::pair< DIExpression *, const ConstantInt * > constantFold(const ConstantInt *CI)
Try to shorten an expression with an initial constant operand.
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:114
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:681
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Definition: DataLayout.cpp:849
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:836
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:507
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:676
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:475
This represents the llvm.dbg.declare instruction.
Value * getAddress() const
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
This represents the llvm.dbg.value instruction.
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
A debug info location.
Definition: DebugLoc.h:33
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
MachineRegisterInfo & MRI
Definition: FastISel.h:205
Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:2110
const TargetLibraryInfo * LibInfo
Definition: FastISel.h:214
const DataLayout & DL
Definition: FastISel.h:210
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:532
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:237
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:645
bool selectExtractValue(const User *U)
Definition: FastISel.cpp:1643
DenseMap< const Value *, Register > LocalValueMap
Definition: FastISel.h:202
void fastEmitBranch(MachineBasicBlock *MSucc, const DebugLoc &DbgLoc)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor,...
Definition: FastISel.cpp:1562
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF)
Emit the floating-point constant +0.0 in a register using target- specific logic.
Definition: FastISel.h:476
MachineInstr * EmitStartPt
The top most instruction in the current block that is allowed for emitting local variables.
Definition: FastISel.h:226
bool selectXRayCustomEvent(const CallInst *II)
Definition: FastISel.cpp:895
Register fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:1918
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering.
Definition: FastISel.cpp:1829
MachineInstr * getLastLocalValue()
Return the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:233
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1109
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1853
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:436
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:959
Register fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0)
Emit a MachineInstr with one register operand and a result register in the given register class.
Definition: FastISel.cpp:1927
bool selectFreeze(const User *I)
Definition: FastISel.cpp:1438
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1187
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1378
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We're checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2199
Register getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value.
Definition: FastISel.cpp:238
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:410
void startNewBlock()
Set the current block to which generated machine instructions will be appended.
Definition: FastISel.cpp:123
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2277
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:300
Register fastEmitInst_i(unsigned MachineInstOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:2093
MachineFrameInfo & MFI
Definition: FastISel.h:206
MachineFunction * MF
Definition: FastISel.h:204
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2260
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1833
TargetLoweringBase::ArgListTy ArgListTy
Definition: FastISel.h:69
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1478
virtual unsigned fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:469
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1827
bool selectXRayTypedEvent(const CallInst *II)
Definition: FastISel.cpp:914
Register fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1)
Emit a MachineInstr with two register operands and a result register in the given register class.
Definition: FastISel.cpp:1948
Register fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:2022
Register createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:1897
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1848
virtual bool fastLowerArguments()
This method is called by target-independent code to do target- specific argument lowering.
Definition: FastISel.cpp:1825
bool selectFNeg(const User *I, const Value *In)
Emit an FNeg operation.
Definition: FastISel.cpp:1601
const TargetInstrInfo & TII
Definition: FastISel.h:211
bool selectCall(const User *I)
Definition: FastISel.cpp:1150
Register lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:351
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2324
void finishBasicBlock()
Flush the local value map.
Definition: FastISel.cpp:136
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:203
Register getRegForGEPIndex(const Value *Idx)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:383
MachineConstantPool & MCP
Definition: FastISel.h:207
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr),...
Definition: FastISel.cpp:1683
bool SkipTargetIndependentISel
Definition: FastISel.h:215
Register fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
Definition: FastISel.cpp:2047
Register constrainOperandRegClass(const MCInstrDesc &II, Register Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:1901
void updateValueMap(const Value *I, Register Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:362
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:444
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1812
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:757
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:401
virtual bool fastSelectInstruction(const Instruction *I)=0
This method is called by target-independent code when the normal FastISel process fails to select an ...
const TargetLowering & TLI
Definition: FastISel.h:212
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, unsigned Op1)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1839
const TargetMachine & TM
Definition: FastISel.h:209
Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1862
Register fastEmitZExtFromI1(MVT VT, unsigned Op0)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero.
Definition: FastISel.cpp:2124
MIMetadata MIMD
Definition: FastISel.h:208
MachineInstr * LastLocalValue
The position of the last instruction for materializing constants for use in the current block.
Definition: FastISel.h:221
Register fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:2067
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:138
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:430
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtains the branch weight and adds TrueMBB and FalseMBB to ...
Definition: FastISel.cpp:1582
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1409
Register fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:1999
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1835
virtual unsigned fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:472
virtual ~FastISel()
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1844
const TargetRegisterInfo & TRI
Definition: FastISel.h:213
Register fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1, unsigned Op2)
Emit a MachineInstr with three register operands and a result register in the given register class.
Definition: FastISel.cpp:1972
TargetLoweringBase::ArgListEntry ArgListEntry
Definition: FastISel.h:68
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
DenseSet< Register > RegsWithFixups
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
Register InitializeRegForValue(const Value *V)
DenseMap< const Value *, Register > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
MachineBasicBlock * MBB
MBB - The current block.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
DenseMap< Register, Register > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
Class to represent function types.
Definition: DerivedTypes.h:103
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:666
arg_iterator arg_end()
Definition: Function.h:775
arg_iterator arg_begin()
Definition: Function.h:766
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:315
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:358
const BasicBlock * getParent() const
Definition: Instruction.h:90
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:87
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:275
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:313
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
An instruction for reading from memory.
Definition: Instructions.h:177
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:214
Context object for machine code objects.
Definition: MCContext.h:76
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:201
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:247
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
Definition: MCInstrDesc.h:577
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Metadata node.
Definition: Metadata.h:943
Set of metadata that should be preserved when using BuildMI().
const DebugLoc & getDL() const
Machine Value Type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
void addSuccessorWithoutProb(MachineBasicBlock *Succ)
Add Succ as a successor of this MachineBasicBlock.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
MachineInstrBundleIterator< MachineInstr > iterator
void setHasPatchPoint(bool s=true)
void setHasStackMap(bool s=true)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineModuleInfo & getMMI() const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
Representation of each machine instruction.
Definition: MachineInstr.h:68
void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
bool hasDebugInfo() const
Returns true if valid debug info is present.
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
reg_iterator reg_begin(Register RegNo) const
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:119
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:71
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:450
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:655
Class to represent struct types.
Definition: DerivedTypes.h:213
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
unsigned getCallFrameDestroyOpcode() const
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
const Triple & getTargetTriple() const
TargetOptions Options
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:355
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:637
bool isOSAIX() const
Tests whether the OS is AIX.
Definition: Triple.h:669
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:994
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ AnyReg
Used for dynamic register based calls (e.g.
Definition: CallingConv.h:60
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ ConstantFP
Definition: ISDOpcodes.h:77
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:773
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:390
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:885
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:760
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:910
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:691
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:763
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:819
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:666
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1132
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:769
Reg
All possible values of the reg field in the ModR/M byte.
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition: Dwarf.h:146
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:226
reverse_iterator rend(StringRef path)
Get reverse end iterator over path.
Definition: Path.cpp:306
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void diagnoseDontCall(const CallInst &CI)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:721
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:293
gep_type_iterator gep_type_end(const User *GEP)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:379
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
@ Add
Sum of integers.
gep_type_iterator gep_type_begin(const User *GEP)
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:523
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:121
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition: Analysis.cpp:33
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:651
static constexpr roundingMode rmTowardZero
Definition: APFloat.h:204
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:34
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:129
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:256
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:272
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:340
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:603
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:288
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:95
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:94
SmallVector< Register, 16 > OutRegs
Definition: FastISel.h:96
CallLoweringInfo & setTailCall(bool Value=true)
Definition: FastISel.h:177
SmallVector< Register, 4 > InRegs
Definition: FastISel.h:98
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:182
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, const CallBase &Call)
Definition: FastISel.h:104
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:97
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117