// LLVM 20.0.0git — FastISel.cpp (recovered from doxygen file-documentation export)
//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
40
#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>
110
111using namespace llvm;
112using namespace PatternMatch;
113
114#define DEBUG_TYPE "isel"
115
116STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
117 "target-independent selector");
118STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
119 "target-specific selector");
120STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
121
122/// Set the current block to which generated machine instructions will be
123/// appended.
125 assert(LocalValueMap.empty() &&
126 "local values should be cleared after finishing a BB");
127
128 // Instructions are appended to FuncInfo.MBB. If the basic block already
129 // contains labels or copies, use the last instruction as the last local
130 // value.
131 EmitStartPt = nullptr;
132 if (!FuncInfo.MBB->empty())
135}
136
137void FastISel::finishBasicBlock() { flushLocalValueMap(); }
138
141 // Fallback to SDISel argument lowering code to deal with sret pointer
142 // parameter.
143 return false;
144
145 if (!fastLowerArguments())
146 return false;
147
148 // Enter arguments into ValueMap for uses in non-entry BBs.
150 E = FuncInfo.Fn->arg_end();
151 I != E; ++I) {
153 assert(VI != LocalValueMap.end() && "Missed an argument?");
154 FuncInfo.ValueMap[&*I] = VI->second;
155 }
156 return true;
157}
158
159/// Return the defined register if this instruction defines exactly one
160/// virtual register and uses no other virtual registers. Otherwise return 0.
162 Register RegDef;
163 for (const MachineOperand &MO : MI.operands()) {
164 if (!MO.isReg())
165 continue;
166 if (MO.isDef()) {
167 if (RegDef)
168 return Register();
169 RegDef = MO.getReg();
170 } else if (MO.getReg().isVirtual()) {
171 // This is another use of a vreg. Don't delete it.
172 return Register();
173 }
174 }
175 return RegDef;
176}
177
178static bool isRegUsedByPhiNodes(Register DefReg,
179 FunctionLoweringInfo &FuncInfo) {
180 for (auto &P : FuncInfo.PHINodesToUpdate)
181 if (P.second == DefReg)
182 return true;
183 return false;
184}
185
186void FastISel::flushLocalValueMap() {
187 // If FastISel bails out, it could leave local value instructions behind
188 // that aren't used for anything. Detect and erase those.
190 // Save the first instruction after local values, for later.
192 ++FirstNonValue;
193
196 : FuncInfo.MBB->rend();
198 for (MachineInstr &LocalMI :
200 Register DefReg = findLocalRegDef(LocalMI);
201 if (!DefReg)
202 continue;
203 if (FuncInfo.RegsWithFixups.count(DefReg))
204 continue;
205 bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
206 if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
207 if (EmitStartPt == &LocalMI)
209 LLVM_DEBUG(dbgs() << "removing dead local value materialization"
210 << LocalMI);
211 LocalMI.eraseFromParent();
212 }
213 }
214
215 if (FirstNonValue != FuncInfo.MBB->end()) {
216 // See if there are any local value instructions left. If so, we want to
217 // make sure the first one has a debug location; if it doesn't, use the
218 // first non-value instruction's debug location.
219
220 // If EmitStartPt is non-null, this block had copies at the top before
221 // FastISel started doing anything; it points to the last one, so the
222 // first local value instruction is the one after EmitStartPt.
223 // If EmitStartPt is null, the first local value instruction is at the
224 // top of the block.
225 MachineBasicBlock::iterator FirstLocalValue =
227 : FuncInfo.MBB->begin();
228 if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
229 FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
230 }
231 }
232
233 LocalValueMap.clear();
236 SavedInsertPt = FuncInfo.InsertPt;
237}
238
240 EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
241 // Don't handle non-simple values in FastISel.
242 if (!RealVT.isSimple())
243 return Register();
244
245 // Ignore illegal types. We must do this before looking up the value
246 // in ValueMap because Arguments are given virtual registers regardless
247 // of whether FastISel can handle them.
248 MVT VT = RealVT.getSimpleVT();
249 if (!TLI.isTypeLegal(VT)) {
250 // Handle integer promotions, though, because they're common and easy.
251 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
252 VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
253 else
254 return Register();
255 }
256
257 // Look up the value to see if we already have a register for it.
259 if (Reg)
260 return Reg;
261
262 // In bottom-up mode, just create the virtual register which will be used
263 // to hold the value. It will be materialized later.
264 if (isa<Instruction>(V) &&
265 (!isa<AllocaInst>(V) ||
266 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
268
269 SavePoint SaveInsertPt = enterLocalValueArea();
270
271 // Materialize the value in a register. Emit any instructions in the
272 // local value area.
273 Reg = materializeRegForValue(V, VT);
274
275 leaveLocalValueArea(SaveInsertPt);
276
277 return Reg;
278}
279
280Register FastISel::materializeConstant(const Value *V, MVT VT) {
281 Register Reg;
282 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
283 if (CI->getValue().getActiveBits() <= 64)
284 Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
285 } else if (isa<AllocaInst>(V))
286 Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
287 else if (isa<ConstantPointerNull>(V))
288 // Translate this as an integer zero so that it can be
289 // local-CSE'd with actual integer zeros.
290 Reg =
292 else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
293 if (CF->isNullValue())
294 Reg = fastMaterializeFloatZero(CF);
295 else
296 // Try to emit the constant directly.
297 Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
298
299 if (!Reg) {
300 // Try to emit the constant by using an integer constant with a cast.
301 const APFloat &Flt = CF->getValueAPF();
302 EVT IntVT = TLI.getPointerTy(DL);
303 uint32_t IntBitWidth = IntVT.getSizeInBits();
304 APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
305 bool isExact;
306 (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
307 if (isExact) {
308 Register IntegerReg =
309 getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
310 if (IntegerReg)
311 Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
312 IntegerReg);
313 }
314 }
315 } else if (const auto *Op = dyn_cast<Operator>(V)) {
316 if (!selectOperator(Op, Op->getOpcode()))
317 if (!isa<Instruction>(Op) ||
318 !fastSelectInstruction(cast<Instruction>(Op)))
319 return 0;
321 } else if (isa<UndefValue>(V)) {
324 TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
325 }
326 return Reg;
327}
328
329/// Helper for getRegForValue. This function is called when the value isn't
330/// already available in a register and must be materialized with new
331/// instructions.
332Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
334 // Give the target-specific code a try first.
335 if (isa<Constant>(V))
336 Reg = fastMaterializeConstant(cast<Constant>(V));
337
338 // If target-specific code couldn't or didn't want to handle the value, then
339 // give target-independent code a try.
340 if (!Reg)
341 Reg = materializeConstant(V, VT);
342
343 // Don't cache constant materializations in the general ValueMap.
344 // To do so would require tracking what uses they dominate.
345 if (Reg) {
348 }
349 return Reg;
350}
351
353 // Look up the value to see if we already have a register for it. We
354 // cache values defined by Instructions across blocks, and other values
355 // only locally. This is because Instructions already have the SSA
356 // def-dominates-use requirement enforced.
358 if (I != FuncInfo.ValueMap.end())
359 return I->second;
360 return LocalValueMap[V];
361}
362
363void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
364 if (!isa<Instruction>(I)) {
365 LocalValueMap[I] = Reg;
366 return;
367 }
368
369 Register &AssignedReg = FuncInfo.ValueMap[I];
370 if (!AssignedReg)
371 // Use the new register.
372 AssignedReg = Reg;
373 else if (Reg != AssignedReg) {
374 // Arrange for uses of AssignedReg to be replaced by uses of Reg.
375 for (unsigned i = 0; i < NumRegs; i++) {
376 FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
378 }
379
380 AssignedReg = Reg;
381 }
382}
383
386 if (!IdxN)
387 // Unhandled operand. Halt "fast" selection and bail.
388 return Register();
389
390 // If the index is smaller or larger than intptr_t, truncate or extend it.
391 MVT PtrVT = TLI.getPointerTy(DL);
392 EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
393 if (IdxVT.bitsLT(PtrVT)) {
394 IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
395 } else if (IdxVT.bitsGT(PtrVT)) {
396 IdxN =
397 fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
398 }
399 return IdxN;
400}
401
403 if (getLastLocalValue()) {
405 FuncInfo.MBB = FuncInfo.InsertPt->getParent();
407 } else
409}
410
413 assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
414 "Invalid iterator!");
415 while (I != E) {
416 if (SavedInsertPt == I)
417 SavedInsertPt = E;
418 if (EmitStartPt == I)
419 EmitStartPt = E.isValid() ? &*E : nullptr;
420 if (LastLocalValue == I)
421 LastLocalValue = E.isValid() ? &*E : nullptr;
422
423 MachineInstr *Dead = &*I;
424 ++I;
425 Dead->eraseFromParent();
426 ++NumFastIselDead;
427 }
429}
430
432 SavePoint OldInsertPt = FuncInfo.InsertPt;
434 return OldInsertPt;
435}
436
439 LastLocalValue = &*std::prev(FuncInfo.InsertPt);
440
441 // Restore the previous insert position.
442 FuncInfo.InsertPt = OldInsertPt;
443}
444
445bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
446 EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
447 if (VT == MVT::Other || !VT.isSimple())
448 // Unhandled type. Halt "fast" selection and bail.
449 return false;
450
451 // We only handle legal types. For example, on x86-32 the instruction
452 // selector contains all of the 64-bit instructions from x86-64,
453 // under the assumption that i64 won't be used if the target doesn't
454 // support it.
455 if (!TLI.isTypeLegal(VT)) {
456 // MVT::i1 is special. Allow AND, OR, or XOR because they
457 // don't require additional zeroing, which makes them easy.
458 if (VT == MVT::i1 && ISD::isBitwiseLogicOp(ISDOpcode))
459 VT = TLI.getTypeToTransformTo(I->getContext(), VT);
460 else
461 return false;
462 }
463
464 // Check if the first operand is a constant, and handle it as "ri". At -O0,
465 // we don't have anything that canonicalizes operand order.
466 if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
467 if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
468 Register Op1 = getRegForValue(I->getOperand(1));
469 if (!Op1)
470 return false;
471
472 Register ResultReg =
473 fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
474 VT.getSimpleVT());
475 if (!ResultReg)
476 return false;
477
478 // We successfully emitted code for the given LLVM Instruction.
479 updateValueMap(I, ResultReg);
480 return true;
481 }
482
483 Register Op0 = getRegForValue(I->getOperand(0));
484 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
485 return false;
486
487 // Check if the second operand is a constant and handle it appropriately.
488 if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
489 uint64_t Imm = CI->getSExtValue();
490
491 // Transform "sdiv exact X, 8" -> "sra X, 3".
492 if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
493 cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
494 Imm = Log2_64(Imm);
495 ISDOpcode = ISD::SRA;
496 }
497
498 // Transform "urem x, pow2" -> "and x, pow2-1".
499 if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
500 isPowerOf2_64(Imm)) {
501 --Imm;
502 ISDOpcode = ISD::AND;
503 }
504
505 Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
506 VT.getSimpleVT());
507 if (!ResultReg)
508 return false;
509
510 // We successfully emitted code for the given LLVM Instruction.
511 updateValueMap(I, ResultReg);
512 return true;
513 }
514
515 Register Op1 = getRegForValue(I->getOperand(1));
516 if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
517 return false;
518
519 // Now we have both operands in registers. Emit the instruction.
520 Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
521 ISDOpcode, Op0, Op1);
522 if (!ResultReg)
523 // Target-specific code wasn't able to find a machine opcode for
524 // the given ISD opcode and type. Halt "fast" selection and bail.
525 return false;
526
527 // We successfully emitted code for the given LLVM Instruction.
528 updateValueMap(I, ResultReg);
529 return true;
530}
531
533 Register N = getRegForValue(I->getOperand(0));
534 if (!N) // Unhandled operand. Halt "fast" selection and bail.
535 return false;
536
537 // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
538 // and bail.
539 if (isa<VectorType>(I->getType()))
540 return false;
541
542 // Keep a running tab of the total offset to coalesce multiple N = N + Offset
543 // into a single N = N + TotalOffset.
544 uint64_t TotalOffs = 0;
545 // FIXME: What's a good SWAG number for MaxOffs?
546 uint64_t MaxOffs = 2048;
547 MVT VT = TLI.getPointerTy(DL);
549 GTI != E; ++GTI) {
550 const Value *Idx = GTI.getOperand();
551 if (StructType *StTy = GTI.getStructTypeOrNull()) {
552 uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
553 if (Field) {
554 // N = N + Offset
555 TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
556 if (TotalOffs >= MaxOffs) {
557 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
558 if (!N) // Unhandled operand. Halt "fast" selection and bail.
559 return false;
560 TotalOffs = 0;
561 }
562 }
563 } else {
564 // If this is a constant subscript, handle it quickly.
565 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
566 if (CI->isZero())
567 continue;
568 // N = N + Offset
569 uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
570 TotalOffs += GTI.getSequentialElementStride(DL) * IdxN;
571 if (TotalOffs >= MaxOffs) {
572 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
573 if (!N) // Unhandled operand. Halt "fast" selection and bail.
574 return false;
575 TotalOffs = 0;
576 }
577 continue;
578 }
579 if (TotalOffs) {
580 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
581 if (!N) // Unhandled operand. Halt "fast" selection and bail.
582 return false;
583 TotalOffs = 0;
584 }
585
586 // N = N + Idx * ElementSize;
587 uint64_t ElementSize = GTI.getSequentialElementStride(DL);
589 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
590 return false;
591
592 if (ElementSize != 1) {
593 IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
594 if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
595 return false;
596 }
597 N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
598 if (!N) // Unhandled operand. Halt "fast" selection and bail.
599 return false;
600 }
601 }
602 if (TotalOffs) {
603 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
604 if (!N) // Unhandled operand. Halt "fast" selection and bail.
605 return false;
606 }
607
608 // We successfully emitted code for the given LLVM Instruction.
610 return true;
611}
612
613bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
614 const CallInst *CI, unsigned StartIdx) {
615 for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
616 Value *Val = CI->getArgOperand(i);
617 // Check for constants and encode them with a StackMaps::ConstantOp prefix.
618 if (const auto *C = dyn_cast<ConstantInt>(Val)) {
619 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
620 Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
621 } else if (isa<ConstantPointerNull>(Val)) {
622 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
624 } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
625 // Values coming from a stack location also require a special encoding,
626 // but that is added later on by the target specific frame index
627 // elimination implementation.
628 auto SI = FuncInfo.StaticAllocaMap.find(AI);
629 if (SI != FuncInfo.StaticAllocaMap.end())
631 else
632 return false;
633 } else {
635 if (!Reg)
636 return false;
637 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
638 }
639 }
640 return true;
641}
642
644 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
645 // [live variables...])
646 assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
647 "Stackmap cannot return a value.");
648
649 // The stackmap intrinsic only records the live variables (the arguments
650 // passed to it) and emits NOPS (if requested). Unlike the patchpoint
651 // intrinsic, this won't be lowered to a function call. This means we don't
652 // have to worry about calling conventions and target-specific lowering code.
653 // Instead we perform the call lowering right here.
654 //
655 // CALLSEQ_START(0, 0...)
656 // STACKMAP(id, nbytes, ...)
657 // CALLSEQ_END(0, 0)
658 //
660
661 // Add the <id> and <numBytes> constants.
662 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
663 "Expected a constant integer.");
664 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
665 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
666
667 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
668 "Expected a constant integer.");
669 const auto *NumBytes =
670 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
671 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
672
673 // Push live variables for the stack map (skipping the first two arguments
674 // <id> and <numBytes>).
675 if (!addStackMapLiveVars(Ops, I, 2))
676 return false;
677
678 // We are not adding any register mask info here, because the stackmap doesn't
679 // clobber anything.
680
681 // Add scratch registers as implicit def and early clobber.
682 CallingConv::ID CC = I->getCallingConv();
683 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
684 for (unsigned i = 0; ScratchRegs[i]; ++i)
686 ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
687 /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
688
689 // Issue CALLSEQ_START
690 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
691 auto Builder =
692 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
693 const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
694 for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
695 Builder.addImm(0);
696
697 // Issue STACKMAP.
699 TII.get(TargetOpcode::STACKMAP));
700 for (auto const &MO : Ops)
701 MIB.add(MO);
702
703 // Issue CALLSEQ_END
704 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
706 .addImm(0)
707 .addImm(0);
708
709 // Inform the Frame Information that we have a stackmap in this function.
711
712 return true;
713}
714
715/// Lower an argument list according to the target calling convention.
716///
717/// This is a helper for lowering intrinsics that follow a target calling
718/// convention or require stack pointer adjustment. Only a subset of the
719/// intrinsic's operands need to participate in the calling convention.
720bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
721 unsigned NumArgs, const Value *Callee,
722 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
723 ArgListTy Args;
724 Args.reserve(NumArgs);
725
726 // Populate the argument list.
727 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
728 Value *V = CI->getOperand(ArgI);
729
730 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
731
732 ArgListEntry Entry;
733 Entry.Val = V;
734 Entry.Ty = V->getType();
735 Entry.setAttributes(CI, ArgI);
736 Args.push_back(Entry);
737 }
738
739 Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
740 : CI->getType();
741 CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
742
743 return lowerCallTo(CLI);
744}
745
747 const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
748 StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
749 SmallString<32> MangledName;
750 Mangler::getNameWithPrefix(MangledName, Target, DL);
751 MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
752 return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
753}
754
756 // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
757 // i32 <numBytes>,
758 // i8* <target>,
759 // i32 <numArgs>,
760 // [Args...],
761 // [live variables...])
762 CallingConv::ID CC = I->getCallingConv();
763 bool IsAnyRegCC = CC == CallingConv::AnyReg;
764 bool HasDef = !I->getType()->isVoidTy();
765 Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
766
767 // Check if we can lower the return type when using anyregcc.
769 if (IsAnyRegCC && HasDef) {
770 ValueType = TLI.getSimpleValueType(DL, I->getType(), /*AllowUnknown=*/true);
771 if (ValueType == MVT::Other)
772 return false;
773 }
774
775 // Get the real number of arguments participating in the call <numArgs>
776 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
777 "Expected a constant integer.");
778 const auto *NumArgsVal =
779 cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
780 unsigned NumArgs = NumArgsVal->getZExtValue();
781
782 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
783 // This includes all meta-operands up to but not including CC.
784 unsigned NumMetaOpers = PatchPointOpers::CCPos;
785 assert(I->arg_size() >= NumMetaOpers + NumArgs &&
786 "Not enough arguments provided to the patchpoint intrinsic");
787
788 // For AnyRegCC the arguments are lowered later on manually.
789 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
791 CLI.setIsPatchPoint();
792 if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
793 return false;
794
795 assert(CLI.Call && "No call instruction specified.");
796
798
799 // Add an explicit result reg if we use the anyreg calling convention.
800 if (IsAnyRegCC && HasDef) {
801 assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
802 assert(ValueType.isValid());
804 CLI.NumResultRegs = 1;
805 Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
806 }
807
808 // Add the <id> and <numBytes> constants.
809 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
810 "Expected a constant integer.");
811 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
812 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
813
814 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
815 "Expected a constant integer.");
816 const auto *NumBytes =
817 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
818 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
819
820 // Add the call target.
821 if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
822 uint64_t CalleeConstAddr =
823 cast<ConstantInt>(C->getOperand(0))->getZExtValue();
824 Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
825 } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
826 if (C->getOpcode() == Instruction::IntToPtr) {
827 uint64_t CalleeConstAddr =
828 cast<ConstantInt>(C->getOperand(0))->getZExtValue();
829 Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
830 } else
831 llvm_unreachable("Unsupported ConstantExpr.");
832 } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
834 } else if (isa<ConstantPointerNull>(Callee))
836 else
837 llvm_unreachable("Unsupported callee address.");
838
839 // Adjust <numArgs> to account for any arguments that have been passed on
840 // the stack instead.
841 unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
842 Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
843
844 // Add the calling convention
845 Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
846
847 // Add the arguments we omitted previously. The register allocator should
848 // place these in any free register.
849 if (IsAnyRegCC) {
850 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
851 Register Reg = getRegForValue(I->getArgOperand(i));
852 if (!Reg)
853 return false;
854 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
855 }
856 }
857
858 // Push the arguments from the call instruction.
859 for (auto Reg : CLI.OutRegs)
860 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
861
862 // Push live variables for the stack map.
863 if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
864 return false;
865
866 // Push the register mask info.
869
870 // Add scratch registers as implicit def and early clobber.
871 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
872 for (unsigned i = 0; ScratchRegs[i]; ++i)
874 ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
875 /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
876
877 // Add implicit defs (return values).
878 for (auto Reg : CLI.InRegs)
879 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
880 /*isImp=*/true));
881
882 // Insert the patchpoint instruction before the call generated by the target.
884 TII.get(TargetOpcode::PATCHPOINT));
885
886 for (auto &MO : Ops)
887 MIB.add(MO);
888
890
891 // Delete the original call instruction.
892 CLI.Call->eraseFromParent();
893
894 // Inform the Frame Information that we have a patchpoint in this function.
896
897 if (CLI.NumResultRegs)
899 return true;
900}
901
903 const auto &Triple = TM.getTargetTriple();
905 return true; // don't do anything to this instruction.
908 /*isDef=*/false));
910 /*isDef=*/false));
913 TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
914 for (auto &MO : Ops)
915 MIB.add(MO);
916
917 // Insert the Patchable Event Call instruction, that gets lowered properly.
918 return true;
919}
920
922 const auto &Triple = TM.getTargetTriple();
924 return true; // don't do anything to this instruction.
927 /*isDef=*/false));
929 /*isDef=*/false));
931 /*isDef=*/false));
934 TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
935 for (auto &MO : Ops)
936 MIB.add(MO);
937
938 // Insert the Patchable Typed Event Call instruction, that gets lowered properly.
939 return true;
940}
941
942/// Returns an AttributeList representing the attributes applied to the return
943/// value of the given call.
946 if (CLI.RetSExt)
947 Attrs.push_back(Attribute::SExt);
948 if (CLI.RetZExt)
949 Attrs.push_back(Attribute::ZExt);
950 if (CLI.IsInReg)
951 Attrs.push_back(Attribute::InReg);
952
954 Attrs);
955}
956
957bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
958 unsigned NumArgs) {
959 MCContext &Ctx = MF->getContext();
960 SmallString<32> MangledName;
961 Mangler::getNameWithPrefix(MangledName, SymName, DL);
962 MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
963 return lowerCallTo(CI, Sym, NumArgs);
964}
965
967 unsigned NumArgs) {
968 FunctionType *FTy = CI->getFunctionType();
969 Type *RetTy = CI->getType();
970
971 ArgListTy Args;
972 Args.reserve(NumArgs);
973
974 // Populate the argument list.
975 // Attributes for args start at offset 1, after the return attribute.
976 for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
977 Value *V = CI->getOperand(ArgI);
978
979 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
980
981 ArgListEntry Entry;
982 Entry.Val = V;
983 Entry.Ty = V->getType();
984 Entry.setAttributes(CI, ArgI);
985 Args.push_back(Entry);
986 }
988
990 CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);
991
992 return lowerCallTo(CLI);
993}
994
996 // Handle the incoming return values from the call.
997 CLI.clearIns();
998 SmallVector<EVT, 4> RetTys;
999 ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
1000
1002 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
1003
1004 bool CanLowerReturn = TLI.CanLowerReturn(
1005 CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
1006
1007 // FIXME: sret demotion isn't supported yet - bail out.
1008 if (!CanLowerReturn)
1009 return false;
1010
1011 for (EVT VT : RetTys) {
1012 MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
1013 unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
1014 for (unsigned i = 0; i != NumRegs; ++i) {
1015 ISD::InputArg MyFlags;
1016 MyFlags.VT = RegisterVT;
1017 MyFlags.ArgVT = VT;
1018 MyFlags.Used = CLI.IsReturnValueUsed;
1019 if (CLI.RetSExt)
1020 MyFlags.Flags.setSExt();
1021 if (CLI.RetZExt)
1022 MyFlags.Flags.setZExt();
1023 if (CLI.IsInReg)
1024 MyFlags.Flags.setInReg();
1025 CLI.Ins.push_back(MyFlags);
1026 }
1027 }
1028
1029 // Handle all of the outgoing arguments.
1030 CLI.clearOuts();
1031 for (auto &Arg : CLI.getArgs()) {
1032 Type *FinalType = Arg.Ty;
1033 if (Arg.IsByVal)
1034 FinalType = Arg.IndirectType;
1036 FinalType, CLI.CallConv, CLI.IsVarArg, DL);
1037
1038 ISD::ArgFlagsTy Flags;
1039 if (Arg.IsZExt)
1040 Flags.setZExt();
1041 if (Arg.IsSExt)
1042 Flags.setSExt();
1043 if (Arg.IsInReg)
1044 Flags.setInReg();
1045 if (Arg.IsSRet)
1046 Flags.setSRet();
1047 if (Arg.IsSwiftSelf)
1048 Flags.setSwiftSelf();
1049 if (Arg.IsSwiftAsync)
1050 Flags.setSwiftAsync();
1051 if (Arg.IsSwiftError)
1052 Flags.setSwiftError();
1053 if (Arg.IsCFGuardTarget)
1054 Flags.setCFGuardTarget();
1055 if (Arg.IsByVal)
1056 Flags.setByVal();
1057 if (Arg.IsInAlloca) {
1058 Flags.setInAlloca();
1059 // Set the byval flag for CCAssignFn callbacks that don't know about
1060 // inalloca. This way we can know how many bytes we should've allocated
1061 // and how many bytes a callee cleanup function will pop. If we port
1062 // inalloca to more targets, we'll have to add custom inalloca handling in
1063 // the various CC lowering callbacks.
1064 Flags.setByVal();
1065 }
1066 if (Arg.IsPreallocated) {
1067 Flags.setPreallocated();
1068 // Set the byval flag for CCAssignFn callbacks that don't know about
1069 // preallocated. This way we can know how many bytes we should've
1070 // allocated and how many bytes a callee cleanup function will pop. If we
1071 // port preallocated to more targets, we'll have to add custom
1072 // preallocated handling in the various CC lowering callbacks.
1073 Flags.setByVal();
1074 }
1075 MaybeAlign MemAlign = Arg.Alignment;
1076 if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
1077 unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);
1078
1079 // For ByVal, alignment should come from FE. BE will guess if this info
1080 // is not there, but there are cases it cannot get right.
1081 if (!MemAlign)
1082 MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
1083 Flags.setByValSize(FrameSize);
1084 } else if (!MemAlign) {
1085 MemAlign = DL.getABITypeAlign(Arg.Ty);
1086 }
1087 Flags.setMemAlign(*MemAlign);
1088 if (Arg.IsNest)
1089 Flags.setNest();
1090 if (NeedsRegBlock)
1091 Flags.setInConsecutiveRegs();
1092 Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
1093 CLI.OutVals.push_back(Arg.Val);
1094 CLI.OutFlags.push_back(Flags);
1095 }
1096
1097 if (!fastLowerCall(CLI))
1098 return false;
1099
1100 // Set all unused physreg defs as dead.
1101 assert(CLI.Call && "No call instruction specified.");
1103
1104 if (CLI.NumResultRegs && CLI.CB)
1106
1107 // Set labels for heapallocsite call.
1108 if (CLI.CB)
1109 if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
1110 CLI.Call->setHeapAllocMarker(*MF, MD);
1111
1112 return true;
1113}
1114
1116 FunctionType *FuncTy = CI->getFunctionType();
1117 Type *RetTy = CI->getType();
1118
1119 ArgListTy Args;
1120 ArgListEntry Entry;
1121 Args.reserve(CI->arg_size());
1122
1123 for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
1124 Value *V = *i;
1125
1126 // Skip empty types
1127 if (V->getType()->isEmptyTy())
1128 continue;
1129
1130 Entry.Val = V;
1131 Entry.Ty = V->getType();
1132
1133 // Skip the first return-type Attribute to get to params.
1134 Entry.setAttributes(CI, i - CI->arg_begin());
1135 Args.push_back(Entry);
1136 }
1137
1138 // Check if target-independent constraints permit a tail call here.
1139 // Target-dependent constraints are checked within fastLowerCall.
1140 bool IsTailCall = CI->isTailCall();
1141 if (IsTailCall && !isInTailCallPosition(*CI, TM))
1142 IsTailCall = false;
1143 if (IsTailCall && !CI->isMustTailCall() &&
1144 MF->getFunction().getFnAttribute("disable-tail-calls").getValueAsBool())
1145 IsTailCall = false;
1146
1147 CallLoweringInfo CLI;
1148 CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
1149 .setTailCall(IsTailCall);
1150
1151 diagnoseDontCall(*CI);
1152
1153 return lowerCallTo(CLI);
1154}
1155
1157 const CallInst *Call = cast<CallInst>(I);
1158
1159 // Handle simple inline asms.
1160 if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
1161 // Don't attempt to handle constraints.
1162 if (!IA->getConstraintString().empty())
1163 return false;
1164
1165 unsigned ExtraInfo = 0;
1166 if (IA->hasSideEffects())
1168 if (IA->isAlignStack())
1169 ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1170 if (Call->isConvergent())
1171 ExtraInfo |= InlineAsm::Extra_IsConvergent;
1172 ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
1173
1175 TII.get(TargetOpcode::INLINEASM));
1176 MIB.addExternalSymbol(IA->getAsmString().c_str());
1177 MIB.addImm(ExtraInfo);
1178
1179 const MDNode *SrcLoc = Call->getMetadata("srcloc");
1180 if (SrcLoc)
1181 MIB.addMetadata(SrcLoc);
1182
1183 return true;
1184 }
1185
1186 // Handle intrinsic function calls.
1187 if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1188 return selectIntrinsicCall(II);
1189
1190 return lowerCall(Call);
1191}
1192
1194 if (!II->hasDbgRecords())
1195 return;
1196
1197 // Clear any metadata.
1198 MIMD = MIMetadata();
1199
1200 // Reverse order of debug records, because fast-isel walks through backwards.
1201 for (DbgRecord &DR : llvm::reverse(II->getDbgRecordRange())) {
1202 flushLocalValueMap();
1204
1205 if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
1206 assert(DLR->getLabel() && "Missing label");
1207 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DLR->getDebugLoc(),
1208 TII.get(TargetOpcode::DBG_LABEL))
1209 .addMetadata(DLR->getLabel());
1210 continue;
1211 }
1212
1213 DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
1214
1215 Value *V = nullptr;
1216 if (!DVR.hasArgList())
1217 V = DVR.getVariableLocationOp(0);
1218
1219 bool Res = false;
1222 Res = lowerDbgValue(V, DVR.getExpression(), DVR.getVariable(),
1223 DVR.getDebugLoc());
1224 } else {
1226 if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1227 continue;
1228 Res = lowerDbgDeclare(V, DVR.getExpression(), DVR.getVariable(),
1229 DVR.getDebugLoc());
1230 }
1231
1232 if (!Res)
1233 LLVM_DEBUG(dbgs() << "Dropping debug-info for " << DVR << "\n";);
1234 }
1235}
1236
1238 DILocalVariable *Var, const DebugLoc &DL) {
1239 // This form of DBG_VALUE is target-independent.
1240 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1241 if (!V || isa<UndefValue>(V)) {
1242 // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
1243 // undef DBG_VALUE to terminate any prior location.
1244 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false, 0U, Var, Expr);
1245 return true;
1246 }
1247 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1248 // See if there's an expression to constant-fold.
1249 if (Expr)
1250 std::tie(Expr, CI) = Expr->constantFold(CI);
1251 if (CI->getBitWidth() > 64)
1253 .addCImm(CI)
1254 .addImm(0U)
1255 .addMetadata(Var)
1256 .addMetadata(Expr);
1257 else
1259 .addImm(CI->getZExtValue())
1260 .addImm(0U)
1261 .addMetadata(Var)
1262 .addMetadata(Expr);
1263 return true;
1264 }
1265 if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1267 .addFPImm(CF)
1268 .addImm(0U)
1269 .addMetadata(Var)
1270 .addMetadata(Expr);
1271 return true;
1272 }
1273 if (const auto *Arg = dyn_cast<Argument>(V);
1274 Arg && Expr && Expr->isEntryValue()) {
1275 // As per the Verifier, this case is only valid for swift async Args.
1276 assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
1277
1278 Register Reg = getRegForValue(Arg);
1279 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
1280 if (Reg == VirtReg || Reg == PhysReg) {
1281 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false /*IsIndirect*/,
1282 PhysReg, Var, Expr);
1283 return true;
1284 }
1285
1286 LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
1287 "couldn't find a physical register\n");
1288 return false;
1289 }
1290 if (auto SI = FuncInfo.StaticAllocaMap.find(dyn_cast<AllocaInst>(V));
1291 SI != FuncInfo.StaticAllocaMap.end()) {
1292 MachineOperand FrameIndexOp = MachineOperand::CreateFI(SI->second);
1293 bool IsIndirect = false;
1294 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, FrameIndexOp,
1295 Var, Expr);
1296 return true;
1297 }
1298 if (Register Reg = lookUpRegForValue(V)) {
1299 // FIXME: This does not handle register-indirect values at offset 0.
1300 if (!FuncInfo.MF->useDebugInstrRef()) {
1301 bool IsIndirect = false;
1302 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, Reg, Var,
1303 Expr);
1304 return true;
1305 }
1306 // If using instruction referencing, produce this as a DBG_INSTR_REF,
1307 // to be later patched up by finalizeDebugInstrRefs.
1309 /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
1310 /* isKill */ false, /* isDead */ false,
1311 /* isUndef */ false, /* isEarlyClobber */ false,
1312 /* SubReg */ 0, /* isDebug */ true)});
1314 auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
1316 TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, MOs,
1317 Var, NewExpr);
1318 return true;
1319 }
1320 return false;
1321}
1322
1324 DILocalVariable *Var, const DebugLoc &DL) {
1325 if (!Address || isa<UndefValue>(Address)) {
1326 LLVM_DEBUG(dbgs() << "Dropping debug info (bad/undef address)\n");
1327 return false;
1328 }
1329
1330 std::optional<MachineOperand> Op;
1332 Op = MachineOperand::CreateReg(Reg, false);
1333
1334 // If we have a VLA that has a "use" in a metadata node that's then used
1335 // here but it has no other uses, then we have a problem. E.g.,
1336 //
1337 // int foo (const int *x) {
1338 // char a[*x];
1339 // return 0;
1340 // }
1341 //
1342 // If we assign 'a' a vreg and fast isel later on has to use the selection
1343 // DAG isel, it will want to copy the value to the vreg. However, there are
1344 // no uses, which goes counter to what selection DAG isel expects.
1345 if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1346 (!isa<AllocaInst>(Address) ||
1347 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1349 false);
1350
1351 if (Op) {
1353 "Expected inlined-at fields to agree");
1354 if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
1355 // If using instruction referencing, produce this as a DBG_INSTR_REF,
1356 // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
1357 // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
1359 {dwarf::DW_OP_LLVM_arg, 0, dwarf::DW_OP_deref});
1360 auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
1362 TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, *Op,
1363 Var, NewExpr);
1364 return true;
1365 }
1366
1367 // A dbg.declare describes the address of a source variable, so lower it
1368 // into an indirect DBG_VALUE.
1370 TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op, Var,
1371 Expr);
1372 return true;
1373 }
1374
1375 // We can't yet handle anything else here because it would require
1376 // generating code, thus altering codegen because of debug info.
1377 LLVM_DEBUG(
1378 dbgs() << "Dropping debug info (no materialized reg for address)\n");
1379 return false;
1380}
1381
1383 switch (II->getIntrinsicID()) {
1384 default:
1385 break;
1386 // At -O0 we don't care about the lifetime intrinsics.
1387 case Intrinsic::lifetime_start:
1388 case Intrinsic::lifetime_end:
1389 // The donothing intrinsic does, well, nothing.
1390 case Intrinsic::donothing:
1391 // Neither does the sideeffect intrinsic.
1392 case Intrinsic::sideeffect:
1393 // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1394 case Intrinsic::assume:
1395 // Neither does the llvm.experimental.noalias.scope.decl intrinsic
1396 case Intrinsic::experimental_noalias_scope_decl:
1397 return true;
1398 case Intrinsic::dbg_declare: {
1399 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1400 assert(DI->getVariable() && "Missing variable");
1401 if (FuncInfo.PreprocessedDbgDeclares.contains(DI))
1402 return true;
1403
1404 const Value *Address = DI->getAddress();
1406 MIMD.getDL()))
1407 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI);
1408
1409 return true;
1410 }
1411 case Intrinsic::dbg_assign:
1412 // A dbg.assign is a dbg.value with more information, typically produced
1413 // during optimisation. If one reaches fastisel then something odd has
1414 // happened (such as an optimised function being always-inlined into an
1415 // optnone function). We will not be using the extra information in the
1416 // dbg.assign in that case, just use its dbg.value fields.
1417 [[fallthrough]];
1418 case Intrinsic::dbg_value: {
1419 // This form of DBG_VALUE is target-independent.
1420 const DbgValueInst *DI = cast<DbgValueInst>(II);
1421 const Value *V = DI->getValue();
1422 DIExpression *Expr = DI->getExpression();
1423 DILocalVariable *Var = DI->getVariable();
1424 if (DI->hasArgList())
1425 // Signal that we don't have a location for this.
1426 V = nullptr;
1427
1429 "Expected inlined-at fields to agree");
1430
1431 if (!lowerDbgValue(V, Expr, Var, MIMD.getDL()))
1432 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1433
1434 return true;
1435 }
1436 case Intrinsic::dbg_label: {
1437 const DbgLabelInst *DI = cast<DbgLabelInst>(II);
1438 assert(DI->getLabel() && "Missing label");
1440 TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
1441 return true;
1442 }
1443 case Intrinsic::objectsize:
1444 llvm_unreachable("llvm.objectsize.* should have been lowered already");
1445
1446 case Intrinsic::is_constant:
1447 llvm_unreachable("llvm.is.constant.* should have been lowered already");
1448
1449 case Intrinsic::allow_runtime_check:
1450 case Intrinsic::allow_ubsan_check: {
1451 Register ResultReg = getRegForValue(ConstantInt::getTrue(II->getType()));
1452 if (!ResultReg)
1453 return false;
1454 updateValueMap(II, ResultReg);
1455 return true;
1456 }
1457
1458 case Intrinsic::launder_invariant_group:
1459 case Intrinsic::strip_invariant_group:
1460 case Intrinsic::expect: {
1461 Register ResultReg = getRegForValue(II->getArgOperand(0));
1462 if (!ResultReg)
1463 return false;
1464 updateValueMap(II, ResultReg);
1465 return true;
1466 }
1467 case Intrinsic::experimental_stackmap:
1468 return selectStackmap(II);
1469 case Intrinsic::experimental_patchpoint_void:
1470 case Intrinsic::experimental_patchpoint:
1471 return selectPatchpoint(II);
1472
1473 case Intrinsic::xray_customevent:
1474 return selectXRayCustomEvent(II);
1475 case Intrinsic::xray_typedevent:
1476 return selectXRayTypedEvent(II);
1477 }
1478
1479 return fastLowerIntrinsicCall(II);
1480}
1481
1482bool FastISel::selectCast(const User *I, unsigned Opcode) {
1483 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1484 EVT DstVT = TLI.getValueType(DL, I->getType());
1485
1486 if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1487 !DstVT.isSimple())
1488 // Unhandled type. Halt "fast" selection and bail.
1489 return false;
1490
1491 // Check if the destination type is legal.
1492 if (!TLI.isTypeLegal(DstVT))
1493 return false;
1494
1495 // Check if the source operand is legal.
1496 if (!TLI.isTypeLegal(SrcVT))
1497 return false;
1498
1499 Register InputReg = getRegForValue(I->getOperand(0));
1500 if (!InputReg)
1501 // Unhandled operand. Halt "fast" selection and bail.
1502 return false;
1503
1504 Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1505 Opcode, InputReg);
1506 if (!ResultReg)
1507 return false;
1508
1509 updateValueMap(I, ResultReg);
1510 return true;
1511}
1512
1514 EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1515 EVT DstEVT = TLI.getValueType(DL, I->getType());
1516 if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1517 !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1518 // Unhandled type. Halt "fast" selection and bail.
1519 return false;
1520
1521 MVT SrcVT = SrcEVT.getSimpleVT();
1522 MVT DstVT = DstEVT.getSimpleVT();
1523 Register Op0 = getRegForValue(I->getOperand(0));
1524 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1525 return false;
1526
1527 // If the bitcast doesn't change the type, just use the operand value.
1528 if (SrcVT == DstVT) {
1529 updateValueMap(I, Op0);
1530 return true;
1531 }
1532
1533 // Otherwise, select a BITCAST opcode.
1534 Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
1535 if (!ResultReg)
1536 return false;
1537
1538 updateValueMap(I, ResultReg);
1539 return true;
1540}
1541
1543 Register Reg = getRegForValue(I->getOperand(0));
1544 if (!Reg)
1545 // Unhandled operand.
1546 return false;
1547
1548 EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
1549 if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
1550 // Unhandled type, bail out.
1551 return false;
1552
1553 MVT Ty = ETy.getSimpleVT();
1554 const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
1555 Register ResultReg = createResultReg(TyRegClass);
1557 TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
1558
1559 updateValueMap(I, ResultReg);
1560 return true;
1561}
1562
1563// Remove local value instructions starting from the instruction after
1564// SavedLastLocalValue to the current function insert point.
1565void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1566{
1567 MachineInstr *CurLastLocalValue = getLastLocalValue();
1568 if (CurLastLocalValue != SavedLastLocalValue) {
1569 // Find the first local value instruction to be deleted.
1570 // This is the instruction after SavedLastLocalValue if it is non-NULL.
1571 // Otherwise it's the first instruction in the block.
1572 MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1573 if (SavedLastLocalValue)
1574 ++FirstDeadInst;
1575 else
1576 FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1577 setLastLocalValue(SavedLastLocalValue);
1578 removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1579 }
1580}
1581
1583 // Flush the local value map before starting each instruction.
1584 // This improves locality and debugging, and can reduce spills.
1585 // Reuse of values across IR instructions is relatively uncommon.
1586 flushLocalValueMap();
1587
1588 MachineInstr *SavedLastLocalValue = getLastLocalValue();
1589 // Just before the terminator instruction, insert instructions to
1590 // feed PHI nodes in successor blocks.
1591 if (I->isTerminator()) {
1592 if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1593 // PHI node handling may have generated local value instructions,
1594 // even though it failed to handle all PHI nodes.
1595 // We remove these instructions because SelectionDAGISel will generate
1596 // them again.
1597 removeDeadLocalValueCode(SavedLastLocalValue);
1598 return false;
1599 }
1600 }
1601
1602 // FastISel does not handle any operand bundles except OB_funclet.
1603 if (auto *Call = dyn_cast<CallBase>(I))
1604 for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
1605 if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1606 return false;
1607
1608 MIMD = MIMetadata(*I);
1609
1610 SavedInsertPt = FuncInfo.InsertPt;
1611
1612 if (const auto *Call = dyn_cast<CallInst>(I)) {
1613 const Function *F = Call->getCalledFunction();
1614 LibFunc Func;
1615
1616 // As a special case, don't handle calls to builtin library functions that
1617 // may be translated directly to target instructions.
1618 if (F && !F->hasLocalLinkage() && F->hasName() &&
1619 LibInfo->getLibFunc(F->getName(), Func) &&
1621 return false;
1622
1623 // Don't handle Intrinsic::trap if a trap function is specified.
1624 if (F && F->getIntrinsicID() == Intrinsic::trap &&
1625 Call->hasFnAttr("trap-func-name"))
1626 return false;
1627 }
1628
1629 // First, try doing target-independent selection.
1631 if (selectOperator(I, I->getOpcode())) {
1632 ++NumFastIselSuccessIndependent;
1633 MIMD = {};
1634 return true;
1635 }
1636 // Remove dead code.
1638 if (SavedInsertPt != FuncInfo.InsertPt)
1639 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1640 SavedInsertPt = FuncInfo.InsertPt;
1641 }
1642 // Next, try calling the target to attempt to handle the instruction.
1643 if (fastSelectInstruction(I)) {
1644 ++NumFastIselSuccessTarget;
1645 MIMD = {};
1646 return true;
1647 }
1648 // Remove dead code.
1650 if (SavedInsertPt != FuncInfo.InsertPt)
1651 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1652
1653 MIMD = {};
1654 // Undo phi node updates, because they will be added again by SelectionDAG.
1655 if (I->isTerminator()) {
1656 // PHI node handling may have generated local value instructions.
1657 // We remove them because SelectionDAGISel will generate them again.
1658 removeDeadLocalValueCode(SavedLastLocalValue);
1660 }
1661 return false;
1662}
1663
1664/// Emit an unconditional branch to the given block, unless it is the immediate
1665/// (fall-through) successor, and update the CFG.
1667 const DebugLoc &DbgLoc) {
1668 const BasicBlock *BB = FuncInfo.MBB->getBasicBlock();
1669 bool BlockHasMultipleInstrs = &BB->front() != &BB->back();
1670 // Handle legacy case of debug intrinsics
1671 if (BlockHasMultipleInstrs && !BB->getModule()->IsNewDbgInfoFormat)
1672 BlockHasMultipleInstrs = BB->sizeWithoutDebug() > 1;
1673 if (BlockHasMultipleInstrs && FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1674 // For more accurate line information if this is the only non-debug
1675 // instruction in the block then emit it, otherwise we have the
1676 // unconditional fall-through case, which needs no instructions.
1677 } else {
1678 // The unconditional branch case.
1679 TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1681 }
1682 if (FuncInfo.BPI) {
1686 } else
1688}
1689
1691 MachineBasicBlock *TrueMBB,
1692 MachineBasicBlock *FalseMBB) {
1693 // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
1694 // happen in degenerate IR and MachineIR forbids to have a block twice in the
1695 // successor/predecessor lists.
1696 if (TrueMBB != FalseMBB) {
1697 if (FuncInfo.BPI) {
1698 auto BranchProbability =
1699 FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1701 } else
1703 }
1704
1705 fastEmitBranch(FalseMBB, MIMD.getDL());
1706}
1707
1708/// Emit an FNeg operation.
1709bool FastISel::selectFNeg(const User *I, const Value *In) {
1710 Register OpReg = getRegForValue(In);
1711 if (!OpReg)
1712 return false;
1713
1714 // If the target has ISD::FNEG, use it.
1715 EVT VT = TLI.getValueType(DL, I->getType());
1716 Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1717 OpReg);
1718 if (ResultReg) {
1719 updateValueMap(I, ResultReg);
1720 return true;
1721 }
1722
1723 // Bitcast the value to integer, twiddle the sign bit with xor,
1724 // and then bitcast it back to floating-point.
1725 if (VT.getSizeInBits() > 64)
1726 return false;
1727 EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1728 if (!TLI.isTypeLegal(IntVT))
1729 return false;
1730
1731 Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1732 ISD::BITCAST, OpReg);
1733 if (!IntReg)
1734 return false;
1735
1736 Register IntResultReg = fastEmit_ri_(
1737 IntVT.getSimpleVT(), ISD::XOR, IntReg,
1738 UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1739 if (!IntResultReg)
1740 return false;
1741
1742 ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1743 IntResultReg);
1744 if (!ResultReg)
1745 return false;
1746
1747 updateValueMap(I, ResultReg);
1748 return true;
1749}
1750
1752 const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1753 if (!EVI)
1754 return false;
1755
1756 // Make sure we only try to handle extracts with a legal result. But also
1757 // allow i1 because it's easy.
1758 EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1759 if (!RealVT.isSimple())
1760 return false;
1761 MVT VT = RealVT.getSimpleVT();
1762 if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1763 return false;
1764
1765 const Value *Op0 = EVI->getOperand(0);
1766 Type *AggTy = Op0->getType();
1767
1768 // Get the base result register.
1769 unsigned ResultReg;
1771 if (I != FuncInfo.ValueMap.end())
1772 ResultReg = I->second;
1773 else if (isa<Instruction>(Op0))
1774 ResultReg = FuncInfo.InitializeRegForValue(Op0);
1775 else
1776 return false; // fast-isel can't handle aggregate constants at the moment
1777
1778 // Get the actual result register, which is an offset from the base register.
1779 unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1780
1781 SmallVector<EVT, 4> AggValueVTs;
1782 ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1783
1784 for (unsigned i = 0; i < VTIndex; i++)
1785 ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1786
1787 updateValueMap(EVI, ResultReg);
1788 return true;
1789}
1790
1791bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1792 switch (Opcode) {
1793 case Instruction::Add:
1794 return selectBinaryOp(I, ISD::ADD);
1795 case Instruction::FAdd:
1796 return selectBinaryOp(I, ISD::FADD);
1797 case Instruction::Sub:
1798 return selectBinaryOp(I, ISD::SUB);
1799 case Instruction::FSub:
1800 return selectBinaryOp(I, ISD::FSUB);
1801 case Instruction::Mul:
1802 return selectBinaryOp(I, ISD::MUL);
1803 case Instruction::FMul:
1804 return selectBinaryOp(I, ISD::FMUL);
1805 case Instruction::SDiv:
1806 return selectBinaryOp(I, ISD::SDIV);
1807 case Instruction::UDiv:
1808 return selectBinaryOp(I, ISD::UDIV);
1809 case Instruction::FDiv:
1810 return selectBinaryOp(I, ISD::FDIV);
1811 case Instruction::SRem:
1812 return selectBinaryOp(I, ISD::SREM);
1813 case Instruction::URem:
1814 return selectBinaryOp(I, ISD::UREM);
1815 case Instruction::FRem:
1816 return selectBinaryOp(I, ISD::FREM);
1817 case Instruction::Shl:
1818 return selectBinaryOp(I, ISD::SHL);
1819 case Instruction::LShr:
1820 return selectBinaryOp(I, ISD::SRL);
1821 case Instruction::AShr:
1822 return selectBinaryOp(I, ISD::SRA);
1823 case Instruction::And:
1824 return selectBinaryOp(I, ISD::AND);
1825 case Instruction::Or:
1826 return selectBinaryOp(I, ISD::OR);
1827 case Instruction::Xor:
1828 return selectBinaryOp(I, ISD::XOR);
1829
1830 case Instruction::FNeg:
1831 return selectFNeg(I, I->getOperand(0));
1832
1833 case Instruction::GetElementPtr:
1834 return selectGetElementPtr(I);
1835
1836 case Instruction::Br: {
1837 const BranchInst *BI = cast<BranchInst>(I);
1838
1839 if (BI->isUnconditional()) {
1840 const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1841 MachineBasicBlock *MSucc = FuncInfo.getMBB(LLVMSucc);
1842 fastEmitBranch(MSucc, BI->getDebugLoc());
1843 return true;
1844 }
1845
1846 // Conditional branches are not handed yet.
1847 // Halt "fast" selection and bail.
1848 return false;
1849 }
1850
1851 case Instruction::Unreachable:
1853 return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1854 else
1855 return true;
1856
1857 case Instruction::Alloca:
1858 // FunctionLowering has the static-sized case covered.
1859 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1860 return true;
1861
1862 // Dynamic-sized alloca is not handled yet.
1863 return false;
1864
1865 case Instruction::Call:
1866 // On AIX, normal call lowering uses the DAG-ISEL path currently so that the
1867 // callee of the direct function call instruction will be mapped to the
1868 // symbol for the function's entry point, which is distinct from the
1869 // function descriptor symbol. The latter is the symbol whose XCOFF symbol
1870 // name is the C-linkage name of the source level function.
1871 // But fast isel still has the ability to do selection for intrinsics.
1872 if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
1873 return false;
1874 return selectCall(I);
1875
1876 case Instruction::BitCast:
1877 return selectBitCast(I);
1878
1879 case Instruction::FPToSI:
1880 return selectCast(I, ISD::FP_TO_SINT);
1881 case Instruction::ZExt:
1882 return selectCast(I, ISD::ZERO_EXTEND);
1883 case Instruction::SExt:
1884 return selectCast(I, ISD::SIGN_EXTEND);
1885 case Instruction::Trunc:
1886 return selectCast(I, ISD::TRUNCATE);
1887 case Instruction::SIToFP:
1888 return selectCast(I, ISD::SINT_TO_FP);
1889
1890 case Instruction::IntToPtr: // Deliberate fall-through.
1891 case Instruction::PtrToInt: {
1892 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1893 EVT DstVT = TLI.getValueType(DL, I->getType());
1894 if (DstVT.bitsGT(SrcVT))
1895 return selectCast(I, ISD::ZERO_EXTEND);
1896 if (DstVT.bitsLT(SrcVT))
1897 return selectCast(I, ISD::TRUNCATE);
1898 Register Reg = getRegForValue(I->getOperand(0));
1899 if (!Reg)
1900 return false;
1901 updateValueMap(I, Reg);
1902 return true;
1903 }
1904
1905 case Instruction::ExtractValue:
1906 return selectExtractValue(I);
1907
1908 case Instruction::Freeze:
1909 return selectFreeze(I);
1910
1911 case Instruction::PHI:
1912 llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1913
1914 default:
1915 // Unhandled instruction. Halt "fast" selection and bail.
1916 return false;
1917 }
1918}
1919
1923 : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1924 MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1925 TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1926 TII(*MF->getSubtarget().getInstrInfo()),
1927 TLI(*MF->getSubtarget().getTargetLowering()),
1928 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1930
// Defaulted here, out of line, rather than in the header.
FastISel::~FastISel() = default;
1932
1933bool FastISel::fastLowerArguments() { return false; }
1934
1935bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1936
1938 return false;
1939}
1940
1941unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1942
1943unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
1944 return 0;
1945}
1946
1947unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1948 unsigned /*Op1*/) {
1949 return 0;
1950}
1951
1952unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1953 return 0;
1954}
1955
1956unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1957 const ConstantFP * /*FPImm*/) {
1958 return 0;
1959}
1960
1961unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1962 uint64_t /*Imm*/) {
1963 return 0;
1964}
1965
1966/// This method is a wrapper of fastEmit_ri. It first tries to emit an
1967/// instruction with an immediate operand using fastEmit_ri.
1968/// If that fails, it materializes the immediate into a register and try
1969/// fastEmit_rr instead.
1970Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1971 uint64_t Imm, MVT ImmType) {
1972 // If this is a multiply by a power of two, emit this as a shift left.
1973 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1974 Opcode = ISD::SHL;
1975 Imm = Log2_64(Imm);
1976 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1977 // div x, 8 -> srl x, 3
1978 Opcode = ISD::SRL;
1979 Imm = Log2_64(Imm);
1980 }
1981
1982 // Horrible hack (to be removed), check to make sure shift amounts are
1983 // in-range.
1984 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1985 Imm >= VT.getSizeInBits())
1986 return 0;
1987
1988 // First check if immediate type is legal. If not, we can't use the ri form.
1989 Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
1990 if (ResultReg)
1991 return ResultReg;
1992 Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1993 if (!MaterialReg) {
1994 // This is a bit ugly/slow, but failing here means falling out of
1995 // fast-isel, which would be very slow.
1996 IntegerType *ITy =
1998 MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1999 if (!MaterialReg)
2000 return 0;
2001 }
2002 return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
2003}
2004
2006 return MRI.createVirtualRegister(RC);
2007}
2008
2010 unsigned OpNum) {
2011 if (Op.isVirtual()) {
2012 const TargetRegisterClass *RegClass =
2013 TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
2014 if (!MRI.constrainRegClass(Op, RegClass)) {
2015 // If it's not legal to COPY between the register classes, something
2016 // has gone very wrong before we got here.
2017 Register NewOp = createResultReg(RegClass);
2019 TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
2020 return NewOp;
2021 }
2022 }
2023 return Op;
2024}
2025
2026Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
2027 const TargetRegisterClass *RC) {
2028 Register ResultReg = createResultReg(RC);
2029 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2030
2031 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
2032 return ResultReg;
2033}
2034
2035Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
2036 const TargetRegisterClass *RC, unsigned Op0) {
2037 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2038
2039 Register ResultReg = createResultReg(RC);
2040 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2041
2042 if (II.getNumDefs() >= 1)
2043 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2044 .addReg(Op0);
2045 else {
2047 .addReg(Op0);
2048 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2049 ResultReg)
2050 .addReg(II.implicit_defs()[0]);
2051 }
2052
2053 return ResultReg;
2054}
2055
2056Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2057 const TargetRegisterClass *RC, unsigned Op0,
2058 unsigned Op1) {
2059 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2060
2061 Register ResultReg = createResultReg(RC);
2062 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2063 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2064
2065 if (II.getNumDefs() >= 1)
2066 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2067 .addReg(Op0)
2068 .addReg(Op1);
2069 else {
2071 .addReg(Op0)
2072 .addReg(Op1);
2073 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2074 ResultReg)
2075 .addReg(II.implicit_defs()[0]);
2076 }
2077 return ResultReg;
2078}
2079
2080Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
2081 const TargetRegisterClass *RC, unsigned Op0,
2082 unsigned Op1, unsigned Op2) {
2083 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2084
2085 Register ResultReg = createResultReg(RC);
2086 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2087 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2088 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
2089
2090 if (II.getNumDefs() >= 1)
2091 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2092 .addReg(Op0)
2093 .addReg(Op1)
2094 .addReg(Op2);
2095 else {
2097 .addReg(Op0)
2098 .addReg(Op1)
2099 .addReg(Op2);
2100 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2101 ResultReg)
2102 .addReg(II.implicit_defs()[0]);
2103 }
2104 return ResultReg;
2105}
2106
2107Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
2108 const TargetRegisterClass *RC, unsigned Op0,
2109 uint64_t Imm) {
2110 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2111
2112 Register ResultReg = createResultReg(RC);
2113 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2114
2115 if (II.getNumDefs() >= 1)
2116 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2117 .addReg(Op0)
2118 .addImm(Imm);
2119 else {
2121 .addReg(Op0)
2122 .addImm(Imm);
2123 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2124 ResultReg)
2125 .addReg(II.implicit_defs()[0]);
2126 }
2127 return ResultReg;
2128}
2129
2130Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2131 const TargetRegisterClass *RC, unsigned Op0,
2132 uint64_t Imm1, uint64_t Imm2) {
2133 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2134
2135 Register ResultReg = createResultReg(RC);
2136 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2137
2138 if (II.getNumDefs() >= 1)
2139 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2140 .addReg(Op0)
2141 .addImm(Imm1)
2142 .addImm(Imm2);
2143 else {
2145 .addReg(Op0)
2146 .addImm(Imm1)
2147 .addImm(Imm2);
2148 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2149 ResultReg)
2150 .addReg(II.implicit_defs()[0]);
2151 }
2152 return ResultReg;
2153}
2154
2155Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2156 const TargetRegisterClass *RC,
2157 const ConstantFP *FPImm) {
2158 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2159
2160 Register ResultReg = createResultReg(RC);
2161
2162 if (II.getNumDefs() >= 1)
2163 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2164 .addFPImm(FPImm);
2165 else {
2167 .addFPImm(FPImm);
2168 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2169 ResultReg)
2170 .addReg(II.implicit_defs()[0]);
2171 }
2172 return ResultReg;
2173}
2174
2175Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2176 const TargetRegisterClass *RC, unsigned Op0,
2177 unsigned Op1, uint64_t Imm) {
2178 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2179
2180 Register ResultReg = createResultReg(RC);
2181 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2182 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2183
2184 if (II.getNumDefs() >= 1)
2185 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2186 .addReg(Op0)
2187 .addReg(Op1)
2188 .addImm(Imm);
2189 else {
2191 .addReg(Op0)
2192 .addReg(Op1)
2193 .addImm(Imm);
2194 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2195 ResultReg)
2196 .addReg(II.implicit_defs()[0]);
2197 }
2198 return ResultReg;
2199}
2200
2201Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2202 const TargetRegisterClass *RC, uint64_t Imm) {
2203 Register ResultReg = createResultReg(RC);
2204 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2205
2206 if (II.getNumDefs() >= 1)
2207 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2208 .addImm(Imm);
2209 else {
2211 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2212 ResultReg)
2213 .addReg(II.implicit_defs()[0]);
2214 }
2215 return ResultReg;
2216}
2217
2219 uint32_t Idx) {
2220 Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2222 "Cannot yet extract from physregs");
2223 const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2225 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2226 ResultReg).addReg(Op0, 0, Idx);
2227 return ResultReg;
2228}
2229
2230/// Emit MachineInstrs to compute the value of Op with all but the least
2231/// significant bit set to zero.
2233 return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
2234}
2235
2236/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2237/// Emit code to ensure constants are copied into registers when needed.
2238/// Remember the virtual registers that need to be added to the Machine PHI
2239/// nodes as input. We cannot just directly add them, because expansion
2240/// might result in multiple MBB's for one BB. As such, the start of the
2241/// BB might correspond to a different MBB than the end.
2242bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2245
2246 // Check successor nodes' PHI nodes that expect a constant to be available
2247 // from this block.
2248 for (const BasicBlock *SuccBB : successors(LLVMBB)) {
2249 if (!isa<PHINode>(SuccBB->begin()))
2250 continue;
2251 MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
2252
2253 // If this terminator has multiple identical successors (common for
2254 // switches), only handle each succ once.
2255 if (!SuccsHandled.insert(SuccMBB).second)
2256 continue;
2257
2259
2260 // At this point we know that there is a 1-1 correspondence between LLVM PHI
2261 // nodes and Machine PHI nodes, but the incoming operands have not been
2262 // emitted yet.
2263 for (const PHINode &PN : SuccBB->phis()) {
2264 // Ignore dead phi's.
2265 if (PN.use_empty())
2266 continue;
2267
2268 // Only handle legal types. Two interesting things to note here. First,
2269 // by bailing out early, we may leave behind some dead instructions,
2270 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2271 // own moves. Second, this check is necessary because FastISel doesn't
2272 // use CreateRegs to create registers, so it always creates
2273 // exactly one register for each non-void instruction.
2274 EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2275 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2276 // Handle integer promotions, though, because they're common and easy.
2277 if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2279 return false;
2280 }
2281 }
2282
2283 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2284
2285 // Set the DebugLoc for the copy. Use the location of the operand if
2286 // there is one; otherwise no location, flushLocalValueMap will fix it.
2287 MIMD = {};
2288 if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2289 MIMD = MIMetadata(*Inst);
2290
2291 Register Reg = getRegForValue(PHIOp);
2292 if (!Reg) {
2294 return false;
2295 }
2296 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2297 MIMD = {};
2298 }
2299 }
2300
2301 return true;
2302}
2303
2304bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2305 assert(LI->hasOneUse() &&
2306 "tryToFoldLoad expected a LoadInst with a single use");
2307 // We know that the load has a single use, but don't know what it is. If it
2308 // isn't one of the folded instructions, then we can't succeed here. Handle
2309 // this by scanning the single-use users of the load until we get to FoldInst.
2310 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2311
2312 const Instruction *TheUser = LI->user_back();
2313 while (TheUser != FoldInst && // Scan up until we find FoldInst.
2314 // Stay in the right block.
2315 TheUser->getParent() == FoldInst->getParent() &&
2316 --MaxUsers) { // Don't scan too far.
2317 // If there are multiple or no uses of this instruction, then bail out.
2318 if (!TheUser->hasOneUse())
2319 return false;
2320
2321 TheUser = TheUser->user_back();
2322 }
2323
2324 // If we didn't find the fold instruction, then we failed to collapse the
2325 // sequence.
2326 if (TheUser != FoldInst)
2327 return false;
2328
2329 // Don't try to fold volatile loads. Target has to deal with alignment
2330 // constraints.
2331 if (LI->isVolatile())
2332 return false;
2333
2334 // Figure out which vreg this is going into. If there is no assigned vreg yet
2335 // then there actually was no reference to it. Perhaps the load is referenced
2336 // by a dead instruction.
2337 Register LoadReg = getRegForValue(LI);
2338 if (!LoadReg)
2339 return false;
2340
2341 // We can't fold if this vreg has no uses or more than one use. Multiple uses
2342 // may mean that the instruction got lowered to multiple MIs, or the use of
2343 // the loaded value ended up being multiple operands of the result.
2344 if (!MRI.hasOneUse(LoadReg))
2345 return false;
2346
2347 // If the register has fixups, there may be additional uses through a
2348 // different alias of the register.
2349 if (FuncInfo.RegsWithFixups.contains(LoadReg))
2350 return false;
2351
2353 MachineInstr *User = RI->getParent();
2354
2355 // Set the insertion point properly. Folding the load can cause generation of
2356 // other random instructions (like sign extends) for addressing modes; make
2357 // sure they get inserted in a logical place before the new instruction.
2359 FuncInfo.MBB = User->getParent();
2360
2361 // Ask the target to try folding the load.
2362 return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2363}
2364
2366 // Must be an add.
2367 if (!isa<AddOperator>(Add))
2368 return false;
2369 // Type size needs to match.
2370 if (DL.getTypeSizeInBits(GEP->getType()) !=
2371 DL.getTypeSizeInBits(Add->getType()))
2372 return false;
2373 // Must be in the same basic block.
2374 if (isa<Instruction>(Add) &&
2375 FuncInfo.getMBB(cast<Instruction>(Add)->getParent()) != FuncInfo.MBB)
2376 return false;
2377 // Must have a constant operand.
2378 return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2379}
2380
2383 const Value *Ptr;
2384 Type *ValTy;
2385 MaybeAlign Alignment;
2387 bool IsVolatile;
2388
2389 if (const auto *LI = dyn_cast<LoadInst>(I)) {
2390 Alignment = LI->getAlign();
2391 IsVolatile = LI->isVolatile();
2393 Ptr = LI->getPointerOperand();
2394 ValTy = LI->getType();
2395 } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2396 Alignment = SI->getAlign();
2397 IsVolatile = SI->isVolatile();
2399 Ptr = SI->getPointerOperand();
2400 ValTy = SI->getValueOperand()->getType();
2401 } else
2402 return nullptr;
2403
2404 bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
2405 bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
2406 bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
2407 const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2408
2409 AAMDNodes AAInfo = I->getAAMetadata();
2410
2411 if (!Alignment) // Ensure that codegen never sees alignment 0.
2412 Alignment = DL.getABITypeAlign(ValTy);
2413
2414 unsigned Size = DL.getTypeStoreSize(ValTy);
2415
2416 if (IsVolatile)
2418 if (IsNonTemporal)
2420 if (IsDereferenceable)
2422 if (IsInvariant)
2424
2426 *Alignment, AAInfo, Ranges);
2427}
2428
2430 // If both operands are the same, then try to optimize or fold the cmp.
2431 CmpInst::Predicate Predicate = CI->getPredicate();
2432 if (CI->getOperand(0) != CI->getOperand(1))
2433 return Predicate;
2434
2435 switch (Predicate) {
2436 default: llvm_unreachable("Invalid predicate!");
2437 case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2438 case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2439 case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2440 case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2441 case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2442 case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2443 case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2444 case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2445 case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2446 case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2447 case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2448 case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2449 case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2450 case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2451 case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2452 case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2453
2454 case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2455 case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2456 case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2457 case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2458 case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2459 case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2460 case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2461 case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2462 case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2463 case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2464 }
2465
2466 return Predicate;
2467}
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
static Register findLocalRegDef(MachineInstr &MI)
Return the defined register if this instruction defines exactly one virtual register and uses no othe...
Definition: FastISel.cpp:161
static bool isRegUsedByPhiNodes(Register DefReg, FunctionLoweringInfo &FuncInfo)
Definition: FastISel.cpp:178
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
Definition: FastISel.cpp:944
This file defines the FastISel class.
Hexagon Common GEP
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
#define P(N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isCommutative(Instruction *I)
This file defines the SmallPtrSet class.
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
An arbitrary precision integer that knows its signedness.
Definition: APSInt.h:23
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
bool getValueAsBool() const
Return the attribute's value as a boolean.
Definition: Attributes.cpp:378
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const Instruction & front() const
Definition: BasicBlock.h:471
filter_iterator< BasicBlock::const_iterator, std::function< bool(constInstruction &)> >::difference_type sizeWithoutDebug() const
Return the size of the basic block ignoring debug instructions.
Definition: BasicBlock.cpp:270
const Instruction & back() const
Definition: BasicBlock.h:473
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:292
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1523
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1385
Value * getCalledOperand() const
Definition: InstrTypes.h:1458
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1410
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1391
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1323
unsigned arg_size() const
Definition: InstrTypes.h:1408
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
bool isMustTailCall() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:747
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:760
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:774
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:786
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:787
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:763
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:772
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:761
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:762
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:781
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:780
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:784
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:771
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:765
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:768
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:782
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:769
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:764
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:766
@ ICMP_EQ
equal
Definition: InstrTypes.h:778
@ ICMP_NE
not equal
Definition: InstrTypes.h:779
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:785
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:773
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:783
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:770
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:759
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:767
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:847
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:850
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
std::pair< DIExpression *, const ConstantInt * > constantFold(const ConstantInt *CI)
Try to shorten an expression with an initial constant operand.
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:695
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Definition: DataLayout.cpp:846
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:838
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:461
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:621
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:429
This represents the llvm.dbg.declare instruction.
Value * getAddress() const
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
A debug info location.
Definition: DebugLoc.h:33
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
MachineRegisterInfo & MRI
Definition: FastISel.h:205
Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:2218
const TargetLibraryInfo * LibInfo
Definition: FastISel.h:214
const DataLayout & DL
Definition: FastISel.h:210
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:532
void setLastLocalValue(MachineInstr *I)
Update the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:237
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:643
bool selectExtractValue(const User *U)
Definition: FastISel.cpp:1751
DenseMap< const Value *, Register > LocalValueMap
Definition: FastISel.h:202
void fastEmitBranch(MachineBasicBlock *MSucc, const DebugLoc &DbgLoc)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor,...
Definition: FastISel.cpp:1666
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF)
Emit the floating-point constant +0.0 in a register using target- specific logic.
Definition: FastISel.h:480
MachineInstr * EmitStartPt
The top most instruction in the current block that is allowed for emitting local variables.
Definition: FastISel.h:226
bool selectXRayCustomEvent(const CallInst *II)
Definition: FastISel.cpp:902
Register fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:2026
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering.
Definition: FastISel.cpp:1937
virtual bool lowerDbgDeclare(const Value *V, DIExpression *Expr, DILocalVariable *Var, const DebugLoc &DL)
Target-independent lowering of debug information.
Definition: FastISel.cpp:1323
MachineInstr * getLastLocalValue()
Return the position of the last instruction emitted for materializing constants for use in the curren...
Definition: FastISel.h:233
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1115
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1961
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:437
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:966
Register fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0)
Emit a MachineInstr with one register operand and a result register in the given register class.
Definition: FastISel.cpp:2035
void handleDbgInfo(const Instruction *II)
Target-independent lowering of non-instruction debug info associated with this instruction.
Definition: FastISel.cpp:1193
bool selectFreeze(const User *I)
Definition: FastISel.cpp:1542
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1382
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1482
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We're checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2304
Register getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value.
Definition: FastISel.cpp:239
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:411
void startNewBlock()
Set the current block to which generated machine instructions will be appended.
Definition: FastISel.cpp:124
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2382
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:300
Register fastEmitInst_i(unsigned MachineInstOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:2201
MachineFrameInfo & MFI
Definition: FastISel.h:206
MachineFunction * MF
Definition: FastISel.h:204
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2365
virtual bool lowerDbgValue(const Value *V, DIExpression *Expr, DILocalVariable *Var, const DebugLoc &DL)
Target-independent lowering of debug information.
Definition: FastISel.cpp:1237
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1941
TargetLoweringBase::ArgListTy ArgListTy
Definition: FastISel.h:69
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1582
virtual unsigned fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:473
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1935
bool selectXRayTypedEvent(const CallInst *II)
Definition: FastISel.cpp:921
Register fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1)
Emit a MachineInstr with two register operands and a result register in the given register class.
Definition: FastISel.cpp:2056
Register fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:2130
Register createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:2005
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1956
virtual bool fastLowerArguments()
This method is called by target-independent code to do target- specific argument lowering.
Definition: FastISel.cpp:1933
bool selectFNeg(const User *I, const Value *In)
Emit an FNeg operation.
Definition: FastISel.cpp:1709
const TargetInstrInfo & TII
Definition: FastISel.h:211
bool selectCall(const User *I)
Definition: FastISel.cpp:1156
Register lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:352
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2429
void finishBasicBlock()
Flush the local value map.
Definition: FastISel.cpp:137
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:203
Register getRegForGEPIndex(const Value *Idx)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:384
MachineConstantPool & MCP
Definition: FastISel.h:207
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr),...
Definition: FastISel.cpp:1791
bool SkipTargetIndependentISel
Definition: FastISel.h:215
Register fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
Definition: FastISel.cpp:2155
Register constrainOperandRegClass(const MCInstrDesc &II, Register Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:2009
void updateValueMap(const Value *I, Register Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:363
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:445
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1920
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:755
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:402
virtual bool fastSelectInstruction(const Instruction *I)=0
This method is called by target-independent code when the normal FastISel process fails to select an ...
const TargetLowering & TLI
Definition: FastISel.h:212
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, unsigned Op1)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1947
const TargetMachine & TM
Definition: FastISel.h:209
Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1970
Register fastEmitZExtFromI1(MVT VT, unsigned Op0)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero.
Definition: FastISel.cpp:2232
MIMetadata MIMD
Definition: FastISel.h:208
MachineInstr * LastLocalValue
The position of the last instruction for materializing constants for use in the current block.
Definition: FastISel.h:221
Register fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:2175
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:139
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:431
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtains the branch weight and adds TrueMBB and FalseMBB to ...
Definition: FastISel.cpp:1690
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1513
Register fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:2107
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1943
virtual unsigned fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:476
virtual ~FastISel()
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1952
const TargetRegisterInfo & TRI
Definition: FastISel.h:213
Register fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1, unsigned Op2)
Emit a MachineInstr with three register operands and a result register in the given register class.
Definition: FastISel.cpp:2080
TargetLoweringBase::ArgListEntry ArgListEntry
Definition: FastISel.h:68
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
SmallPtrSet< const DbgVariableRecord *, 8 > PreprocessedDVRDeclares
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseSet< Register > RegsWithFixups
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
Register InitializeRegForValue(const Value *V)
SmallPtrSet< const DbgDeclareInst *, 8 > PreprocessedDbgDeclares
Collection of dbg.declare instructions handled after argument lowering and before ISel proper.
DenseMap< const Value *, Register > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
MachineBasicBlock * MBB
MBB - The current block.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
DenseMap< Register, Register > RegFixups
RegFixups - Registers which need to be replaced after isel is done.
MachineRegisterInfo * RegInfo
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
Class to represent function types.
Definition: DerivedTypes.h:103
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:769
arg_iterator arg_end()
Definition: Function.h:875
arg_iterator arg_begin()
Definition: Function.h:866
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:380
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:169
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:381
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
An instruction for reading from memory.
Definition: Instructions.h:174
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:203
Context object for machine code objects.
Definition: MCContext.h:83
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:213
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Metadata node.
Definition: Metadata.h:1069
Set of metadata that should be preserved when using BuildMI().
const DebugLoc & getDL() const
Machine Value Type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
void addSuccessorWithoutProb(MachineBasicBlock *Succ)
Add Succ as a successor of this MachineBasicBlock.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
MachineInstrBundleIterator< MachineInstr > iterator
void setHasPatchPoint(bool s=true)
void setHasStackMap(bool s=true)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
reg_iterator reg_begin(Register RegNo) const
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
ArrayRef< std::pair< MCRegister, Register > > liveins() const
const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:120
bool IsNewDbgInfoFormat
Is this Module using intrinsics to record the position of debugging information, or non-intrinsic rec...
Definition: Module.h:217
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:71
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:367
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:502
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TypeSize getElementOffset(unsigned Idx) const
Definition: DataLayout.h:600
Class to represent struct types.
Definition: DerivedTypes.h:216
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
unsigned getCallFrameDestroyOpcode() const
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
const Triple & getTargetTriple() const
TargetOptions Options
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
bool isOSAIX() const
Tests whether the OS is AIX.
Definition: Triple.h:710
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition: Triple.h:911
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:185
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
const ParentTy * getParent() const
Definition: ilist_node.h:32
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ ConstantFP
Definition: ISDOpcodes.h:77
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:840
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:397
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:953
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:804
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:980
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:734
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:810
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:886
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:708
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1276
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:816
bool isBitwiseLogicOp(unsigned Opcode)
Whether this is bitwise logic opcode.
Definition: ISDOpcodes.h:1495
Reg
All possible values of the reg field in the ModR/M byte.
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition: Dwarf.h:147
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:227
reverse_iterator rend(StringRef path)
Get reverse end iterator over path.
Definition: Path.cpp:307
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:656
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:296
gep_type_iterator gep_type_end(const User *GEP)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:346
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
@ Add
Sum of integers.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:535
DWARFExpression::Operation Op
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:79
gep_type_iterator gep_type_begin(const User *GEP)
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition: Analysis.cpp:33
PointerUnion< const Value *, const PseudoSourceValue * > ValueType
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
static constexpr roundingMode rmTowardZero
Definition: APFloat.h:258
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:275
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:291
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:359
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:275
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:307
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:65
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:95
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:94
SmallVector< Register, 16 > OutRegs
Definition: FastISel.h:96
CallLoweringInfo & setTailCall(bool Value=true)
Definition: FastISel.h:177
SmallVector< Register, 4 > InRegs
Definition: FastISel.h:98
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:182
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, const CallBase &Call)
Definition: FastISel.h:104
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:97
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117