//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
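//
// As a quick illustration of that flow: for an IR instruction such as
// "%z = add i32 %x, %y", selectOperator() maps Instruction::Add to ISD::ADD
// and selectBinaryOp() then asks the target for a matching register-register
// opcode via fastEmit_rr, falling back to SelectionDAG when none is
// available.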
//
//===----------------------------------------------------------------------===//

42#include "llvm/ADT/APFloat.h"
43#include "llvm/ADT/APSInt.h"
44#include "llvm/ADT/DenseMap.h"
48#include "llvm/ADT/Statistic.h"
68#include "llvm/IR/Argument.h"
69#include "llvm/IR/Attributes.h"
70#include "llvm/IR/BasicBlock.h"
71#include "llvm/IR/CallingConv.h"
72#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
74#include "llvm/IR/DataLayout.h"
75#include "llvm/IR/DebugLoc.h"
78#include "llvm/IR/Function.h"
80#include "llvm/IR/GlobalValue.h"
81#include "llvm/IR/InlineAsm.h"
82#include "llvm/IR/InstrTypes.h"
83#include "llvm/IR/Instruction.h"
86#include "llvm/IR/LLVMContext.h"
87#include "llvm/IR/Mangler.h"
88#include "llvm/IR/Metadata.h"
89#include "llvm/IR/Module.h"
90#include "llvm/IR/Operator.h"
92#include "llvm/IR/Type.h"
93#include "llvm/IR/User.h"
94#include "llvm/IR/Value.h"
95#include "llvm/MC/MCContext.h"
96#include "llvm/MC/MCInstrDesc.h"
98#include "llvm/Support/Debug.h"
104#include <cassert>
105#include <cstdint>
106#include <iterator>
107#include <optional>
108#include <utility>
109
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

void FastISel::finishBasicBlock() { flushLocalValueMap(); }

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return
/// Register().
static Register findLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return Register();
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't delete it.
      return Register();
    }
  }
  return RegDef;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

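// Clean up the local value area at the end of a block: erase local value
// materializations whose results are never used, and make sure the first
// surviving local value instruction carries a debug location.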
void FastISel::flushLocalValueMap() {
  // If FastISel bails out, it could leave local value instructions behind
  // that aren't used for anything. Detect and erase those.
  if (LastLocalValue != EmitStartPt) {
    // Save the first instruction after local values, for later.
    MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
    ++FirstNonValue;

    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);
    for (MachineInstr &LocalMI :
         llvm::make_early_inc_range(llvm::make_range(RI, RE))) {
      Register DefReg = findLocalRegDef(LocalMI);
      if (!DefReg)
        continue;
      if (FuncInfo.RegsWithFixups.count(DefReg))
        continue;
      bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
      if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
        if (EmitStartPt == &LocalMI)
          EmitStartPt = EmitStartPt->getPrevNode();
        LLVM_DEBUG(dbgs() << "removing dead local value materialization"
                          << LocalMI);
        LocalMI.eraseFromParent();
      }
    }

    if (FirstNonValue != FuncInfo.MBB->end()) {
      // See if there are any local value instructions left. If so, we want to
      // make sure the first one has a debug location; if it doesn't, use the
      // first non-value instruction's debug location.

      // If EmitStartPt is non-null, this block had copies at the top before
      // FastISel started doing anything; it points to the last one, so the
      // first local value instruction is the one after EmitStartPt.
      // If EmitStartPt is null, the first local value instruction is at the
      // top of the block.
      MachineBasicBlock::iterator FirstLocalValue =
          EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
                      : FuncInfo.MBB->begin();
      if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
        FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return Register();
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

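// Record the register assigned to the IR value \p I. If a different register
// was already assigned (for example by a use selected in an earlier block),
// remember a fixup so the two registers can be merged after selection.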
void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

Register FastISel::getRegForGEPIndex(MVT PtrVT, const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return Register();

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  }
  return IdxN;
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
}

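// Erase the machine instructions in the range [I, E), taking care to keep
// SavedInsertPt, EmitStartPt, and LastLocalValue valid if they point into
// the erased range.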
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  SavePoint OldInsertPt = FuncInfo.InsertPt;
  recomputeInsertPt();
  return OldInsertPt;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && ISD::isBitwiseLogicOp(ISDOpcode))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
                       VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
                                      VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
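  // For example, in "getelementptr {i32, i32}, ptr %p, i32 0, i32 1" the
  // second index contributes a constant field offset (4 bytes under a
  // typical data layout) that is folded into TotalOffs rather than emitted
  // as a separate add.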
  MVT VT = TLI.getValueType(DL, I->getType()).getSimpleVT();

  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
      }
    } else {
      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += GTI.getSequentialElementStride(DL) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = GTI.getSequentialElementStride(DL);
      Register IdxN = getRegForGEPIndex(VT, Idx);
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      Register Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap doesn't
  // clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry(V);
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
  //                                         i32 <numBytes>,
  //                                         i8* <target>,
  //                                         i32 <numArgs>,
  //                                         [Args...],
  //                                         [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Check if we can lower the return type when using anyregcc.
  MVT ValueType;
  if (IsAnyRegCC && HasDef) {
    ValueType = TLI.getSimpleValueType(DL, I->getType(), /*AllowUnknown=*/true);
    if (ValueType == MVT::Other)
      return false;
  }

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    assert(ValueType.isValid());
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(ValueType));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, MIMD,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, which gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry(V);
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext(),
      CLI.RetTy);

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (EVT VT : RetTys) {
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::ArgFlagsTy Flags;
      if (CLI.RetSExt)
        Flags.setSExt();
      if (CLI.RetZExt)
        Flags.setZExt();
      if (CLI.IsInReg)
        Flags.setInReg();
      ISD::InputArg Ret(Flags, RegisterVT, VT, CLI.RetTy, CLI.IsReturnValueUsed,
                        ISD::InputArg::NoArgIndex, 0);
      CLI.Ins.push_back(Ret);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = Arg.IndirectType;
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftAsync)
      Flags.setSwiftAsync();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop. If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    MaybeAlign MemAlign = Arg.Alignment;
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);

      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      if (!MemAlign)
        MemAlign = TLI.getByValTypeAlignment(Arg.IndirectType, DL);
      Flags.setByValSize(FrameSize);
    } else if (!MemAlign) {
      MemAlign = DL.getABITypeAlign(Arg.Ty);
    }
    Flags.setMemAlign(*MemAlign);
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    ArgListEntry Entry(V);
    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && !CI->isMustTailCall() &&
      MF->getFunction().getFnAttribute("disable-tail-calls").getValueAsBool())
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  if (lowerCallTo(CLI)) {
    diagnoseDontCall(*CI);
    return true;
  }

  return false;
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().data());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  return lowerCall(Call);
}

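// Lower the debug records (variable locations and labels) attached ahead of
// the instruction \p II; fast-isel emits them before selecting \p II itself.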
void FastISel::handleDbgInfo(const Instruction *II) {
  if (!II->hasDbgRecords())
    return;

  // Clear any metadata.
  MIMD = MIMetadata();

  // Reverse order of debug records, because fast-isel walks through backwards.
  for (DbgRecord &DR : llvm::reverse(II->getDbgRecordRange())) {
    flushLocalValueMap();
    recomputeInsertPt();

    if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      assert(DLR->getLabel() && "Missing label");
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DLR->getDebugLoc(),
              TII.get(TargetOpcode::DBG_LABEL))
          .addMetadata(DLR->getLabel());
      continue;
    }

    DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);

    Value *V = nullptr;
    if (!DVR.hasArgList())
      V = DVR.getVariableLocationOp(0);

    bool Res = false;
    if (DVR.getType() == DbgVariableRecord::LocationType::Value ||
        DVR.getType() == DbgVariableRecord::LocationType::Assign) {
      Res = lowerDbgValue(V, DVR.getExpression(), DVR.getVariable(),
                          DVR.getDebugLoc());
    } else {
      assert(DVR.getType() == DbgVariableRecord::LocationType::Declare);
      if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
        continue;
      Res = lowerDbgDeclare(V, DVR.getExpression(), DVR.getVariable(),
                            DVR.getDebugLoc());
    }

    if (!Res)
      LLVM_DEBUG(dbgs() << "Dropping debug-info for " << DVR << "\n");
  }
}

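// Emit the machine representation of a dbg.value-style record. Depending on
// what the location value is, this becomes a DBG_VALUE with an immediate,
// FP-immediate, frame-index, or register operand, or a DBG_INSTR_REF when
// instruction referencing is enabled.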
bool FastISel::lowerDbgValue(const Value *V, DIExpression *Expr,
                             DILocalVariable *Var, const DebugLoc &DL) {
  // This form of DBG_VALUE is target-independent.
  const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
  if (!V || isa<UndefValue>(V)) {
    // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
    // undef DBG_VALUE to terminate any prior location.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false, 0U, Var, Expr);
    return true;
  }
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    // See if there's an expression to constant-fold.
    if (Expr)
      std::tie(Expr, CI) = Expr->constantFold(CI);
    if (CI->getBitWidth() > 64)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI)
          .addImm(0U)
          .addMetadata(Var)
          .addMetadata(Expr);
    else
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue())
          .addImm(0U)
          .addMetadata(Var)
          .addMetadata(Expr);
    return true;
  }
  if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF)
        .addImm(0U)
        .addMetadata(Var)
        .addMetadata(Expr);
    return true;
  }
  if (const auto *Arg = dyn_cast<Argument>(V);
      Arg && Expr && Expr->isEntryValue()) {
    // As per the Verifier, this case is only valid for swift async Args.
    assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));

    Register Reg = getRegForValue(Arg);
    for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
      if (Reg == VirtReg || Reg == PhysReg) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false /*IsIndirect*/,
                PhysReg, Var, Expr);
        return true;
      }

    LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
                         "couldn't find a physical register\n");
    return false;
  }
  if (auto SI = FuncInfo.StaticAllocaMap.find(dyn_cast<AllocaInst>(V));
      SI != FuncInfo.StaticAllocaMap.end()) {
    MachineOperand FrameIndexOp = MachineOperand::CreateFI(SI->second);
    bool IsIndirect = false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, FrameIndexOp,
            Var, Expr);
    return true;
  }
  if (Register Reg = lookUpRegForValue(V)) {
    // FIXME: This does not handle register-indirect values at offset 0.
    if (!FuncInfo.MF->useDebugInstrRef()) {
      bool IsIndirect = false;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, Reg, Var,
              Expr);
      return true;
    }
    // If using instruction referencing, produce this as a DBG_INSTR_REF,
    // to be later patched up by finalizeDebugInstrRefs.
    SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
        /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
        /* isKill */ false, /* isDead */ false,
        /* isUndef */ false, /* isEarlyClobber */ false,
        /* SubReg */ 0, /* isDebug */ true)});
    SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
    auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, MOs,
            Var, NewExpr);
    return true;
  }
  return false;
}

bool FastISel::lowerDbgDeclare(const Value *Address, DIExpression *Expr,
                               DILocalVariable *Var, const DebugLoc &DL) {
  if (!Address || isa<UndefValue>(Address)) {
    LLVM_DEBUG(dbgs() << "Dropping debug info (bad/undef address)\n");
    return false;
  }

  std::optional<MachineOperand> Op;
  if (Register Reg = lookUpRegForValue(Address))
    Op = MachineOperand::CreateReg(Reg, false);

  // If we have a VLA that has a "use" in a metadata node that's then used
  // here but it has no other uses, then we have a problem. E.g.,
  //
  //   int foo (const int *x) {
  //     char a[*x];
  //     return 0;
  //   }
  //
  // If we assign 'a' a vreg and fast isel later on has to use the selection
  // DAG isel, it will want to copy the value to the vreg. However, there are
  // no uses, which goes counter to what selection DAG isel expects.
  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
      (!isa<AllocaInst>(Address) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
    Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                   false);

  if (Op) {
    assert(Var->isValidLocationForIntrinsic(DL) &&
           "Expected inlined-at fields to agree");
    if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
      // If using instruction referencing, produce this as a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
      // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
      SmallVector<uint64_t, 3> Ops(
          {dwarf::DW_OP_LLVM_arg, 0, dwarf::DW_OP_deref});
      auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, *Op,
              Var, NewExpr);
      return true;
    }

    // A dbg.declare describes the address of a source variable, so lower it
    // into an indirect DBG_VALUE.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op, Var,
            Expr);
    return true;
  }

  // We can't yet handle anything else here because it would require
  // generating code, thus altering codegen because of debug info.
  LLVM_DEBUG(
      dbgs() << "Dropping debug info (no materialized reg for address)\n");
  return false;
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
  case Intrinsic::assume:
  // Neither does the llvm.experimental.noalias.scope.decl intrinsic.
  case Intrinsic::experimental_noalias_scope_decl:
    return true;
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check: {
    Register ResultReg = getRegForValue(ConstantInt::getTrue(II->getType()));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }

  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect:
  case Intrinsic::expect_with_probability: {
    Register ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::fake_use:
    // At -O0, we don't need fake use, so just ignore it.
    return true;
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  Register InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // If the bitcast doesn't change the type, just use the operand value.
  if (SrcVT == DstVT) {
    updateValueMap(I, Op0);
    return true;
  }

  // Otherwise, select a BITCAST opcode.
  Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

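// Lower a freeze instruction as a plain register copy: at -O0, whatever
// value the operand register happens to hold is an acceptable choice for
// the frozen result.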
bool FastISel::selectFreeze(const User *I) {
  Register Reg = getRegForValue(I->getOperand(0));
  if (!Reg)
    // Unhandled operand.
    return false;

  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
    // Unhandled type, bail out.
    return false;

  MVT Ty = ETy.getSimpleVT();
  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
  Register ResultReg = createResultReg(TyRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
          TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
{
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

bool FastISel::selectInstruction(const Instruction *I) {
  // Flush the local value map before starting each instruction.
  // This improves locality and debugging, and can reduce spills.
  // Reuse of values across IR instructions is relatively uncommon.
  flushLocalValueMap();

  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (I->isTerminator()) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (auto *Call = dyn_cast<CallBase>(I))
    for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
      if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  MIMD = MIMetadata(*I);

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      MIMD = {};
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    MIMD = {};
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  MIMD = {};
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (I->isTerminator()) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  const BasicBlock *BB = FuncInfo.MBB->getBasicBlock();
  bool BlockHasMultipleInstrs = &BB->front() != &BB->back();
  if (BlockHasMultipleInstrs && FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information if this is the only non-debug
    // instruction in the block then emit it, otherwise we have the
    // unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
  // happen in degenerate IR and MachineIR forbids to have a block twice in the
  // successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, MIMD.getDL());
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I, const Value *In) {
  Register OpReg = getRegForValue(In);
  if (!OpReg)
    return false;

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
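  // For f64, for example, the immediate built below is 1 << 63, i.e.
  // 0x8000000000000000, which flips only the IEEE sign bit.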
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg);
  if (!IntReg)
    return false;

  Register IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  Register ResultReg;
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

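  // For example, extracting member 1 of an aggregate {i32, i64} skips the
  // register(s) holding member 0, which the loop below counts.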
  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg = ResultReg.id() +
                TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}
1734
1735bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1736 switch (Opcode) {
1737 case Instruction::Add:
1738 return selectBinaryOp(I, ISD::ADD);
1739 case Instruction::FAdd:
1740 return selectBinaryOp(I, ISD::FADD);
1741 case Instruction::Sub:
1742 return selectBinaryOp(I, ISD::SUB);
1743 case Instruction::FSub:
1744 return selectBinaryOp(I, ISD::FSUB);
1745 case Instruction::Mul:
1746 return selectBinaryOp(I, ISD::MUL);
1747 case Instruction::FMul:
1748 return selectBinaryOp(I, ISD::FMUL);
1749 case Instruction::SDiv:
1750 return selectBinaryOp(I, ISD::SDIV);
1751 case Instruction::UDiv:
1752 return selectBinaryOp(I, ISD::UDIV);
1753 case Instruction::FDiv:
1754 return selectBinaryOp(I, ISD::FDIV);
1755 case Instruction::SRem:
1756 return selectBinaryOp(I, ISD::SREM);
1757 case Instruction::URem:
1758 return selectBinaryOp(I, ISD::UREM);
1759 case Instruction::FRem:
1760 return selectBinaryOp(I, ISD::FREM);
1761 case Instruction::Shl:
1762 return selectBinaryOp(I, ISD::SHL);
1763 case Instruction::LShr:
1764 return selectBinaryOp(I, ISD::SRL);
1765 case Instruction::AShr:
1766 return selectBinaryOp(I, ISD::SRA);
1767 case Instruction::And:
1768 return selectBinaryOp(I, ISD::AND);
1769 case Instruction::Or:
1770 return selectBinaryOp(I, ISD::OR);
1771 case Instruction::Xor:
1772 return selectBinaryOp(I, ISD::XOR);
1773
1774 case Instruction::FNeg:
1775 return selectFNeg(I, I->getOperand(0));
1776
1777 case Instruction::GetElementPtr:
1778 return selectGetElementPtr(I);
1779
1780 case Instruction::Br: {
1781 const BranchInst *BI = cast<BranchInst>(I);
1782
1783 if (BI->isUnconditional()) {
1784 const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1785 MachineBasicBlock *MSucc = FuncInfo.getMBB(LLVMSucc);
1786 fastEmitBranch(MSucc, BI->getDebugLoc());
1787 return true;
1788 }
1789
1790 // Conditional branches are not handled yet.
1791 // Halt "fast" selection and bail.
1792 return false;
1793 }
1794
1795 case Instruction::Unreachable: {
1796 auto UI = cast<UnreachableInst>(I);
1797 if (!UI->shouldLowerToTrap(TM.Options.TrapUnreachable,
1798 TM.Options.NoTrapAfterNoreturn))
1799 return true;
1800
1801 return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1802 }
1803
1804 case Instruction::Alloca:
1805 // FunctionLowering has the static-sized case covered.
1806 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1807 return true;
1808
1809 // Dynamic-sized alloca is not handled yet.
1810 return false;
1811
1812 case Instruction::Call:
1813 // On AIX, normal call lowering uses the DAG-ISEL path currently so that the
1814 // callee of the direct function call instruction will be mapped to the
1815 // symbol for the function's entry point, which is distinct from the
1816 // function descriptor symbol. The latter is the symbol whose XCOFF symbol
1817 // name is the C-linkage name of the source-level function.
1818 // But fast-isel still has the ability to select intrinsics.
1819 if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
1820 return false;
1821 return selectCall(I);
1822
1823 case Instruction::BitCast:
1824 return selectBitCast(I);
1825
1826 case Instruction::FPToSI:
1827 return selectCast(I, ISD::FP_TO_SINT);
1828 case Instruction::ZExt:
1829 return selectCast(I, ISD::ZERO_EXTEND);
1830 case Instruction::SExt:
1831 return selectCast(I, ISD::SIGN_EXTEND);
1832 case Instruction::Trunc:
1833 return selectCast(I, ISD::TRUNCATE);
1834 case Instruction::SIToFP:
1835 return selectCast(I, ISD::SINT_TO_FP);
1836
1837 case Instruction::IntToPtr: // Deliberate fall-through.
1838 case Instruction::PtrToInt:
1839 case Instruction::PtrToAddr: {
1840 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1841 EVT DstVT = TLI.getValueType(DL, I->getType());
1842 if (DstVT.bitsGT(SrcVT))
1843 return selectCast(I, ISD::ZERO_EXTEND);
1844 if (DstVT.bitsLT(SrcVT))
1845 return selectCast(I, ISD::TRUNCATE);
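// For example, assuming 32-bit pointers, ptrtoint to i64 takes the
// ZERO_EXTEND path above, ptrtoint to i16 takes the TRUNCATE path, and
// ptrtoint to i32 is a no-op that just reuses the operand's register below.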
1846 Register Reg = getRegForValue(I->getOperand(0));
1847 if (!Reg)
1848 return false;
1849 updateValueMap(I, Reg);
1850 return true;
1851 }
1852
1853 case Instruction::ExtractValue:
1854 return selectExtractValue(I);
1855
1856 case Instruction::Freeze:
1857 return selectFreeze(I);
1858
1859 case Instruction::PHI:
1860 llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1861
1862 default:
1863 // Unhandled instruction. Halt "fast" selection and bail.
1864 return false;
1865 }
1866}
1867
1868 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1869 const TargetLibraryInfo *LibInfo,
1870 bool SkipTargetIndependentISel)
1871 : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1872 MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1873 TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1874 TII(*MF->getSubtarget().getInstrInfo()),
1875 TLI(*MF->getSubtarget().getTargetLowering()),
1876 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1877 SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1878
1879FastISel::~FastISel() = default;
1880
1881bool FastISel::fastLowerArguments() { return false; }
1882
1883bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1884
1885 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1886 return false;
1887}
1888
1889 Register FastISel::fastEmit_(MVT, MVT, unsigned) { return Register(); }
1890
1891 Register FastISel::fastEmit_r(MVT, MVT, unsigned, Register /*Op0*/) {
1892 return Register();
1893}
1894
1895 Register FastISel::fastEmit_rr(MVT, MVT, unsigned, Register /*Op0*/,
1896 Register /*Op1*/) {
1897 return Register();
1898}
1899
1900 Register FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1901 return Register();
1902}
1903
1904 Register FastISel::fastEmit_f(MVT, MVT, unsigned,
1905 const ConstantFP * /*FPImm*/) {
1906 return Register();
1907}
1908
1909 Register FastISel::fastEmit_ri(MVT, MVT, unsigned, Register /*Op0*/,
1910 uint64_t /*Imm*/) {
1911 return Register();
1912}
1913
1914 /// This method is a wrapper around fastEmit_ri. It first tries to emit an
1915 /// instruction with an immediate operand using fastEmit_ri.
1916 /// If that fails, it materializes the immediate into a register and tries
1917 /// fastEmit_rr instead.
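/// For example, fastEmit_ri_(MVT::i32, ISD::MUL, X, 8, MVT::i32) is first
/// strength-reduced below to an ISD::SHL of X by 3 before either hook runs.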
1918 Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, Register Op0,
1919 uint64_t Imm, MVT ImmType) {
1920 // If this is a multiply by a power of two, emit this as a shift left.
1921 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1922 Opcode = ISD::SHL;
1923 Imm = Log2_64(Imm);
1924 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1925 // udiv x, 8 -> srl x, 3
1926 Opcode = ISD::SRL;
1927 Imm = Log2_64(Imm);
1928 }
1929
1930 // Horrible hack (to be removed), check to make sure shift amounts are
1931 // in-range.
1932 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1933 Imm >= VT.getSizeInBits())
1934 return Register();
1935
1936 // First check if immediate type is legal. If not, we can't use the ri form.
1937 Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
1938 if (ResultReg)
1939 return ResultReg;
1940 Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1941 if (!MaterialReg) {
1942 // This is a bit ugly/slow, but failing here means falling out of
1943 // fast-isel, which would be very slow.
1944 IntegerType *ITy =
1945 IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1946 MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1947 if (!MaterialReg)
1948 return Register();
1949 }
1950 return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
1951}
1952
1953 Register FastISel::createResultReg(const TargetRegisterClass *RC) {
1954 return MRI.createVirtualRegister(RC);
1955}
1956
1957 Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
1958 unsigned OpNum) {
1959 if (Op.isVirtual()) {
1960 const TargetRegisterClass *RegClass = TII.getRegClass(II, OpNum);
1961 if (!MRI.constrainRegClass(Op, RegClass)) {
1962 // If it's not legal to COPY between the register classes, something
1963 // has gone very wrong before we got here.
1964 Register NewOp = createResultReg(RegClass);
1965 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1966 TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1967 return NewOp;
1968 }
1969 }
1970 return Op;
1971}
1972
1973Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1974 const TargetRegisterClass *RC) {
1975 Register ResultReg = createResultReg(RC);
1976 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1977
1978 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
1979 return ResultReg;
1980}
1981
1982Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1983 const TargetRegisterClass *RC, Register Op0) {
1984 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1985
1986 Register ResultReg = createResultReg(RC);
1987 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1988
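// If the instruction has an explicit definition, emit directly into
// ResultReg; otherwise the result is produced in the instruction's first
// implicit-def physical register, so emit a COPY from it into ResultReg.
// The same pattern repeats in the other fastEmitInst_* variants below.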
1989 if (II.getNumDefs() >= 1)
1990 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
1991 .addReg(Op0);
1992 else {
1993 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1994 .addReg(Op0);
1995 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
1996 ResultReg)
1997 .addReg(II.implicit_defs()[0]);
1998 }
1999
2000 return ResultReg;
2001}
2002
2003Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2004 const TargetRegisterClass *RC, Register Op0,
2005 Register Op1) {
2006 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2007
2008 Register ResultReg = createResultReg(RC);
2009 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2010 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2011
2012 if (II.getNumDefs() >= 1)
2013 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2014 .addReg(Op0)
2015 .addReg(Op1);
2016 else {
2017 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2018 .addReg(Op0)
2019 .addReg(Op1);
2020 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2021 ResultReg)
2022 .addReg(II.implicit_defs()[0]);
2023 }
2024 return ResultReg;
2025}
2026
2027Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
2028 const TargetRegisterClass *RC, Register Op0,
2029 Register Op1, Register Op2) {
2030 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2031
2032 Register ResultReg = createResultReg(RC);
2033 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2034 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2035 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
2036
2037 if (II.getNumDefs() >= 1)
2038 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2039 .addReg(Op0)
2040 .addReg(Op1)
2041 .addReg(Op2);
2042 else {
2043 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2044 .addReg(Op0)
2045 .addReg(Op1)
2046 .addReg(Op2);
2047 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2048 ResultReg)
2049 .addReg(II.implicit_defs()[0]);
2050 }
2051 return ResultReg;
2052}
2053
2054Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
2055 const TargetRegisterClass *RC, Register Op0,
2056 uint64_t Imm) {
2057 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2058
2059 Register ResultReg = createResultReg(RC);
2060 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2061
2062 if (II.getNumDefs() >= 1)
2063 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2064 .addReg(Op0)
2065 .addImm(Imm);
2066 else {
2067 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2068 .addReg(Op0)
2069 .addImm(Imm);
2070 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2071 ResultReg)
2072 .addReg(II.implicit_defs()[0]);
2073 }
2074 return ResultReg;
2075}
2076
2077Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2078 const TargetRegisterClass *RC, Register Op0,
2079 uint64_t Imm1, uint64_t Imm2) {
2080 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2081
2082 Register ResultReg = createResultReg(RC);
2083 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2084
2085 if (II.getNumDefs() >= 1)
2086 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2087 .addReg(Op0)
2088 .addImm(Imm1)
2089 .addImm(Imm2);
2090 else {
2091 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2092 .addReg(Op0)
2093 .addImm(Imm1)
2094 .addImm(Imm2);
2095 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2096 ResultReg)
2097 .addReg(II.implicit_defs()[0]);
2098 }
2099 return ResultReg;
2100}
2101
2102Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2103 const TargetRegisterClass *RC,
2104 const ConstantFP *FPImm) {
2105 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2106
2107 Register ResultReg = createResultReg(RC);
2108
2109 if (II.getNumDefs() >= 1)
2110 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2111 .addFPImm(FPImm);
2112 else {
2113 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2114 .addFPImm(FPImm);
2115 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2116 ResultReg)
2117 .addReg(II.implicit_defs()[0]);
2118 }
2119 return ResultReg;
2120}
2121
2122Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2123 const TargetRegisterClass *RC, Register Op0,
2124 Register Op1, uint64_t Imm) {
2125 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2126
2127 Register ResultReg = createResultReg(RC);
2128 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2129 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2130
2131 if (II.getNumDefs() >= 1)
2132 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2133 .addReg(Op0)
2134 .addReg(Op1)
2135 .addImm(Imm);
2136 else {
2137 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2138 .addReg(Op0)
2139 .addReg(Op1)
2140 .addImm(Imm);
2141 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2142 ResultReg)
2143 .addReg(II.implicit_defs()[0]);
2144 }
2145 return ResultReg;
2146}
2147
2148Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2149 const TargetRegisterClass *RC, uint64_t Imm) {
2150 Register ResultReg = createResultReg(RC);
2151 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2152
2153 if (II.getNumDefs() >= 1)
2154 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2155 .addImm(Imm);
2156 else {
2157 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addImm(Imm);
2158 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2159 ResultReg)
2160 .addReg(II.implicit_defs()[0]);
2161 }
2162 return ResultReg;
2163}
2164
2165 Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, Register Op0,
2166 uint32_t Idx) {
2167 Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2168 assert(Op0.isVirtual() && "Cannot yet extract from physregs");
2169 const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2170 MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2171 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2172 ResultReg).addReg(Op0, 0, Idx);
2173 return ResultReg;
2174}
2175
2176/// Emit MachineInstrs to compute the value of Op with all but the least
2177/// significant bit set to zero.
2178 Register FastISel::fastEmitZExtFromI1(MVT VT, Register Op0) {
2179 return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
2180}
2181
2182/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2183/// Emit code to ensure constants are copied into registers when needed.
2184/// Remember the virtual registers that need to be added to the Machine PHI
2185/// nodes as input. We cannot just directly add them, because expansion
2186 /// might result in multiple MBBs for one BB. As such, the start of the
2187/// BB might correspond to a different MBB than the end.
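/// For example, if this block's terminator is later lowered to a chain of
/// several MBBs, the PHI's true machine predecessor is the last of them, so
/// the (PHI, virtual register) pairs recorded here are only completed with
/// the correct incoming MBB once the whole block has been selected.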
2188bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2189 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2190 FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2191
2192 // Check successor nodes' PHI nodes that expect a constant to be available
2193 // from this block.
2194 for (const BasicBlock *SuccBB : successors(LLVMBB)) {
2195 if (!isa<PHINode>(SuccBB->begin()))
2196 continue;
2197 MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
2198
2199 // If this terminator has multiple identical successors (common for
2200 // switches), only handle each succ once.
2201 if (!SuccsHandled.insert(SuccMBB).second)
2202 continue;
2203
2204 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2205
2206 // At this point we know that there is a 1-1 correspondence between LLVM PHI
2207 // nodes and Machine PHI nodes, but the incoming operands have not been
2208 // emitted yet.
2209 for (const PHINode &PN : SuccBB->phis()) {
2210 // Ignore dead PHIs.
2211 if (PN.use_empty())
2212 continue;
2213
2214 // Only handle legal types. Two interesting things to note here. First,
2215 // by bailing out early, we may leave behind some dead instructions,
2216 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2217 // own moves. Second, this check is necessary because FastISel doesn't
2218 // use CreateRegs to create registers, so it always creates
2219 // exactly one register for each non-void instruction.
2220 EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2221 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2222 // Handle integer promotions, though, because they're common and easy.
2223 if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2224 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2225 return false;
2226 }
2227 }
2228
2229 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2230
2231 // Set the DebugLoc for the copy. Use the location of the operand if
2232 // there is one; otherwise leave it empty and flushLocalValueMap will fix it.
2233 MIMD = {};
2234 if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2235 MIMD = MIMetadata(*Inst);
2236
2237 Register Reg = getRegForValue(PHIOp);
2238 if (!Reg) {
2239 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2240 return false;
2241 }
2242 FuncInfo.PHINodesToUpdate.emplace_back(&*MBBI++, Reg);
2243 MIMD = {};
2244 }
2245 }
2246
2247 return true;
2248}
2249
2250bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2251 assert(LI->hasOneUse() &&
2252 "tryToFoldLoad expected a LoadInst with a single use");
2253 // We know that the load has a single use, but don't know what it is. If it
2254 // isn't one of the folded instructions, then we can't succeed here. Handle
2255 // this by scanning the single-use users of the load until we get to FoldInst.
2256 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
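// For example, given "t1 = load; t2 = sext t1; FoldInst = add t2, x", the
// walk below steps load -> sext -> add and succeeds on reaching FoldInst.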
2257
2258 const Instruction *TheUser = LI->user_back();
2259 while (TheUser != FoldInst && // Scan up until we find FoldInst.
2260 // Stay in the right block.
2261 TheUser->getParent() == FoldInst->getParent() &&
2262 --MaxUsers) { // Don't scan too far.
2263 // If there are multiple or no uses of this instruction, then bail out.
2264 if (!TheUser->hasOneUse())
2265 return false;
2266
2267 TheUser = TheUser->user_back();
2268 }
2269
2270 // If we didn't find the fold instruction, then we failed to collapse the
2271 // sequence.
2272 if (TheUser != FoldInst)
2273 return false;
2274
2275 // Don't try to fold volatile loads. Target has to deal with alignment
2276 // constraints.
2277 if (LI->isVolatile())
2278 return false;
2279
2280 // Figure out which vreg this is going into. If there is no assigned vreg yet
2281 // then there actually was no reference to it. Perhaps the load is referenced
2282 // by a dead instruction.
2283 Register LoadReg = getRegForValue(LI);
2284 if (!LoadReg)
2285 return false;
2286
2287 // We can't fold if this vreg has no uses or more than one use. Multiple uses
2288 // may mean that the instruction got lowered to multiple MIs, or the use of
2289 // the loaded value ended up being multiple operands of the result.
2290 if (!MRI.hasOneUse(LoadReg))
2291 return false;
2292
2293 // If the register has fixups, there may be additional uses through a
2294 // different alias of the register.
2295 if (FuncInfo.RegsWithFixups.contains(LoadReg))
2296 return false;
2297
2298 MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2299 MachineInstr *User = RI->getParent();
2300
2301 // Set the insertion point properly. Folding the load can cause generation of
2302 // other random instructions (like sign extends) for addressing modes; make
2303 // sure they get inserted in a logical place before the new instruction.
2304 FuncInfo.InsertPt = User;
2305 FuncInfo.MBB = User->getParent();
2306
2307 // Ask the target to try folding the load.
2308 return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2309}
2310
2311 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2312 // Must be an add.
2313 if (!isa<AddOperator>(Add))
2314 return false;
2315 // Type size needs to match.
2316 if (DL.getTypeSizeInBits(GEP->getType()) !=
2317 DL.getTypeSizeInBits(Add->getType()))
2318 return false;
2319 // Must be in the same basic block.
2320 if (isa<Instruction>(Add) &&
2321 FuncInfo.getMBB(cast<Instruction>(Add)->getParent()) != FuncInfo.MBB)
2322 return false;
2323 // Must have a constant operand.
2324 return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2325}
2326
2327 MachineMemOperand *
2328 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2329 const Value *Ptr;
2330 Type *ValTy;
2331 MaybeAlign Alignment;
2332 MachineMemOperand::Flags Flags;
2333 bool IsVolatile;
2334
2335 if (const auto *LI = dyn_cast<LoadInst>(I)) {
2336 Alignment = LI->getAlign();
2337 IsVolatile = LI->isVolatile();
2338 Flags = MachineMemOperand::MOLoad;
2339 Ptr = LI->getPointerOperand();
2340 ValTy = LI->getType();
2341 } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2342 Alignment = SI->getAlign();
2343 IsVolatile = SI->isVolatile();
2344 Flags = MachineMemOperand::MOStore;
2345 Ptr = SI->getPointerOperand();
2346 ValTy = SI->getValueOperand()->getType();
2347 } else
2348 return nullptr;
2349
2350 bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
2351 bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
2352 bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
2353 const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2354
2355 AAMDNodes AAInfo = I->getAAMetadata();
2356
2357 if (!Alignment) // Ensure that codegen never sees alignment 0.
2358 Alignment = DL.getABITypeAlign(ValTy);
2359
2360 unsigned Size = DL.getTypeStoreSize(ValTy);
2361
2362 if (IsVolatile)
2363 Flags |= MachineMemOperand::MOVolatile;
2364 if (IsNonTemporal)
2365 Flags |= MachineMemOperand::MONonTemporal;
2366 if (IsDereferenceable)
2367 Flags |= MachineMemOperand::MODereferenceable;
2368 if (IsInvariant)
2369 Flags |= MachineMemOperand::MOInvariant;
2370
2371 return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2372 *Alignment, AAInfo, Ranges);
2373}
2374
2375 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2376 // If both operands are the same, then try to optimize or fold the cmp.
2377 CmpInst::Predicate Predicate = CI->getPredicate();
2378 if (CI->getOperand(0) != CI->getOperand(1))
2379 return Predicate;
2380
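// With identical operands only NaN behavior matters, so every predicate
// folds either to an unconditional FCMP_TRUE/FCMP_FALSE or to an
// FCMP_ORD/FCMP_UNO "is (not) NaN" test, as enumerated below.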
2381 switch (Predicate) {
2382 default: llvm_unreachable("Invalid predicate!");
2383 case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2384 case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2385 case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2386 case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2387 case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2388 case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2389 case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2390 case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2391 case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2392 case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2393 case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2394 case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2395 case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2396 case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2397 case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2398 case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2399
2400 case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2401 case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2402 case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2403 case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2404 case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2405 case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2406 case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2407 case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2408 case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2409 case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2410 }
2411
2412 return Predicate;
2413}