//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
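//
// As a quick usage sketch (the flag spellings below are assumptions; consult
// the llc version in use), the fast path can be exercised directly with:
//
//   llc -O0 -fast-isel -fast-isel-abort=1 input.ll
//
// where -fast-isel-abort=1 turns the graceful fallback to SelectionDAG into
// a hard error, which is handy for finding operations this selector does not
// support.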
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

void FastISel::finishBasicBlock() { flushLocalValueMap(); }

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return
/// Register().
static Register findLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return Register();
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't delete it.
      return Register();
    }
  }
  return RegDef;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

void FastISel::flushLocalValueMap() {
  // If FastISel bails out, it could leave local value instructions behind
  // that aren't used for anything. Detect and erase those.
  if (LastLocalValue != EmitStartPt) {
    // Save the first instruction after local values, for later.
    MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
    ++FirstNonValue;

    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);
    for (MachineInstr &LocalMI :
         llvm::make_early_inc_range(llvm::make_range(RI, RE))) {
      Register DefReg = findLocalRegDef(LocalMI);
      if (!DefReg)
        continue;
      if (FuncInfo.RegsWithFixups.count(DefReg))
        continue;
      bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
      if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
        if (EmitStartPt == &LocalMI)
          EmitStartPt = EmitStartPt->getPrevNode();
        LLVM_DEBUG(dbgs() << "removing dead local value materialization"
                          << LocalMI);
        LocalMI.eraseFromParent();
      }
    }

    if (FirstNonValue != FuncInfo.MBB->end()) {
      // See if there are any local value instructions left. If so, we want to
      // make sure the first one has a debug location; if it doesn't, use the
      // first non-value instruction's debug location.

      // If EmitStartPt is non-null, this block had copies at the top before
      // FastISel started doing anything; it points to the last one, so the
      // first local value instruction is the one after EmitStartPt.
      // If EmitStartPt is null, the first local value instruction is at the
      // top of the block.
      MachineBasicBlock::iterator FirstLocalValue =
          EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
                      : FuncInfo.MBB->begin();
      if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
        FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}
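
// An illustrative sketch (hypothetical target code, not part of this file)
// of how a backend's fastSelectInstruction typically leans on
// getRegForValue(), bailing out to SelectionDAG when no register is
// available:
//
//   bool MyTargetFastISel::fastSelectInstruction(const Instruction *I) {
//     if (I->getOpcode() != Instruction::Add)
//       return false; // Not handled here; let SelectionDAG try.
//     Register Op0 = getRegForValue(I->getOperand(0));
//     Register Op1 = getRegForValue(I->getOperand(1));
//     if (!Op0 || !Op1)
//       return false; // Unhandled operand; fall back to SelectionDAG.
//     Register Res = fastEmit_rr(MVT::i32, MVT::i32, ISD::ADD, Op0, Op1);
//     if (!Res)
//       return false;
//     updateValueMap(I, Res); // Publish the result for later uses.
//     return true;
//   }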

Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return Register();
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

Register FastISel::getRegForGEPIndex(MVT PtrVT, const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return Register();

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  }
  return IdxN;
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  SavePoint OldInsertPt = FuncInfo.InsertPt;
  recomputeInsertPt();
  return OldInsertPt;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && ISD::isBitwiseLogicOp(ISDOpcode))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
                       VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
                                      VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
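  // As a hypothetical illustration, on a 64-bit target
  //
  //   %p = getelementptr {i32, i32}, ptr %base, i64 1, i32 1
  //
  // folds the array offset (8 bytes) and the field offset (4 bytes) into a
  // single N = N + 12 instead of two separate additions.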
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getValueType(DL, I->getType()).getSimpleVT();

  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
      }
    } else {
      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += GTI.getSequentialElementStride(DL) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = GTI.getSequentialElementStride(DL);
      Register IdxN = getRegForGEPIndex(VT, Idx);
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      Register Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
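  //
  // For example (an illustrative call with made-up operand values):
  //
  //   call void @llvm.experimental.stackmap(i64 42, i32 8, ptr %obj)
  //
  // records where %obj lives at this point under ID 42 and reserves 8 bytes
  // of nop shadow.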
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry(V);
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
  //                                         i32 <numBytes>,
  //                                         i8* <target>,
  //                                         i32 <numArgs>,
  //                                         [Args...],
  //                                         [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Check if we can lower the return type when using anyregcc.
  MVT ValueType;
  if (IsAnyRegCC && HasDef) {
    ValueType = TLI.getSimpleValueType(DL, I->getType(), /*AllowUnknown=*/true);
    if (ValueType == MVT::Other)
      return false;
  }

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    assert(ValueType.isValid());
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(ValueType));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, MIMD,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, that gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, that gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry(V);
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext(),
      CLI.RetTy);

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (EVT VT : RetTys) {
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::ArgFlagsTy Flags;
      if (CLI.RetSExt)
        Flags.setSExt();
      if (CLI.RetZExt)
        Flags.setZExt();
      if (CLI.IsInReg)
        Flags.setInReg();
      ISD::InputArg Ret(Flags, RegisterVT, VT, CLI.RetTy, CLI.IsReturnValueUsed,
                        ISD::InputArg::NoArgIndex, 0);
      CLI.Ins.push_back(Ret);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = Arg.IndirectType;
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftAsync)
      Flags.setSwiftAsync();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop. If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    MaybeAlign MemAlign = Arg.Alignment;
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);

      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      if (!MemAlign)
        MemAlign = TLI.getByValTypeAlignment(Arg.IndirectType, DL);
      Flags.setByValSize(FrameSize);
    } else if (!MemAlign) {
      MemAlign = DL.getABITypeAlign(Arg.Ty);
    }
    Flags.setMemAlign(*MemAlign);
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    ArgListEntry Entry(V);
    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && !CI->isMustTailCall() &&
      MF->getFunction().getFnAttribute("disable-tail-calls").getValueAsBool())
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  if (lowerCallTo(CLI)) {
    diagnoseDontCall(*CI);
    return true;
  }

  return false;
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (IA->canThrow())
      ExtraInfo |= InlineAsm::Extra_MayUnwind;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().data());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  return lowerCall(Call);
}

void FastISel::handleDbgInfo(const Instruction *II) {
  if (!II->hasDbgRecords())
    return;

  // Clear any metadata.
  MIMD = MIMetadata();

  // Reverse order of debug records, because fast-isel walks through backwards.
  for (DbgRecord &DR : llvm::reverse(II->getDbgRecordRange())) {
    flushLocalValueMap();
    recomputeInsertPt();

    if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      assert(DLR->getLabel() && "Missing label");
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DLR->getDebugLoc(),
              TII.get(TargetOpcode::DBG_LABEL))
          .addMetadata(DLR->getLabel());
      continue;
    }

    DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);

    Value *V = nullptr;
    if (!DVR.hasArgList())
      V = DVR.getVariableLocationOp(0);

    bool Res = false;
    if (DVR.getType() == DbgVariableRecord::LocationType::Value ||
        DVR.getType() == DbgVariableRecord::LocationType::Assign) {
      Res = lowerDbgValue(V, DVR.getExpression(), DVR.getVariable(),
                          DVR.getDebugLoc());
    } else {
      assert(DVR.getType() == DbgVariableRecord::LocationType::Declare);
      if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
        continue;
      Res = lowerDbgDeclare(V, DVR.getExpression(), DVR.getVariable(),
                            DVR.getDebugLoc());
    }

    if (!Res)
      LLVM_DEBUG(dbgs() << "Dropping debug-info for " << DVR << "\n");
  }
}

bool FastISel::lowerDbgValue(const Value *V, DIExpression *Expr,
                             DILocalVariable *Var, const DebugLoc &DL) {
  // This form of DBG_VALUE is target-independent.
  const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
  if (!V || isa<UndefValue>(V)) {
    // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
    // undef DBG_VALUE to terminate any prior location.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false, 0U, Var, Expr);
    return true;
  }
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    // See if there's an expression to constant-fold.
    if (Expr)
      std::tie(Expr, CI) = Expr->constantFold(CI);
    if (CI->getBitWidth() > 64)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI)
          .addImm(0U)
          .addMetadata(Var)
          .addMetadata(Expr);
    else
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue())
          .addImm(0U)
          .addMetadata(Var)
          .addMetadata(Expr);
    return true;
  }
  if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF)
        .addImm(0U)
        .addMetadata(Var)
        .addMetadata(Expr);
    return true;
  }
  if (const auto *Arg = dyn_cast<Argument>(V);
      Arg && Expr && Expr->isEntryValue()) {
    // As per the Verifier, this case is only valid for swift async Args.
    assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));

    Register Reg = getRegForValue(Arg);
    for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
      if (Reg == VirtReg || Reg == PhysReg) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false /*IsIndirect*/,
                PhysReg, Var, Expr);
        return true;
      }

    LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
                         "couldn't find a physical register\n");
    return false;
  }
  if (auto SI = FuncInfo.StaticAllocaMap.find(dyn_cast<AllocaInst>(V));
      SI != FuncInfo.StaticAllocaMap.end()) {
    MachineOperand FrameIndexOp = MachineOperand::CreateFI(SI->second);
    bool IsIndirect = false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, FrameIndexOp,
            Var, Expr);
    return true;
  }
  if (Register Reg = lookUpRegForValue(V)) {
    // FIXME: This does not handle register-indirect values at offset 0.
    if (!FuncInfo.MF->useDebugInstrRef()) {
      bool IsIndirect = false;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, Reg, Var,
              Expr);
      return true;
    }
    // If using instruction referencing, produce this as a DBG_INSTR_REF,
    // to be later patched up by finalizeDebugInstrRefs.
    SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
        /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
        /* isKill */ false, /* isDead */ false,
        /* isUndef */ false, /* isEarlyClobber */ false,
        /* SubReg */ 0, /* isDebug */ true)});
    SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
    auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, MOs,
            Var, NewExpr);
    return true;
  }
  return false;
}

bool FastISel::lowerDbgDeclare(const Value *Address, DIExpression *Expr,
                               DILocalVariable *Var, const DebugLoc &DL) {
  if (!Address || isa<UndefValue>(Address)) {
    LLVM_DEBUG(dbgs() << "Dropping debug info (bad/undef address)\n");
    return false;
  }

  std::optional<MachineOperand> Op;
  if (Register Reg = lookUpRegForValue(Address))
    Op = MachineOperand::CreateReg(Reg, false);

  // If we have a VLA that has a "use" in a metadata node that's then used
  // here but it has no other uses, then we have a problem. E.g.,
  //
  //   int foo (const int *x) {
  //     char a[*x];
  //     return 0;
  //   }
  //
  // If we assign 'a' a vreg and fast isel later on has to use the selection
  // DAG isel, it will want to copy the value to the vreg. However, there are
  // no uses, which goes counter to what selection DAG isel expects.
  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
      (!isa<AllocaInst>(Address) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
    Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                   false);

  if (Op) {
    assert(Var->isValidLocationForIntrinsic(DL) &&
           "Expected inlined-at fields to agree");
    if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
      // If using instruction referencing, produce this as a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
      // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
      SmallVector<uint64_t, 3> Ops(
          {dwarf::DW_OP_LLVM_arg, 0, dwarf::DW_OP_deref});
      auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, *Op,
              Var, NewExpr);
      return true;
    }

    // A dbg.declare describes the address of a source variable, so lower it
    // into an indirect DBG_VALUE.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op, Var,
            Expr);
    return true;
  }

  // We can't yet handle anything else here because it would require
  // generating code, thus altering codegen because of debug info.
  LLVM_DEBUG(
      dbgs() << "Dropping debug info (no materialized reg for address)\n");
  return false;
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
  case Intrinsic::assume:
  // Neither does the llvm.experimental.noalias.scope.decl intrinsic.
  case Intrinsic::experimental_noalias_scope_decl:
    return true;
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check: {
    Register ResultReg = getRegForValue(ConstantInt::getTrue(II->getType()));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }

  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect:
  case Intrinsic::expect_with_probability: {
    Register ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::fake_use:
    // At -O0, we don't need fake use, so just ignore it.
    return true;
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  Register InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // If the bitcast doesn't change the type, just use the operand value.
  if (SrcVT == DstVT) {
    updateValueMap(I, Op0);
    return true;
  }

  // Otherwise, select a BITCAST opcode.
  Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectFreeze(const User *I) {
  Register Reg = getRegForValue(I->getOperand(0));
  if (!Reg)
    // Unhandled operand.
    return false;

  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
    // Unhandled type, bail out.
    return false;

  MVT Ty = ETy.getSimpleVT();
  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
  Register ResultReg = createResultReg(TyRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
          TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) {
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

bool FastISel::selectInstruction(const Instruction *I) {
  // Flush the local value map before starting each instruction.
  // This improves locality and debugging, and can reduce spills.
  // Reuse of values across IR instructions is relatively uncommon.
  flushLocalValueMap();

  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (I->isTerminator()) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (auto *Call = dyn_cast<CallBase>(I))
    for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
      if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  MIMD = MIMetadata(*I);

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      MIMD = {};
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    MIMD = {};
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  MIMD = {};
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (I->isTerminator()) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  const BasicBlock *BB = FuncInfo.MBB->getBasicBlock();
  bool BlockHasMultipleInstrs = &BB->front() != &BB->back();
  if (BlockHasMultipleInstrs && FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information, if this is the only non-debug
    // instruction in the block, then emit it; otherwise we have the
    // unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as a successor unless it is equal to the FalseMBB: this can
  // happen in degenerate IR, and MachineIR forbids having a block twice in the
  // successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, MIMD.getDL());
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I, const Value *In) {
  Register OpReg = getRegForValue(In);
  if (!OpReg)
    return false;

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
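  // For f64, for example, this XORs the integer image with
  // 0x8000000000000000 (UINT64_C(1) << 63), which flips only the IEEE-754
  // sign bit.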
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg);
  if (!IntReg)
    return false;

  Register IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const auto *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  Register ResultReg;
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg = ResultReg.id() +
                TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

1737bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1738 switch (Opcode) {
1739 case Instruction::Add:
1740 return selectBinaryOp(I, ISD::ADD);
1741 case Instruction::FAdd:
1742 return selectBinaryOp(I, ISD::FADD);
1743 case Instruction::Sub:
1744 return selectBinaryOp(I, ISD::SUB);
1745 case Instruction::FSub:
1746 return selectBinaryOp(I, ISD::FSUB);
1747 case Instruction::Mul:
1748 return selectBinaryOp(I, ISD::MUL);
1749 case Instruction::FMul:
1750 return selectBinaryOp(I, ISD::FMUL);
1751 case Instruction::SDiv:
1752 return selectBinaryOp(I, ISD::SDIV);
1753 case Instruction::UDiv:
1754 return selectBinaryOp(I, ISD::UDIV);
1755 case Instruction::FDiv:
1756 return selectBinaryOp(I, ISD::FDIV);
1757 case Instruction::SRem:
1758 return selectBinaryOp(I, ISD::SREM);
1759 case Instruction::URem:
1760 return selectBinaryOp(I, ISD::UREM);
1761 case Instruction::FRem:
1762 return selectBinaryOp(I, ISD::FREM);
1763 case Instruction::Shl:
1764 return selectBinaryOp(I, ISD::SHL);
1765 case Instruction::LShr:
1766 return selectBinaryOp(I, ISD::SRL);
1767 case Instruction::AShr:
1768 return selectBinaryOp(I, ISD::SRA);
1769 case Instruction::And:
1770 return selectBinaryOp(I, ISD::AND);
1771 case Instruction::Or:
1772 return selectBinaryOp(I, ISD::OR);
1773 case Instruction::Xor:
1774 return selectBinaryOp(I, ISD::XOR);
1775
1776 case Instruction::FNeg:
1777 return selectFNeg(I, I->getOperand(0));
1778
1779 case Instruction::GetElementPtr:
1780 return selectGetElementPtr(I);
1781
1782 case Instruction::Br: {
1783 const BranchInst *BI = cast<BranchInst>(I);
1784
1785 if (BI->isUnconditional()) {
1786 const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1787 MachineBasicBlock *MSucc = FuncInfo.getMBB(LLVMSucc);
1788 fastEmitBranch(MSucc, BI->getDebugLoc());
1789 return true;
1790 }
1791
1792 // Conditional branches are not handled yet.
1793 // Halt "fast" selection and bail.
1794 return false;
1795 }
1796
1797 case Instruction::Unreachable: {
1798 auto UI = cast<UnreachableInst>(I);
1799 if (!UI->shouldLowerToTrap(TM.Options.TrapUnreachable,
1800 TM.Options.NoTrapAfterNoreturn))
1801 return true;
1802
1803 return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1804 }
1805
1806 case Instruction::Alloca:
1807 // FunctionLowering has the static-sized case covered.
1808 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1809 return true;
1810
1811 // Dynamic-sized alloca is not handled yet.
1812 return false;
1813
1814 case Instruction::Call:
1815 // On AIX, normal call lowering uses the DAG-ISEL path currently so that the
1816 // callee of the direct function call instruction will be mapped to the
1817 // symbol for the function's entry point, which is distinct from the
1818 // function descriptor symbol. The latter is the symbol whose XCOFF symbol
1819 // name is the C-linkage name of the source level function.
1820 // But fast isel still has the ability to do selection for intrinsics.
1821 if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
1822 return false;
1823 return selectCall(I);
1824
1825 case Instruction::BitCast:
1826 return selectBitCast(I);
1827
1828 case Instruction::FPToSI:
1829 return selectCast(I, ISD::FP_TO_SINT);
1830 case Instruction::ZExt:
1831 return selectCast(I, ISD::ZERO_EXTEND);
1832 case Instruction::SExt:
1833 return selectCast(I, ISD::SIGN_EXTEND);
1834 case Instruction::Trunc:
1835 return selectCast(I, ISD::TRUNCATE);
1836 case Instruction::SIToFP:
1837 return selectCast(I, ISD::SINT_TO_FP);
1838
1839 case Instruction::IntToPtr: // Deliberate fall-through.
1840 case Instruction::PtrToInt:
1841 case Instruction::PtrToAddr: {
1842 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1843 EVT DstVT = TLI.getValueType(DL, I->getType());
1844 if (DstVT.bitsGT(SrcVT))
1845 return selectCast(I, ISD::ZERO_EXTEND);
1846 if (DstVT.bitsLT(SrcVT))
1847 return selectCast(I, ISD::TRUNCATE);
1848 Register Reg = getRegForValue(I->getOperand(0));
1849 if (!Reg)
1850 return false;
1851 updateValueMap(I, Reg);
1852 return true;
1853 }
1854
1855 case Instruction::ExtractValue:
1856 return selectExtractValue(I);
1857
1858 case Instruction::Freeze:
1859 return selectFreeze(I);
1860
1861 case Instruction::PHI:
1862 llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1863
1864 default:
1865 // Unhandled instruction. Halt "fast" selection and bail.
1866 return false;
1867 }
1868}
1869
1870 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1871 const TargetLibraryInfo *LibInfo,
1872 const LibcallLoweringInfo *LibcallLowering,
1873 bool SkipTargetIndependentISel)
1874 : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1875 MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1876 TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1877 TII(*MF->getSubtarget().getInstrInfo()),
1878 TLI(*MF->getSubtarget().getTargetLowering()),
1879 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1880 LibcallLowering(LibcallLowering),
1881 SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1882
1883FastISel::~FastISel() = default;
1884
1885bool FastISel::fastLowerArguments() { return false; }
1886
1887bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1888
1889 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1890 return false;
1891}
1892
1893 Register FastISel::fastEmit_(MVT, MVT, unsigned) { return Register(); }
1894
1895 Register FastISel::fastEmit_r(MVT, MVT, unsigned, Register /*Op0*/) {
1896 return Register();
1897}
1898
1899 Register FastISel::fastEmit_rr(MVT, MVT, unsigned, Register /*Op0*/,
1900 Register /*Op1*/) {
1901 return Register();
1902}
1903
1904 Register FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1905 return Register();
1906}
1907
1908 Register FastISel::fastEmit_f(MVT, MVT, unsigned,
1909 const ConstantFP * /*FPImm*/) {
1910 return Register();
1911}
1912
1913 Register FastISel::fastEmit_ri(MVT, MVT, unsigned, Register /*Op0*/,
1914 uint64_t /*Imm*/) {
1915 return Register();
1916}
1917
1918/// This method is a wrapper of fastEmit_ri. It first tries to emit an
1919/// instruction with an immediate operand using fastEmit_ri.
1920/// If that fails, it materializes the immediate into a register and try
1921/// fastEmit_rr instead.
1923 uint64_t Imm, MVT ImmType) {
1924 // If this is a multiply by a power of two, emit this as a shift left.
1925 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1926 Opcode = ISD::SHL;
1927 Imm = Log2_64(Imm);
1928 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1929 // div x, 8 -> srl x, 3
1930 Opcode = ISD::SRL;
1931 Imm = Log2_64(Imm);
1932 }
1933
1934 // Horrible hack (to be removed): check that shift amounts are
1935 // in range.
1936 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1937 Imm >= VT.getSizeInBits())
1938 return Register();
1939
1940 // First check if immediate type is legal. If not, we can't use the ri form.
1941 Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
1942 if (ResultReg)
1943 return ResultReg;
1944 Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1945 if (!MaterialReg) {
1946 // This is a bit ugly/slow, but failing here means falling out of
1947 // fast-isel, which would be very slow.
1948 IntegerType *ITy =
1949 IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1950 // TODO: Avoid implicit trunc?
1951 // See https://github.com/llvm/llvm-project/issues/112510.
1952 MaterialReg = getRegForValue(
1953 ConstantInt::get(ITy, Imm, /*IsSigned=*/false, /*ImplicitTrunc=*/true));
1954 if (!MaterialReg)
1955 return Register();
1956 }
1957 return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
1958}
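The power-of-two rewrites at the top of this wrapper are ordinary strength reduction; a minimal standalone sketch (function names are illustrative, with C++20 <bit> standing in for isPowerOf2_64/Log2_64):

#include <bit>
#include <cstdint>

// mul x, 2^k ==> shl x, k
std::uint64_t mulPow2(std::uint64_t X, std::uint64_t Imm) {
  return std::has_single_bit(Imm) ? X << std::countr_zero(Imm) : X * Imm;
}

// udiv x, 2^k ==> srl x, k (Imm is assumed non-zero)
std::uint64_t udivPow2(std::uint64_t X, std::uint64_t Imm) {
  return std::has_single_bit(Imm) ? X >> std::countr_zero(Imm) : X / Imm;
}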
1959
1960 Register FastISel::createResultReg(const TargetRegisterClass *RC) {
1961 return MRI.createVirtualRegister(RC);
1962}
1963
1964 Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
1965 unsigned OpNum) {
1966 if (Op.isVirtual()) {
1967 const TargetRegisterClass *RegClass = TII.getRegClass(II, OpNum);
1968 if (!MRI.constrainRegClass(Op, RegClass)) {
1969 // If it's not legal to COPY between the register classes, something
1970 // has gone very wrong before we got here.
1971 Register NewOp = createResultReg(RegClass);
1972 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1973 TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1974 return NewOp;
1975 }
1976 }
1977 return Op;
1978}
1979
1980Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1981 const TargetRegisterClass *RC) {
1982 Register ResultReg = createResultReg(RC);
1983 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1984
1985 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
1986 return ResultReg;
1987}
1988
1989Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1990 const TargetRegisterClass *RC, Register Op0) {
1991 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1992
1993 Register ResultReg = createResultReg(RC);
1994 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1995
1996 if (II.getNumDefs() >= 1)
1997 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
1998 .addReg(Op0);
1999 else {
2000 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2001 .addReg(Op0);
2002 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2003 ResultReg)
2004 .addReg(II.implicit_defs()[0]);
2005 }
2006
2007 return ResultReg;
2008}
2009
2010Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2011 const TargetRegisterClass *RC, Register Op0,
2012 Register Op1) {
2013 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2014
2015 Register ResultReg = createResultReg(RC);
2016 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2017 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2018
2019 if (II.getNumDefs() >= 1)
2020 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2021 .addReg(Op0)
2022 .addReg(Op1);
2023 else {
2024 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2025 .addReg(Op0)
2026 .addReg(Op1);
2027 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2028 ResultReg)
2029 .addReg(II.implicit_defs()[0]);
2030 }
2031 return ResultReg;
2032}
2033
2034Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
2035 const TargetRegisterClass *RC, Register Op0,
2036 Register Op1, Register Op2) {
2037 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2038
2039 Register ResultReg = createResultReg(RC);
2040 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2041 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2042 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
2043
2044 if (II.getNumDefs() >= 1)
2045 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2046 .addReg(Op0)
2047 .addReg(Op1)
2048 .addReg(Op2);
2049 else {
2050 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2051 .addReg(Op0)
2052 .addReg(Op1)
2053 .addReg(Op2);
2054 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2055 ResultReg)
2056 .addReg(II.implicit_defs()[0]);
2057 }
2058 return ResultReg;
2059}
2060
2061Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
2062 const TargetRegisterClass *RC, Register Op0,
2063 uint64_t Imm) {
2064 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2065
2066 Register ResultReg = createResultReg(RC);
2067 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2068
2069 if (II.getNumDefs() >= 1)
2070 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2071 .addReg(Op0)
2072 .addImm(Imm);
2073 else {
2074 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2075 .addReg(Op0)
2076 .addImm(Imm);
2077 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2078 ResultReg)
2079 .addReg(II.implicit_defs()[0]);
2080 }
2081 return ResultReg;
2082}
2083
2084Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2085 const TargetRegisterClass *RC, Register Op0,
2086 uint64_t Imm1, uint64_t Imm2) {
2087 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2088
2089 Register ResultReg = createResultReg(RC);
2090 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2091
2092 if (II.getNumDefs() >= 1)
2093 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2094 .addReg(Op0)
2095 .addImm(Imm1)
2096 .addImm(Imm2);
2097 else {
2098 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2099 .addReg(Op0)
2100 .addImm(Imm1)
2101 .addImm(Imm2);
2102 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2103 ResultReg)
2104 .addReg(II.implicit_defs()[0]);
2105 }
2106 return ResultReg;
2107}
2108
2109Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2110 const TargetRegisterClass *RC,
2111 const ConstantFP *FPImm) {
2112 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2113
2114 Register ResultReg = createResultReg(RC);
2115
2116 if (II.getNumDefs() >= 1)
2117 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2118 .addFPImm(FPImm);
2119 else {
2120 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2121 .addFPImm(FPImm);
2122 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2123 ResultReg)
2124 .addReg(II.implicit_defs()[0]);
2125 }
2126 return ResultReg;
2127}
2128
2129Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2130 const TargetRegisterClass *RC, Register Op0,
2131 Register Op1, uint64_t Imm) {
2132 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2133
2134 Register ResultReg = createResultReg(RC);
2135 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2136 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2137
2138 if (II.getNumDefs() >= 1)
2139 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2140 .addReg(Op0)
2141 .addReg(Op1)
2142 .addImm(Imm);
2143 else {
2144 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2145 .addReg(Op0)
2146 .addReg(Op1)
2147 .addImm(Imm);
2148 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2149 ResultReg)
2150 .addReg(II.implicit_defs()[0]);
2151 }
2152 return ResultReg;
2153}
2154
2155Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2156 const TargetRegisterClass *RC, uint64_t Imm) {
2157 Register ResultReg = createResultReg(RC);
2158 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2159
2160 if (II.getNumDefs() >= 1)
2161 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2162 .addImm(Imm);
2163 else {
2164 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addImm(Imm);
2165 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2166 ResultReg)
2167 .addReg(II.implicit_defs()[0]);
2168 }
2169 return ResultReg;
2170}
2171
2172 Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, Register Op0,
2173 uint32_t Idx) {
2174 Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2175 assert(Op0.isVirtual() && "Cannot yet extract from physregs");
2176 const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2177 MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2178 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2179 ResultReg).addReg(Op0, 0, Idx);
2180 return ResultReg;
2181}
2182
2183/// Emit MachineInstrs to compute the value of Op with all but the least
2184/// significant bit set to zero.
2185 Register FastISel::fastEmitZExtFromI1(MVT VT, Register Op0) {
2186 return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
2187}
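In scalar terms the emitted AND is just a mask of the low bit; a trivial sketch (hypothetical helper):

#include <cstdint>

// Zero-extend from i1: keep the least significant bit, clear the rest.
std::uint32_t zextFromI1(std::uint32_t Op) { return Op & 1u; }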
2188
2189/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2190/// Emit code to ensure constants are copied into registers when needed.
2191/// Remember the virtual registers that need to be added to the Machine PHI
2192 /// nodes as input. We cannot add them directly, because expansion
2193 /// might result in multiple MBBs for one BB. As such, the start of the
2194/// BB might correspond to a different MBB than the end.
2195bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2196 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2197 FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2198
2199 // Check successor nodes' PHI nodes that expect a constant to be available
2200 // from this block.
2201 for (const BasicBlock *SuccBB : successors(LLVMBB)) {
2202 if (!isa<PHINode>(SuccBB->begin()))
2203 continue;
2204 MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
2205
2206 // If this terminator has multiple identical successors (common for
2207 // switches), only handle each succ once.
2208 if (!SuccsHandled.insert(SuccMBB).second)
2209 continue;
2210
2211 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2212
2213 // At this point we know that there is a 1-1 correspondence between LLVM PHI
2214 // nodes and Machine PHI nodes, but the incoming operands have not been
2215 // emitted yet.
2216 for (const PHINode &PN : SuccBB->phis()) {
2217 // Ignore dead PHIs.
2218 if (PN.use_empty())
2219 continue;
2220
2221 // Only handle legal types. Two interesting things to note here. First,
2222 // by bailing out early, we may leave behind some dead instructions,
2223 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2224 // own moves. Second, this check is necessary because FastISel doesn't
2225 // use CreateRegs to create registers, so it always creates
2226 // exactly one register for each non-void instruction.
2227 EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2228 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2229 // Handle integer promotions, though, because they're common and easy.
2230 if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2231 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2232 return false;
2233 }
2234 }
2235
2236 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2237
2238 // Set the DebugLoc for the copy. Use the location of the operand if
2239 // there is one; otherwise use no location and let flushLocalValueMap fix it.
2240 MIMD = {};
2241 if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2242 MIMD = MIMetadata(*Inst);
2243
2244 Register Reg = getRegForValue(PHIOp);
2245 if (!Reg) {
2246 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2247 return false;
2248 }
2249 FuncInfo.PHINodesToUpdate.emplace_back(&*MBBI++, Reg);
2250 MIMD = {};
2251 }
2252 }
2253
2254 return true;
2255}
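The resize calls above implement a record-and-rollback pattern: the size of PHINodesToUpdate is captured on entry and restored on every failure path, so partially added entries never leak out. A generic sketch of the pattern (hypothetical helper):

#include <vector>

// Run Step against Vec; if it fails, shrink Vec back to its size on entry
// so partially appended entries are discarded.
template <typename T, typename Fn>
bool withRollback(std::vector<T> &Vec, Fn Step) {
  const auto Orig = Vec.size();
  if (Step(Vec))
    return true;
  Vec.resize(Orig);
  return false;
}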
2256
2257bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2258 assert(LI->hasOneUse() &&
2259 "tryToFoldLoad expected a LoadInst with a single use");
2260 // We know that the load has a single use, but don't know what it is. If it
2261 // isn't one of the folded instructions, then we can't succeed here. Handle
2262 // this by scanning the single-use users of the load until we get to FoldInst.
2263 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2264
2265 const Instruction *TheUser = LI->user_back();
2266 while (TheUser != FoldInst && // Scan down until we find FoldInst.
2267 // Stay in the right block.
2268 TheUser->getParent() == FoldInst->getParent() &&
2269 --MaxUsers) { // Don't scan too far.
2270 // If there are multiple or no uses of this instruction, then bail out.
2271 if (!TheUser->hasOneUse())
2272 return false;
2273
2274 TheUser = TheUser->user_back();
2275 }
2276
2277 // If we didn't find the fold instruction, then we failed to collapse the
2278 // sequence.
2279 if (TheUser != FoldInst)
2280 return false;
2281
2282 // Don't try to fold volatile loads. The target has to deal with alignment
2283 // constraints.
2284 if (LI->isVolatile())
2285 return false;
2286
2287 // Figure out which vreg this is going into. If there is no assigned vreg yet
2288 // then there actually was no reference to it. Perhaps the load is referenced
2289 // by a dead instruction.
2290 Register LoadReg = getRegForValue(LI);
2291 if (!LoadReg)
2292 return false;
2293
2294 // We can't fold if this vreg has no uses or more than one use. Multiple uses
2295 // may mean that the instruction got lowered to multiple MIs, or the use of
2296 // the loaded value ended up being multiple operands of the result.
2297 if (!MRI.hasOneUse(LoadReg))
2298 return false;
2299
2300 // If the register has fixups, there may be additional uses through a
2301 // different alias of the register.
2302 if (FuncInfo.RegsWithFixups.contains(LoadReg))
2303 return false;
2304
2305 MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2306 MachineInstr *User = RI->getParent();
2307
2308 // Set the insertion point properly. Folding the load can cause generation of
2309 // other random instructions (like sign extends) for addressing modes; make
2310 // sure they get inserted in a logical place before the new instruction.
2311 FuncInfo.InsertPt = User;
2312 FuncInfo.MBB = User->getParent();
2313
2314 // Ask the target to try folding the load.
2315 return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2316}
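The scan at the top of this function walks a chain of single-use instructions with a fixed hop budget; a toy version over a hypothetical one-user node type:

// Follow single-use links from From toward To, giving up after a few hops.
struct Node {
  const Node *OnlyUser = nullptr; // the unique user, or null
};

bool reachesViaSingleUses(const Node *From, const Node *To) {
  unsigned MaxHops = 6; // don't scan down huge single-use chains
  while (From && From != To && --MaxHops)
    From = From->OnlyUser;
  return From == To;
}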
2317
2318 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2319 // Must be an add.
2320 if (!isa<AddOperator>(Add))
2321 return false;
2322 // Type size needs to match.
2323 if (DL.getTypeSizeInBits(GEP->getType()) !=
2324 DL.getTypeSizeInBits(Add->getType()))
2325 return false;
2326 // Must be in the same basic block.
2327 if (isa<Instruction>(Add) &&
2328 FuncInfo.getMBB(cast<Instruction>(Add)->getParent()) != FuncInfo.MBB)
2329 return false;
2330 // Must have a constant operand.
2331 return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2332}
2333
2334 MachineMemOperand *
2335 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2336 const Value *Ptr;
2337 Type *ValTy;
2338 MaybeAlign Alignment;
2339 MachineMemOperand::Flags Flags;
2340 bool IsVolatile;
2341
2342 if (const auto *LI = dyn_cast<LoadInst>(I)) {
2343 Alignment = LI->getAlign();
2344 IsVolatile = LI->isVolatile();
2345 Flags = MachineMemOperand::MOLoad;
2346 Ptr = LI->getPointerOperand();
2347 ValTy = LI->getType();
2348 } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2349 Alignment = SI->getAlign();
2350 IsVolatile = SI->isVolatile();
2351 Flags = MachineMemOperand::MOStore;
2352 Ptr = SI->getPointerOperand();
2353 ValTy = SI->getValueOperand()->getType();
2354 } else
2355 return nullptr;
2356
2357 bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
2358 bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
2359 bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
2360 const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2361
2362 AAMDNodes AAInfo = I->getAAMetadata();
2363
2364 if (!Alignment) // Ensure that codegen never sees alignment 0.
2365 Alignment = DL.getABITypeAlign(ValTy);
2366
2367 unsigned Size = DL.getTypeStoreSize(ValTy);
2368
2369 if (IsVolatile)
2370 Flags |= MachineMemOperand::MOVolatile;
2371 if (IsNonTemporal)
2372 Flags |= MachineMemOperand::MONonTemporal;
2373 if (IsDereferenceable)
2374 Flags |= MachineMemOperand::MODereferenceable;
2375 if (IsInvariant)
2376 Flags |= MachineMemOperand::MOInvariant;
2377
2378 return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2379 *Alignment, AAInfo, Ranges);
2380}
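The flag handling above is plain bit-flag composition. A self-contained sketch with made-up enumerators (the real values are the MachineMemOperand::Flags used above):

#include <cstdint>

// Illustrative stand-ins for MachineMemOperand::Flags.
enum MemFlags : std::uint32_t {
  MFLoad = 1u << 0,
  MFStore = 1u << 1,
  MFVolatile = 1u << 2,
  MFNonTemporal = 1u << 3,
};

std::uint32_t buildMemFlags(bool IsLoad, bool IsVolatile, bool IsNonTemporal) {
  std::uint32_t Flags = IsLoad ? MFLoad : MFStore;
  if (IsVolatile)
    Flags |= MFVolatile;
  if (IsNonTemporal)
    Flags |= MFNonTemporal;
  return Flags;
}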
2381
2382 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2383 // If both operands are the same, then try to optimize or fold the cmp.
2384 CmpInst::Predicate Predicate = CI->getPredicate();
2385 if (CI->getOperand(0) != CI->getOperand(1))
2386 return Predicate;
2387
2388 switch (Predicate) {
2389 default: llvm_unreachable("Invalid predicate!");
2390 case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2391 case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2392 case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2393 case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2394 case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2395 case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2396 case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2397 case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2398 case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2399 case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2400 case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2401 case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2402 case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2403 case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2404 case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2405 case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2406
2407 case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2408 case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2409 case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2410 case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2411 case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2412 case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2413 case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2414 case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2415 case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2416 case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2417 }
2418
2419 return Predicate;
2420}
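A quick way to sanity-check the table: with identical operands, every equality and ordering relation is already decided, and the only remaining question is whether the operand is NaN. For instance (illustrative helpers):

#include <cmath>

// FCMP_OEQ x,x folds to FCMP_ORD: x == x holds exactly when x is not NaN.
bool oeqSameOperand(double X) { return !std::isnan(X); }

// FCMP_UNE x,x folds to FCMP_UNO: x != x (unordered) holds exactly when x is NaN.
bool uneSameOperand(double X) { return std::isnan(X); }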