LLVM 22.0.0git
Utils.cpp
Go to the documentation of this file.
1//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file This file implements the utility functions used by the GlobalISel
9/// pipeline.
10//===----------------------------------------------------------------------===//
11
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
35#include "llvm/IR/Constants.h"
38#include <numeric>
39#include <optional>
40
41#define DEBUG_TYPE "globalisel-utils"
42
43using namespace llvm;
44using namespace MIPatternMatch;
45
// NOTE(review): doxygen-scrape fragment — the opening signature line of this
// function (llvm::constrainRegToClass, taking MachineRegisterInfo &MRI) was
// dropped in extraction; verify against upstream. Code kept verbatim.
47 const TargetInstrInfo &TII,
48 const RegisterBankInfo &RBI, Register Reg,
49 const TargetRegisterClass &RegClass) {
// If Reg cannot be constrained to RegClass in place, return a fresh virtual
// register of the requested class; otherwise return Reg unchanged.
50 if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
51 return MRI.createVirtualRegister(&RegClass);
52
53 return Reg;
54}
55
// NOTE(review): fragment — the leading signature line(s) of this
// llvm::constrainOperandRegClass overload (and the MachineRegisterInfo/
// TargetInstrInfo parameter line) were lost in extraction; code kept verbatim.
// Constrains RegMO's virtual register to RegClass; when the in-place
// constraint fails, a new vreg is created and a COPY is inserted to bridge
// the old and new registers, notifying the MachineFunction's observer.
57 const MachineFunction &MF, const TargetRegisterInfo &TRI,
59 const RegisterBankInfo &RBI, MachineInstr &InsertPt,
60 const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
61 Register Reg = RegMO.getReg();
62 // Assume physical registers are properly constrained.
63 assert(Reg.isVirtual() && "PhysReg not implemented");
64
65 // Save the old register class to check whether
66 // the change notifications will be required.
67 // TODO: A better approach would be to pass
68 // the observers to constrainRegToClass().
69 auto *OldRegClass = MRI.getRegClassOrNull(Reg);
70 Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
71 // If we created a new virtual register because the class is not compatible
72 // then create a copy between the new and the old register.
73 if (ConstrainedReg != Reg) {
74 MachineBasicBlock::iterator InsertIt(&InsertPt);
75 MachineBasicBlock &MBB = *InsertPt.getParent();
76 // FIXME: The copy needs to have the classes constrained for its operands.
77 // Use operand's regbank to get the class for old register (Reg).
// For a use, copy the old value into the constrained vreg before InsertPt;
// for a def, copy the constrained result back out just after InsertPt.
78 if (RegMO.isUse()) {
79 BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
80 TII.get(TargetOpcode::COPY), ConstrainedReg)
81 .addReg(Reg);
82 } else {
83 assert(RegMO.isDef() && "Must be a definition");
84 BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
85 TII.get(TargetOpcode::COPY), Reg)
86 .addReg(ConstrainedReg);
87 }
// Bracket the operand rewrite with observer notifications so combiners
// tracking the worklist see the change.
88 if (GISelChangeObserver *Observer = MF.getObserver()) {
89 Observer->changingInstr(*RegMO.getParent());
90 }
91 RegMO.setReg(ConstrainedReg);
92 if (GISelChangeObserver *Observer = MF.getObserver()) {
93 Observer->changedInstr(*RegMO.getParent());
94 }
95 } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
96 if (GISelChangeObserver *Observer = MF.getObserver()) {
97 if (!RegMO.isDef()) {
98 MachineInstr *RegDef = MRI.getVRegDef(Reg);
99 Observer->changedInstr(*RegDef);
100 }
101 Observer->changingAllUsesOfReg(MRI, Reg);
102 Observer->finishedChangingAllUsesOfReg();
103 }
104 }
105 return ConstrainedReg;
106}
107
// NOTE(review): fragment — the signature line(s) naming this
// llvm::constrainOperandRegClass overload (MCInstrDesc-based) were dropped;
// code kept verbatim. Looks up the register class required by operand OpIdx
// of instruction description II and delegates to the RegClass-based overload.
109 const MachineFunction &MF, const TargetRegisterInfo &TRI,
111 const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
112 MachineOperand &RegMO, unsigned OpIdx) {
113 Register Reg = RegMO.getReg();
114 // Assume physical registers are properly constrained.
115 assert(Reg.isVirtual() && "PhysReg not implemented");
116
117 const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx);
118 // Some of the target independent instructions, like COPY, may not impose any
119 // register class constraints on some of their operands: If it's a use, we can
120 // skip constraining as the instruction defining the register would constrain
121 // it.
122
123 if (OpRC) {
124 // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
125 // can have multiple regbanks for a superclass that combine different
126 // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
127 // resolved by targets during regbankselect should not be overridden.
128 if (const auto *SubRC = TRI.getCommonSubClass(
129 OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
130 OpRC = SubRC;
131
132 OpRC = TRI.getAllocatableClass(OpRC);
133 }
134
135 if (!OpRC) {
136 assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
137 "Register class constraint is required unless either the "
138 "instruction is target independent or the operand is a use");
139 // FIXME: Just bailing out like this here could be not enough, unless we
140 // expect the users of this function to do the right thing for PHIs and
141 // COPY:
142 // v1 = COPY v0
143 // v2 = COPY v1
144 // v1 here may end up not being constrained at all. Please notice that to
145 // reproduce the issue we likely need a destination pattern of a selection
146 // rule producing such extra copies, not just an input GMIR with them as
147 // every existing target using selectImpl handles copies before calling it
148 // and they never reach this function.
149 return Reg;
150 }
151 return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
152 RegMO);
153}
154
// NOTE(review): fragment — the line naming this function
// (llvm::constrainSelectedInstRegOperands, taking MachineInstr &I) and line
// 163 (presumably the local `MRI` declaration, e.g. MF.getRegInfo()) were
// dropped in extraction; code kept verbatim.
// Constrains every explicit virtual register operand of an already-selected
// instruction to its required register class, and ties operands per the
// MCInstrDesc TIED_TO constraints.
156 const TargetInstrInfo &TII,
157 const TargetRegisterInfo &TRI,
158 const RegisterBankInfo &RBI) {
159 assert(!isPreISelGenericOpcode(I.getOpcode()) &&
160 "A selected instruction is expected");
161 MachineBasicBlock &MBB = *I.getParent();
162 MachineFunction &MF = *MBB.getParent();
164
165 for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
166 MachineOperand &MO = I.getOperand(OpI);
167
168 // There's nothing to be done on non-register operands.
169 if (!MO.isReg())
170 continue;
171
172 LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
173 assert(MO.isReg() && "Unsupported non-reg operand");
174
175 Register Reg = MO.getReg();
176 // Physical registers don't need to be constrained.
177 if (Reg.isPhysical())
178 continue;
179
180 // Register operands with a value of 0 (e.g. predicate operands) don't need
181 // to be constrained.
182 if (Reg == 0)
183 continue;
184
185 // If the operand is a vreg, we should constrain its regclass, and only
186 // insert COPYs if that's impossible.
187 // constrainOperandRegClass does that for us.
188 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
189
190 // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
191 // done.
192 if (MO.isUse()) {
193 int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
194 if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
195 I.tieOperands(DefIdx, OpI);
196 }
197 }
198 return true;
199}
200
// NOTE(review): fragment — the signature lines of llvm::canReplaceReg
// (DstReg, SrcReg, MRI parameters) were dropped; code kept verbatim.
// Decides whether SrcReg may be substituted for DstReg: same LLT type, no
// physregs, and compatible class/bank constraints.
203 // Give up if either DstReg or SrcReg is a physical register.
204 if (DstReg.isPhysical() || SrcReg.isPhysical())
205 return false;
206 // Give up if the types don't match.
207 if (MRI.getType(DstReg) != MRI.getType(SrcReg))
208 return false;
209 // Replace if either DstReg has no constraints or the register
210 // constraints match.
211 const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
212 if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
213 return true;
214
215 // Otherwise match if the Src is already a regclass that is covered by the Dst
216 // RegBank.
217 return isa<const RegisterBank *>(DstRBC) && MRI.getRegClassOrNull(SrcReg) &&
218 cast<const RegisterBank *>(DstRBC)->covers(
219 *MRI.getRegClassOrNull(SrcReg));
220}
221
// NOTE(review): fragment — the line naming this function
// (llvm::isTriviallyDead, taking const MachineInstr &MI) was dropped;
// code kept verbatim.
223 const MachineRegisterInfo &MRI) {
224 // Instructions without side-effects are dead iff they only define dead regs.
225 // This function is hot and this loop returns early in the common case,
226 // so only perform additional checks before this if absolutely necessary.
// A physical-register def or any remaining non-debug use keeps MI alive.
227 for (const auto &MO : MI.all_defs()) {
228 Register Reg = MO.getReg();
229 if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
230 return false;
231 }
232 return MI.wouldBeTriviallyDead();
233}
234
// NOTE(review): fragment — several lines were dropped in extraction: the
// function name line (a static/file-local diagnostic helper), the MORE/R
// parameter lines (237-238), and the right-hand side of the
// IsGlobalISelAbortEnabled initializer (line 240). Code kept verbatim.
// Emits R as a remark, or aborts via reportFatalUsageError when the
// diagnostic is an error and GlobalISel abort is enabled.
236 MachineFunction &MF,
239 bool IsGlobalISelAbortEnabled =
241 bool IsFatal = Severity == DS_Error && IsGlobalISelAbortEnabled;
242 // Print the function name explicitly if we don't have a debug location (which
243 // makes the diagnostic less useful) or if we're going to emit a raw error.
244 if (!R.getLocation().isValid() || IsFatal)
245 R << (" (in function: " + MF.getName() + ")").str();
246
247 if (IsFatal)
248 reportFatalUsageError(Twine(R.getMsg()));
249 else
250 MORE.emit(R);
251}
252
258
265
// NOTE(review): fragment — the signature lines naming this
// llvm::reportGISelFailure overload (MF, MORE parameters) and the opening of
// the `if` condition at line 274 were dropped; code kept verbatim.
// Builds a MachineOptimizationRemarkMissed for MI and forwards it to the
// remark-based reportGISelFailure overload.
268 const char *PassName, StringRef Msg,
269 const MachineInstr &MI) {
270 MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
271 MI.getDebugLoc(), MI.getParent());
272 R << Msg;
273 // Printing MI is expensive; only do it if expensive remarks are enabled.
275 MORE.allowExtraAnalysis(PassName))
276 R << ": " << ore::MNV("Inst", MI);
277 reportGISelFailure(MF, MORE, R);
278}
279
280unsigned llvm::getInverseGMinMaxOpcode(unsigned MinMaxOpc) {
281 switch (MinMaxOpc) {
282 case TargetOpcode::G_SMIN:
283 return TargetOpcode::G_SMAX;
284 case TargetOpcode::G_SMAX:
285 return TargetOpcode::G_SMIN;
286 case TargetOpcode::G_UMIN:
287 return TargetOpcode::G_UMAX;
288 case TargetOpcode::G_UMAX:
289 return TargetOpcode::G_UMIN;
290 default:
291 llvm_unreachable("unrecognized opcode");
292 }
293}
294
295std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
296 const MachineRegisterInfo &MRI) {
297 std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
298 VReg, MRI, /*LookThroughInstrs*/ false);
299 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
300 "Value found while looking through instrs");
301 if (!ValAndVReg)
302 return std::nullopt;
303 return ValAndVReg->Value;
304}
305
// NOTE(review): fragment — the line naming this function
// (llvm::getIConstantFromReg, taking Register Reg) was dropped; code kept
// verbatim. Asserting accessor: Reg must be defined by a G_CONSTANT.
307 const MachineRegisterInfo &MRI) {
308 MachineInstr *Const = MRI.getVRegDef(Reg);
309 assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&
310 "expected a G_CONSTANT on Reg");
311 return Const->getOperand(1).getCImm()->getValue();
312}
313
// NOTE(review): fragment — the line with the function name and parameters
// (llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo
// &MRI), presumably) was dropped; code kept verbatim.
// Returns the constant as a sign-extended int64_t when it fits in 64 bits.
314std::optional<int64_t>
316 std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
317 if (Val && Val->getBitWidth() <= 64)
318 return Val->getSExtValue();
319 return std::nullopt;
320}
321
322namespace {
323
324// This function is used in many places, and as such, it has some
325// micro-optimizations to try and make it as fast as it can be.
326//
327// - We use template arguments to avoid an indirect call caused by passing a
328// function_ref/std::function
329// - GetAPCstValue does not return std::optional<APInt> as that's expensive.
330// Instead it returns true/false and places the result in a pre-constructed
331// APInt.
332//
333// Please change this function carefully and benchmark your changes.
// NOTE(review): fragment — lines 340-341 (the local declarations for `MI`
// and `SeenOpcodes`, used below) were dropped in extraction; code kept
// verbatim. Walks the def chain of VReg through COPY / G_INTTOPTR and the
// ext/trunc opcodes (recording each ext/trunc with its destination width),
// then replays the recorded conversions on the constant found at the root.
334template <bool (*IsConstantOpcode)(const MachineInstr *),
335 bool (*GetAPCstValue)(const MachineInstr *MI, APInt &)>
336std::optional<ValueAndVReg>
337getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
338 bool LookThroughInstrs = true,
339 bool LookThroughAnyExt = false) {
342
343 while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
344 LookThroughInstrs) {
345 switch (MI->getOpcode()) {
// G_ANYEXT is only looked through on request; it then shares the
// record-and-descend path of the other conversions.
346 case TargetOpcode::G_ANYEXT:
347 if (!LookThroughAnyExt)
348 return std::nullopt;
349 [[fallthrough]];
350 case TargetOpcode::G_TRUNC:
351 case TargetOpcode::G_SEXT:
352 case TargetOpcode::G_ZEXT:
353 SeenOpcodes.push_back(std::make_pair(
354 MI->getOpcode(),
355 MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
356 VReg = MI->getOperand(1).getReg();
357 break;
358 case TargetOpcode::COPY:
359 VReg = MI->getOperand(1).getReg();
360 if (VReg.isPhysical())
361 return std::nullopt;
362 break;
363 case TargetOpcode::G_INTTOPTR:
364 VReg = MI->getOperand(1).getReg();
365 break;
366 default:
367 return std::nullopt;
368 }
369 }
370 if (!MI || !IsConstantOpcode(MI))
371 return std::nullopt;
372
373 APInt Val;
374 if (!GetAPCstValue(MI, Val))
375 return std::nullopt;
// Re-apply the conversions innermost-first to reconstruct the value as it
// appears at the original (outermost) register.
376 for (auto &Pair : reverse(SeenOpcodes)) {
377 switch (Pair.first) {
378 case TargetOpcode::G_TRUNC:
379 Val = Val.trunc(Pair.second);
380 break;
381 case TargetOpcode::G_ANYEXT:
382 case TargetOpcode::G_SEXT:
383 Val = Val.sext(Pair.second);
384 break;
385 case TargetOpcode::G_ZEXT:
386 Val = Val.zext(Pair.second);
387 break;
388 }
389 }
390
391 return ValueAndVReg{std::move(Val), VReg};
392}
393
394bool isIConstant(const MachineInstr *MI) {
395 if (!MI)
396 return false;
397 return MI->getOpcode() == TargetOpcode::G_CONSTANT;
398}
399
400bool isFConstant(const MachineInstr *MI) {
401 if (!MI)
402 return false;
403 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
404}
405
406bool isAnyConstant(const MachineInstr *MI) {
407 if (!MI)
408 return false;
409 unsigned Opc = MI->getOpcode();
410 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
411}
412
413bool getCImmAsAPInt(const MachineInstr *MI, APInt &Result) {
414 const MachineOperand &CstVal = MI->getOperand(1);
415 if (!CstVal.isCImm())
416 return false;
417 Result = CstVal.getCImm()->getValue();
418 return true;
419}
420
// NOTE(review): fragment — line 426 (the body of the isFPImm branch,
// presumably extracting the FP immediate's bit pattern into Result) was
// dropped in extraction; code kept verbatim.
// Extracts either the integer immediate or the FP immediate's bits into
// Result; returns false for any other operand kind.
421bool getCImmOrFPImmAsAPInt(const MachineInstr *MI, APInt &Result) {
422 const MachineOperand &CstVal = MI->getOperand(1);
423 if (CstVal.isCImm())
424 Result = CstVal.getCImm()->getValue();
425 else if (CstVal.isFPImm())
427 else
428 return false;
429 return true;
430}
431
432} // end anonymous namespace
433
// NOTE(review): fragment — the line naming this wrapper
// (llvm::getIConstantVRegValWithLookThrough) was dropped; code kept verbatim.
// Instantiates the shared look-through helper for integer constants only.
435 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
436 return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
437 VReg, MRI, LookThroughInstrs);
438}
439
// NOTE(review): fragment — the line naming this wrapper
// (llvm::getAnyConstantVRegValWithLookThrough) was dropped; code kept
// verbatim. Instantiates the shared helper accepting both G_CONSTANT and
// G_FCONSTANT roots, optionally looking through G_ANYEXT.
441 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
442 bool LookThroughAnyExt) {
443 return getConstantVRegValWithLookThrough<isAnyConstant,
444 getCImmOrFPImmAsAPInt>(
445 VReg, MRI, LookThroughInstrs, LookThroughAnyExt);
446}
447
448std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
449 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
450 auto Reg =
451 getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
452 VReg, MRI, LookThroughInstrs);
453 if (!Reg)
454 return std::nullopt;
455
456 APFloat FloatVal(getFltSemanticForLLT(LLT::scalar(Reg->Value.getBitWidth())),
457 Reg->Value);
458 return FPValueAndVReg{FloatVal, Reg->VReg};
459}
460
// NOTE(review): fragment — the line with the function name and parameters
// (llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo
// &MRI), presumably) was dropped; code kept verbatim.
// Returns the ConstantFP of a directly-defining G_FCONSTANT, else nullptr.
461const ConstantFP *
463 MachineInstr *MI = MRI.getVRegDef(VReg);
464 if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
465 return nullptr;
466 return MI->getOperand(1).getFPImm();
467}
468
// NOTE(review): fragment — the line naming this function
// (llvm::getDefSrcRegIgnoringCopies, taking Register Reg and the MRI) was
// dropped; code kept verbatim.
// Walks through COPY and generic optimization-hint instructions to find the
// underlying defining instruction and its source register.
469std::optional<DefinitionAndSourceRegister>
471 Register DefSrcReg = Reg;
472 // This assumes that the code is in SSA form, so there should only be one
473 // definition.
474 auto DefIt = MRI.def_begin(Reg);
475 if (DefIt == MRI.def_end())
476 return {};
477 MachineOperand &DefOpnd = *DefIt;
478 MachineInstr *DefMI = DefOpnd.getParent();
479 auto DstTy = MRI.getType(DefOpnd.getReg());
480 if (!DstTy.isValid())
481 return std::nullopt;
482 unsigned Opc = DefMI->getOpcode();
// Stop looking through once the source type becomes invalid (e.g. an
// untyped register), keeping the last well-typed def.
483 while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
484 Register SrcReg = DefMI->getOperand(1).getReg();
485 auto SrcTy = MRI.getType(SrcReg);
486 if (!SrcTy.isValid())
487 break;
488 DefMI = MRI.getVRegDef(SrcReg);
489 DefSrcReg = SrcReg;
490 Opc = DefMI->getOpcode();
491 }
492 return DefinitionAndSourceRegister{DefMI, DefSrcReg};
493}
494
// NOTE(review): fragment — the line naming this function
// (llvm::getDefIgnoringCopies) and line 498 (the call to
// getDefSrcRegIgnoringCopies, presumably) were dropped; code kept verbatim.
// Convenience wrapper returning only the defining MachineInstr.
496 const MachineRegisterInfo &MRI) {
497 std::optional<DefinitionAndSourceRegister> DefSrcReg =
499 return DefSrcReg ? DefSrcReg->MI : nullptr;
500}
501
// NOTE(review): fragment — the line naming this function
// (llvm::getSrcRegIgnoringCopies) and line 505 (the call to
// getDefSrcRegIgnoringCopies, presumably) were dropped; code kept verbatim.
// Convenience wrapper returning only the traced source register.
503 const MachineRegisterInfo &MRI) {
504 std::optional<DefinitionAndSourceRegister> DefSrcReg =
506 return DefSrcReg ? DefSrcReg->Reg : Register();
507}
508
// NOTE(review): fragment — lines 510 and 512 (presumably the
// SmallVectorImpl<Register> &VRegs and MachineRegisterInfo &MRI parameter
// lines) were dropped; code kept verbatim.
// Splits Reg into NumParts pieces of type Ty via a single G_UNMERGE_VALUES,
// appending the new virtual registers to VRegs.
509void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
511 MachineIRBuilder &MIRBuilder,
513 for (int i = 0; i < NumParts; ++i)
514 VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
515 MIRBuilder.buildUnmerge(VRegs, Reg);
516}
517
// NOTE(review): fragment — lines 519 and 522 (presumably the
// SmallVectorImpl<Register> &VRegs and MachineRegisterInfo &MRI parameter
// lines) were dropped; code kept verbatim.
// Splits Reg (of type RegTy) into as many MainTy parts as fit, placing the
// remainder (if any) in LeftoverRegs and reporting its type via the
// LeftoverTy out-parameter.
518bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
520 SmallVectorImpl<Register> &LeftoverRegs,
521 MachineIRBuilder &MIRBuilder,
523 assert(!LeftoverTy.isValid() && "this is an out argument");
524
525 unsigned RegSize = RegTy.getSizeInBits();
526 unsigned MainSize = MainTy.getSizeInBits();
527 unsigned NumParts = RegSize / MainSize;
528 unsigned LeftoverSize = RegSize - NumParts * MainSize;
529
530 // Use an unmerge when possible.
531 if (LeftoverSize == 0) {
532 for (unsigned I = 0; I < NumParts; ++I)
533 VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
534 MIRBuilder.buildUnmerge(VRegs, Reg);
535 return true;
536 }
537
538 // Try to use unmerge for irregular vector split where possible
539 // For example when splitting a <6 x i32> into <4 x i32> with <2 x i32>
540 // leftover, it becomes:
541 // <2 x i32> %2, <2 x i32>%3, <2 x i32> %4 = G_UNMERGE_VALUE <6 x i32> %1
542 // <4 x i32> %5 = G_CONCAT_VECTOR <2 x i32> %2, <2 x i32> %3
543 if (RegTy.isVector() && MainTy.isVector()) {
544 unsigned RegNumElts = RegTy.getNumElements();
545 unsigned MainNumElts = MainTy.getNumElements();
546 unsigned LeftoverNumElts = RegNumElts % MainNumElts;
547 // If can unmerge to LeftoverTy, do it
548 if (MainNumElts % LeftoverNumElts == 0 &&
549 RegNumElts % LeftoverNumElts == 0 &&
550 RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
551 LeftoverNumElts > 1) {
552 LeftoverTy = LLT::fixed_vector(LeftoverNumElts, RegTy.getElementType());
553
554 // Unmerge the SrcReg to LeftoverTy vectors
555 SmallVector<Register, 4> UnmergeValues;
556 extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
557 MIRBuilder, MRI);
558
559 // Find how many LeftoverTy makes one MainTy
560 unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
561 unsigned NumOfLeftoverVal =
562 ((RegNumElts % MainNumElts) / LeftoverNumElts);
563
564 // Create as many MainTy as possible using unmerged value
565 SmallVector<Register, 4> MergeValues;
566 for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
567 MergeValues.push_back(UnmergeValues[I]);
568 if (MergeValues.size() == LeftoverPerMain) {
569 VRegs.push_back(
570 MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
571 MergeValues.clear();
572 }
573 }
574 // Populate LeftoverRegs with the leftovers
575 for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
576 I < UnmergeValues.size(); I++) {
577 LeftoverRegs.push_back(UnmergeValues[I]);
578 }
579 return true;
580 }
581 }
582 // Perform irregular split. Leftover is last element of RegPieces.
583 if (MainTy.isVector()) {
584 SmallVector<Register, 8> RegPieces;
585 extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
586 MRI);
587 for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
588 VRegs.push_back(RegPieces[i]);
589 LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
590 LeftoverTy = MRI.getType(LeftoverRegs[0]);
591 return true;
592 }
593
594 LeftoverTy = LLT::scalar(LeftoverSize);
595 // For irregular sizes, extract the individual parts.
596 for (unsigned I = 0; I != NumParts; ++I) {
597 Register NewReg = MRI.createGenericVirtualRegister(MainTy);
598 VRegs.push_back(NewReg);
599 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
600 }
601
602 for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
603 Offset += LeftoverSize) {
604 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
605 LeftoverRegs.push_back(NewReg);
606 MIRBuilder.buildExtract(NewReg, Reg, Offset);
607 }
608
609 return true;
610}
611
// NOTE(review): fragment — lines 613/615 (presumably the VRegs and MRI
// parameter lines) and line 633 (presumably the declaration of the `Elts`
// SmallVector used below) were dropped; code kept verbatim.
// Splits a vector register into NumElts-sized sub-vectors, handling an
// irregular tail by first unmerging to scalar elements.
612void llvm::extractVectorParts(Register Reg, unsigned NumElts,
614 MachineIRBuilder &MIRBuilder,
616 LLT RegTy = MRI.getType(Reg);
617 assert(RegTy.isVector() && "Expected a vector type");
618
619 LLT EltTy = RegTy.getElementType();
620 LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
621 unsigned RegNumElts = RegTy.getNumElements();
622 unsigned LeftoverNumElts = RegNumElts % NumElts;
623 unsigned NumNarrowTyPieces = RegNumElts / NumElts;
624
625 // Perfect split without leftover
626 if (LeftoverNumElts == 0)
627 return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
628 MRI);
629
630 // Irregular split. Provide direct access to all elements for artifact
631 // combiner using unmerge to elements. Then build vectors with NumElts
632 // elements. Remaining element(s) will be (used to build vector) Leftover.
634 extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);
635
636 unsigned Offset = 0;
637 // Requested sub-vectors of NarrowTy.
638 for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
639 ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
640 VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
641 }
642
643 // Leftover element(s).
644 if (LeftoverNumElts == 1) {
645 VRegs.push_back(Elts[Offset]);
646 } else {
647 LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
648 ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
649 VRegs.push_back(
650 MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
651 }
652}
653
// NOTE(review): fragment — the line naming this function
// (llvm::getOpcodeDef, taking an opcode and a register) and line 656
// (presumably `MachineInstr *DefMI = getDefIgnoringCopies(...)`) were
// dropped; code kept verbatim. Returns the def only if it has Opcode.
655 const MachineRegisterInfo &MRI) {
657 return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
658}
659
// NOTE(review): fragment — line 669 (presumably the APF.convert(...) call
// narrowing the double to the 16-bit format, using `Ignored`) was dropped;
// code kept verbatim.
// Builds an APFloat of the requested bit size (16/32/64) from a double;
// asserts via llvm_unreachable on any other size.
660APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
661 if (Size == 32)
662 return APFloat(float(Val));
663 if (Size == 64)
664 return APFloat(Val);
665 if (Size != 16)
666 llvm_unreachable("Unsupported FPConstant size");
667 bool Ignored;
668 APFloat APF(Val);
670 return APF;
671}
672
673std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
674 const Register Op1,
675 const Register Op2,
676 const MachineRegisterInfo &MRI) {
677 auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
678 if (!MaybeOp2Cst)
679 return std::nullopt;
680
681 auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
682 if (!MaybeOp1Cst)
683 return std::nullopt;
684
685 const APInt &C1 = MaybeOp1Cst->Value;
686 const APInt &C2 = MaybeOp2Cst->Value;
687 switch (Opcode) {
688 default:
689 break;
690 case TargetOpcode::G_ADD:
691 return C1 + C2;
692 case TargetOpcode::G_PTR_ADD:
693 // Types can be of different width here.
694 // Result needs to be the same width as C1, so trunc or sext C2.
695 return C1 + C2.sextOrTrunc(C1.getBitWidth());
696 case TargetOpcode::G_AND:
697 return C1 & C2;
698 case TargetOpcode::G_ASHR:
699 return C1.ashr(C2);
700 case TargetOpcode::G_LSHR:
701 return C1.lshr(C2);
702 case TargetOpcode::G_MUL:
703 return C1 * C2;
704 case TargetOpcode::G_OR:
705 return C1 | C2;
706 case TargetOpcode::G_SHL:
707 return C1 << C2;
708 case TargetOpcode::G_SUB:
709 return C1 - C2;
710 case TargetOpcode::G_XOR:
711 return C1 ^ C2;
712 case TargetOpcode::G_UDIV:
713 if (!C2.getBoolValue())
714 break;
715 return C1.udiv(C2);
716 case TargetOpcode::G_SDIV:
717 if (!C2.getBoolValue())
718 break;
719 return C1.sdiv(C2);
720 case TargetOpcode::G_UREM:
721 if (!C2.getBoolValue())
722 break;
723 return C1.urem(C2);
724 case TargetOpcode::G_SREM:
725 if (!C2.getBoolValue())
726 break;
727 return C1.srem(C2);
728 case TargetOpcode::G_SMIN:
729 return APIntOps::smin(C1, C2);
730 case TargetOpcode::G_SMAX:
731 return APIntOps::smax(C1, C2);
732 case TargetOpcode::G_UMIN:
733 return APIntOps::umin(C1, C2);
734 case TargetOpcode::G_UMAX:
735 return APIntOps::umax(C1, C2);
736 }
737
738 return std::nullopt;
739}
740
// NOTE(review): fragment — lines 756/759/762/765 (presumably the
// C1.add/C1.subtract/C1.multiply/C1.divide calls with a rounding mode for
// the G_FADD/G_FSUB/G_FMUL/G_FDIV cases) were dropped in extraction; code
// kept verbatim.
// Constant folds an FP binary opcode over two G_FCONSTANT-defined registers,
// returning std::nullopt when either operand is non-constant or the opcode
// is unsupported (including the IEEE min/max variants, see FIXME below).
741std::optional<APFloat>
742llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
743 const Register Op2, const MachineRegisterInfo &MRI) {
744 const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
745 if (!Op2Cst)
746 return std::nullopt;
747
748 const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
749 if (!Op1Cst)
750 return std::nullopt;
751
752 APFloat C1 = Op1Cst->getValueAPF();
753 const APFloat &C2 = Op2Cst->getValueAPF();
754 switch (Opcode) {
755 case TargetOpcode::G_FADD:
757 return C1;
758 case TargetOpcode::G_FSUB:
760 return C1;
761 case TargetOpcode::G_FMUL:
763 return C1;
764 case TargetOpcode::G_FDIV:
766 return C1;
767 case TargetOpcode::G_FREM:
768 C1.mod(C2);
769 return C1;
770 case TargetOpcode::G_FCOPYSIGN:
771 C1.copySign(C2);
772 return C1;
773 case TargetOpcode::G_FMINNUM:
774 if (C1.isSignaling() || C2.isSignaling())
775 return std::nullopt;
776 return minnum(C1, C2);
777 case TargetOpcode::G_FMAXNUM:
778 if (C1.isSignaling() || C2.isSignaling())
779 return std::nullopt;
780 return maxnum(C1, C2);
781 case TargetOpcode::G_FMINIMUM:
782 return minimum(C1, C2);
783 case TargetOpcode::G_FMAXIMUM:
784 return maximum(C1, C2);
785 case TargetOpcode::G_FMINNUM_IEEE:
786 case TargetOpcode::G_FMAXNUM_IEEE:
787 // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
788 // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
789 // and currently there isn't a nice wrapper in APFloat for the version with
790 // correct snan handling.
791 break;
792 default:
793 break;
794 }
795
796 return std::nullopt;
797}
798
// NOTE(review): fragment — line 799 (the return type, presumably
// SmallVector<APInt>) was dropped in extraction; code kept verbatim.
// Element-wise constant fold of a binary opcode over two G_BUILD_VECTOR
// operands; an empty vector signals failure (non-constant input or any
// unfoldable element).
800llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
801 const Register Op2,
802 const MachineRegisterInfo &MRI) {
803 auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
804 if (!SrcVec2)
805 return SmallVector<APInt>();
806
807 auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
808 if (!SrcVec1)
809 return SmallVector<APInt>();
810
811 SmallVector<APInt> FoldedElements;
812 for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
813 auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
814 SrcVec2->getSourceReg(Idx), MRI);
815 if (!MaybeCst)
816 return SmallVector<APInt>();
817 FoldedElements.push_back(*MaybeCst);
818 }
819 return FoldedElements;
820}
821
// NOTE(review): fragment — the line opening this function's signature
// (llvm::isKnownNeverNaN, taking Register Val and the MRI) was dropped;
// code kept verbatim. Conservatively proves Val is never (s)NaN: via the
// FmNoNans flag, a constant's value, recursion through G_BUILD_VECTOR, or
// per-opcode reasoning on the defining instruction.
823 bool SNaN) {
824 const MachineInstr *DefMI = MRI.getVRegDef(Val);
825 if (!DefMI)
826 return false;
827
828 if (DefMI->getFlag(MachineInstr::FmNoNans))
829 return true;
830
831 // If the value is a constant, we can obviously see if it is a NaN or not.
832 if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
833 return !FPVal->getValueAPF().isNaN() ||
834 (SNaN && !FPVal->getValueAPF().isSignaling());
835 }
836
837 if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
838 for (const auto &Op : DefMI->uses())
839 if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
840 return false;
841 return true;
842 }
843
844 switch (DefMI->getOpcode()) {
845 default:
846 break;
// Arithmetic/transcendental ops quiet signaling NaNs, so SNaN-freedom
// holds; proving no quiet NaN would additionally require operands to be
// non-NaN and non-infinite.
847 case TargetOpcode::G_FADD:
848 case TargetOpcode::G_FSUB:
849 case TargetOpcode::G_FMUL:
850 case TargetOpcode::G_FDIV:
851 case TargetOpcode::G_FREM:
852 case TargetOpcode::G_FSIN:
853 case TargetOpcode::G_FCOS:
854 case TargetOpcode::G_FTAN:
855 case TargetOpcode::G_FACOS:
856 case TargetOpcode::G_FASIN:
857 case TargetOpcode::G_FATAN:
858 case TargetOpcode::G_FATAN2:
859 case TargetOpcode::G_FCOSH:
860 case TargetOpcode::G_FSINH:
861 case TargetOpcode::G_FTANH:
862 case TargetOpcode::G_FMA:
863 case TargetOpcode::G_FMAD:
864 if (SNaN)
865 return true;
866
867 // TODO: Need isKnownNeverInfinity
868 return false;
869 case TargetOpcode::G_FMINNUM_IEEE:
870 case TargetOpcode::G_FMAXNUM_IEEE: {
871 if (SNaN)
872 return true;
873 // This can return a NaN if either operand is an sNaN, or if both operands
874 // are NaN.
875 return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
876 isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
877 (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
878 isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
879 }
880 case TargetOpcode::G_FMINNUM:
881 case TargetOpcode::G_FMAXNUM: {
882 // Only one needs to be known not-nan, since it will be returned if the
883 // other ends up being one.
884 return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
885 isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
886 }
887 }
888
889 if (SNaN) {
890 // FP operations quiet. For now, just handle the ones inserted during
891 // legalization.
892 switch (DefMI->getOpcode()) {
893 case TargetOpcode::G_FPEXT:
894 case TargetOpcode::G_FPTRUNC:
895 case TargetOpcode::G_FCANONICALIZE:
896 return true;
897 default:
898 return false;
899 }
900 }
901
902 return false;
903}
904
// NOTE(review): fragment — the signature line (llvm::inferAlignFromPtrInfo,
// taking MachineFunction &MF) and lines 907-908 (presumably the
// dyn_cast to FixedStackPseudoSourceValue producing `FSPV` used below) were
// dropped; code kept verbatim.
// Derives an alignment for a memory operand from a fixed-stack slot or an
// IR Value's pointer alignment; Align(1) when nothing is known.
906 const MachinePointerInfo &MPO) {
909 MachineFrameInfo &MFI = MF.getFrameInfo();
910 return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
911 MPO.Offset);
912 }
913
914 if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
915 const Module *M = MF.getFunction().getParent();
916 return V->getPointerAlignment(M->getDataLayout());
917 }
918
919 return Align(1);
920}
921
// NOTE(review): fragment — the line naming this function
// (llvm::getFunctionLiveInPhysReg, taking MachineFunction &MF) and line 928
// (presumably the local `MRI` declaration) were dropped; code kept verbatim.
// Returns the virtual register holding the live-in PhysReg, creating the
// live-in and/or (re)inserting the entry-block COPY from PhysReg as needed.
923 const TargetInstrInfo &TII,
924 MCRegister PhysReg,
925 const TargetRegisterClass &RC,
926 const DebugLoc &DL, LLT RegTy) {
927 MachineBasicBlock &EntryMBB = MF.front();
929 Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
930 if (LiveIn) {
931 MachineInstr *Def = MRI.getVRegDef(LiveIn);
932 if (Def) {
933 // FIXME: Should the verifier check this is in the entry block?
934 assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
935 return LiveIn;
936 }
937
938 // It's possible the incoming argument register and copy was added during
939 // lowering, but later deleted due to being/becoming dead. If this happens,
940 // re-insert the copy.
941 } else {
942 // The live in register was not present, so add it.
943 LiveIn = MF.addLiveIn(PhysReg, &RC);
944 if (RegTy.isValid())
945 MRI.setType(LiveIn, RegTy);
946 }
947
948 BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
949 .addReg(PhysReg);
950 if (!EntryMBB.isLiveIn(PhysReg))
951 EntryMBB.addLiveIn(PhysReg);
952 return LiveIn;
953}
954
955std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
956 const Register Op1, uint64_t Imm,
957 const MachineRegisterInfo &MRI) {
958 auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
959 if (MaybeOp1Cst) {
960 switch (Opcode) {
961 default:
962 break;
963 case TargetOpcode::G_SEXT_INREG: {
964 LLT Ty = MRI.getType(Op1);
965 return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
966 }
967 }
968 }
969 return std::nullopt;
970}
971
972std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
973 const Register Op0,
974 const MachineRegisterInfo &MRI) {
975 std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
976 if (!Val)
977 return Val;
978
979 const unsigned DstSize = DstTy.getScalarSizeInBits();
980
981 switch (Opcode) {
982 case TargetOpcode::G_SEXT:
983 return Val->sext(DstSize);
984 case TargetOpcode::G_ZEXT:
985 case TargetOpcode::G_ANYEXT:
986 // TODO: DAG considers target preference when constant folding any_extend.
987 return Val->zext(DstSize);
988 default:
989 break;
990 }
991
992 llvm_unreachable("unexpected cast opcode to constant fold");
993}
994
// NOTE(review): fragment — line 1002 (presumably the rounding-mode argument
// closing the convertFromAPInt call) was dropped in extraction; code kept
// verbatim.
// Constant folds G_SITOFP/G_UITOFP of a constant integer register into an
// APFloat with DstTy's semantics; std::nullopt when Src is non-constant.
995std::optional<APFloat>
996llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
997 const MachineRegisterInfo &MRI) {
998 assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
999 if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
1000 APFloat DstVal(getFltSemanticForLLT(DstTy));
1001 DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
1003 return DstVal;
1004 }
1005 return std::nullopt;
1006}
1007
// NOTE(review): fragment — line 1009 (the function name and leading
// parameters, presumably llvm::ConstantFoldCountZeros(Register Src,
// const MachineRegisterInfo &MRI,) was dropped; code kept verbatim.
// Applies the callback CB to each constant (element) of Src — scalar or
// G_BUILD_VECTOR — returning std::nullopt if any element is non-constant.
1008std::optional<SmallVector<unsigned>>
1010 std::function<unsigned(APInt)> CB) {
1011 LLT Ty = MRI.getType(Src);
1012 SmallVector<unsigned> FoldedCTLZs;
1013 auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
1014 auto MaybeCst = getIConstantVRegVal(R, MRI);
1015 if (!MaybeCst)
1016 return std::nullopt;
1017 return CB(*MaybeCst);
1018 };
1019 if (Ty.isVector()) {
1020 // Try to constant fold each element.
1021 auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
1022 if (!BV)
1023 return std::nullopt;
1024 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1025 if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
1026 FoldedCTLZs.emplace_back(*MaybeFold);
1027 continue;
1028 }
1029 return std::nullopt;
1030 }
1031 return FoldedCTLZs;
1032 }
1033 if (auto MaybeCst = tryFoldScalar(Src)) {
1034 FoldedCTLZs.emplace_back(*MaybeCst);
1035 return FoldedCTLZs;
1036 }
1037 return std::nullopt;
1038}
1039
1040std::optional<SmallVector<APInt>>
1041llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
1042 unsigned DstScalarSizeInBits, unsigned ExtOp,
1043 const MachineRegisterInfo &MRI) {
1044 assert(ExtOp == TargetOpcode::G_SEXT || ExtOp == TargetOpcode::G_ZEXT ||
1045 ExtOp == TargetOpcode::G_ANYEXT);
1046
1047 const LLT Ty = MRI.getType(Op1);
1048
1049 auto GetICmpResultCst = [&](bool IsTrue) {
1050 if (IsTrue)
1051 return ExtOp == TargetOpcode::G_SEXT
1052 ? APInt::getAllOnes(DstScalarSizeInBits)
1053 : APInt::getOneBitSet(DstScalarSizeInBits, 0);
1054 return APInt::getZero(DstScalarSizeInBits);
1055 };
1056
1057 auto TryFoldScalar = [&](Register LHS, Register RHS) -> std::optional<APInt> {
1058 auto RHSCst = getIConstantVRegVal(RHS, MRI);
1059 if (!RHSCst)
1060 return std::nullopt;
1061 auto LHSCst = getIConstantVRegVal(LHS, MRI);
1062 if (!LHSCst)
1063 return std::nullopt;
1064
1065 switch (Pred) {
1067 return GetICmpResultCst(LHSCst->eq(*RHSCst));
1069 return GetICmpResultCst(LHSCst->ne(*RHSCst));
1071 return GetICmpResultCst(LHSCst->ugt(*RHSCst));
1073 return GetICmpResultCst(LHSCst->uge(*RHSCst));
1075 return GetICmpResultCst(LHSCst->ult(*RHSCst));
1077 return GetICmpResultCst(LHSCst->ule(*RHSCst));
1079 return GetICmpResultCst(LHSCst->sgt(*RHSCst));
1081 return GetICmpResultCst(LHSCst->sge(*RHSCst));
1083 return GetICmpResultCst(LHSCst->slt(*RHSCst));
1085 return GetICmpResultCst(LHSCst->sle(*RHSCst));
1086 default:
1087 return std::nullopt;
1088 }
1089 };
1090
1091 SmallVector<APInt> FoldedICmps;
1092
1093 if (Ty.isVector()) {
1094 // Try to constant fold each element.
1095 auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
1096 auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
1097 if (!BV1 || !BV2)
1098 return std::nullopt;
1099 assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
1100 for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
1101 if (auto MaybeFold =
1102 TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
1103 FoldedICmps.emplace_back(*MaybeFold);
1104 continue;
1105 }
1106 return std::nullopt;
1107 }
1108 return FoldedICmps;
1109 }
1110
1111 if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
1112 FoldedICmps.emplace_back(*MaybeCst);
1113 return FoldedICmps;
1114 }
1115
1116 return std::nullopt;
1117}
1118
1120 GISelValueTracking *VT) {
1121 std::optional<DefinitionAndSourceRegister> DefSrcReg =
1123 if (!DefSrcReg)
1124 return false;
1125
1126 const MachineInstr &MI = *DefSrcReg->MI;
1127 const LLT Ty = MRI.getType(Reg);
1128
1129 switch (MI.getOpcode()) {
1130 case TargetOpcode::G_CONSTANT: {
1131 unsigned BitWidth = Ty.getScalarSizeInBits();
1132 const ConstantInt *CI = MI.getOperand(1).getCImm();
1133 return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
1134 }
1135 case TargetOpcode::G_SHL: {
1136 // A left-shift of a constant one will have exactly one bit set because
1137 // shifting the bit off the end is undefined.
1138
1139 // TODO: Constant splat
1140 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1141 if (*ConstLHS == 1)
1142 return true;
1143 }
1144
1145 break;
1146 }
1147 case TargetOpcode::G_LSHR: {
1148 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1149 if (ConstLHS->isSignMask())
1150 return true;
1151 }
1152
1153 break;
1154 }
1155 case TargetOpcode::G_BUILD_VECTOR: {
1156 // TODO: Probably should have a recursion depth guard since you could have
1157 // bitcasted vector elements.
1158 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
1159 if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, VT))
1160 return false;
1161
1162 return true;
1163 }
1164 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1165 // Only handle constants since we would need to know if number of leading
1166 // zeros is greater than the truncation amount.
1167 const unsigned BitWidth = Ty.getScalarSizeInBits();
1168 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
1169 auto Const = getIConstantVRegVal(MO.getReg(), MRI);
1170 if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
1171 return false;
1172 }
1173
1174 return true;
1175 }
1176 default:
1177 break;
1178 }
1179
1180 if (!VT)
1181 return false;
1182
1183 // More could be done here, though the above checks are enough
1184 // to handle some common cases.
1185
1186 // Fall back to computeKnownBits to catch other known cases.
1187 KnownBits Known = VT->getKnownBits(Reg);
1188 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
1189}
1190
1194
1195LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
1196 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1197 return OrigTy;
1198
1199 if (OrigTy.isVector() && TargetTy.isVector()) {
1200 LLT OrigElt = OrigTy.getElementType();
1201 LLT TargetElt = TargetTy.getElementType();
1202
1203 // TODO: The docstring for this function says the intention is to use this
1204 // function to build MERGE/UNMERGE instructions. It won't be the case that
1205 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1206 // could implement getLCMType between the two in the future if there was a
1207 // need, but it is not worth it now as this function should not be used in
1208 // that way.
1209 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1210 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1211 "getLCMType not implemented between fixed and scalable vectors.");
1212
1213 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
1214 int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
1215 TargetTy.getElementCount().getKnownMinValue());
1216 // Prefer the original element type.
1218 TargetTy.getElementCount().getKnownMinValue());
1219 return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
1220 OrigTy.getElementType());
1221 }
1222 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
1223 TargetTy.getSizeInBits().getKnownMinValue());
1224 return LLT::vector(
1225 ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
1226 OrigElt);
1227 }
1228
1229 // One type is scalar, one type is vector
1230 if (OrigTy.isVector() || TargetTy.isVector()) {
1231 LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
1232 LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
1233 LLT EltTy = VecTy.getElementType();
1234 LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;
1235
1236 // Prefer scalar type from OrigTy.
1237 if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
1238 return LLT::vector(VecTy.getElementCount(), OrigEltTy);
1239
1240 // Different size scalars. Create vector with the same total size.
1241 // LCM will take fixed/scalable from VecTy.
1242 unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
1244 ScalarTy.getSizeInBits().getFixedValue());
1245 // Prefer type from OrigTy
1246 return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
1247 VecTy.getElementCount().isScalable()),
1248 OrigEltTy);
1249 }
1250
1251 // At this point, both types are scalars of different size
1252 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
1253 TargetTy.getSizeInBits().getFixedValue());
1254 // Preserve pointer types.
1255 if (LCM == OrigTy.getSizeInBits())
1256 return OrigTy;
1257 if (LCM == TargetTy.getSizeInBits())
1258 return TargetTy;
1259 return LLT::scalar(LCM);
1260}
1261
1262LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
1263
1264 if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
1265 (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
1267 "getCoverTy not implemented between fixed and scalable vectors.");
1268
1269 if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
1270 (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
1271 return getLCMType(OrigTy, TargetTy);
1272
1273 unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
1274 unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
1275 if (OrigTyNumElts % TargetTyNumElts == 0)
1276 return OrigTy;
1277
1278 unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
1280 OrigTy.getElementType());
1281}
1282
1283LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
1284 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1285 return OrigTy;
1286
1287 if (OrigTy.isVector() && TargetTy.isVector()) {
1288 LLT OrigElt = OrigTy.getElementType();
1289
1290 // TODO: The docstring for this function says the intention is to use this
1291 // function to build MERGE/UNMERGE instructions. It won't be the case that
1292 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1293 // could implement getGCDType between the two in the future if there was a
1294 // need, but it is not worth it now as this function should not be used in
1295 // that way.
1296 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1297 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1298 "getGCDType not implemented between fixed and scalable vectors.");
1299
1300 unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
1301 TargetTy.getSizeInBits().getKnownMinValue());
1302 if (GCD == OrigElt.getSizeInBits())
1304 OrigElt);
1305
1306 // Cannot produce original element type, but both have vscale in common.
1307 if (GCD < OrigElt.getSizeInBits())
1309 GCD);
1310
1311 return LLT::vector(
1313 OrigTy.isScalable()),
1314 OrigElt);
1315 }
1316
1317 // If one type is vector and the element size matches the scalar size, then
1318 // the gcd is the scalar type.
1319 if (OrigTy.isVector() &&
1320 OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
1321 return OrigTy.getElementType();
1322 if (TargetTy.isVector() &&
1323 TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
1324 return OrigTy;
1325
1326 // At this point, both types are either scalars of different type or one is a
1327 // vector and one is a scalar. If both types are scalars, the GCD type is the
1328 // GCD between the two scalar sizes. If one is vector and one is scalar, then
1329 // the GCD type is the GCD between the scalar and the vector element size.
1330 LLT OrigScalar = OrigTy.getScalarType();
1331 LLT TargetScalar = TargetTy.getScalarType();
1332 unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
1333 TargetScalar.getSizeInBits().getFixedValue());
1334 return LLT::scalar(GCD);
1335}
1336
1338 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1339 "Only G_SHUFFLE_VECTOR can have a splat index!");
1340 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1341 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1342
1343 // If all elements are undefined, this shuffle can be considered a splat.
1344 // Return 0 for better potential for callers to simplify.
1345 if (FirstDefinedIdx == Mask.end())
1346 return 0;
1347
1348 // Make sure all remaining elements are either undef or the same
1349 // as the first non-undef value.
1350 int SplatValue = *FirstDefinedIdx;
1351 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1352 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1353 return std::nullopt;
1354
1355 return SplatValue;
1356}
1357
1358static bool isBuildVectorOp(unsigned Opcode) {
1359 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1360 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1361}
1362
1363namespace {
1364
1365std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1366 const MachineRegisterInfo &MRI,
1367 bool AllowUndef) {
1369 if (!MI)
1370 return std::nullopt;
1371
1372 bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1373 if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
1374 return std::nullopt;
1375
1376 std::optional<ValueAndVReg> SplatValAndReg;
1377 for (MachineOperand &Op : MI->uses()) {
1378 Register Element = Op.getReg();
1379 // If we have a G_CONCAT_VECTOR, we recursively look into the
1380 // vectors that we're concatenating to see if they're splats.
1381 auto ElementValAndReg =
1382 isConcatVectorsOp
1383 ? getAnyConstantSplat(Element, MRI, AllowUndef)
1385
1386 // If AllowUndef, treat undef as value that will result in a constant splat.
1387 if (!ElementValAndReg) {
1388 if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1389 continue;
1390 return std::nullopt;
1391 }
1392
1393 // Record splat value
1394 if (!SplatValAndReg)
1395 SplatValAndReg = ElementValAndReg;
1396
1397 // Different constant than the one already recorded, not a constant splat.
1398 if (SplatValAndReg->Value != ElementValAndReg->Value)
1399 return std::nullopt;
1400 }
1401
1402 return SplatValAndReg;
1403}
1404
1405} // end anonymous namespace
1406
1408 const MachineRegisterInfo &MRI,
1409 int64_t SplatValue, bool AllowUndef) {
1410 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1411 return SplatValAndReg->Value.getSExtValue() == SplatValue;
1412
1413 return false;
1414}
1415
1417 const MachineRegisterInfo &MRI,
1418 const APInt &SplatValue,
1419 bool AllowUndef) {
1420 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef)) {
1421 if (SplatValAndReg->Value.getBitWidth() < SplatValue.getBitWidth())
1422 return APInt::isSameValue(
1423 SplatValAndReg->Value.sext(SplatValue.getBitWidth()), SplatValue);
1424 return APInt::isSameValue(
1425 SplatValAndReg->Value,
1426 SplatValue.sext(SplatValAndReg->Value.getBitWidth()));
1427 }
1428
1429 return false;
1430}
1431
1433 const MachineRegisterInfo &MRI,
1434 int64_t SplatValue, bool AllowUndef) {
1435 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1436 AllowUndef);
1437}
1438
1440 const MachineRegisterInfo &MRI,
1441 const APInt &SplatValue,
1442 bool AllowUndef) {
1443 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1444 AllowUndef);
1445}
1446
1447std::optional<APInt>
1449 if (auto SplatValAndReg =
1450 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1451 if (std::optional<ValueAndVReg> ValAndVReg =
1452 getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1453 return ValAndVReg->Value;
1454 }
1455
1456 return std::nullopt;
1457}
1458
1459std::optional<APInt>
1461 const MachineRegisterInfo &MRI) {
1462 return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1463}
1464
1465std::optional<int64_t>
1467 const MachineRegisterInfo &MRI) {
1468 if (auto SplatValAndReg =
1469 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1470 return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1471 return std::nullopt;
1472}
1473
1474std::optional<int64_t>
1476 const MachineRegisterInfo &MRI) {
1477 return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1478}
1479
1480std::optional<FPValueAndVReg>
1482 bool AllowUndef) {
1483 if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1484 return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1485 return std::nullopt;
1486}
1487
1489 const MachineRegisterInfo &MRI,
1490 bool AllowUndef) {
1491 return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1492}
1493
1495 const MachineRegisterInfo &MRI,
1496 bool AllowUndef) {
1497 return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1498}
1499
1500std::optional<RegOrConstant>
1502 unsigned Opc = MI.getOpcode();
1503 if (!isBuildVectorOp(Opc))
1504 return std::nullopt;
1505 if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1506 return RegOrConstant(*Splat);
1507 auto Reg = MI.getOperand(1).getReg();
1508 if (any_of(drop_begin(MI.operands(), 2),
1509 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1510 return std::nullopt;
1511 return RegOrConstant(Reg);
1512}
1513
1515 const MachineRegisterInfo &MRI,
1516 bool AllowFP = true,
1517 bool AllowOpaqueConstants = true) {
1518 switch (MI.getOpcode()) {
1519 case TargetOpcode::G_CONSTANT:
1520 case TargetOpcode::G_IMPLICIT_DEF:
1521 return true;
1522 case TargetOpcode::G_FCONSTANT:
1523 return AllowFP;
1524 case TargetOpcode::G_GLOBAL_VALUE:
1525 case TargetOpcode::G_FRAME_INDEX:
1526 case TargetOpcode::G_BLOCK_ADDR:
1527 case TargetOpcode::G_JUMP_TABLE:
1528 return AllowOpaqueConstants;
1529 default:
1530 return false;
1531 }
1532}
1533
1535 const MachineRegisterInfo &MRI) {
1536 Register Def = MI.getOperand(0).getReg();
1537 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1538 return true;
1540 if (!BV)
1541 return false;
1542 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1545 continue;
1546 return false;
1547 }
1548 return true;
1549}
1550
1552 const MachineRegisterInfo &MRI,
1553 bool AllowFP, bool AllowOpaqueConstants) {
1554 if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1555 return true;
1556
1557 if (!isBuildVectorOp(MI.getOpcode()))
1558 return false;
1559
1560 const unsigned NumOps = MI.getNumOperands();
1561 for (unsigned I = 1; I != NumOps; ++I) {
1562 const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1563 if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1564 return false;
1565 }
1566
1567 return true;
1568}
1569
1570std::optional<APInt>
1572 const MachineRegisterInfo &MRI) {
1573 Register Def = MI.getOperand(0).getReg();
1574 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1575 return C->Value;
1576 auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1577 if (!MaybeCst)
1578 return std::nullopt;
1579 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1580 return APInt(ScalarSize, *MaybeCst, true);
1581}
1582
1583std::optional<APFloat>
1585 const MachineRegisterInfo &MRI) {
1586 Register Def = MI.getOperand(0).getReg();
1587 if (auto FpConst = getFConstantVRegValWithLookThrough(Def, MRI))
1588 return FpConst->Value;
1589 auto MaybeCstFP = getFConstantSplat(Def, MRI, /*allowUndef=*/false);
1590 if (!MaybeCstFP)
1591 return std::nullopt;
1592 return MaybeCstFP->Value;
1593}
1594
1596 const MachineRegisterInfo &MRI, bool AllowUndefs) {
1597 switch (MI.getOpcode()) {
1598 case TargetOpcode::G_IMPLICIT_DEF:
1599 return AllowUndefs;
1600 case TargetOpcode::G_CONSTANT:
1601 return MI.getOperand(1).getCImm()->isNullValue();
1602 case TargetOpcode::G_FCONSTANT: {
1603 const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1604 return FPImm->isZero() && !FPImm->isNegative();
1605 }
1606 default:
1607 if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1608 return false;
1609 return isBuildVectorAllZeros(MI, MRI);
1610 }
1611}
1612
1614 const MachineRegisterInfo &MRI,
1615 bool AllowUndefs) {
1616 switch (MI.getOpcode()) {
1617 case TargetOpcode::G_IMPLICIT_DEF:
1618 return AllowUndefs;
1619 case TargetOpcode::G_CONSTANT:
1620 return MI.getOperand(1).getCImm()->isAllOnesValue();
1621 default:
1622 if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1623 return false;
1624 return isBuildVectorAllOnes(MI, MRI);
1625 }
1626}
1627
1629 const MachineRegisterInfo &MRI, Register Reg,
1630 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1631
1632 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1633 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1634 return Match(nullptr);
1635
1636 // TODO: Also handle fconstant
1637 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1638 return Match(Def->getOperand(1).getCImm());
1639
1640 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1641 return false;
1642
1643 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1644 Register SrcElt = Def->getOperand(I).getReg();
1645 const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1646 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1647 if (!Match(nullptr))
1648 return false;
1649 continue;
1650 }
1651
1652 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1653 !Match(SrcDef->getOperand(1).getCImm()))
1654 return false;
1655 }
1656
1657 return true;
1658}
1659
1660bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1661 bool IsFP) {
1662 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1664 return Val & 0x1;
1666 return Val == 1;
1668 return Val == -1;
1669 }
1670 llvm_unreachable("Invalid boolean contents");
1671}
1672
1673bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1674 bool IsVector, bool IsFP) {
1675 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1677 return ~Val & 0x1;
1680 return Val == 0;
1681 }
1682 llvm_unreachable("Invalid boolean contents");
1683}
1684
1685int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1686 bool IsFP) {
1687 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1690 return 1;
1692 return -1;
1693 }
1694 llvm_unreachable("Invalid boolean contents");
1695}
1696
1698 LostDebugLocObserver *LocObserver,
1699 SmallInstListTy &DeadInstChain) {
1700 for (MachineOperand &Op : MI.uses()) {
1701 if (Op.isReg() && Op.getReg().isVirtual())
1702 DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1703 }
1704 LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1705 DeadInstChain.remove(&MI);
1706 MI.eraseFromParent();
1707 if (LocObserver)
1708 LocObserver->checkpoint(false);
1709}
1710
1713 LostDebugLocObserver *LocObserver) {
1714 SmallInstListTy DeadInstChain;
1715 for (MachineInstr *MI : DeadInstrs)
1716 saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1717
1718 while (!DeadInstChain.empty()) {
1719 MachineInstr *Inst = DeadInstChain.pop_back_val();
1720 if (!isTriviallyDead(*Inst, MRI))
1721 continue;
1722 saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1723 }
1724}
1725
1727 LostDebugLocObserver *LocObserver) {
1728 return eraseInstrs({&MI}, MRI, LocObserver);
1729}
1730
1732 for (auto &Def : MI.defs()) {
1733 assert(Def.isReg() && "Must be a reg");
1734
1736 for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1737 MachineInstr *DbgValue = MOUse.getParent();
1738 // Ignore partially formed DBG_VALUEs.
1739 if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1740 DbgUsers.push_back(&MOUse);
1741 }
1742 }
1743
1744 if (!DbgUsers.empty()) {
1746 }
1747 }
1748}
1749
1751 switch (Opc) {
1752 case TargetOpcode::G_FABS:
1753 case TargetOpcode::G_FADD:
1754 case TargetOpcode::G_FCANONICALIZE:
1755 case TargetOpcode::G_FCEIL:
1756 case TargetOpcode::G_FCONSTANT:
1757 case TargetOpcode::G_FCOPYSIGN:
1758 case TargetOpcode::G_FCOS:
1759 case TargetOpcode::G_FDIV:
1760 case TargetOpcode::G_FEXP2:
1761 case TargetOpcode::G_FEXP:
1762 case TargetOpcode::G_FFLOOR:
1763 case TargetOpcode::G_FLOG10:
1764 case TargetOpcode::G_FLOG2:
1765 case TargetOpcode::G_FLOG:
1766 case TargetOpcode::G_FMA:
1767 case TargetOpcode::G_FMAD:
1768 case TargetOpcode::G_FMAXIMUM:
1769 case TargetOpcode::G_FMAXIMUMNUM:
1770 case TargetOpcode::G_FMAXNUM:
1771 case TargetOpcode::G_FMAXNUM_IEEE:
1772 case TargetOpcode::G_FMINIMUM:
1773 case TargetOpcode::G_FMINIMUMNUM:
1774 case TargetOpcode::G_FMINNUM:
1775 case TargetOpcode::G_FMINNUM_IEEE:
1776 case TargetOpcode::G_FMUL:
1777 case TargetOpcode::G_FNEARBYINT:
1778 case TargetOpcode::G_FNEG:
1779 case TargetOpcode::G_FPEXT:
1780 case TargetOpcode::G_FPOW:
1781 case TargetOpcode::G_FPTRUNC:
1782 case TargetOpcode::G_FREM:
1783 case TargetOpcode::G_FRINT:
1784 case TargetOpcode::G_FSIN:
1785 case TargetOpcode::G_FTAN:
1786 case TargetOpcode::G_FACOS:
1787 case TargetOpcode::G_FASIN:
1788 case TargetOpcode::G_FATAN:
1789 case TargetOpcode::G_FATAN2:
1790 case TargetOpcode::G_FCOSH:
1791 case TargetOpcode::G_FSINH:
1792 case TargetOpcode::G_FTANH:
1793 case TargetOpcode::G_FSQRT:
1794 case TargetOpcode::G_FSUB:
1795 case TargetOpcode::G_INTRINSIC_ROUND:
1796 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1797 case TargetOpcode::G_INTRINSIC_TRUNC:
1798 return true;
1799 default:
1800 return false;
1801 }
1802}
1803
1804/// Shifts return poison if shiftwidth is larger than the bitwidth.
1805static bool shiftAmountKnownInRange(Register ShiftAmount,
1806 const MachineRegisterInfo &MRI) {
1807 LLT Ty = MRI.getType(ShiftAmount);
1808
1809 if (Ty.isScalableVector())
1810 return false; // Can't tell, just return false to be safe
1811
1812 if (Ty.isScalar()) {
1813 std::optional<ValueAndVReg> Val =
1815 if (!Val)
1816 return false;
1817 return Val->Value.ult(Ty.getScalarSizeInBits());
1818 }
1819
1820 GBuildVector *BV = getOpcodeDef<GBuildVector>(ShiftAmount, MRI);
1821 if (!BV)
1822 return false;
1823
1824 unsigned Sources = BV->getNumSources();
1825 for (unsigned I = 0; I < Sources; ++I) {
1826 std::optional<ValueAndVReg> Val =
1828 if (!Val)
1829 return false;
1830 if (!Val->Value.ult(Ty.getScalarSizeInBits()))
1831 return false;
1832 }
1833
1834 return true;
1835}
1836
namespace {
// Bitmask selecting which kinds of "bad" values an analysis cares about.
enum class UndefPoisonKind {
  PoisonOnly = (1 << 0),
  UndefOnly = (1 << 1),
  UndefAndPoison = PoisonOnly | UndefOnly,
};
} // namespace
1844
1846 return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
1847}
1848
1850 return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
1851}
1852
1854 bool ConsiderFlagsAndMetadata,
1855 UndefPoisonKind Kind) {
1856 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1857
1858 if (ConsiderFlagsAndMetadata && includesPoison(Kind))
1859 if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
1860 if (GMI->hasPoisonGeneratingFlags())
1861 return true;
1862
1863 // Check whether opcode is a poison/undef-generating operation.
1864 switch (RegDef->getOpcode()) {
1865 case TargetOpcode::G_BUILD_VECTOR:
1866 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
1867 return false;
1868 case TargetOpcode::G_SHL:
1869 case TargetOpcode::G_ASHR:
1870 case TargetOpcode::G_LSHR:
1871 return includesPoison(Kind) &&
1873 case TargetOpcode::G_FPTOSI:
1874 case TargetOpcode::G_FPTOUI:
1875 // fptosi/ui yields poison if the resulting value does not fit in the
1876 // destination type.
1877 return true;
1878 case TargetOpcode::G_CTLZ:
1879 case TargetOpcode::G_CTTZ:
1880 case TargetOpcode::G_ABS:
1881 case TargetOpcode::G_CTPOP:
1882 case TargetOpcode::G_BSWAP:
1883 case TargetOpcode::G_BITREVERSE:
1884 case TargetOpcode::G_FSHL:
1885 case TargetOpcode::G_FSHR:
1886 case TargetOpcode::G_SMAX:
1887 case TargetOpcode::G_SMIN:
1888 case TargetOpcode::G_SCMP:
1889 case TargetOpcode::G_UMAX:
1890 case TargetOpcode::G_UMIN:
1891 case TargetOpcode::G_UCMP:
1892 case TargetOpcode::G_PTRMASK:
1893 case TargetOpcode::G_SADDO:
1894 case TargetOpcode::G_SSUBO:
1895 case TargetOpcode::G_UADDO:
1896 case TargetOpcode::G_USUBO:
1897 case TargetOpcode::G_SMULO:
1898 case TargetOpcode::G_UMULO:
1899 case TargetOpcode::G_SADDSAT:
1900 case TargetOpcode::G_UADDSAT:
1901 case TargetOpcode::G_SSUBSAT:
1902 case TargetOpcode::G_USUBSAT:
1903 case TargetOpcode::G_SBFX:
1904 case TargetOpcode::G_UBFX:
1905 return false;
1906 case TargetOpcode::G_SSHLSAT:
1907 case TargetOpcode::G_USHLSAT:
1908 return includesPoison(Kind) &&
1910 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1912 if (includesPoison(Kind)) {
1913 std::optional<ValueAndVReg> Index =
1914 getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
1915 if (!Index)
1916 return true;
1917 LLT VecTy = MRI.getType(Insert->getVectorReg());
1918 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1919 }
1920 return false;
1921 }
1922 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1924 if (includesPoison(Kind)) {
1925 std::optional<ValueAndVReg> Index =
1927 if (!Index)
1928 return true;
1929 LLT VecTy = MRI.getType(Extract->getVectorReg());
1930 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1931 }
1932 return false;
1933 }
1934 case TargetOpcode::G_SHUFFLE_VECTOR: {
1935 GShuffleVector *Shuffle = cast<GShuffleVector>(RegDef);
1936 ArrayRef<int> Mask = Shuffle->getMask();
1937 return includesPoison(Kind) && is_contained(Mask, -1);
1938 }
1939 case TargetOpcode::G_FNEG:
1940 case TargetOpcode::G_PHI:
1941 case TargetOpcode::G_SELECT:
1942 case TargetOpcode::G_UREM:
1943 case TargetOpcode::G_SREM:
1944 case TargetOpcode::G_FREEZE:
1945 case TargetOpcode::G_ICMP:
1946 case TargetOpcode::G_FCMP:
1947 case TargetOpcode::G_FADD:
1948 case TargetOpcode::G_FSUB:
1949 case TargetOpcode::G_FMUL:
1950 case TargetOpcode::G_FDIV:
1951 case TargetOpcode::G_FREM:
1952 case TargetOpcode::G_PTR_ADD:
1953 return false;
1954 default:
1955 return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
1956 }
1957}
1958
1960 const MachineRegisterInfo &MRI,
1961 unsigned Depth,
1962 UndefPoisonKind Kind) {
1964 return false;
1965
1966 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1967
1968 switch (RegDef->getOpcode()) {
1969 case TargetOpcode::G_FREEZE:
1970 return true;
1971 case TargetOpcode::G_IMPLICIT_DEF:
1972 return !includesUndef(Kind);
1973 case TargetOpcode::G_CONSTANT:
1974 case TargetOpcode::G_FCONSTANT:
1975 return true;
1976 case TargetOpcode::G_BUILD_VECTOR: {
1977 GBuildVector *BV = cast<GBuildVector>(RegDef);
1978 unsigned NumSources = BV->getNumSources();
1979 for (unsigned I = 0; I < NumSources; ++I)
1981 Depth + 1, Kind))
1982 return false;
1983 return true;
1984 }
1985 case TargetOpcode::G_PHI: {
1986 GPhi *Phi = cast<GPhi>(RegDef);
1987 unsigned NumIncoming = Phi->getNumIncomingValues();
1988 for (unsigned I = 0; I < NumIncoming; ++I)
1989 if (!::isGuaranteedNotToBeUndefOrPoison(Phi->getIncomingValue(I), MRI,
1990 Depth + 1, Kind))
1991 return false;
1992 return true;
1993 }
1994 default: {
1995 auto MOCheck = [&](const MachineOperand &MO) {
1996 if (!MO.isReg())
1997 return true;
1998 return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,
1999 Kind);
2000 };
2002 /*ConsiderFlagsAndMetadata=*/true, Kind) &&
2003 all_of(RegDef->uses(), MOCheck);
2004 }
2005 }
2006}
2007
2009 bool ConsiderFlagsAndMetadata) {
2010 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
2012}
2013
2015 bool ConsiderFlagsAndMetadata = true) {
2016 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
2018}
2019
2021 const MachineRegisterInfo &MRI,
2022 unsigned Depth) {
2023 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2025}
2026
2028 const MachineRegisterInfo &MRI,
2029 unsigned Depth) {
2030 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2032}
2033
2035 const MachineRegisterInfo &MRI,
2036 unsigned Depth) {
2037 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2039}
2040
2042 if (Ty.isVector())
2043 return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
2044 Ty.getElementCount());
2045 return IntegerType::get(C, Ty.getSizeInBits());
2046}
2047
2049 switch (MI.getOpcode()) {
2050 default:
2051 return false;
2052 case TargetOpcode::G_ASSERT_ALIGN:
2053 case TargetOpcode::G_ASSERT_SEXT:
2054 case TargetOpcode::G_ASSERT_ZEXT:
2055 return true;
2056 }
2057}
2058
2060 assert(Kind == GIConstantKind::Scalar && "Expected scalar constant");
2061
2062 return Value;
2063}
2064
2065std::optional<GIConstant>
2068
2070 std::optional<ValueAndVReg> MayBeConstant =
2072 if (!MayBeConstant)
2073 return std::nullopt;
2074 return GIConstant(MayBeConstant->Value, GIConstantKind::ScalableVector);
2075 }
2076
2078 SmallVector<APInt> Values;
2079 unsigned NumSources = Build->getNumSources();
2080 for (unsigned I = 0; I < NumSources; ++I) {
2081 Register SrcReg = Build->getSourceReg(I);
2082 std::optional<ValueAndVReg> MayBeConstant =
2084 if (!MayBeConstant)
2085 return std::nullopt;
2086 Values.push_back(MayBeConstant->Value);
2087 }
2088 return GIConstant(Values);
2089 }
2090
2091 std::optional<ValueAndVReg> MayBeConstant =
2093 if (!MayBeConstant)
2094 return std::nullopt;
2095
2096 return GIConstant(MayBeConstant->Value, GIConstantKind::Scalar);
2097}
2098
2100 assert(Kind == GFConstantKind::Scalar && "Expected scalar constant");
2101
2102 return Values[0];
2103}
2104
2105std::optional<GFConstant>
2108
2110 std::optional<FPValueAndVReg> MayBeConstant =
2112 if (!MayBeConstant)
2113 return std::nullopt;
2114 return GFConstant(MayBeConstant->Value, GFConstantKind::ScalableVector);
2115 }
2116
2118 SmallVector<APFloat> Values;
2119 unsigned NumSources = Build->getNumSources();
2120 for (unsigned I = 0; I < NumSources; ++I) {
2121 Register SrcReg = Build->getSourceReg(I);
2122 std::optional<FPValueAndVReg> MayBeConstant =
2124 if (!MayBeConstant)
2125 return std::nullopt;
2126 Values.push_back(MayBeConstant->Value);
2127 }
2128 return GFConstant(Values);
2129 }
2130
2131 std::optional<FPValueAndVReg> MayBeConstant =
2133 if (!MayBeConstant)
2134 return std::nullopt;
2135
2136 return GFConstant(MayBeConstant->Value, GFConstantKind::Scalar);
2137}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void reportGISelDiagnostic(DiagnosticSeverity Severity, MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Definition Utils.cpp:235
static bool includesPoison(UndefPoisonKind Kind)
Definition Utils.cpp:1845
static bool includesUndef(UndefPoisonKind Kind)
Definition Utils.cpp:1849
static bool shiftAmountKnownInRange(Register ShiftAmount, const MachineRegisterInfo &MRI)
Shifts return poison if shiftwidth is larger than the bitwidth.
Definition Utils.cpp:1805
static bool isBuildVectorOp(unsigned Opcode)
Definition Utils.cpp:1358
static bool isConstantScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Definition Utils.cpp:1514
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Tracks DebugLocs between checkpoints and verifies that they are transferred.
#define I(x, y, z)
Definition MD5.cpp:57
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*-—===//
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define LLVM_DEBUG(...)
Definition Debug.h:114
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
UndefPoisonKind
static const char PassName[]
Class recording the (high level) value of a variable.
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static const fltSemantics & IEEEhalf()
Definition APFloat.h:294
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1190
void copySign(const APFloat &RHS)
Definition APFloat.h:1284
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition APFloat.cpp:6053
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1172
opStatus add(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1163
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition APFloat.h:1329
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1181
bool isSignaling() const
Definition APFloat.h:1433
APInt bitcastToAPInt() const
Definition APFloat.h:1335
opStatus mod(const APFloat &RHS)
Definition APFloat.h:1208
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition APInt.cpp:1573
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1012
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1033
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1666
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1489
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition APInt.cpp:1644
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1041
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:828
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition APInt.cpp:1736
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:985
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static bool isSameValue(const APInt &I1, const APInt &I2)
Determine if two APInts have the same value, after zero-extending one of them (if needed!...
Definition APInt.h:554
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:852
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:285
const APFloat & getValueAPF() const
Definition Constants.h:328
bool isNegative() const
Return true if the sign bit is set.
Definition Constants.h:335
bool isZero() const
Return true if the value is positive or negative zero.
Definition Constants.h:332
This is the shared class of boolean and integer constants.
Definition Constants.h:87
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:162
This is an important base class in LLVM.
Definition Constant.h:43
A debug info location.
Definition DebugLoc.h:123
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
Represents a G_BUILD_VECTOR.
Represents an extract vector element.
static LLVM_ABI std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
Definition Utils.cpp:2106
GFConstant(ArrayRef< APFloat > Values)
Definition Utils.h:700
LLVM_ABI APFloat getScalarValue() const
Returns the value, if this constant is a scalar.
Definition Utils.cpp:2099
LLVM_ABI APInt getScalarValue() const
Returns the value, if this constant is a scalar.
Definition Utils.cpp:2059
static LLVM_ABI std::optional< GIConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
Definition Utils.cpp:2066
GIConstant(ArrayRef< APInt > Values)
Definition Utils.h:659
Abstract class that contains various methods for clients to notify about changes.
KnownBits getKnownBits(Register R)
void insert(MachineInstr *I)
Add the specified instruction to the worklist if it isn't already in it.
MachineInstr * pop_back_val()
void remove(const MachineInstr *I)
Remove I from the worklist if it exists.
Represents an insert vector element.
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Represents a G_PHI.
Represents a G_SHUFFLE_VECTOR.
ArrayRef< int > getMask() const
Represents a splat vector.
Module * getParent()
Get the module that this global value is contained inside of...
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr ElementCount getElementCount() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
constexpr LLT getScalarType() const
static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
Describe properties that are true of each instruction in the target description file.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
GISelChangeObserver * getObserver() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Helper class to build MachineInstr.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
mop_range uses()
Returns all operands which may be register uses.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Diagnostic information for missed-optimization remarks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Represents a value which can be a Register or a constant.
Definition Utils.h:404
Holds all the information related to register banks.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
TargetInstrInfo - Interface to description of machine instruction set.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
TargetOptions Options
GlobalISelAbortMode GlobalISelAbort
EnableGlobalISelAbort - Control abort behaviour when global instruction selection fails to lower/sele...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM Value Representation.
Definition Value.h:75
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition APInt.h:2249
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition APInt.h:2254
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition APInt.h:2259
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition APInt.h:2264
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
DiagnosticInfoMIROptimization::MachineArgument MNV
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII, MCRegister PhysReg, const TargetRegisterClass &RC, const DebugLoc &DL, LLT RegTy=LLT())
Return a virtual register corresponding to the incoming argument register PhysReg.
Definition Utils.cpp:922
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
LLVM_ABI std::optional< SmallVector< APInt > > ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2, unsigned DstScalarSizeInBits, unsigned ExtOp, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1041
@ Offset
Definition DWP.cpp:532
LLVM_ABI bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition Utils.cpp:1488
LLVM_ABI Type * getTypeForLLT(LLT Ty, LLVMContext &C)
Get the type back from LLT.
Definition Utils.cpp:2041
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition Utils.cpp:56
LLVM_ABI MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
Definition Utils.cpp:654
LLVM_ABI const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:462
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:295
LLVM_ABI std::optional< APFloat > ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
Definition Utils.cpp:996
LLVM_ABI std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1448
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition Utils.cpp:1613
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
LLVM_ABI std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition Utils.cpp:742
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1731
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:155
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI std::optional< SmallVector< unsigned > > ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI, std::function< unsigned(APInt)> CB)
Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on Src.
Definition Utils.cpp:1009
LLVM_ABI std::optional< APInt > ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI)
Definition Utils.cpp:955
LLVM_ABI std::optional< RegOrConstant > getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1501
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition APFloat.h:1625
GISelWorkList< 4 > SmallInstListTy
Definition Utils.h:579
LLVM_ABI std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
Definition Utils.cpp:1571
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition Utils.cpp:1595
LLVM_ABI MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition Utils.cpp:495
LLVM_ABI bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
Definition Utils.cpp:1628
bool isPreISelGenericOptimizationHint(unsigned Opcode)
LLVM_ABI void reportGISelWarning(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel warning as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:253
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
Definition Utils.cpp:1660
LLVM_ABI LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition Utils.cpp:1195
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:315
LLVM_ABI std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition Utils.cpp:673
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI const APInt & getIConstantFromReg(Register VReg, const MachineRegisterInfo &MRI)
VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:306
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
Definition APFloat.h:1580
LLVM_ABI bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
Definition Utils.cpp:1551
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
LLVM_ABI bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
Definition Utils.cpp:201
LLVM_ABI void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver, SmallInstListTy &DeadInstChain)
Definition Utils.cpp:1697
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:259
LLVM_ABI std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
Definition Utils.cpp:440
LLVM_ABI bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition Utils.cpp:1494
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
LLVM_ABI SmallVector< APInt > ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Tries to constant fold a vector binop with sources Op1 and Op2.
Definition Utils.cpp:800
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
Definition Utils.cpp:1481
LLVM_ABI std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
Definition Utils.cpp:972
LLVM_ABI void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
Definition Utils.cpp:509
LLVM_ABI void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition Utils.cpp:1191
LLVM_ABI LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
Definition Utils.cpp:1262
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
Definition APFloat.h:1561
LLVM_ABI unsigned getInverseGMinMaxOpcode(unsigned MinMaxOpc)
Returns the inverse opcode of MinMaxOpc, which is a generic min/max opcode like G_SMIN.
Definition Utils.cpp:280
@ Mul
Product of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
LLVM_ABI std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
Definition Utils.cpp:448
LLVM_ABI bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Definition Utils.cpp:1673
LLVM_ABI std::optional< APFloat > isConstantOrConstantSplatVectorFP(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a float constant integer or a splat vector of float constant integers.
Definition Utils.cpp:1584
constexpr unsigned BitWidth
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition Utils.cpp:660
LLVM_ABI bool isBuildVectorConstantSplat(const Register Reg, const MachineRegisterInfo &MRI, int64_t SplatValue, bool AllowUndef)
Return true if the specified register is defined by G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all ...
Definition Utils.cpp:1407
LLVM_ABI void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition Utils.cpp:1726
DiagnosticSeverity
Defines the different supported severity of a diagnostic.
LLVM_ABI Register constrainRegToClass(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, Register Reg, const TargetRegisterClass &RegClass)
Try to constrain Reg to the specified register class.
Definition Utils.cpp:46
LLVM_ABI int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
Definition Utils.cpp:1685
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition Utils.cpp:434
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1770
LLVM_ABI bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition Utils.cpp:1750
bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI)
Returns true if Val can be assumed to never be a signaling NaN.
Definition Utils.h:349
LLVM_ABI std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
Definition Utils.cpp:470
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition Utils.cpp:1711
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI, MachineInstr &MI, ArrayRef< MachineOperand * > DbgUsers)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
Definition Utils.cpp:502
LLVM_ABI LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition Utils.cpp:1283
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition APFloat.h:1598
LLVM_ABI std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1466
LLVM_ABI bool isAssertMI(const MachineInstr &MI)
Returns true if the instruction MI is one of the assert instructions.
Definition Utils.cpp:2048
LLVM_ABI void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
Definition Utils.cpp:612
LLVM_ABI int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
Definition Utils.cpp:222
LLVM_ABI Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition Utils.cpp:905
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:180
#define MORE()
Definition regcomp.c:246
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Simple struct used to hold a Register value and the instruction which defines it.
Definition Utils.h:231
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:289
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition KnownBits.h:286
This class contains a discriminated union of information about pointers in memory operands,...
int64_t Offset
Offset - This is an offset from the base Value*.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
Simple struct used to hold a constant integer value and a virtual register.
Definition Utils.h:190