LLVM 23.0.0git
Utils.cpp
Go to the documentation of this file.
1//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file This file implements the utility functions used by the GlobalISel
9/// pipeline.
10//===----------------------------------------------------------------------===//
11
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
35#include "llvm/IR/Constants.h"
38#include <numeric>
39#include <optional>
40
41#define DEBUG_TYPE "globalisel-utils"
42
43using namespace llvm;
44using namespace MIPatternMatch;
45
47 const TargetInstrInfo &TII,
48 const RegisterBankInfo &RBI, Register Reg,
49 const TargetRegisterClass &RegClass) {
// Constrain Reg to RegClass in place when compatible; otherwise return a
// brand-new virtual register of RegClass (caller is expected to wire up a
// COPY between the two).
50 if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
51 return MRI.createVirtualRegister(&RegClass);
52
53 return Reg;
54}
55
57 const MachineFunction &MF, const TargetRegisterInfo &TRI,
59 const RegisterBankInfo &RBI, MachineInstr &InsertPt,
60 const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
// Constrain the register behind RegMO to RegClass. If the existing
// constraints are incompatible, a fresh vreg is created and a COPY is
// inserted (before InsertPt for uses, after it for defs); the MF's change
// observer, if any, is notified of every instruction we mutate.
61 Register Reg = RegMO.getReg();
62 // Assume physical registers are properly constrained.
63 assert(Reg.isVirtual() && "PhysReg not implemented");
64
65 // Save the old register class to check whether
66 // the change notifications will be required.
67 // TODO: A better approach would be to pass
68 // the observers to constrainRegToClass().
69 auto *OldRegClass = MRI.getRegClassOrNull(Reg);
70 Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
71 // If we created a new virtual register because the class is not compatible
72 // then create a copy between the new and the old register.
73 if (ConstrainedReg != Reg) {
74 MachineBasicBlock::iterator InsertIt(&InsertPt);
75 MachineBasicBlock &MBB = *InsertPt.getParent();
76 // FIXME: The copy needs to have the classes constrained for its operands.
77 // Use operand's regbank to get the class for old register (Reg).
78 if (RegMO.isUse()) {
// For a use: the constrained vreg is fed from the old one just before the
// instruction that consumes it.
79 BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
80 TII.get(TargetOpcode::COPY), ConstrainedReg)
81 .addReg(Reg);
82 } else {
83 assert(RegMO.isDef() && "Must be a definition");
// For a def: the old vreg is copied out of the constrained one right after
// the defining instruction, so downstream users keep seeing Reg.
84 BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
85 TII.get(TargetOpcode::COPY), Reg)
86 .addReg(ConstrainedReg);
87 }
88 if (GISelChangeObserver *Observer = MF.getObserver()) {
89 Observer->changingInstr(*RegMO.getParent());
90 }
91 RegMO.setReg(ConstrainedReg);
92 if (GISelChangeObserver *Observer = MF.getObserver()) {
93 Observer->changedInstr(*RegMO.getParent());
94 }
95 } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
// Same vreg, but its class changed: notify observers about the def and all
// uses since their constraints may have been tightened.
96 if (GISelChangeObserver *Observer = MF.getObserver()) {
97 if (!RegMO.isDef()) {
98 MachineInstr *RegDef = MRI.getVRegDef(Reg);
99 Observer->changedInstr(*RegDef);
100 }
101 Observer->changingAllUsesOfReg(MRI, Reg);
102 Observer->finishedChangingAllUsesOfReg();
103 }
104 }
105 return ConstrainedReg;
106}
107
109 const MachineFunction &MF, const TargetRegisterInfo &TRI,
111 const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
112 MachineOperand &RegMO, unsigned OpIdx) {
// Overload that derives the register class from the instruction description
// (operand OpIdx of II) and then defers to the RegClass-taking overload.
113 Register Reg = RegMO.getReg();
114 // Assume physical registers are properly constrained.
115 assert(Reg.isVirtual() && "PhysReg not implemented");
116
117 const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx);
118 // Some of the target independent instructions, like COPY, may not impose any
119 // register class constraints on some of their operands: If it's a use, we can
120 // skip constraining as the instruction defining the register would constrain
121 // it.
122
123 if (OpRC) {
124 // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
125 // can have multiple regbanks for a superclass that combine different
126 // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
127 // resolved by targets during regbankselect should not be overridden.
128 if (const auto *SubRC = TRI.getCommonSubClass(
129 OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
130 OpRC = SubRC;
131
132 OpRC = TRI.getAllocatableClass(OpRC);
133 }
134
135 if (!OpRC) {
136 assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
137 "Register class constraint is required unless either the "
138 "instruction is target independent or the operand is a use");
139 // FIXME: Just bailing out like this here could be not enough, unless we
140 // expect the users of this function to do the right thing for PHIs and
141 // COPY:
142 // v1 = COPY v0
143 // v2 = COPY v1
144 // v1 here may end up not being constrained at all. Please notice that to
145 // reproduce the issue we likely need a destination pattern of a selection
146 // rule producing such extra copies, not just an input GMIR with them as
147 // every existing target using selectImpl handles copies before calling it
148 // and they never reach this function.
149 return Reg;
150 }
151 return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
152 RegMO);
153}
154
156 const TargetInstrInfo &TII,
157 const TargetRegisterInfo &TRI,
158 const RegisterBankInfo &RBI) {
// Constrain every explicit virtual-register operand of an already-selected
// instruction I to the class required by its MCInstrDesc, and apply any
// TIED_TO constraints that have not been applied yet.
159 assert(!isPreISelGenericOpcode(I.getOpcode()) &&
160 "A selected instruction is expected");
161 MachineBasicBlock &MBB = *I.getParent();
162 MachineFunction &MF = *MBB.getParent();
// NOTE(review): the scraped listing elides original line 163 here
// (presumably the local MachineRegisterInfo reference) — verify upstream.
164
165 for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
166 MachineOperand &MO = I.getOperand(OpI);
167
168 // There's nothing to be done on non-register operands.
169 if (!MO.isReg())
170 continue;
171
172 LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
173 assert(MO.isReg() && "Unsupported non-reg operand");
174
175 Register Reg = MO.getReg();
176 // Physical registers don't need to be constrained.
177 if (Reg.isPhysical())
178 continue;
179
180 // Register operands with a value of 0 (e.g. predicate operands) don't need
181 // to be constrained.
182 if (Reg == 0)
183 continue;
184
185 // If the operand is a vreg, we should constrain its regclass, and only
186 // insert COPYs if that's impossible.
187 // constrainOperandRegClass does that for us.
188 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
189
190 // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
191 // done.
192 if (MO.isUse()) {
193 int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
194 if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
195 I.tieOperands(DefIdx, OpI);
196 }
197 }
198}
199
202 // Give up if either DstReg or SrcReg is a physical register.
203 if (DstReg.isPhysical() || SrcReg.isPhysical())
204 return false;
205 // Give up if the types don't match.
206 if (MRI.getType(DstReg) != MRI.getType(SrcReg))
207 return false;
208 // Replace if either DstReg has no constraints or the register
209 // constraints match.
210 const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
211 if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
212 return true;
213
214 // Otherwise match if the Src is already a regclass that is covered by the Dst
215 // RegBank.
// DstRBC holds either a class or a bank; only the bank case can "cover" a
// concrete source class, hence the isa<RegisterBank*> check first.
216 return isa<const RegisterBank *>(DstRBC) && MRI.getRegClassOrNull(SrcReg) &&
217 cast<const RegisterBank *>(DstRBC)->covers(
218 *MRI.getRegClassOrNull(SrcReg));
219}
220
222 const MachineRegisterInfo &MRI) {
// An instruction is trivially dead when none of its defs have non-debug
// users (and no def is physical) and the opcode itself has no side effects.
223 // Instructions without side-effects are dead iff they only define dead regs.
224 // This function is hot and this loop returns early in the common case,
225 // so only perform additional checks before this if absolutely necessary.
226 for (const auto &MO : MI.all_defs()) {
227 Register Reg = MO.getReg();
228 if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
229 return false;
230 }
231 return MI.wouldBeTriviallyDead();
232}
233
235 MachineFunction &MF,
// Shared diagnostic sink: either emits R as an optimization remark or, when
// the severity is an error and GlobalISel abort is enabled, escalates to a
// fatal usage error.
238 bool IsGlobalISelAbortEnabled =
240 bool IsFatal = Severity == DS_Error && IsGlobalISelAbortEnabled;
241 // Print the function name explicitly if we don't have a debug location (which
242 // makes the diagnostic less useful) or if we're going to emit a raw error.
243 if (!R.getLocation().isValid() || IsFatal)
244 R << (" (in function: " + MF.getName() + ")").str();
245
246 if (IsFatal)
247 reportFatalUsageError(Twine(R.getMsg()));
248 else
249 MORE.emit(R);
250}
251
257
264
267 const char *PassName, StringRef Msg,
268 const MachineInstr &MI) {
// Convenience overload: wrap Msg (and, when expensive remarks are on, the
// offending instruction) into a remark and forward to the remark-taking
// reportGISelFailure.
269 MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
270 MI.getDebugLoc(), MI.getParent());
271 R << Msg;
272 // Printing MI is expensive; only do it if expensive remarks are enabled.
274 MORE.allowExtraAnalysis(PassName))
275 R << ": " << ore::MNV("Inst", MI);
276 reportGISelFailure(MF, MORE, R);
277}
278
279unsigned llvm::getInverseGMinMaxOpcode(unsigned MinMaxOpc) {
280 switch (MinMaxOpc) {
281 case TargetOpcode::G_SMIN:
282 return TargetOpcode::G_SMAX;
283 case TargetOpcode::G_SMAX:
284 return TargetOpcode::G_SMIN;
285 case TargetOpcode::G_UMIN:
286 return TargetOpcode::G_UMAX;
287 case TargetOpcode::G_UMAX:
288 return TargetOpcode::G_UMIN;
289 default:
290 llvm_unreachable("unrecognized opcode");
291 }
292}
293
294std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
295 const MachineRegisterInfo &MRI) {
296 std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
297 VReg, MRI, /*LookThroughInstrs*/ false);
298 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
299 "Value found while looking through instrs");
300 if (!ValAndVReg)
301 return std::nullopt;
302 return ValAndVReg->Value;
303}
304
306 const MachineRegisterInfo &MRI) {
// Unconditional variant: Reg MUST be defined by a G_CONSTANT (asserted);
// returns that constant's value.
307 MachineInstr *Const = MRI.getVRegDef(Reg);
308 assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&
309 "expected a G_CONSTANT on Reg");
310 return Const->getOperand(1).getCImm()->getValue();
311}
312
313std::optional<int64_t>
// Like getIConstantVRegVal, but narrowed to int64_t: only constants of
// 64 bits or fewer are returned (sign-extended); wider ones yield nullopt.
315 std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
316 if (Val && Val->getBitWidth() <= 64)
317 return Val->getSExtValue();
318 return std::nullopt;
319}
320
321namespace {
322
323// This function is used in many places, and as such, it has some
324// micro-optimizations to try and make it as fast as it can be.
325//
326// - We use template arguments to avoid an indirect call caused by passing a
327// function_ref/std::function
328// - GetAPCstValue does not return std::optional<APInt> as that's expensive.
329// Instead it returns true/false and places the result in a pre-constructed
330// APInt.
331//
332// Please change this function carefully and benchmark your changes.
333template <bool (*IsConstantOpcode)(const MachineInstr *),
334 bool (*GetAPCstValue)(const MachineInstr *MI, APInt &)>
335std::optional<ValueAndVReg>
336getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
337 bool LookThroughInstrs = true,
338 bool LookThroughAnyExt = false) {
// Walk up the def chain from VReg through COPY/G_INTTOPTR and (optionally)
// G_ANYEXT/G_TRUNC/G_SEXT/G_ZEXT until a constant (per IsConstantOpcode) is
// found, recording each size-changing opcode so the constant can be replayed
// through them afterwards.
// NOTE(review): the scraped listing elides original lines 339-340 here —
// the declarations of the seen-opcode list and the MI cursor; verify
// upstream before relying on this text.
341
342 while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
343 LookThroughInstrs) {
344 switch (MI->getOpcode()) {
345 case TargetOpcode::G_ANYEXT:
346 if (!LookThroughAnyExt)
347 return std::nullopt;
348 [[fallthrough]];
349 case TargetOpcode::G_TRUNC:
350 case TargetOpcode::G_SEXT:
351 case TargetOpcode::G_ZEXT:
// Record the opcode together with the destination width so the value can be
// re-extended/truncated in reverse order below.
352 SeenOpcodes.push_back(std::make_pair(
353 MI->getOpcode(),
354 MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
355 VReg = MI->getOperand(1).getReg();
356 break;
357 case TargetOpcode::COPY:
358 VReg = MI->getOperand(1).getReg();
// A copy from a physical register ends the walk — its value is unknown.
359 if (VReg.isPhysical())
360 return std::nullopt;
361 break;
362 case TargetOpcode::G_INTTOPTR:
363 VReg = MI->getOperand(1).getReg();
364 break;
365 default:
366 return std::nullopt;
367 }
368 }
369 if (!MI || !IsConstantOpcode(MI))
370 return std::nullopt;
371
372 APInt Val;
373 if (!GetAPCstValue(MI, Val))
374 return std::nullopt;
// Replay the recorded size changes innermost-first to reconstruct the value
// as seen at the original VReg.
375 for (auto &Pair : reverse(SeenOpcodes)) {
376 switch (Pair.first) {
377 case TargetOpcode::G_TRUNC:
378 Val = Val.trunc(Pair.second);
379 break;
380 case TargetOpcode::G_ANYEXT:
381 case TargetOpcode::G_SEXT:
382 Val = Val.sext(Pair.second);
383 break;
384 case TargetOpcode::G_ZEXT:
385 Val = Val.zext(Pair.second);
386 break;
387 }
388 }
389
390 return ValueAndVReg{std::move(Val), VReg};
391}
392
393bool isIConstant(const MachineInstr *MI) {
394 if (!MI)
395 return false;
396 return MI->getOpcode() == TargetOpcode::G_CONSTANT;
397}
398
399bool isFConstant(const MachineInstr *MI) {
400 if (!MI)
401 return false;
402 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
403}
404
405bool isAnyConstant(const MachineInstr *MI) {
406 if (!MI)
407 return false;
408 unsigned Opc = MI->getOpcode();
409 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
410}
411
412bool getCImmAsAPInt(const MachineInstr *MI, APInt &Result) {
413 const MachineOperand &CstVal = MI->getOperand(1);
414 if (!CstVal.isCImm())
415 return false;
416 Result = CstVal.getCImm()->getValue();
417 return true;
418}
419
420bool getCImmOrFPImmAsAPInt(const MachineInstr *MI, APInt &Result) {
// Like getCImmAsAPInt but also accepts FP constants (operand 1 as FPImm).
421 const MachineOperand &CstVal = MI->getOperand(1);
422 if (CstVal.isCImm())
423 Result = CstVal.getCImm()->getValue();
424 else if (CstVal.isFPImm())
// NOTE(review): the scraped listing elides original line 425 here —
// presumably the FPImm-to-APInt bit conversion; verify upstream.
426 else
427 return false;
428 return true;
429}
430
431} // end anonymous namespace
432
434 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
// Public wrapper: integer constants only, via the shared template helper.
435 return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
436 VReg, MRI, LookThroughInstrs);
437}
438
440 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
441 bool LookThroughAnyExt) {
// Public wrapper: accepts both integer and FP constants (FP returned as raw
// bits), optionally looking through G_ANYEXT as well.
442 return getConstantVRegValWithLookThrough<isAnyConstant,
443 getCImmOrFPImmAsAPInt>(
444 VReg, MRI, LookThroughInstrs, LookThroughAnyExt);
445}
446
447std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
448 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
449 auto Reg =
450 getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
451 VReg, MRI, LookThroughInstrs);
452 if (!Reg)
453 return std::nullopt;
454
455 APFloat FloatVal(getFltSemanticForLLT(LLT::scalar(Reg->Value.getBitWidth())),
456 Reg->Value);
457 return FPValueAndVReg{FloatVal, Reg->VReg};
458}
459
460const ConstantFP *
// Return the ConstantFP behind VReg if it is directly defined by a
// G_FCONSTANT; nullptr otherwise. No look-through.
462 MachineInstr *MI = MRI.getVRegDef(VReg);
463 if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
464 return nullptr;
465 return MI->getOperand(1).getFPImm();
466}
467
468std::optional<DefinitionAndSourceRegister>
// Walk up through COPY and generic optimization-hint instructions to the
// real defining instruction, returning both it and the last register whose
// type was still valid along the chain.
470 Register DefSrcReg = Reg;
471 // This assumes that the code is in SSA form, so there should only be one
472 // definition.
473 auto DefIt = MRI.def_begin(Reg);
474 if (DefIt == MRI.def_end())
475 return {};
476 MachineOperand &DefOpnd = *DefIt;
477 MachineInstr *DefMI = DefOpnd.getParent();
478 auto DstTy = MRI.getType(DefOpnd.getReg());
479 if (!DstTy.isValid())
480 return std::nullopt;
481 unsigned Opc = DefMI->getOpcode();
482 while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
483 Register SrcReg = DefMI->getOperand(1).getReg();
484 auto SrcTy = MRI.getType(SrcReg);
// Stop at the first source whose type is unknown (e.g. a physreg copy).
485 if (!SrcTy.isValid())
486 break;
487 DefMI = MRI.getVRegDef(SrcReg);
488 DefSrcReg = SrcReg;
489 Opc = DefMI->getOpcode();
490 }
491 return DefinitionAndSourceRegister{DefMI, DefSrcReg};
492}
493
495 const MachineRegisterInfo &MRI) {
// Convenience wrapper: just the defining instruction (or nullptr).
496 std::optional<DefinitionAndSourceRegister> DefSrcReg =
498 return DefSrcReg ? DefSrcReg->MI : nullptr;
499}
500
502 const MachineRegisterInfo &MRI) {
// Convenience wrapper: just the underlying source register (or an invalid
// Register() when Reg has no definition).
503 std::optional<DefinitionAndSourceRegister> DefSrcReg =
505 return DefSrcReg ? DefSrcReg->Reg : Register();
506}
507
508void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
510 MachineIRBuilder &MIRBuilder,
// Split Reg into NumParts equally-typed pieces via a single G_UNMERGE,
// appending the fresh vregs to the output list.
512 for (int i = 0; i < NumParts; ++i)
513 VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
514 MIRBuilder.buildUnmerge(VRegs, Reg);
515}
516
517bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
519 SmallVectorImpl<Register> &LeftoverRegs,
520 MachineIRBuilder &MIRBuilder,
// Split Reg (of RegTy) into as many MainTy pieces as fit, plus leftover
// pieces whose type is reported through the LeftoverTy out-parameter.
522 assert(!LeftoverTy.isValid() && "this is an out argument");
523
524 unsigned RegSize = RegTy.getSizeInBits();
525 unsigned MainSize = MainTy.getSizeInBits();
526 unsigned NumParts = RegSize / MainSize;
527 unsigned LeftoverSize = RegSize - NumParts * MainSize;
528
529 // Use an unmerge when possible.
530 if (LeftoverSize == 0) {
531 for (unsigned I = 0; I < NumParts; ++I)
532 VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
533 MIRBuilder.buildUnmerge(VRegs, Reg);
534 return true;
535 }
536
537 // Try to use unmerge for irregular vector split where possible
538 // For example when splitting a <6 x i32> into <4 x i32> with <2 x i32>
539 // leftover, it becomes:
540 // <2 x i32> %2, <2 x i32>%3, <2 x i32> %4 = G_UNMERGE_VALUE <6 x i32> %1
541 // <4 x i32> %5 = G_CONCAT_VECTOR <2 x i32> %2, <2 x i32> %3
542 if (RegTy.isVector() && MainTy.isVector()) {
543 unsigned RegNumElts = RegTy.getNumElements();
544 unsigned MainNumElts = MainTy.getNumElements();
545 unsigned LeftoverNumElts = RegNumElts % MainNumElts;
546 // If can unmerge to LeftoverTy, do it
547 if (MainNumElts % LeftoverNumElts == 0 &&
548 RegNumElts % LeftoverNumElts == 0 &&
549 RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
550 LeftoverNumElts > 1) {
551 LeftoverTy = LLT::fixed_vector(LeftoverNumElts, RegTy.getElementType());
552
553 // Unmerge the SrcReg to LeftoverTy vectors
554 SmallVector<Register, 4> UnmergeValues;
555 extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
556 MIRBuilder, MRI);
557
558 // Find how many LeftoverTy makes one MainTy
559 unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
560 unsigned NumOfLeftoverVal =
561 ((RegNumElts % MainNumElts) / LeftoverNumElts);
562
563 // Create as many MainTy as possible using unmerged value
564 SmallVector<Register, 4> MergeValues;
565 for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
566 MergeValues.push_back(UnmergeValues[I]);
567 if (MergeValues.size() == LeftoverPerMain) {
568 VRegs.push_back(
569 MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
570 MergeValues.clear();
571 }
572 }
573 // Populate LeftoverRegs with the leftovers
574 for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
575 I < UnmergeValues.size(); I++) {
576 LeftoverRegs.push_back(UnmergeValues[I]);
577 }
578 return true;
579 }
580 }
581 // Perform irregular split. Leftover is last element of RegPieces.
582 if (MainTy.isVector()) {
583 SmallVector<Register, 8> RegPieces;
584 extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
585 MRI);
586 for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
587 VRegs.push_back(RegPieces[i]);
588 LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
589 LeftoverTy = MRI.getType(LeftoverRegs[0]);
590 return true;
591 }
592
593 LeftoverTy = LLT::scalar(LeftoverSize);
594 // For irregular sizes, extract the individual parts.
595 for (unsigned I = 0; I != NumParts; ++I) {
596 Register NewReg = MRI.createGenericVirtualRegister(MainTy);
597 VRegs.push_back(NewReg);
598 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
599 }
600
// Remaining bits are pulled out LeftoverSize at a time as scalars.
601 for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
602 Offset += LeftoverSize) {
603 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
604 LeftoverRegs.push_back(NewReg);
605 MIRBuilder.buildExtract(NewReg, Reg, Offset);
606 }
607
608 return true;
609}
610
611void llvm::extractVectorParts(Register Reg, unsigned NumElts,
613 MachineIRBuilder &MIRBuilder,
// Split a vector register into NumElts-wide sub-vectors; when the element
// count does not divide evenly, the final entry is a smaller leftover piece
// (or a single element).
615 LLT RegTy = MRI.getType(Reg);
616 assert(RegTy.isVector() && "Expected a vector type");
617
618 LLT EltTy = RegTy.getElementType();
619 LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
620 unsigned RegNumElts = RegTy.getNumElements();
621 unsigned LeftoverNumElts = RegNumElts % NumElts;
622 unsigned NumNarrowTyPieces = RegNumElts / NumElts;
623
624 // Perfect split without leftover
625 if (LeftoverNumElts == 0)
626 return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
627 MRI);
628
629 // Irregular split. Provide direct access to all elements for artifact
630 // combiner using unmerge to elements. Then build vectors with NumElts
631 // elements. Remaining element(s) will be (used to build vector) Leftover.
// NOTE(review): the scraped listing elides original line 632 here —
// presumably the declaration of the element list Elts; verify upstream.
633 extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);
634
635 unsigned Offset = 0;
636 // Requested sub-vectors of NarrowTy.
637 for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
638 ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
639 VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
640 }
641
642 // Leftover element(s).
643 if (LeftoverNumElts == 1) {
644 VRegs.push_back(Elts[Offset]);
645 } else {
646 LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
647 ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
648 VRegs.push_back(
649 MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
650 }
651}
652
654 const MachineRegisterInfo &MRI) {
// Return the copy-transparent defining instruction only when it has the
// requested opcode; nullptr otherwise.
656 return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
657}
658
659APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
// Build an APFloat of the requested bit width (16/32/64) from a double;
// any other size asserts.
660 if (Size == 32)
661 return APFloat(float(Val));
662 if (Size == 64)
663 return APFloat(Val);
664 if (Size != 16)
665 llvm_unreachable("Unsupported FPConstant size");
666 bool Ignored;
667 APFloat APF(Val);
// NOTE(review): the scraped listing elides original line 668 here —
// presumably the conversion of APF to half precision; verify upstream.
669 return APF;
670}
671
672std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
673 const Register Op1,
674 const Register Op2,
675 const MachineRegisterInfo &MRI) {
676 auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
677 if (!MaybeOp2Cst)
678 return std::nullopt;
679
680 auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
681 if (!MaybeOp1Cst)
682 return std::nullopt;
683
684 const APInt &C1 = MaybeOp1Cst->Value;
685 const APInt &C2 = MaybeOp2Cst->Value;
686 switch (Opcode) {
687 default:
688 break;
689 case TargetOpcode::G_ADD:
690 return C1 + C2;
691 case TargetOpcode::G_PTR_ADD:
692 // Types can be of different width here.
693 // Result needs to be the same width as C1, so trunc or sext C2.
694 return C1 + C2.sextOrTrunc(C1.getBitWidth());
695 case TargetOpcode::G_AND:
696 return C1 & C2;
697 case TargetOpcode::G_ASHR:
698 return C1.ashr(C2);
699 case TargetOpcode::G_LSHR:
700 return C1.lshr(C2);
701 case TargetOpcode::G_MUL:
702 return C1 * C2;
703 case TargetOpcode::G_OR:
704 return C1 | C2;
705 case TargetOpcode::G_SHL:
706 return C1 << C2;
707 case TargetOpcode::G_SUB:
708 return C1 - C2;
709 case TargetOpcode::G_XOR:
710 return C1 ^ C2;
711 case TargetOpcode::G_UDIV:
712 if (!C2.getBoolValue())
713 break;
714 return C1.udiv(C2);
715 case TargetOpcode::G_SDIV:
716 if (!C2.getBoolValue())
717 break;
718 return C1.sdiv(C2);
719 case TargetOpcode::G_UREM:
720 if (!C2.getBoolValue())
721 break;
722 return C1.urem(C2);
723 case TargetOpcode::G_SREM:
724 if (!C2.getBoolValue())
725 break;
726 return C1.srem(C2);
727 case TargetOpcode::G_SMIN:
728 return APIntOps::smin(C1, C2);
729 case TargetOpcode::G_SMAX:
730 return APIntOps::smax(C1, C2);
731 case TargetOpcode::G_UMIN:
732 return APIntOps::umin(C1, C2);
733 case TargetOpcode::G_UMAX:
734 return APIntOps::umax(C1, C2);
735 }
736
737 return std::nullopt;
738}
739
740std::optional<APFloat>
741llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
742 const Register Op2, const MachineRegisterInfo &MRI) {
// Constant-fold a floating-point binary G_F* operation when both operands
// are G_FCONSTANTs; returns nullopt for unsupported opcodes or signaling
// NaN inputs to fminnum/fmaxnum.
743 const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
744 if (!Op2Cst)
745 return std::nullopt;
746
747 const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
748 if (!Op1Cst)
749 return std::nullopt;
750
// C1 is a mutable copy: the APFloat arithmetic below operates in place.
751 APFloat C1 = Op1Cst->getValueAPF();
752 const APFloat &C2 = Op2Cst->getValueAPF();
753 switch (Opcode) {
754 case TargetOpcode::G_FADD:
// NOTE(review): the scraped listing elides the in-place arithmetic calls at
// original lines 755/758/761/764 (add/subtract/multiply/divide); verify
// upstream before relying on this text.
756 return C1;
757 case TargetOpcode::G_FSUB:
759 return C1;
760 case TargetOpcode::G_FMUL:
762 return C1;
763 case TargetOpcode::G_FDIV:
765 return C1;
766 case TargetOpcode::G_FREM:
767 C1.mod(C2);
768 return C1;
769 case TargetOpcode::G_FCOPYSIGN:
770 C1.copySign(C2);
771 return C1;
772 case TargetOpcode::G_FMINNUM:
773 if (C1.isSignaling() || C2.isSignaling())
774 return std::nullopt;
775 return minnum(C1, C2);
776 case TargetOpcode::G_FMAXNUM:
777 if (C1.isSignaling() || C2.isSignaling())
778 return std::nullopt;
779 return maxnum(C1, C2);
780 case TargetOpcode::G_FMINIMUM:
781 return minimum(C1, C2);
782 case TargetOpcode::G_FMAXIMUM:
783 return maximum(C1, C2);
784 case TargetOpcode::G_FMINNUM_IEEE:
785 case TargetOpcode::G_FMAXNUM_IEEE:
786 // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
787 // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
788 // and currently there isn't a nice wrapper in APFloat for the version with
789 // correct snan handling.
790 break;
791 default:
792 break;
793 }
794
795 return std::nullopt;
796}
797
799llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
800 const Register Op2,
801 const MachineRegisterInfo &MRI) {
// Element-wise constant fold of a binary op over two G_BUILD_VECTOR
// operands. An empty vector signals failure (either operand not a build
// vector, or any element failing to fold).
802 auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
803 if (!SrcVec2)
804 return SmallVector<APInt>();
805
806 auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
807 if (!SrcVec1)
808 return SmallVector<APInt>();
809
810 SmallVector<APInt> FoldedElements;
811 for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
812 auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
813 SrcVec2->getSourceReg(Idx), MRI);
814 if (!MaybeCst)
815 return SmallVector<APInt>();
816 FoldedElements.push_back(*MaybeCst);
817 }
818 return FoldedElements;
819}
820
822 bool SNaN) {
// Conservatively decide whether Val can never be a NaN (or, when SNaN is
// set, never a signaling NaN), based on its defining instruction.
823 const MachineInstr *DefMI = MRI.getVRegDef(Val);
824 if (!DefMI)
825 return false;
826
// The nnan fast-math flag on the def is an explicit no-NaN guarantee.
827 if (DefMI->getFlag(MachineInstr::FmNoNans))
828 return true;
829
830 // If the value is a constant, we can obviously see if it is a NaN or not.
831 if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
832 return !FPVal->getValueAPF().isNaN() ||
833 (SNaN && !FPVal->getValueAPF().isSignaling());
834 }
835
// A build_vector is NaN-free iff every element is.
836 if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
837 for (const auto &Op : DefMI->uses())
838 if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
839 return false;
840 return true;
841 }
842
843 switch (DefMI->getOpcode()) {
844 default:
845 break;
846 case TargetOpcode::G_FADD:
847 case TargetOpcode::G_FSUB:
848 case TargetOpcode::G_FMUL:
849 case TargetOpcode::G_FDIV:
850 case TargetOpcode::G_FREM:
851 case TargetOpcode::G_FSIN:
852 case TargetOpcode::G_FCOS:
853 case TargetOpcode::G_FTAN:
854 case TargetOpcode::G_FACOS:
855 case TargetOpcode::G_FASIN:
856 case TargetOpcode::G_FATAN:
857 case TargetOpcode::G_FATAN2:
858 case TargetOpcode::G_FCOSH:
859 case TargetOpcode::G_FSINH:
860 case TargetOpcode::G_FTANH:
861 case TargetOpcode::G_FMA:
862 case TargetOpcode::G_FMAD:
// These arithmetic ops only produce quiet NaNs, so an sNaN query succeeds.
863 if (SNaN)
864 return true;
865
866 // TODO: Need isKnownNeverInfinity
867 return false;
868 case TargetOpcode::G_FMINNUM_IEEE:
869 case TargetOpcode::G_FMAXNUM_IEEE: {
870 if (SNaN)
871 return true;
872 // This can return a NaN if either operand is an sNaN, or if both operands
873 // are NaN.
874 return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
875 isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
876 (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
877 isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
878 }
879 case TargetOpcode::G_FMINNUM:
880 case TargetOpcode::G_FMAXNUM: {
881 // Only one needs to be known not-nan, since it will be returned if the
882 // other ends up being one.
883 return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
884 isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
885 }
886 }
887
888 if (SNaN) {
889 // FP operations quiet. For now, just handle the ones inserted during
890 // legalization.
891 switch (DefMI->getOpcode()) {
892 case TargetOpcode::G_FPEXT:
893 case TargetOpcode::G_FPTRUNC:
894 case TargetOpcode::G_FCANONICALIZE:
895 return true;
896 default:
897 return false;
898 }
899 }
900
901 return false;
902}
903
905 const MachinePointerInfo &MPO) {
// Derive the best known alignment for a memory operand: frame objects use
// the frame info, IR values use their pointer alignment, anything else
// falls back to byte alignment.
// NOTE(review): the scraped listing elides original lines 906-907 here —
// presumably the fixed-stack pseudo-source-value check guarding this block;
// verify upstream.
908 MachineFrameInfo &MFI = MF.getFrameInfo();
909 return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
910 MPO.Offset);
911 }
912
913 if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
914 const Module *M = MF.getFunction().getParent();
915 return V->getPointerAlignment(M->getDataLayout());
916 }
917
918 return Align(1);
919}
920
922 const TargetInstrInfo &TII,
923 MCRegister PhysReg,
924 const TargetRegisterClass &RC,
925 const DebugLoc &DL, LLT RegTy) {
// Return (creating if needed) the virtual register that holds live-in
// PhysReg, ensuring the entry block contains the COPY from the physreg and
// lists it as a live-in.
926 MachineBasicBlock &EntryMBB = MF.front();
928 Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
929 if (LiveIn) {
930 MachineInstr *Def = MRI.getVRegDef(LiveIn);
931 if (Def) {
932 // FIXME: Should the verifier check this is in the entry block?
933 assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
934 return LiveIn;
935 }
936
937 // It's possible the incoming argument register and copy was added during
938 // lowering, but later deleted due to being/becoming dead. If this happens,
939 // re-insert the copy.
940 } else {
941 // The live in register was not present, so add it.
942 LiveIn = MF.addLiveIn(PhysReg, &RC);
943 if (RegTy.isValid())
944 MRI.setType(LiveIn, RegTy);
945 }
946
// (Re)materialize the physreg-to-vreg COPY at the top of the entry block.
947 BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
948 .addReg(PhysReg);
949 if (!EntryMBB.isLiveIn(PhysReg))
950 EntryMBB.addLiveIn(PhysReg);
951 return LiveIn;
952}
953
954std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
955 const Register Op1, uint64_t Imm,
956 const MachineRegisterInfo &MRI) {
957 auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
958 if (MaybeOp1Cst) {
959 switch (Opcode) {
960 default:
961 break;
962 case TargetOpcode::G_SEXT_INREG: {
963 LLT Ty = MRI.getType(Op1);
964 return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
965 }
966 }
967 }
968 return std::nullopt;
969}
970
971std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
972 const Register Op0,
973 const MachineRegisterInfo &MRI) {
974 std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
975 if (!Val)
976 return Val;
977
978 const unsigned DstSize = DstTy.getScalarSizeInBits();
979
980 switch (Opcode) {
981 case TargetOpcode::G_SEXT:
982 return Val->sext(DstSize);
983 case TargetOpcode::G_ZEXT:
984 case TargetOpcode::G_ANYEXT:
985 // TODO: DAG considers target preference when constant folding any_extend.
986 return Val->zext(DstSize);
987 default:
988 break;
989 }
990
991 llvm_unreachable("unexpected cast opcode to constant fold");
992}
993
994std::optional<APFloat>
995llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
996 const MachineRegisterInfo &MRI) {
// Constant-fold G_SITOFP/G_UITOFP of an integer constant into an APFloat
// with DstTy's semantics; nullopt when Src is not a constant.
997 assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
998 if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
999 APFloat DstVal(getFltSemanticForLLT(DstTy));
// The signedness of the conversion follows the opcode (G_SITOFP = signed).
1000 DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
1002 return DstVal;
1003 }
1004 return std::nullopt;
1005}
1006
1007std::optional<SmallVector<unsigned>>
1009 std::function<unsigned(APInt)> CB) {
// Apply the scalar count callback CB (e.g. a ctlz/cttz evaluator) to a
// constant scalar, or element-wise to a constant G_BUILD_VECTOR; nullopt if
// any input element is not a constant.
1010 LLT Ty = MRI.getType(Src);
1011 SmallVector<unsigned> FoldedCTLZs;
1012 auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
1013 auto MaybeCst = getIConstantVRegVal(R, MRI);
1014 if (!MaybeCst)
1015 return std::nullopt;
1016 return CB(*MaybeCst);
1017 };
1018 if (Ty.isVector()) {
1019 // Try to constant fold each element.
1020 auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
1021 if (!BV)
1022 return std::nullopt;
1023 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1024 if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
1025 FoldedCTLZs.emplace_back(*MaybeFold);
1026 continue;
1027 }
// One non-constant element poisons the whole fold.
1028 return std::nullopt;
1029 }
1030 return FoldedCTLZs;
1031 }
1032 if (auto MaybeCst = tryFoldScalar(Src)) {
1033 FoldedCTLZs.emplace_back(*MaybeCst);
1034 return FoldedCTLZs;
1035 }
1036 return std::nullopt;
1037}
1038
1039std::optional<SmallVector<APInt>>
1040llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
1041 unsigned DstScalarSizeInBits, unsigned ExtOp,
1042 const MachineRegisterInfo &MRI) {
1043 assert(ExtOp == TargetOpcode::G_SEXT || ExtOp == TargetOpcode::G_ZEXT ||
1044 ExtOp == TargetOpcode::G_ANYEXT);
1045
1046 const LLT Ty = MRI.getType(Op1);
1047
1048 auto GetICmpResultCst = [&](bool IsTrue) {
1049 if (IsTrue)
1050 return ExtOp == TargetOpcode::G_SEXT
1051 ? APInt::getAllOnes(DstScalarSizeInBits)
1052 : APInt::getOneBitSet(DstScalarSizeInBits, 0);
1053 return APInt::getZero(DstScalarSizeInBits);
1054 };
1055
1056 auto TryFoldScalar = [&](Register LHS, Register RHS) -> std::optional<APInt> {
1057 auto RHSCst = getIConstantVRegVal(RHS, MRI);
1058 if (!RHSCst)
1059 return std::nullopt;
1060 auto LHSCst = getIConstantVRegVal(LHS, MRI);
1061 if (!LHSCst)
1062 return std::nullopt;
1063
1064 switch (Pred) {
1066 return GetICmpResultCst(LHSCst->eq(*RHSCst));
1068 return GetICmpResultCst(LHSCst->ne(*RHSCst));
1070 return GetICmpResultCst(LHSCst->ugt(*RHSCst));
1072 return GetICmpResultCst(LHSCst->uge(*RHSCst));
1074 return GetICmpResultCst(LHSCst->ult(*RHSCst));
1076 return GetICmpResultCst(LHSCst->ule(*RHSCst));
1078 return GetICmpResultCst(LHSCst->sgt(*RHSCst));
1080 return GetICmpResultCst(LHSCst->sge(*RHSCst));
1082 return GetICmpResultCst(LHSCst->slt(*RHSCst));
1084 return GetICmpResultCst(LHSCst->sle(*RHSCst));
1085 default:
1086 return std::nullopt;
1087 }
1088 };
1089
1090 SmallVector<APInt> FoldedICmps;
1091
1092 if (Ty.isVector()) {
1093 // Try to constant fold each element.
1094 auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
1095 auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
1096 if (!BV1 || !BV2)
1097 return std::nullopt;
1098 assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
1099 for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
1100 if (auto MaybeFold =
1101 TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
1102 FoldedICmps.emplace_back(*MaybeFold);
1103 continue;
1104 }
1105 return std::nullopt;
1106 }
1107 return FoldedICmps;
1108 }
1109
1110 if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
1111 FoldedICmps.emplace_back(*MaybeCst);
1112 return FoldedICmps;
1113 }
1114
1115 return std::nullopt;
1116}
1117
1119 GISelValueTracking *VT) {
1120 std::optional<DefinitionAndSourceRegister> DefSrcReg =
1122 if (!DefSrcReg)
1123 return false;
1124
1125 const MachineInstr &MI = *DefSrcReg->MI;
1126 const LLT Ty = MRI.getType(Reg);
1127
1128 switch (MI.getOpcode()) {
1129 case TargetOpcode::G_CONSTANT: {
1130 unsigned BitWidth = Ty.getScalarSizeInBits();
1131 const ConstantInt *CI = MI.getOperand(1).getCImm();
1132 return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
1133 }
1134 case TargetOpcode::G_SHL: {
1135 // A left-shift of a constant one will have exactly one bit set because
1136 // shifting the bit off the end is undefined.
1137
1138 // TODO: Constant splat
1139 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1140 if (*ConstLHS == 1)
1141 return true;
1142 }
1143
1144 break;
1145 }
1146 case TargetOpcode::G_LSHR: {
1147 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1148 if (ConstLHS->isSignMask())
1149 return true;
1150 }
1151
1152 break;
1153 }
1154 case TargetOpcode::G_BUILD_VECTOR: {
1155 // TODO: Probably should have a recursion depth guard since you could have
1156 // bitcasted vector elements.
1157 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
1158 if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, VT))
1159 return false;
1160
1161 return true;
1162 }
1163 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1164 // Only handle constants since we would need to know if number of leading
1165 // zeros is greater than the truncation amount.
1166 const unsigned BitWidth = Ty.getScalarSizeInBits();
1167 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
1168 auto Const = getIConstantVRegVal(MO.getReg(), MRI);
1169 if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
1170 return false;
1171 }
1172
1173 return true;
1174 }
1175 default:
1176 break;
1177 }
1178
1179 if (!VT)
1180 return false;
1181
1182 // More could be done here, though the above checks are enough
1183 // to handle some common cases.
1184
1185 // Fall back to computeKnownBits to catch other known cases.
1186 KnownBits Known = VT->getKnownBits(Reg);
1187 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
1188}
1189
1193
1194LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
1195 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1196 return OrigTy;
1197
1198 if (OrigTy.isVector() && TargetTy.isVector()) {
1199 LLT OrigElt = OrigTy.getElementType();
1200 LLT TargetElt = TargetTy.getElementType();
1201
1202 // TODO: The docstring for this function says the intention is to use this
1203 // function to build MERGE/UNMERGE instructions. It won't be the case that
1204 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1205 // could implement getLCMType between the two in the future if there was a
1206 // need, but it is not worth it now as this function should not be used in
1207 // that way.
1208 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1209 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1210 "getLCMType not implemented between fixed and scalable vectors.");
1211
1212 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
1213 int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
1214 TargetTy.getElementCount().getKnownMinValue());
1215 // Prefer the original element type.
1217 TargetTy.getElementCount().getKnownMinValue());
1218 return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
1219 OrigTy.getElementType());
1220 }
1221 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
1222 TargetTy.getSizeInBits().getKnownMinValue());
1223 return LLT::vector(
1224 ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
1225 OrigElt);
1226 }
1227
1228 // One type is scalar, one type is vector
1229 if (OrigTy.isVector() || TargetTy.isVector()) {
1230 LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
1231 LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
1232 LLT EltTy = VecTy.getElementType();
1233 LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;
1234
1235 // Prefer scalar type from OrigTy.
1236 if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
1237 return LLT::vector(VecTy.getElementCount(), OrigEltTy);
1238
1239 // Different size scalars. Create vector with the same total size.
1240 // LCM will take fixed/scalable from VecTy.
1241 unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
1243 ScalarTy.getSizeInBits().getFixedValue());
1244 // Prefer type from OrigTy
1245 return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
1246 VecTy.getElementCount().isScalable()),
1247 OrigEltTy);
1248 }
1249
1250 // At this point, both types are scalars of different size
1251 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
1252 TargetTy.getSizeInBits().getFixedValue());
1253 // Preserve pointer types.
1254 if (LCM == OrigTy.getSizeInBits())
1255 return OrigTy;
1256 if (LCM == TargetTy.getSizeInBits())
1257 return TargetTy;
1258 return LLT::scalar(LCM);
1259}
1260
1261LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
1262
1263 if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
1264 (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
1266 "getCoverTy not implemented between fixed and scalable vectors.");
1267
1268 if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
1269 (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
1270 return getLCMType(OrigTy, TargetTy);
1271
1272 unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
1273 unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
1274 if (OrigTyNumElts % TargetTyNumElts == 0)
1275 return OrigTy;
1276
1277 unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
1279 OrigTy.getElementType());
1280}
1281
1282LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
1283 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1284 return OrigTy;
1285
1286 if (OrigTy.isVector() && TargetTy.isVector()) {
1287 LLT OrigElt = OrigTy.getElementType();
1288
1289 // TODO: The docstring for this function says the intention is to use this
1290 // function to build MERGE/UNMERGE instructions. It won't be the case that
1291 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1292 // could implement getGCDType between the two in the future if there was a
1293 // need, but it is not worth it now as this function should not be used in
1294 // that way.
1295 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1296 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1297 "getGCDType not implemented between fixed and scalable vectors.");
1298
1299 unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
1300 TargetTy.getSizeInBits().getKnownMinValue());
1301 if (GCD == OrigElt.getSizeInBits())
1303 OrigElt);
1304
1305 // Cannot produce original element type, but both have vscale in common.
1306 if (GCD < OrigElt.getSizeInBits())
1308 GCD);
1309
1310 return LLT::vector(
1312 OrigTy.isScalable()),
1313 OrigElt);
1314 }
1315
1316 // If one type is vector and the element size matches the scalar size, then
1317 // the gcd is the scalar type.
1318 if (OrigTy.isVector() &&
1319 OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
1320 return OrigTy.getElementType();
1321 if (TargetTy.isVector() &&
1322 TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
1323 return OrigTy;
1324
1325 // At this point, both types are either scalars of different type or one is a
1326 // vector and one is a scalar. If both types are scalars, the GCD type is the
1327 // GCD between the two scalar sizes. If one is vector and one is scalar, then
1328 // the GCD type is the GCD between the scalar and the vector element size.
1329 LLT OrigScalar = OrigTy.getScalarType();
1330 LLT TargetScalar = TargetTy.getScalarType();
1331 unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
1332 TargetScalar.getSizeInBits().getFixedValue());
1333 return LLT::scalar(GCD);
1334}
1335
1337 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1338 "Only G_SHUFFLE_VECTOR can have a splat index!");
1339 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1340 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1341
1342 // If all elements are undefined, this shuffle can be considered a splat.
1343 // Return 0 for better potential for callers to simplify.
1344 if (FirstDefinedIdx == Mask.end())
1345 return 0;
1346
1347 // Make sure all remaining elements are either undef or the same
1348 // as the first non-undef value.
1349 int SplatValue = *FirstDefinedIdx;
1350 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1351 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1352 return std::nullopt;
1353
1354 return SplatValue;
1355}
1356
1357static bool isBuildVectorOp(unsigned Opcode) {
1358 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1359 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1360}
1361
1362namespace {
1363
1364std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1365 const MachineRegisterInfo &MRI,
1366 bool AllowUndef) {
1368 if (!MI)
1369 return std::nullopt;
1370
1371 bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1372 if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
1373 return std::nullopt;
1374
1375 std::optional<ValueAndVReg> SplatValAndReg;
1376 for (MachineOperand &Op : MI->uses()) {
1377 Register Element = Op.getReg();
1378 // If we have a G_CONCAT_VECTOR, we recursively look into the
1379 // vectors that we're concatenating to see if they're splats.
1380 auto ElementValAndReg =
1381 isConcatVectorsOp
1382 ? getAnyConstantSplat(Element, MRI, AllowUndef)
1384
1385 // If AllowUndef, treat undef as value that will result in a constant splat.
1386 if (!ElementValAndReg) {
1387 if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1388 continue;
1389 return std::nullopt;
1390 }
1391
1392 // Record splat value
1393 if (!SplatValAndReg)
1394 SplatValAndReg = ElementValAndReg;
1395
1396 // Different constant than the one already recorded, not a constant splat.
1397 if (SplatValAndReg->Value != ElementValAndReg->Value)
1398 return std::nullopt;
1399 }
1400
1401 return SplatValAndReg;
1402}
1403
1404} // end anonymous namespace
1405
1407 const MachineRegisterInfo &MRI,
1408 int64_t SplatValue, bool AllowUndef) {
1409 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1410 return SplatValAndReg->Value.getSExtValue() == SplatValue;
1411
1412 return false;
1413}
1414
1416 const MachineRegisterInfo &MRI,
1417 const APInt &SplatValue,
1418 bool AllowUndef) {
1419 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef)) {
1420 if (SplatValAndReg->Value.getBitWidth() < SplatValue.getBitWidth())
1421 return APInt::isSameValue(
1422 SplatValAndReg->Value.sext(SplatValue.getBitWidth()), SplatValue);
1423 return APInt::isSameValue(
1424 SplatValAndReg->Value,
1425 SplatValue.sext(SplatValAndReg->Value.getBitWidth()));
1426 }
1427
1428 return false;
1429}
1430
1432 const MachineRegisterInfo &MRI,
1433 int64_t SplatValue, bool AllowUndef) {
1434 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1435 AllowUndef);
1436}
1437
1439 const MachineRegisterInfo &MRI,
1440 const APInt &SplatValue,
1441 bool AllowUndef) {
1442 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1443 AllowUndef);
1444}
1445
1446std::optional<APInt>
1448 if (auto SplatValAndReg =
1449 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1450 if (std::optional<ValueAndVReg> ValAndVReg =
1451 getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1452 return ValAndVReg->Value;
1453 }
1454
1455 return std::nullopt;
1456}
1457
1458std::optional<APInt>
1460 const MachineRegisterInfo &MRI) {
1461 return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1462}
1463
1464std::optional<int64_t>
1466 const MachineRegisterInfo &MRI) {
1467 if (auto SplatValAndReg =
1468 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1469 return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1470 return std::nullopt;
1471}
1472
1473std::optional<int64_t>
1475 const MachineRegisterInfo &MRI) {
1476 return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1477}
1478
1479std::optional<FPValueAndVReg>
1481 bool AllowUndef) {
1482 if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1483 return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1484 return std::nullopt;
1485}
1486
1488 const MachineRegisterInfo &MRI,
1489 bool AllowUndef) {
1490 return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1491}
1492
1494 const MachineRegisterInfo &MRI,
1495 bool AllowUndef) {
1496 return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1497}
1498
1499std::optional<RegOrConstant>
1501 unsigned Opc = MI.getOpcode();
1502 if (!isBuildVectorOp(Opc))
1503 return std::nullopt;
1504 if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1505 return RegOrConstant(*Splat);
1506 auto Reg = MI.getOperand(1).getReg();
1507 if (any_of(drop_begin(MI.operands(), 2),
1508 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1509 return std::nullopt;
1510 return RegOrConstant(Reg);
1511}
1512
1514 const MachineRegisterInfo &MRI,
1515 bool AllowFP = true,
1516 bool AllowOpaqueConstants = true) {
1517 switch (MI.getOpcode()) {
1518 case TargetOpcode::G_CONSTANT:
1519 case TargetOpcode::G_IMPLICIT_DEF:
1520 return true;
1521 case TargetOpcode::G_FCONSTANT:
1522 return AllowFP;
1523 case TargetOpcode::G_GLOBAL_VALUE:
1524 case TargetOpcode::G_FRAME_INDEX:
1525 case TargetOpcode::G_BLOCK_ADDR:
1526 case TargetOpcode::G_JUMP_TABLE:
1527 return AllowOpaqueConstants;
1528 default:
1529 return false;
1530 }
1531}
1532
1534 const MachineRegisterInfo &MRI) {
1535 Register Def = MI.getOperand(0).getReg();
1536 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1537 return true;
1539 if (!BV)
1540 return false;
1541 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1544 continue;
1545 return false;
1546 }
1547 return true;
1548}
1549
1551 const MachineRegisterInfo &MRI,
1552 bool AllowFP, bool AllowOpaqueConstants) {
1553 if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1554 return true;
1555
1556 if (!isBuildVectorOp(MI.getOpcode()))
1557 return false;
1558
1559 const unsigned NumOps = MI.getNumOperands();
1560 for (unsigned I = 1; I != NumOps; ++I) {
1561 const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1562 if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1563 return false;
1564 }
1565
1566 return true;
1567}
1568
1569std::optional<APInt>
1571 const MachineRegisterInfo &MRI) {
1572 Register Def = MI.getOperand(0).getReg();
1573 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1574 return C->Value;
1575 auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1576 if (!MaybeCst)
1577 return std::nullopt;
1578 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1579 return APInt(ScalarSize, *MaybeCst, true);
1580}
1581
1582std::optional<APFloat>
1584 const MachineRegisterInfo &MRI) {
1585 Register Def = MI.getOperand(0).getReg();
1586 if (auto FpConst = getFConstantVRegValWithLookThrough(Def, MRI))
1587 return FpConst->Value;
1588 auto MaybeCstFP = getFConstantSplat(Def, MRI, /*allowUndef=*/false);
1589 if (!MaybeCstFP)
1590 return std::nullopt;
1591 return MaybeCstFP->Value;
1592}
1593
1595 const MachineRegisterInfo &MRI, bool AllowUndefs) {
1596 switch (MI.getOpcode()) {
1597 case TargetOpcode::G_IMPLICIT_DEF:
1598 return AllowUndefs;
1599 case TargetOpcode::G_CONSTANT:
1600 return MI.getOperand(1).getCImm()->isNullValue();
1601 case TargetOpcode::G_FCONSTANT: {
1602 const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1603 return FPImm->isZero() && !FPImm->isNegative();
1604 }
1605 default:
1606 if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1607 return false;
1608 return isBuildVectorAllZeros(MI, MRI);
1609 }
1610}
1611
1613 const MachineRegisterInfo &MRI,
1614 bool AllowUndefs) {
1615 switch (MI.getOpcode()) {
1616 case TargetOpcode::G_IMPLICIT_DEF:
1617 return AllowUndefs;
1618 case TargetOpcode::G_CONSTANT:
1619 return MI.getOperand(1).getCImm()->isAllOnesValue();
1620 default:
1621 if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1622 return false;
1623 return isBuildVectorAllOnes(MI, MRI);
1624 }
1625}
1626
1628 const MachineRegisterInfo &MRI, Register Reg,
1629 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1630
1631 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1632 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1633 return Match(nullptr);
1634
1635 // TODO: Also handle fconstant
1636 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1637 return Match(Def->getOperand(1).getCImm());
1638
1639 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1640 return false;
1641
1642 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1643 Register SrcElt = Def->getOperand(I).getReg();
1644 const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1645 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1646 if (!Match(nullptr))
1647 return false;
1648 continue;
1649 }
1650
1651 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1652 !Match(SrcDef->getOperand(1).getCImm()))
1653 return false;
1654 }
1655
1656 return true;
1657}
1658
1659bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1660 bool IsFP) {
1661 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1663 return Val & 0x1;
1665 return Val == 1;
1667 return Val == -1;
1668 }
1669 llvm_unreachable("Invalid boolean contents");
1670}
1671
1672bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1673 bool IsVector, bool IsFP) {
1674 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1676 return ~Val & 0x1;
1679 return Val == 0;
1680 }
1681 llvm_unreachable("Invalid boolean contents");
1682}
1683
1684int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1685 bool IsFP) {
1686 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1689 return 1;
1691 return -1;
1692 }
1693 llvm_unreachable("Invalid boolean contents");
1694}
1695
1697 LostDebugLocObserver *LocObserver,
1698 SmallInstListTy &DeadInstChain) {
1699 for (MachineOperand &Op : MI.uses()) {
1700 if (Op.isReg() && Op.getReg().isVirtual())
1701 DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1702 }
1703 LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1704 DeadInstChain.remove(&MI);
1705 MI.eraseFromParent();
1706 if (LocObserver)
1707 LocObserver->checkpoint(false);
1708}
1709
1712 LostDebugLocObserver *LocObserver) {
1713 SmallInstListTy DeadInstChain;
1714 for (MachineInstr *MI : DeadInstrs)
1715 saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1716
1717 while (!DeadInstChain.empty()) {
1718 MachineInstr *Inst = DeadInstChain.pop_back_val();
1719 if (!isTriviallyDead(*Inst, MRI))
1720 continue;
1721 saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1722 }
1723}
1724
1726 LostDebugLocObserver *LocObserver) {
1727 return eraseInstrs({&MI}, MRI, LocObserver);
1728}
1729
1731 for (auto &Def : MI.defs()) {
1732 assert(Def.isReg() && "Must be a reg");
1733
1735 for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1736 MachineInstr *DbgValue = MOUse.getParent();
1737 // Ignore partially formed DBG_VALUEs.
1738 if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1739 DbgUsers.push_back(&MOUse);
1740 }
1741 }
1742
1743 if (!DbgUsers.empty()) {
1745 }
1746 }
1747}
1748
1750 switch (Opc) {
1751 case TargetOpcode::G_FABS:
1752 case TargetOpcode::G_FADD:
1753 case TargetOpcode::G_FCANONICALIZE:
1754 case TargetOpcode::G_FCEIL:
1755 case TargetOpcode::G_FCONSTANT:
1756 case TargetOpcode::G_FCOPYSIGN:
1757 case TargetOpcode::G_FCOS:
1758 case TargetOpcode::G_FDIV:
1759 case TargetOpcode::G_FEXP2:
1760 case TargetOpcode::G_FEXP:
1761 case TargetOpcode::G_FFLOOR:
1762 case TargetOpcode::G_FLOG10:
1763 case TargetOpcode::G_FLOG2:
1764 case TargetOpcode::G_FLOG:
1765 case TargetOpcode::G_FMA:
1766 case TargetOpcode::G_FMAD:
1767 case TargetOpcode::G_FMAXIMUM:
1768 case TargetOpcode::G_FMAXIMUMNUM:
1769 case TargetOpcode::G_FMAXNUM:
1770 case TargetOpcode::G_FMAXNUM_IEEE:
1771 case TargetOpcode::G_FMINIMUM:
1772 case TargetOpcode::G_FMINIMUMNUM:
1773 case TargetOpcode::G_FMINNUM:
1774 case TargetOpcode::G_FMINNUM_IEEE:
1775 case TargetOpcode::G_FMUL:
1776 case TargetOpcode::G_FNEARBYINT:
1777 case TargetOpcode::G_FNEG:
1778 case TargetOpcode::G_FPEXT:
1779 case TargetOpcode::G_FPOW:
1780 case TargetOpcode::G_FPTRUNC:
1781 case TargetOpcode::G_FREM:
1782 case TargetOpcode::G_FRINT:
1783 case TargetOpcode::G_FSIN:
1784 case TargetOpcode::G_FTAN:
1785 case TargetOpcode::G_FACOS:
1786 case TargetOpcode::G_FASIN:
1787 case TargetOpcode::G_FATAN:
1788 case TargetOpcode::G_FATAN2:
1789 case TargetOpcode::G_FCOSH:
1790 case TargetOpcode::G_FSINH:
1791 case TargetOpcode::G_FTANH:
1792 case TargetOpcode::G_FSQRT:
1793 case TargetOpcode::G_FSUB:
1794 case TargetOpcode::G_INTRINSIC_ROUND:
1795 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1796 case TargetOpcode::G_INTRINSIC_TRUNC:
1797 return true;
1798 default:
1799 return false;
1800 }
1801}
1802
1803/// Shifts return poison if shiftwidth is larger than the bitwidth.
1804static bool shiftAmountKnownInRange(Register ShiftAmount,
1805 const MachineRegisterInfo &MRI) {
1806 LLT Ty = MRI.getType(ShiftAmount);
1807
1808 if (Ty.isScalableVector())
1809 return false; // Can't tell, just return false to be safe
1810
1811 if (Ty.isScalar()) {
1812 std::optional<ValueAndVReg> Val =
1814 if (!Val)
1815 return false;
1816 return Val->Value.ult(Ty.getScalarSizeInBits());
1817 }
1818
1819 GBuildVector *BV = getOpcodeDef<GBuildVector>(ShiftAmount, MRI);
1820 if (!BV)
1821 return false;
1822
1823 unsigned Sources = BV->getNumSources();
1824 for (unsigned I = 0; I < Sources; ++I) {
1825 std::optional<ValueAndVReg> Val =
1827 if (!Val)
1828 return false;
1829 if (!Val->Value.ult(Ty.getScalarSizeInBits()))
1830 return false;
1831 }
1832
1833 return true;
1834}
1835
namespace {
// Bitmask selecting which kinds of "bad" values an analysis cares about.
enum class UndefPoisonKind {
  PoisonOnly = (1 << 0),
  UndefOnly = (1 << 1),
  UndefAndPoison = PoisonOnly | UndefOnly,
};
}
1843
1845 return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
1846}
1847
1849 return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
1850}
1851
1853 bool ConsiderFlagsAndMetadata,
1854 UndefPoisonKind Kind) {
1855 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1856
1857 if (ConsiderFlagsAndMetadata && includesPoison(Kind))
1858 if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
1859 if (GMI->hasPoisonGeneratingFlags())
1860 return true;
1861
1862 // Check whether opcode is a poison/undef-generating operation.
1863 switch (RegDef->getOpcode()) {
1864 case TargetOpcode::G_BUILD_VECTOR:
1865 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
1866 return false;
1867 case TargetOpcode::G_SHL:
1868 case TargetOpcode::G_ASHR:
1869 case TargetOpcode::G_LSHR:
1870 return includesPoison(Kind) &&
1872 case TargetOpcode::G_FPTOSI:
1873 case TargetOpcode::G_FPTOUI:
1874 // fptosi/ui yields poison if the resulting value does not fit in the
1875 // destination type.
1876 return true;
1877 case TargetOpcode::G_CTLZ:
1878 case TargetOpcode::G_CTTZ:
1879 case TargetOpcode::G_CTLS:
1880 case TargetOpcode::G_ABS:
1881 case TargetOpcode::G_CTPOP:
1882 case TargetOpcode::G_BSWAP:
1883 case TargetOpcode::G_BITREVERSE:
1884 case TargetOpcode::G_FSHL:
1885 case TargetOpcode::G_FSHR:
1886 case TargetOpcode::G_SMAX:
1887 case TargetOpcode::G_SMIN:
1888 case TargetOpcode::G_SCMP:
1889 case TargetOpcode::G_UMAX:
1890 case TargetOpcode::G_UMIN:
1891 case TargetOpcode::G_UCMP:
1892 case TargetOpcode::G_PTRMASK:
1893 case TargetOpcode::G_SADDO:
1894 case TargetOpcode::G_SSUBO:
1895 case TargetOpcode::G_UADDO:
1896 case TargetOpcode::G_USUBO:
1897 case TargetOpcode::G_SMULO:
1898 case TargetOpcode::G_UMULO:
1899 case TargetOpcode::G_SADDSAT:
1900 case TargetOpcode::G_UADDSAT:
1901 case TargetOpcode::G_SSUBSAT:
1902 case TargetOpcode::G_USUBSAT:
1903 case TargetOpcode::G_SBFX:
1904 case TargetOpcode::G_UBFX:
1905 return false;
1906 case TargetOpcode::G_SSHLSAT:
1907 case TargetOpcode::G_USHLSAT:
1908 return includesPoison(Kind) &&
1910 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1912 if (includesPoison(Kind)) {
1913 std::optional<ValueAndVReg> Index =
1914 getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
1915 if (!Index)
1916 return true;
1917 LLT VecTy = MRI.getType(Insert->getVectorReg());
1918 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1919 }
1920 return false;
1921 }
1922 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1924 if (includesPoison(Kind)) {
1925 std::optional<ValueAndVReg> Index =
1927 if (!Index)
1928 return true;
1929 LLT VecTy = MRI.getType(Extract->getVectorReg());
1930 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1931 }
1932 return false;
1933 }
1934 case TargetOpcode::G_SHUFFLE_VECTOR: {
1935 GShuffleVector *Shuffle = cast<GShuffleVector>(RegDef);
1936 ArrayRef<int> Mask = Shuffle->getMask();
1937 return includesPoison(Kind) && is_contained(Mask, -1);
1938 }
1939 case TargetOpcode::G_FNEG:
1940 case TargetOpcode::G_PHI:
1941 case TargetOpcode::G_SELECT:
1942 case TargetOpcode::G_UREM:
1943 case TargetOpcode::G_SREM:
1944 case TargetOpcode::G_FREEZE:
1945 case TargetOpcode::G_ICMP:
1946 case TargetOpcode::G_FCMP:
1947 case TargetOpcode::G_FADD:
1948 case TargetOpcode::G_FSUB:
1949 case TargetOpcode::G_FMUL:
1950 case TargetOpcode::G_FDIV:
1951 case TargetOpcode::G_FREM:
1952 case TargetOpcode::G_PTR_ADD:
1953 return false;
1954 default:
1955 return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
1956 }
1957}
1958
1960 const MachineRegisterInfo &MRI,
1961 unsigned Depth,
1962 UndefPoisonKind Kind) {
1964 return false;
1965
1966 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1967
1968 switch (RegDef->getOpcode()) {
1969 case TargetOpcode::G_FREEZE:
1970 return true;
1971 case TargetOpcode::G_IMPLICIT_DEF:
1972 return !includesUndef(Kind);
1973 case TargetOpcode::G_CONSTANT:
1974 case TargetOpcode::G_FCONSTANT:
1975 return true;
1976 case TargetOpcode::G_BUILD_VECTOR: {
1977 GBuildVector *BV = cast<GBuildVector>(RegDef);
1978 unsigned NumSources = BV->getNumSources();
1979 for (unsigned I = 0; I < NumSources; ++I)
1981 Depth + 1, Kind))
1982 return false;
1983 return true;
1984 }
1985 case TargetOpcode::G_PHI: {
1986 GPhi *Phi = cast<GPhi>(RegDef);
1987 unsigned NumIncoming = Phi->getNumIncomingValues();
1988 for (unsigned I = 0; I < NumIncoming; ++I)
1989 if (!::isGuaranteedNotToBeUndefOrPoison(Phi->getIncomingValue(I), MRI,
1990 Depth + 1, Kind))
1991 return false;
1992 return true;
1993 }
1994 default: {
1995 auto MOCheck = [&](const MachineOperand &MO) {
1996 if (!MO.isReg())
1997 return true;
1998 return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,
1999 Kind);
2000 };
2002 /*ConsiderFlagsAndMetadata=*/true, Kind) &&
2003 all_of(RegDef->uses(), MOCheck);
2004 }
2005 }
2006}
2007
2009 bool ConsiderFlagsAndMetadata) {
2010 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
2012}
2013
2015 bool ConsiderFlagsAndMetadata = true) {
2016 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
2018}
2019
2021 const MachineRegisterInfo &MRI,
2022 unsigned Depth) {
2023 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2025}
2026
2028 const MachineRegisterInfo &MRI,
2029 unsigned Depth) {
2030 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2032}
2033
2035 const MachineRegisterInfo &MRI,
2036 unsigned Depth) {
2037 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2039}
2040
2042 if (Ty.isVector())
2043 return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
2044 Ty.getElementCount());
2045 return IntegerType::get(C, Ty.getSizeInBits());
2046}
2047
2049 switch (MI.getOpcode()) {
2050 default:
2051 return false;
2052 case TargetOpcode::G_ASSERT_ALIGN:
2053 case TargetOpcode::G_ASSERT_SEXT:
2054 case TargetOpcode::G_ASSERT_ZEXT:
2055 return true;
2056 }
2057}
2058
2060 assert(Kind == GIConstantKind::Scalar && "Expected scalar constant");
2061
2062 return Value;
2063}
2064
2065std::optional<GIConstant>
2068
2070 std::optional<ValueAndVReg> MayBeConstant =
2072 if (!MayBeConstant)
2073 return std::nullopt;
2074 return GIConstant(MayBeConstant->Value, GIConstantKind::ScalableVector);
2075 }
2076
2078 SmallVector<APInt> Values;
2079 unsigned NumSources = Build->getNumSources();
2080 for (unsigned I = 0; I < NumSources; ++I) {
2081 Register SrcReg = Build->getSourceReg(I);
2082 std::optional<ValueAndVReg> MayBeConstant =
2084 if (!MayBeConstant)
2085 return std::nullopt;
2086 Values.push_back(MayBeConstant->Value);
2087 }
2088 return GIConstant(Values);
2089 }
2090
2091 std::optional<ValueAndVReg> MayBeConstant =
2093 if (!MayBeConstant)
2094 return std::nullopt;
2095
2096 return GIConstant(MayBeConstant->Value, GIConstantKind::Scalar);
2097}
2098
2100 assert(Kind == GFConstantKind::Scalar && "Expected scalar constant");
2101
2102 return Values[0];
2103}
2104
2105std::optional<GFConstant>
2108
2110 std::optional<FPValueAndVReg> MayBeConstant =
2112 if (!MayBeConstant)
2113 return std::nullopt;
2114 return GFConstant(MayBeConstant->Value, GFConstantKind::ScalableVector);
2115 }
2116
2118 SmallVector<APFloat> Values;
2119 unsigned NumSources = Build->getNumSources();
2120 for (unsigned I = 0; I < NumSources; ++I) {
2121 Register SrcReg = Build->getSourceReg(I);
2122 std::optional<FPValueAndVReg> MayBeConstant =
2124 if (!MayBeConstant)
2125 return std::nullopt;
2126 Values.push_back(MayBeConstant->Value);
2127 }
2128 return GFConstant(Values);
2129 }
2130
2131 std::optional<FPValueAndVReg> MayBeConstant =
2133 if (!MayBeConstant)
2134 return std::nullopt;
2135
2136 return GFConstant(MayBeConstant->Value, GFConstantKind::Scalar);
2137}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void reportGISelDiagnostic(DiagnosticSeverity Severity, MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Definition Utils.cpp:234
static bool includesPoison(UndefPoisonKind Kind)
Definition Utils.cpp:1844
static bool includesUndef(UndefPoisonKind Kind)
Definition Utils.cpp:1848
static bool shiftAmountKnownInRange(Register ShiftAmount, const MachineRegisterInfo &MRI)
Shifts return poison if shiftwidth is larger than the bitwidth.
Definition Utils.cpp:1804
static bool isBuildVectorOp(unsigned Opcode)
Definition Utils.cpp:1357
static bool isConstantScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Definition Utils.cpp:1513
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Tracks DebugLocs between checkpoints and verifies that they are transferred.
#define I(x, y, z)
Definition MD5.cpp:57
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*-—===//
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define LLVM_DEBUG(...)
Definition Debug.h:114
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
UndefPoisonKind
static const char PassName[]
Class recording the (high level) value of a variable.
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static const fltSemantics & IEEEhalf()
Definition APFloat.h:294
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1259
void copySign(const APFloat &RHS)
Definition APFloat.h:1353
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition APFloat.cpp:5975
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1241
opStatus add(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1232
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition APFloat.h:1398
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1250
bool isSignaling() const
Definition APFloat.h:1514
APInt bitcastToAPInt() const
Definition APFloat.h:1404
opStatus mod(const APFloat &RHS)
Definition APFloat.h:1277
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition APInt.cpp:1584
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1023
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1044
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1677
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1503
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition APInt.cpp:1655
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1052
static bool isSameValue(const APInt &I1, const APInt &I2, bool SignedCompare=false)
Determine if two APInts have the same value, after zero-extending or sign-extending (if SignedCompare...
Definition APInt.h:555
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition APInt.cpp:1747
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:996
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:858
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
const APFloat & getValueAPF() const
Definition Constants.h:325
bool isNegative() const
Return true if the sign bit is set.
Definition Constants.h:332
bool isZero() const
Return true if the value is positive or negative zero.
Definition Constants.h:329
This is the shared class of boolean and integer constants.
Definition Constants.h:87
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
This is an important base class in LLVM.
Definition Constant.h:43
A debug info location.
Definition DebugLoc.h:123
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
Represents a G_BUILD_VECTOR.
Represents an extract vector element.
static LLVM_ABI std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
Definition Utils.cpp:2106
GFConstant(ArrayRef< APFloat > Values)
Definition Utils.h:698
LLVM_ABI APFloat getScalarValue() const
Returns the value, if this constant is a scalar.
Definition Utils.cpp:2099
LLVM_ABI APInt getScalarValue() const
Returns the value, if this constant is a scalar.
Definition Utils.cpp:2059
static LLVM_ABI std::optional< GIConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
Definition Utils.cpp:2066
GIConstant(ArrayRef< APInt > Values)
Definition Utils.h:657
Abstract class that contains various methods for clients to notify about changes.
KnownBits getKnownBits(Register R)
void insert(MachineInstr *I)
Add the specified instruction to the worklist if it isn't already in it.
MachineInstr * pop_back_val()
void remove(const MachineInstr *I)
Remove I from the worklist if it exists.
Represents an insert vector element.
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Represents a G_PHI.
Represents a G_SHUFFLE_VECTOR.
ArrayRef< int > getMask() const
Represents a splat vector.
Module * getParent()
Get the module that this global value is contained inside of...
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr ElementCount getElementCount() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
constexpr LLT getScalarType() const
static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
Describe properties that are true of each instruction in the target description file.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
GISelChangeObserver * getObserver() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Helper class to build MachineInstr.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
mop_range uses()
Returns all operands which may be register uses.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Diagnostic information for missed-optimization remarks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Represents a value which can be a Register or a constant.
Definition Utils.h:402
Holds all the information related to register banks.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
TargetInstrInfo - Interface to description of machine instruction set.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
TargetOptions Options
GlobalISelAbortMode GlobalISelAbort
EnableGlobalISelAbort - Control abort behaviour when global instruction selection fails to lower/sele...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM Value Representation.
Definition Value.h:75
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition APInt.h:2263
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition APInt.h:2268
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition APInt.h:2273
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition APInt.h:2278
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
DiagnosticInfoMIROptimization::MachineArgument MNV
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
LLVM_ABI Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII, MCRegister PhysReg, const TargetRegisterClass &RC, const DebugLoc &DL, LLT RegTy=LLT())
Return a virtual register corresponding to the incoming argument register PhysReg.
Definition Utils.cpp:921
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
LLVM_ABI std::optional< SmallVector< APInt > > ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2, unsigned DstScalarSizeInBits, unsigned ExtOp, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1040
@ Offset
Definition DWP.cpp:532
LLVM_ABI bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition Utils.cpp:1487
LLVM_ABI Type * getTypeForLLT(LLT Ty, LLVMContext &C)
Get the type back from LLT.
Definition Utils.cpp:2041
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition Utils.cpp:56
LLVM_ABI MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
Definition Utils.cpp:653
LLVM_ABI const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:461
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:294
LLVM_ABI std::optional< APFloat > ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
Definition Utils.cpp:995
LLVM_ABI std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1447
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition Utils.cpp:1612
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
LLVM_ABI std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition Utils.cpp:741
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1730
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:155
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI std::optional< SmallVector< unsigned > > ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI, std::function< unsigned(APInt)> CB)
Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on Src.
Definition Utils.cpp:1008
LLVM_ABI std::optional< APInt > ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI)
Definition Utils.cpp:954
LLVM_ABI std::optional< RegOrConstant > getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1500
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition APFloat.h:1706
GISelWorkList< 4 > SmallInstListTy
Definition Utils.h:577
LLVM_ABI std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
Definition Utils.cpp:1570
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition Utils.cpp:1594
LLVM_ABI MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition Utils.cpp:494
LLVM_ABI bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
Definition Utils.cpp:1627
bool isPreISelGenericOptimizationHint(unsigned Opcode)
LLVM_ABI void reportGISelWarning(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel warning as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:252
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
Definition Utils.cpp:1659
LLVM_ABI LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition Utils.cpp:1194
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:314
LLVM_ABI std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition Utils.cpp:672
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI const APInt & getIConstantFromReg(Register VReg, const MachineRegisterInfo &MRI)
VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:305
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
Definition APFloat.h:1661
LLVM_ABI bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
Definition Utils.cpp:1550
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
Definition Utils.cpp:200
LLVM_ABI void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver, SmallInstListTy &DeadInstChain)
Definition Utils.cpp:1696
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:258
LLVM_ABI std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
Definition Utils.cpp:439
LLVM_ABI bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition Utils.cpp:1493
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
LLVM_ABI SmallVector< APInt > ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Tries to constant fold a vector binop with sources Op1 and Op2.
Definition Utils.cpp:799
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
Definition Utils.cpp:1480
LLVM_ABI std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
Definition Utils.cpp:971
LLVM_ABI void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
Definition Utils.cpp:508
LLVM_ABI void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition Utils.cpp:1190
LLVM_ABI LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
Definition Utils.cpp:1261
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
Definition APFloat.h:1642
LLVM_ABI unsigned getInverseGMinMaxOpcode(unsigned MinMaxOpc)
Returns the inverse opcode of MinMaxOpc, which is a generic min/max opcode like G_SMIN.
Definition Utils.cpp:279
@ Mul
Product of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
LLVM_ABI std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
Definition Utils.cpp:447
LLVM_ABI bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Definition Utils.cpp:1672
LLVM_ABI std::optional< APFloat > isConstantOrConstantSplatVectorFP(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a float constant integer or a splat vector of float constant integers.
Definition Utils.cpp:1583
constexpr unsigned BitWidth
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition Utils.cpp:659
LLVM_ABI bool isBuildVectorConstantSplat(const Register Reg, const MachineRegisterInfo &MRI, int64_t SplatValue, bool AllowUndef)
Return true if the specified register is defined by G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all ...
Definition Utils.cpp:1406
LLVM_ABI void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition Utils.cpp:1725
DiagnosticSeverity
Defines the different supported severity of a diagnostic.
LLVM_ABI Register constrainRegToClass(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, Register Reg, const TargetRegisterClass &RegClass)
Try to constrain Reg to the specified register class.
Definition Utils.cpp:46
LLVM_ABI int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
Definition Utils.cpp:1684
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition Utils.cpp:433
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
LLVM_ABI bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition Utils.cpp:1749
bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI)
Returns true if Val can be assumed to never be a signaling NaN.
Definition Utils.h:347
LLVM_ABI std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
Definition Utils.cpp:469
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition Utils.cpp:1710
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI, MachineInstr &MI, ArrayRef< MachineOperand * > DbgUsers)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
Definition Utils.cpp:501
LLVM_ABI LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition Utils.cpp:1282
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition APFloat.h:1679
LLVM_ABI std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1465
LLVM_ABI bool isAssertMI(const MachineInstr &MI)
Returns true if the instruction MI is one of the assert instructions.
Definition Utils.cpp:2048
LLVM_ABI void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
Definition Utils.cpp:611
LLVM_ABI int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have other effects.
Definition Utils.cpp:221
LLVM_ABI Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition Utils.cpp:904
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
#define MORE()
Definition regcomp.c:246
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Simple struct used to hold a Register value and the instruction which defines it.
Definition Utils.h:229
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:305
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition KnownBits.h:302
This class contains a discriminated union of information about pointers in memory operands,...
int64_t Offset
Offset - This is an offset from the base Value*.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
Simple struct used to hold a constant integer value and a virtual register.
Definition Utils.h:188