//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <numeric>
#include <optional>

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

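// Illustrative use (a sketch, not part of this file; DestRC stands in for
// whatever class the target actually demands):
//   Register Out =
//       constrainRegToClass(MRI, TII, RBI, MI.getOperand(0).getReg(), DestRC);
//   // If Out differs from the original register, a fresh vreg was created
//   // and the caller must stitch the two together with a COPY; that is
//   // exactly what constrainOperandRegClass below does.
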
Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");

  // Save the old register class to check whether
  // the change notifications will be required.
  // TODO: A better approach would be to pass
  // the observers to constrainRegToClass().
  auto *OldRegClass = MRI.getRegClassOrNull(Reg);
  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    // FIXME: The copy needs to have the classes constrained for its operands.
    // Use operand's regbank to get the class for old register (Reg).
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");

  const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  if (OpRC) {
    // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
    // can have multiple regbanks for a superclass that combine different
    // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
    // resolved by targets during regbankselect should not be overridden.
    if (const auto *SubRC = TRI.getCommonSubClass(
            OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
      OpRC = SubRC;

    OpRC = TRI.getAllocatableClass(OpRC);
  }

  if (!OpRC) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Reg.isPhysical())
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

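// Illustrative use (a sketch, not from this file): a hand-written selector
// typically mutates the opcode first, then lets this helper constrain every
// explicit operand to the classes the new MCInstrDesc demands:
//   I.setDesc(TII.get(MyTarget::ADDrr)); // MyTarget::ADDrr is hypothetical
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
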
bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
  if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
    return true;

  // Otherwise match if the Src is already a regclass that is covered by the
  // Dst RegBank.
  return isa<const RegisterBank *>(DstRBC) && MRI.getRegClassOrNull(SrcReg) &&
         cast<const RegisterBank *>(DstRBC)->covers(
             *MRI.getRegClassOrNull(SrcReg));
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // Instructions without side-effects are dead iff they only define dead regs.
  // This function is hot and this loop returns early in the common case,
  // so only perform additional checks before this if absolutely necessary.
  for (const auto &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return MI.wouldBeTriviallyDead();
}

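// Example: a G_ADD whose result has no non-debug uses is trivially dead and
// safe to erase, while a store or a call is kept regardless of its uses,
// because MI.wouldBeTriviallyDead() rejects side-effecting instructions.
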
static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsGlobalISelAbortEnabled =
      MF.getTarget().Options.GlobalISelAbort == GlobalISelAbortMode::Enable;
  bool IsFatal = Severity == DS_Error && IsGlobalISelAbortEnabled;
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    reportFatalUsageError(Twine(R.getMsg()));
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Error, MF, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (MF.getTarget().Options.GlobalISelAbort == GlobalISelAbortMode::Enable ||
      MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, MORE, R);
}

unsigned llvm::getInverseGMinMaxOpcode(unsigned MinMaxOpc) {
  switch (MinMaxOpc) {
  case TargetOpcode::G_SMIN:
    return TargetOpcode::G_SMAX;
  case TargetOpcode::G_SMAX:
    return TargetOpcode::G_SMIN;
  case TargetOpcode::G_UMIN:
    return TargetOpcode::G_UMAX;
  case TargetOpcode::G_UMAX:
    return TargetOpcode::G_UMIN;
  default:
    llvm_unreachable("unrecognized opcode");
  }
}

std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
      VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return std::nullopt;
  return ValAndVReg->Value;
}

APInt llvm::getIConstantFromReg(Register Reg, const MachineRegisterInfo &MRI) {
  MachineInstr *Const = MRI.getVRegDef(Reg);
  assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&
         "expected a G_CONSTANT on Reg");
  return Const->getOperand(1).getCImm()->getValue();
}

std::optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
  std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return std::nullopt;
}

namespace {

// This function is used in many places, and as such, it has some
// micro-optimizations to try and make it as fast as it can be.
//
// - We use template arguments to avoid an indirect call caused by passing a
//   function_ref/std::function
// - GetAPCstValue does not return std::optional<APInt> as that's expensive.
//   Instead it returns true/false and places the result in a pre-constructed
//   APInt.
//
// Please change this function carefully and benchmark your changes.
template <bool (*IsConstantOpcode)(const MachineInstr *),
          bool (*GetAPCstValue)(const MachineInstr *MI, APInt &)>
std::optional<ValueAndVReg>
getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
                                  bool LookThroughInstrs = true,
                                  bool LookThroughAnyExt = false) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;

  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return std::nullopt;
      [[fallthrough]];
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (VReg.isPhysical())
        return std::nullopt;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return std::nullopt;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return std::nullopt;

  APInt Val;
  if (!GetAPCstValue(MI, Val))
    return std::nullopt;
  for (auto &Pair : reverse(SeenOpcodes)) {
    switch (Pair.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(Pair.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(Pair.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(Pair.second);
      break;
    }
  }

  return ValueAndVReg{std::move(Val), VReg};
}

bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}

bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}

bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}

bool getCImmAsAPInt(const MachineInstr *MI, APInt &Result) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (!CstVal.isCImm())
    return false;
  Result = CstVal.getCImm()->getValue();
  return true;
}

bool getCImmOrFPImmAsAPInt(const MachineInstr *MI, APInt &Result) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    Result = CstVal.getCImm()->getValue();
  else if (CstVal.isFPImm())
    Result = CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  else
    return false;
  return true;
}

} // end anonymous namespace

std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
      VReg, MRI, LookThroughInstrs);
}

std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough<isAnyConstant,
                                           getCImmOrFPImmAsAPInt>(
      VReg, MRI, LookThroughInstrs, LookThroughAnyExt);
}

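// Worked example of the look-through walk (a sketch of GMIR, not from this
// file): given
//   %c:_(s32) = G_CONSTANT i32 -1
//   %t:_(s16) = G_TRUNC %c
// getIConstantVRegValWithLookThrough(%t, MRI) records the G_TRUNC, reaches
// the G_CONSTANT, and replays the truncation, returning the 16-bit value
// 0xFFFF together with %c as the constant-defining vreg.
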
std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg =
      getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
          VReg, MRI, LookThroughInstrs);
  if (!Reg)
    return std::nullopt;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

std::optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  // This assumes that the code is in SSA form, so there should only be one
  // definition.
  auto DefIt = MRI.def_begin(Reg);
  if (DefIt == MRI.def_end())
    return {};
  MachineOperand &DefOpnd = *DefIt;
  MachineInstr *DefMI = DefOpnd.getParent();
  auto DstTy = MRI.getType(DefOpnd.getReg());
  if (!DstTy.isValid())
    return std::nullopt;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
                        SmallVectorImpl<Register> &VRegs,
                        MachineIRBuilder &MIRBuilder,
                        MachineRegisterInfo &MRI) {
  for (int i = 0; i < NumParts; ++i)
    VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
  MIRBuilder.buildUnmerge(VRegs, Reg);
}

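// Example: on %x:_(s64), extractParts(%x, LLT::scalar(32), 2, Parts, B, MRI)
// emits
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %x
// and appends %lo and %hi to Parts. The overload below additionally handles
// sizes that do not divide evenly by producing leftover registers.
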
bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
                        SmallVectorImpl<Register> &VRegs,
                        SmallVectorImpl<Register> &LeftoverRegs,
                        MachineIRBuilder &MIRBuilder,
                        MachineRegisterInfo &MRI) {
  assert(!LeftoverTy.isValid() && "this is an out argument");

  unsigned RegSize = RegTy.getSizeInBits();
  unsigned MainSize = MainTy.getSizeInBits();
  unsigned NumParts = RegSize / MainSize;
  unsigned LeftoverSize = RegSize - NumParts * MainSize;

  // Use an unmerge when possible.
  if (LeftoverSize == 0) {
    for (unsigned I = 0; I < NumParts; ++I)
      VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
    MIRBuilder.buildUnmerge(VRegs, Reg);
    return true;
  }

  // Try to use unmerge for irregular vector split where possible
  // For example when splitting a <6 x i32> into <4 x i32> with <2 x i32>
  // leftover, it becomes:
  //   <2 x i32> %2, <2 x i32> %3, <2 x i32> %4 = G_UNMERGE_VALUE <6 x i32> %1
  //   <4 x i32> %5 = G_CONCAT_VECTOR <2 x i32> %2, <2 x i32> %3
  if (RegTy.isVector() && MainTy.isVector()) {
    unsigned RegNumElts = RegTy.getNumElements();
    unsigned MainNumElts = MainTy.getNumElements();
    unsigned LeftoverNumElts = RegNumElts % MainNumElts;
    // If can unmerge to LeftoverTy, do it
    if (MainNumElts % LeftoverNumElts == 0 &&
        RegNumElts % LeftoverNumElts == 0 &&
        RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
        LeftoverNumElts > 1) {
      LeftoverTy = LLT::fixed_vector(LeftoverNumElts, RegTy.getElementType());

      // Unmerge the SrcReg to LeftoverTy vectors
      SmallVector<Register, 4> UnmergeValues;
      extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
                   MIRBuilder, MRI);

      // Find how many LeftoverTy makes one MainTy
      unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
      unsigned NumOfLeftoverVal =
          ((RegNumElts % MainNumElts) / LeftoverNumElts);

      // Create as many MainTy as possible using unmerged value
      SmallVector<Register, 4> MergeValues;
      for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
        MergeValues.push_back(UnmergeValues[I]);
        if (MergeValues.size() == LeftoverPerMain) {
          VRegs.push_back(
              MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
          MergeValues.clear();
        }
      }
      // Populate LeftoverRegs with the leftovers
      for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
           I < UnmergeValues.size(); I++) {
        LeftoverRegs.push_back(UnmergeValues[I]);
      }
      return true;
    }
  }
  // Perform irregular split. Leftover is last element of RegPieces.
  if (MainTy.isVector()) {
    SmallVector<Register, 8> RegPieces;
    extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
                       MRI);
    for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
      VRegs.push_back(RegPieces[i]);
    LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
    LeftoverTy = MRI.getType(LeftoverRegs[0]);
    return true;
  }

  LeftoverTy = LLT::scalar(LeftoverSize);
  // For irregular sizes, extract the individual parts.
  for (unsigned I = 0; I != NumParts; ++I) {
    Register NewReg = MRI.createGenericVirtualRegister(MainTy);
    VRegs.push_back(NewReg);
    MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
  }

  for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
       Offset += LeftoverSize) {
    Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
    LeftoverRegs.push_back(NewReg);
    MIRBuilder.buildExtract(NewReg, Reg, Offset);
  }

  return true;
}

void llvm::extractVectorParts(Register Reg, unsigned NumElts,
                              SmallVectorImpl<Register> &VRegs,
                              MachineIRBuilder &MIRBuilder,
                              MachineRegisterInfo &MRI) {
  LLT RegTy = MRI.getType(Reg);
  assert(RegTy.isVector() && "Expected a vector type");

  LLT EltTy = RegTy.getElementType();
  LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
  unsigned RegNumElts = RegTy.getNumElements();
  unsigned LeftoverNumElts = RegNumElts % NumElts;
  unsigned NumNarrowTyPieces = RegNumElts / NumElts;

  // Perfect split without leftover
  if (LeftoverNumElts == 0)
    return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
                        MRI);

  // Irregular split. Provide direct access to all elements for artifact
  // combiner using unmerge to elements. Then build vectors with NumElts
  // elements. Remaining element(s) will be (used to build vector) Leftover.
  SmallVector<Register, 8> Elts;
  extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);

  unsigned Offset = 0;
  // Requested sub-vectors of NarrowTy.
  for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
    ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
    VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
  }

  // Leftover element(s).
  if (LeftoverNumElts == 1) {
    VRegs.push_back(Elts[Offset]);
  } else {
    LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
    ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
    VRegs.push_back(
        MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
  }
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

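// Example: getAPFloatFromSize(1.0, 16) builds the value at double precision
// and then converts it to IEEE half with round-to-nearest-even; only 16-,
// 32- and 64-bit sizes are handled here (notably, no bfloat16 path).
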
std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
                                             const Register Op1,
                                             const Register Op2,
                                             const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return std::nullopt;

  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return std::nullopt;

  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_PTR_ADD:
    // Types can be of different width here.
    // Result needs to be the same width as C1, so trunc or sext C2.
    return C1 + C2.sextOrTrunc(C1.getBitWidth());
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  case TargetOpcode::G_SMIN:
    return APIntOps::smin(C1, C2);
  case TargetOpcode::G_SMAX:
    return APIntOps::smax(C1, C2);
  case TargetOpcode::G_UMIN:
    return APIntOps::umin(C1, C2);
  case TargetOpcode::G_UMAX:
    return APIntOps::umax(C1, C2);
  }

  return std::nullopt;
}

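// Example of the G_PTR_ADD width rule above: with a 64-bit base constant
// 0x100000000 and a 32-bit offset constant -8, the offset is sign-extended
// to 64 bits first, so the fold produces 0xFFFFFFF8.
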
std::optional<APFloat>
llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                          const Register Op2, const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return std::nullopt;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return std::nullopt;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    if (C1.isSignaling() || C2.isSignaling())
      return std::nullopt;
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    if (C1.isSignaling() || C2.isSignaling())
      return std::nullopt;
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }

  return std::nullopt;
}

SmallVector<APInt>
llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                              const Register Op2,
                              const MachineRegisterInfo &MRI) {
  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
  if (!SrcVec2)
    return SmallVector<APInt>();

  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  if (!SrcVec1)
    return SmallVector<APInt>();

  SmallVector<APInt> FoldedElements;
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
    auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
                                      SrcVec2->getSourceReg(Idx), MRI);
    if (!MaybeCst)
      return SmallVector<APInt>();
    FoldedElements.push_back(*MaybeCst);
  }
  return FoldedElements;
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  if (DefMI->getFlag(MachineInstr::FmNoNans))
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FTAN:
  case TargetOpcode::G_FACOS:
  case TargetOpcode::G_FASIN:
  case TargetOpcode::G_FATAN:
  case TargetOpcode::G_FATAN2:
  case TargetOpcode::G_FCOSH:
  case TargetOpcode::G_FSINH:
  case TargetOpcode::G_FTANH:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FMAD:
    if (SNaN)
      return true;

    // TODO: Need isKnownNeverInfinity
    return false;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet. For now, just handle the ones inserted during
    // legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        const DebugLoc &DL, LLT RegTy) {
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy was added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
      .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

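// Illustrative use (a sketch; the physical register, class and type are
// placeholders, not a real calling convention):
//   Register VReg = getFunctionLiveInPhysReg(MF, TII, ArgPhysReg, ArgRC, DL,
//                                            LLT::scalar(64));
// Calling this again for the same ArgPhysReg returns the same vreg as long
// as the entry-block COPY it created is still around.
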
std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
                                             const Register Op1, uint64_t Imm,
                                             const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return std::nullopt;
}

std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
                                              const Register Op0,
                                              const MachineRegisterInfo &MRI) {
  std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
  if (!Val)
    return Val;

  const unsigned DstSize = DstTy.getScalarSizeInBits();

  switch (Opcode) {
  case TargetOpcode::G_SEXT:
    return Val->sext(DstSize);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    // TODO: DAG considers target preference when constant folding any_extend.
    return Val->zext(DstSize);
  default:
    break;
  }

  llvm_unreachable("unexpected cast opcode to constant fold");
}

std::optional<APFloat>
llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
                             const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return std::nullopt;
}

std::optional<SmallVector<unsigned>>
llvm::ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI,
                             std::function<unsigned(APInt)> CB) {
  LLT Ty = MRI.getType(Src);
  SmallVector<unsigned> FoldedCTLZs;
  auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
    auto MaybeCst = getIConstantVRegVal(R, MRI);
    if (!MaybeCst)
      return std::nullopt;
    return CB(*MaybeCst);
  };
  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
    if (!BV)
      return std::nullopt;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
        FoldedCTLZs.emplace_back(*MaybeFold);
        continue;
      }
      return std::nullopt;
    }
    return FoldedCTLZs;
  }
  if (auto MaybeCst = tryFoldScalar(Src)) {
    FoldedCTLZs.emplace_back(*MaybeCst);
    return FoldedCTLZs;
  }
  return std::nullopt;
}

std::optional<SmallVector<APInt>>
llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
                       unsigned DstScalarSizeInBits, unsigned ExtOp,
                       const MachineRegisterInfo &MRI) {
  assert(ExtOp == TargetOpcode::G_SEXT || ExtOp == TargetOpcode::G_ZEXT ||
         ExtOp == TargetOpcode::G_ANYEXT);

  const LLT Ty = MRI.getType(Op1);

  auto GetICmpResultCst = [&](bool IsTrue) {
    if (IsTrue)
      return ExtOp == TargetOpcode::G_SEXT
                 ? APInt::getAllOnes(DstScalarSizeInBits)
                 : APInt::getOneBitSet(DstScalarSizeInBits, 0);
    return APInt::getZero(DstScalarSizeInBits);
  };

  auto TryFoldScalar = [&](Register LHS, Register RHS) -> std::optional<APInt> {
    auto RHSCst = getIConstantVRegVal(RHS, MRI);
    if (!RHSCst)
      return std::nullopt;
    auto LHSCst = getIConstantVRegVal(LHS, MRI);
    if (!LHSCst)
      return std::nullopt;

    switch (Pred) {
    case CmpInst::ICMP_EQ:
      return GetICmpResultCst(LHSCst->eq(*RHSCst));
    case CmpInst::ICMP_NE:
      return GetICmpResultCst(LHSCst->ne(*RHSCst));
    case CmpInst::ICMP_UGT:
      return GetICmpResultCst(LHSCst->ugt(*RHSCst));
    case CmpInst::ICMP_UGE:
      return GetICmpResultCst(LHSCst->uge(*RHSCst));
    case CmpInst::ICMP_ULT:
      return GetICmpResultCst(LHSCst->ult(*RHSCst));
    case CmpInst::ICMP_ULE:
      return GetICmpResultCst(LHSCst->ule(*RHSCst));
    case CmpInst::ICMP_SGT:
      return GetICmpResultCst(LHSCst->sgt(*RHSCst));
    case CmpInst::ICMP_SGE:
      return GetICmpResultCst(LHSCst->sge(*RHSCst));
    case CmpInst::ICMP_SLT:
      return GetICmpResultCst(LHSCst->slt(*RHSCst));
    case CmpInst::ICMP_SLE:
      return GetICmpResultCst(LHSCst->sle(*RHSCst));
    default:
      return std::nullopt;
    }
  };

  SmallVector<APInt> FoldedICmps;

  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
    auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
    if (!BV1 || !BV2)
      return std::nullopt;
    assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
    for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
      if (auto MaybeFold =
              TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
        FoldedICmps.emplace_back(*MaybeFold);
        continue;
      }
      return std::nullopt;
    }
    return FoldedICmps;
  }

  if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
    FoldedICmps.emplace_back(*MaybeCst);
    return FoldedICmps;
  }

  return std::nullopt;
}

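// Example: folding ICMP_ULT of G_CONSTANT 3 and G_CONSTANT 5 with a 32-bit
// destination yields all-ones when ExtOp is G_SEXT (true sign-extends to -1)
// and 1 when ExtOp is G_ZEXT.
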
bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelValueTracking *VT) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
      if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, VT))
        return false;

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants since we would need to know if number of leading
    // zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
      auto Const = getIConstantVRegVal(MO.getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!VT)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = VT->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

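// Example: (G_SHL (G_CONSTANT 1), %n) always has exactly one bit set, since
// shifting the bit off the end is undefined, and likewise
// (G_LSHR (G_CONSTANT signmask), %n); both are caught by the cases above
// without consulting known bits.
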
void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy;

  if (OrigTy.isVector() && TargetTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    LLT TargetElt = TargetTy.getElementType();

    // TODO: The docstring for this function says the intention is to use this
    // function to build MERGE/UNMERGE instructions. It won't be the case that
    // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
    // could implement getLCMType between the two in the future if there was a
    // need, but it is not worth it now as this function should not be used in
    // that way.
    assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
            (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
           "getLCMType not implemented between fixed and scalable vectors.");

    if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
      int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
                                TargetTy.getElementCount().getKnownMinValue());
      // Prefer the original element type.
      ElementCount Mul = OrigTy.getElementCount().multiplyCoefficientBy(
          TargetTy.getElementCount().getKnownMinValue());
      return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
                         OrigTy.getElementType());
    }
    unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
                            TargetTy.getSizeInBits().getKnownMinValue());
    return LLT::vector(
        ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
        OrigElt);
  }

  // One type is scalar, one type is vector
  if (OrigTy.isVector() || TargetTy.isVector()) {
    LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
    LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
    LLT EltTy = VecTy.getElementType();
    LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;

    // Prefer scalar type from OrigTy.
    if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
      return LLT::vector(VecTy.getElementCount(), OrigEltTy);

    // Different size scalars. Create vector with the same total size.
    // LCM will take fixed/scalable from VecTy.
    unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
                                VecTy.getElementCount().getKnownMinValue(),
                            ScalarTy.getSizeInBits().getFixedValue());
    // Prefer type from OrigTy
    return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
                                         VecTy.getElementCount().isScalable()),
                       OrigEltTy);
  }

  // At this point, both types are scalars of different size
  unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
                          TargetTy.getSizeInBits().getFixedValue());
  // Preserve pointer types.
  if (LCM == OrigTy.getSizeInBits())
    return OrigTy;
  if (LCM == TargetTy.getSizeInBits())
    return TargetTy;
  return LLT::scalar(LCM);
}

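// Worked examples: getLCMType(<5 x s32>, <2 x s32>) is <10 x s32> (same
// element type, gcd(5, 2) == 1), and getLCMType(s48, s64) is s192 (the lcm
// of the two scalar sizes).
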
LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
  if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
      (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
    report_fatal_error(
        "getCoverTy not implemented between fixed and scalable vectors.");

  if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
      (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
    return getLCMType(OrigTy, TargetTy);

  unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
  unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
  if (OrigTyNumElts % TargetTyNumElts == 0)
    return OrigTy;

  unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
  return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
                             OrigTy.getElementType());
}

LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy;

  if (OrigTy.isVector() && TargetTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();

    // TODO: The docstring for this function says the intention is to use this
    // function to build MERGE/UNMERGE instructions. It won't be the case that
    // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
    // could implement getGCDType between the two in the future if there was a
    // need, but it is not worth it now as this function should not be used in
    // that way.
    assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
            (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
           "getGCDType not implemented between fixed and scalable vectors.");

    unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
                            TargetTy.getSizeInBits().getKnownMinValue());
    if (GCD == OrigElt.getSizeInBits())
      return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
                                 OrigElt);

    // Cannot produce original element type, but both have vscale in common.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
                                 GCD);

    return LLT::vector(
        ElementCount::get(GCD / OrigElt.getSizeInBits().getFixedValue(),
                          OrigTy.isScalable()),
        OrigElt);
  }

  // If one type is vector and the element size matches the scalar size, then
  // the gcd is the scalar type.
  if (OrigTy.isVector() &&
      OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy.getElementType();
  if (TargetTy.isVector() &&
      TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
    return OrigTy;

  // At this point, both types are either scalars of different type or one is a
  // vector and one is a scalar. If both types are scalars, the GCD type is the
  // GCD between the two scalar sizes. If one is vector and one is scalar, then
  // the GCD type is the GCD between the scalar and the vector element size.
  LLT OrigScalar = OrigTy.getScalarType();
  LLT TargetScalar = TargetTy.getScalarType();
  unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
                          TargetScalar.getSizeInBits().getFixedValue());
  return LLT::scalar(GCD);
}

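// Worked examples: getGCDType(<4 x s32>, <2 x s32>) is <2 x s32> (the gcd of
// the total sizes, 64 bits, still holds whole s32 elements), and
// getGCDType(<2 x s64>, s32) is s32 (the gcd of the scalar sizes).
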
std::optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return std::nullopt;

  return SplatValue;
}

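// Example: a shuffle mask of [1, -1, 1, 1] splats lane 1 (undef lanes are
// ignored), a fully-undef mask [-1, -1] reports lane 0, and [0, 1] is not a
// splat, so std::nullopt is returned.
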
static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

namespace {

std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef) {
  MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
  if (!MI)
    return std::nullopt;

  bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
  if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
    return std::nullopt;

  std::optional<ValueAndVReg> SplatValAndReg;
  for (MachineOperand &Op : MI->uses()) {
    Register Element = Op.getReg();
    // If we have a G_CONCAT_VECTOR, we recursively look into the
    // vectors that we're concatenating to see if they're splats.
    auto ElementValAndReg =
        isConcatVectorsOp
            ? getAnyConstantSplat(Element, MRI, AllowUndef)
            : getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);

    // If AllowUndef, treat undef as value that will result in a constant splat.
    if (!ElementValAndReg) {
      if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
        continue;
      return std::nullopt;
    }

    // Record splat value
    if (!SplatValAndReg)
      SplatValAndReg = ElementValAndReg;

    // Different constant than the one already recorded, not a constant splat.
    if (SplatValAndReg->Value != ElementValAndReg->Value)
      return std::nullopt;
  }

  return SplatValAndReg;
}

} // end anonymous namespace

bool llvm::isBuildVectorConstantSplat(const Register Reg,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
    return SplatValAndReg->Value.getSExtValue() == SplatValue;

  return false;
}

bool llvm::isBuildVectorConstantSplat(const Register Reg,
                                      const MachineRegisterInfo &MRI,
                                      const APInt &SplatValue,
                                      bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef)) {
    if (SplatValAndReg->Value.getBitWidth() < SplatValue.getBitWidth())
      return APInt::isSameValue(
          SplatValAndReg->Value.sext(SplatValue.getBitWidth()), SplatValue);
    return APInt::isSameValue(
        SplatValAndReg->Value,
        SplatValue.sext(SplatValAndReg->Value.getBitWidth()));
  }

  return false;
}

bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
                                    AllowUndef);
}

bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      const APInt &SplatValue,
                                      bool AllowUndef) {
  return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
                                    AllowUndef);
}

std::optional<APInt>
llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
    if (std::optional<ValueAndVReg> ValAndVReg =
            getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
      return ValAndVReg->Value;
  }

  return std::nullopt;
}

std::optional<APInt>
llvm::getIConstantSplatVal(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
}

std::optional<int64_t>
llvm::getIConstantSplatSExtVal(const Register Reg,
                               const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
    return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
  return std::nullopt;
}

std::optional<int64_t>
llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI) {
  return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
}

std::optional<FPValueAndVReg>
llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
                        bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
    return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
  return std::nullopt;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI,
                                 bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
}

std::optional<RegOrConstant>
llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return std::nullopt;
  if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(drop_begin(MI.operands(), 2),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return std::nullopt;
  return RegOrConstant(Reg);
}

static bool isConstantScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowFP = true,
                             bool AllowOpaqueConstants = true) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_IMPLICIT_DEF:
    return true;
  case TargetOpcode::G_FCONSTANT:
    return AllowFP;
  case TargetOpcode::G_GLOBAL_VALUE:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_BLOCK_ADDR:
  case TargetOpcode::G_JUMP_TABLE:
    return AllowOpaqueConstants;
  default:
    return false;
  }
}

bool llvm::isConstantOrConstantVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return true;
  GBuildVector *BV = getOpcodeDef<GBuildVector>(Def, MRI);
  if (!BV)
    return false;
  for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
    if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
        getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
      continue;
    return false;
  }
  return true;
}

bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      bool AllowFP, bool AllowOpaqueConstants) {
  if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
    return true;

  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
    if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
      return false;
  }

  return true;
}

std::optional<APInt>
llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return C->Value;
  auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
  if (!MaybeCst)
    return std::nullopt;
  const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
  return APInt(ScalarSize, *MaybeCst, true);
}

std::optional<APFloat>
llvm::isConstantOrConstantSplatVectorFP(MachineInstr &MI,
                                        const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto FpConst = getFConstantVRegValWithLookThrough(Def, MRI))
    return FpConst->Value;
  auto MaybeCstFP = getFConstantSplat(Def, MRI, /*allowUndef=*/false);
  if (!MaybeCstFP)
    return std::nullopt;
  return MaybeCstFP->Value;
}

bool llvm::isNullOrNullSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI, bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isNullValue();
  case TargetOpcode::G_FCONSTANT: {
    const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
    return FPImm->isZero() && !FPImm->isNegative();
  }
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
      return false;
    return isBuildVectorAllZeros(MI, MRI);
  }
}

bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                                   const MachineRegisterInfo &MRI,
                                   bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isAllOnesValue();
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
      return false;
    return isBuildVectorAllOnes(MI, MRI);
  }
}

bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
                           bool IsVector, bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return ~Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == 0;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LostDebugLocObserver *LocObserver,
                            SmallInstListTy &DeadInstChain) {
  for (MachineOperand &Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual())
      DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
  }
  LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
  DeadInstChain.remove(&MI);
  MI.eraseFromParent();
  if (LocObserver)
    LocObserver->checkpoint(false);
}

void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
                       MachineRegisterInfo &MRI,
                       LostDebugLocObserver *LocObserver) {
  SmallInstListTy DeadInstChain;
  for (MachineInstr *MI : DeadInstrs)
    saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);

  while (!DeadInstChain.empty()) {
    MachineInstr *Inst = DeadInstChain.pop_back_val();
    if (!isTriviallyDead(*Inst, MRI))
      continue;
    saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
  }
}

void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver) {
  return eraseInstrs({&MI}, MRI, LocObserver);
}

void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
  for (auto &Def : MI.defs()) {
    assert(Def.isReg() && "Must be a reg");

    SmallVector<MachineOperand *, 1> DbgUsers;
    for (auto &MOUse : MRI.use_operands(Def.getReg())) {
      MachineInstr *DbgValue = MOUse.getParent();
      // Ignore partially formed DBG_VALUEs.
      if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
        DbgUsers.push_back(&MOUse);
      }
    }

    if (!DbgUsers.empty()) {
      salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
    }
  }
}

bool llvm::isPreISelGenericFloatingPointOpcode(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::G_FABS:
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FCANONICALIZE:
  case TargetOpcode::G_FCEIL:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FCOPYSIGN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FEXP2:
  case TargetOpcode::G_FEXP:
  case TargetOpcode::G_FFLOOR:
  case TargetOpcode::G_FLOG10:
  case TargetOpcode::G_FLOG2:
  case TargetOpcode::G_FLOG:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FMAD:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMAXIMUMNUM:
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXNUM_IEEE:
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMINIMUMNUM:
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FNEARBYINT:
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPOW:
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FTAN:
  case TargetOpcode::G_FACOS:
  case TargetOpcode::G_FASIN:
  case TargetOpcode::G_FATAN:
  case TargetOpcode::G_FATAN2:
  case TargetOpcode::G_FCOSH:
  case TargetOpcode::G_FSINH:
  case TargetOpcode::G_FTANH:
  case TargetOpcode::G_FSQRT:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_INTRINSIC_ROUND:
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return true;
  default:
    return false;
  }
}

/// Shifts return poison if shiftwidth is larger than the bitwidth.
static bool shiftAmountKnownInRange(Register ShiftAmount,
                                    const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(ShiftAmount);

  if (Ty.isScalableVector())
    return false; // Can't tell, just return false to be safe

  if (Ty.isScalar()) {
    std::optional<ValueAndVReg> Val =
        getIConstantVRegValWithLookThrough(ShiftAmount, MRI);
    if (!Val)
      return false;
    return Val->Value.ult(Ty.getScalarSizeInBits());
  }

  GBuildVector *BV = getOpcodeDef<GBuildVector>(ShiftAmount, MRI);
  if (!BV)
    return false;

  unsigned Sources = BV->getNumSources();
  for (unsigned I = 0; I < Sources; ++I) {
    std::optional<ValueAndVReg> Val =
        getIConstantVRegValWithLookThrough(BV->getSourceReg(I), MRI);
    if (!Val)
      return false;
    if (!Val->Value.ult(Ty.getScalarSizeInBits()))
      return false;
  }

  return true;
}

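// Example: for a <2 x s8> shift amount built from constants 3 and 7, every
// lane is below the 8-bit width, so the shift cannot create poison; with 3
// and 8 (or any non-constant lane) this conservatively returns false.
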
1835namespace {
1836enum class UndefPoisonKind {
1837 PoisonOnly = (1 << 0),
1838 UndefOnly = (1 << 1),
1840};
1841}
1842
1844 return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
1845}
1846
1848 return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
1849}
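// Example: the kinds form a bitmask, so the combined UndefOrPoison kind
// (PoisonOnly | UndefOnly) satisfies both predicates:
//
//   includesPoison(UndefPoisonKind::UndefOrPoison); // true
//   includesUndef(UndefPoisonKind::PoisonOnly);     // false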
1850
1851static bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI,
1852 bool ConsiderFlagsAndMetadata,
1853 UndefPoisonKind Kind) {
1854 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1855
1856 if (ConsiderFlagsAndMetadata && includesPoison(Kind))
1857 if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
1858 if (GMI->hasPoisonGeneratingFlags())
1859 return true;
1860
1861 // Check whether opcode is a poison/undef-generating operation.
1862 switch (RegDef->getOpcode()) {
1863 case TargetOpcode::G_BUILD_VECTOR:
1864 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
1865 return false;
1866 case TargetOpcode::G_SHL:
1867 case TargetOpcode::G_ASHR:
1868 case TargetOpcode::G_LSHR:
1869 return includesPoison(Kind) &&
1870 !shiftAmountKnownInRange(RegDef->getOperand(2).getReg(), MRI);
1871 case TargetOpcode::G_FPTOSI:
1872 case TargetOpcode::G_FPTOUI:
1873 // fptosi/ui yields poison if the resulting value does not fit in the
1874 // destination type.
1875 return true;
1876 case TargetOpcode::G_CTLZ:
1877 case TargetOpcode::G_CTTZ:
1878 case TargetOpcode::G_ABS:
1879 case TargetOpcode::G_CTPOP:
1880 case TargetOpcode::G_BSWAP:
1881 case TargetOpcode::G_BITREVERSE:
1882 case TargetOpcode::G_FSHL:
1883 case TargetOpcode::G_FSHR:
1884 case TargetOpcode::G_SMAX:
1885 case TargetOpcode::G_SMIN:
1886 case TargetOpcode::G_SCMP:
1887 case TargetOpcode::G_UMAX:
1888 case TargetOpcode::G_UMIN:
1889 case TargetOpcode::G_UCMP:
1890 case TargetOpcode::G_PTRMASK:
1891 case TargetOpcode::G_SADDO:
1892 case TargetOpcode::G_SSUBO:
1893 case TargetOpcode::G_UADDO:
1894 case TargetOpcode::G_USUBO:
1895 case TargetOpcode::G_SMULO:
1896 case TargetOpcode::G_UMULO:
1897 case TargetOpcode::G_SADDSAT:
1898 case TargetOpcode::G_UADDSAT:
1899 case TargetOpcode::G_SSUBSAT:
1900 case TargetOpcode::G_USUBSAT:
1901 case TargetOpcode::G_SBFX:
1902 case TargetOpcode::G_UBFX:
1903 return false;
1904 case TargetOpcode::G_SSHLSAT:
1905 case TargetOpcode::G_USHLSAT:
1906 return includesPoison(Kind) &&
1907 !shiftAmountKnownInRange(RegDef->getOperand(2).getReg(), MRI);
1908 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1909 GInsertVectorElement *Insert = cast<GInsertVectorElement>(RegDef);
1910 if (includesPoison(Kind)) {
1911 std::optional<ValueAndVReg> Index =
1912 getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
1913 if (!Index)
1914 return true;
1915 LLT VecTy = MRI.getType(Insert->getVectorReg());
1916 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1917 }
1918 return false;
1919 }
1920 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1921 GExtractVectorElement *Extract = cast<GExtractVectorElement>(RegDef);
1922 if (includesPoison(Kind)) {
1923 std::optional<ValueAndVReg> Index =
1924 getIConstantVRegValWithLookThrough(Extract->getIndexReg(), MRI);
1925 if (!Index)
1926 return true;
1927 LLT VecTy = MRI.getType(Extract->getVectorReg());
1928 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1929 }
1930 return false;
1931 }
1932 case TargetOpcode::G_SHUFFLE_VECTOR: {
1933 GShuffleVector *Shuffle = cast<GShuffleVector>(RegDef);
1934 ArrayRef<int> Mask = Shuffle->getMask();
1935 return includesPoison(Kind) && is_contained(Mask, -1);
1936 }
1937 case TargetOpcode::G_FNEG:
1938 case TargetOpcode::G_PHI:
1939 case TargetOpcode::G_SELECT:
1940 case TargetOpcode::G_UREM:
1941 case TargetOpcode::G_SREM:
1942 case TargetOpcode::G_FREEZE:
1943 case TargetOpcode::G_ICMP:
1944 case TargetOpcode::G_FCMP:
1945 case TargetOpcode::G_FADD:
1946 case TargetOpcode::G_FSUB:
1947 case TargetOpcode::G_FMUL:
1948 case TargetOpcode::G_FDIV:
1949 case TargetOpcode::G_FREM:
1950 case TargetOpcode::G_PTR_ADD:
1951 return false;
1952 default:
1953 return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
1954 }
1955}
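// Example: G_SHL %x, %amt can create poison unless %amt is provably below
// the bit width, while ops in the "return false" lists (G_FREEZE, G_FADD,
// the saturating arithmetic ops, ...) never introduce undef/poison
// themselves. Unknown opcodes conservatively report true unless they are
// plain cast or binary ops, and with ConsiderFlagsAndMetadata set, any
// poison-generating flag (e.g. nuw/nsw) short-circuits to true.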
1956
1957static bool isGuaranteedNotToBeUndefOrPoison(Register Reg,
1958 const MachineRegisterInfo &MRI,
1959 unsigned Depth,
1960 UndefPoisonKind Kind) {
1961 if (Depth >= MaxAnalysisRecursionDepth)
1962 return false;
1963
1964 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1965
1966 switch (RegDef->getOpcode()) {
1967 case TargetOpcode::G_FREEZE:
1968 return true;
1969 case TargetOpcode::G_IMPLICIT_DEF:
1970 return !includesUndef(Kind);
1971 case TargetOpcode::G_CONSTANT:
1972 case TargetOpcode::G_FCONSTANT:
1973 return true;
1974 case TargetOpcode::G_BUILD_VECTOR: {
1975 GBuildVector *BV = cast<GBuildVector>(RegDef);
1976 unsigned NumSources = BV->getNumSources();
1977 for (unsigned I = 0; I < NumSources; ++I)
1978 if (!::isGuaranteedNotToBeUndefOrPoison(BV->getSourceReg(I), MRI,
1979 Depth + 1, Kind))
1980 return false;
1981 return true;
1982 }
1983 case TargetOpcode::G_PHI: {
1984 GPhi *Phi = cast<GPhi>(RegDef);
1985 unsigned NumIncoming = Phi->getNumIncomingValues();
1986 for (unsigned I = 0; I < NumIncoming; ++I)
1987 if (!::isGuaranteedNotToBeUndefOrPoison(Phi->getIncomingValue(I), MRI,
1988 Depth + 1, Kind))
1989 return false;
1990 return true;
1991 }
1992 default: {
1993 auto MOCheck = [&](const MachineOperand &MO) {
1994 if (!MO.isReg())
1995 return true;
1996 return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,
1997 Kind);
1998 };
1999 return !::canCreateUndefOrPoison(Reg, MRI,
2000 /*ConsiderFlagsAndMetadata=*/true, Kind) &&
2001 all_of(RegDef->uses(), MOCheck);
2002 }
2003 }
2004}
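// Example: a G_CONSTANT or G_FCONSTANT is always clean; a G_BUILD_VECTOR or
// G_PHI is clean only if every incoming register is; any other def is clean
// when its opcode cannot create undef/poison and all of its register
// operands are recursively clean. The walk gives up (returns false) once
// Depth reaches MaxAnalysisRecursionDepth.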
2005
2006bool llvm::canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI,
2007 bool ConsiderFlagsAndMetadata) {
2008 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
2009 UndefPoisonKind::UndefOrPoison);
2010}
2011
2012static bool canCreatePoison(Register Reg, const MachineRegisterInfo &MRI,
2013 bool ConsiderFlagsAndMetadata = true) {
2014 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
2015 UndefPoisonKind::PoisonOnly);
2016}
2017
2018bool llvm::isGuaranteedNotToBeUndefOrPoison(Register Reg,
2019 const MachineRegisterInfo &MRI,
2020 unsigned Depth) {
2021 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2022 UndefPoisonKind::UndefOrPoison);
2023}
2024
2025bool llvm::isGuaranteedNotToBeUndef(Register Reg,
2026 const MachineRegisterInfo &MRI,
2027 unsigned Depth) {
2028 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2029 UndefPoisonKind::UndefOnly);
2030}
2031
2032bool llvm::isGuaranteedNotToBePoison(Register Reg,
2033 const MachineRegisterInfo &MRI,
2034 unsigned Depth) {
2035 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2036 UndefPoisonKind::PoisonOnly);
2037}
2038
2039Type *llvm::getTypeForLLT(LLT Ty, LLVMContext &C) {
2040 if (Ty.isVector())
2041 return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
2042 Ty.getElementCount());
2043 return IntegerType::get(C, Ty.getSizeInBits());
2044}
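// Example mappings (Ctx is an assumed LLVMContext in scope):
//
//   getTypeForLLT(LLT::scalar(64), Ctx);          // i64
//   getTypeForLLT(LLT::fixed_vector(4, 32), Ctx); // <4 x i32>
//
// Pointer LLTs map through their size, so p0 on a 64-bit target comes back
// as i64; the address space is not preserved.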
2045
2046bool llvm::isAssertMI(const MachineInstr &MI) {
2047 switch (MI.getOpcode()) {
2048 default:
2049 return false;
2050 case TargetOpcode::G_ASSERT_ALIGN:
2051 case TargetOpcode::G_ASSERT_SEXT:
2052 case TargetOpcode::G_ASSERT_ZEXT:
2053 return true;
2054 }
2055}
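// Example: G_ASSERT_ZEXT %x, 8 is such an assert; it refines what is known
// about %x without computing anything, so analyses may treat it like a COPY
// carrying extra known-bits information.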
2056
2057APInt llvm::GIConstant::getScalarValue() const {
2058 assert(Kind == GIConstantKind::Scalar && "Expected scalar constant");
2059
2060 return Value;
2061}
2062
2063std::optional<GIConstant>
2064llvm::GIConstant::getConstant(Register Const, const MachineRegisterInfo &MRI) {
2065 MachineInstr *Constant = getDefIgnoringCopies(Const, MRI);
2066
2067 if (GSplatVector *Splat = dyn_cast<GSplatVector>(Constant)) {
2068 std::optional<ValueAndVReg> MayBeConstant =
2069 getIConstantVRegValWithLookThrough(Splat->getScalarReg(), MRI);
2070 if (!MayBeConstant)
2071 return std::nullopt;
2072 return GIConstant(MayBeConstant->Value, GIConstantKind::ScalableVector);
2073 }
2074
2075 if (GBuildVector *Build = dyn_cast<GBuildVector>(Constant)) {
2076 SmallVector<APInt> Values;
2077 unsigned NumSources = Build->getNumSources();
2078 for (unsigned I = 0; I < NumSources; ++I) {
2079 Register SrcReg = Build->getSourceReg(I);
2080 std::optional<ValueAndVReg> MayBeConstant =
2081 getIConstantVRegValWithLookThrough(SrcReg, MRI);
2082 if (!MayBeConstant)
2083 return std::nullopt;
2084 Values.push_back(MayBeConstant->Value);
2085 }
2086 return GIConstant(Values);
2087 }
2088
2089 std::optional<ValueAndVReg> MayBeConstant =
2090 getIConstantVRegValWithLookThrough(Const, MRI);
2091 if (!MayBeConstant)
2092 return std::nullopt;
2093
2094 return GIConstant(MayBeConstant->Value, GIConstantKind::Scalar);
2095}
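// Usage sketch (hypothetical vreg V): one query that recognizes a scalar
// G_CONSTANT, an all-constant G_BUILD_VECTOR, and a G_SPLAT_VECTOR of a
// constant (the scalable-vector kind), looking through copies.
//
//   if (std::optional<GIConstant> C = GIConstant::getConstant(V, MRI)) {
//     // getScalarValue() may only be called on the Scalar kind (asserts).
//   }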
2096
2097APFloat llvm::GFConstant::getScalarValue() const {
2098 assert(Kind == GFConstantKind::Scalar && "Expected scalar constant");
2099
2100 return Values[0];
2101}
2102
2103std::optional<GFConstant>
2104llvm::GFConstant::getConstant(Register Const, const MachineRegisterInfo &MRI) {
2105 MachineInstr *Constant = getDefIgnoringCopies(Const, MRI);
2106
2107 if (GSplatVector *Splat = dyn_cast<GSplatVector>(Constant)) {
2108 std::optional<FPValueAndVReg> MayBeConstant =
2109 getFConstantVRegValWithLookThrough(Splat->getScalarReg(), MRI);
2110 if (!MayBeConstant)
2111 return std::nullopt;
2112 return GFConstant(MayBeConstant->Value, GFConstantKind::ScalableVector);
2113 }
2114
2115 if (GBuildVector *Build = dyn_cast<GBuildVector>(Constant)) {
2116 SmallVector<APFloat> Values;
2117 unsigned NumSources = Build->getNumSources();
2118 for (unsigned I = 0; I < NumSources; ++I) {
2119 Register SrcReg = Build->getSourceReg(I);
2120 std::optional<FPValueAndVReg> MayBeConstant =
2121 getFConstantVRegValWithLookThrough(SrcReg, MRI);
2122 if (!MayBeConstant)
2123 return std::nullopt;
2124 Values.push_back(MayBeConstant->Value);
2125 }
2126 return GFConstant(Values);
2127 }
2128
2129 std::optional<FPValueAndVReg> MayBeConstant =
2130 getFConstantVRegValWithLookThrough(Const, MRI);
2131 if (!MayBeConstant)
2132 return std::nullopt;
2133
2134 return GFConstant(MayBeConstant->Value, GFConstantKind::Scalar);
2135}
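// Usage sketch (hypothetical vreg V): the floating-point counterpart of
// GIConstant::getConstant, rooted at G_FCONSTANT instead of G_CONSTANT.
//
//   if (std::optional<GFConstant> C = GFConstant::getConstant(V, MRI)) {
//     // getScalarValue() yields the APFloat for the Scalar kind (asserts).
//   }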