LLVM 20.0.0git
Utils.cpp
Go to the documentation of this file.
1//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file This file implements the utility functions used by the GlobalISel
9/// pipeline.
10//===----------------------------------------------------------------------===//
11
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
35#include "llvm/IR/Constants.h"
38#include <numeric>
39#include <optional>
40
41#define DEBUG_TYPE "globalisel-utils"
42
43using namespace llvm;
44using namespace MIPatternMatch;
45
// llvm::constrainRegToClass — NOTE(review): the opening signature line
// (return type / first parameter) was dropped by the doxygen scrape.
// Attempts to constrain Reg to RegClass in place; when the existing vreg
// cannot be constrained, hands back a fresh virtual register of the
// requested class instead, leaving the caller to wire up a copy.
47 const TargetInstrInfo &TII,
48 const RegisterBankInfo &RBI, Register Reg,
49 const TargetRegisterClass &RegClass) {
// Fall back to a brand-new vreg of the target class if in-place
// constraining fails (incompatible existing class/bank).
50 if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
51 return MRI.createVirtualRegister(&RegClass);
52
53 return Reg;
54}
55
// llvm::constrainOperandRegClass (class-taking overload) — NOTE(review):
// the signature's first lines are missing from the scrape. Constrains the
// register in RegMO to RegClass; if a new vreg had to be created, inserts a
// COPY (before InsertPt for uses, after it for defs) and rewrites RegMO,
// notifying the function's change observer around every mutation.
57 const MachineFunction &MF, const TargetRegisterInfo &TRI,
59 const RegisterBankInfo &RBI, MachineInstr &InsertPt,
60 const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
61 Register Reg = RegMO.getReg();
62 // Assume physical registers are properly constrained.
63 assert(Reg.isVirtual() && "PhysReg not implemented");
64
65 // Save the old register class to check whether
66 // the change notifications will be required.
67 // TODO: A better approach would be to pass
68 // the observers to constrainRegToClass().
69 auto *OldRegClass = MRI.getRegClassOrNull(Reg);
70 Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
71 // If we created a new virtual register because the class is not compatible
72 // then create a copy between the new and the old register.
73 if (ConstrainedReg != Reg) {
74 MachineBasicBlock::iterator InsertIt(&InsertPt);
75 MachineBasicBlock &MBB = *InsertPt.getParent();
76 // FIXME: The copy needs to have the classes constrained for its operands.
77 // Use operand's regbank to get the class for old register (Reg).
78 if (RegMO.isUse()) {
// Use: copy the old value into the new constrained vreg before InsertPt.
79 BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
80 TII.get(TargetOpcode::COPY), ConstrainedReg)
81 .addReg(Reg);
82 } else {
83 assert(RegMO.isDef() && "Must be a definition");
// Def: copy the constrained result back into the original vreg after
// InsertPt so existing users of Reg still see the value.
84 BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
85 TII.get(TargetOpcode::COPY), Reg)
86 .addReg(ConstrainedReg);
87 }
88 if (GISelChangeObserver *Observer = MF.getObserver()) {
89 Observer->changingInstr(*RegMO.getParent());
90 }
91 RegMO.setReg(ConstrainedReg);
92 if (GISelChangeObserver *Observer = MF.getObserver()) {
93 Observer->changedInstr(*RegMO.getParent());
94 }
95 } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
// Same vreg but its class changed: notify observers about the def (for
// uses) and about every use of the register.
96 if (GISelChangeObserver *Observer = MF.getObserver()) {
97 if (!RegMO.isDef()) {
98 MachineInstr *RegDef = MRI.getVRegDef(Reg);
99 Observer->changedInstr(*RegDef);
100 }
101 Observer->changingAllUsesOfReg(MRI, Reg);
102 Observer->finishedChangingAllUsesOfReg();
103 }
104 }
105 return ConstrainedReg;
106}
107
// llvm::constrainOperandRegClass (MCInstrDesc overload) — NOTE(review):
// leading signature lines lost in the scrape. Derives the required register
// class for operand OpIdx from the instruction description, then delegates
// to the class-taking overload above; bails out (returning Reg unchanged)
// when the descriptor imposes no class.
109 const MachineFunction &MF, const TargetRegisterInfo &TRI,
111 const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
112 MachineOperand &RegMO, unsigned OpIdx) {
113 Register Reg = RegMO.getReg();
114 // Assume physical registers are properly constrained.
115 assert(Reg.isVirtual() && "PhysReg not implemented");
116
117 const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
118 // Some of the target independent instructions, like COPY, may not impose any
119 // register class constraints on some of their operands: If it's a use, we can
120 // skip constraining as the instruction defining the register would constrain
121 // it.
122
123 if (OpRC) {
124 // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
125 // can have multiple regbanks for a superclass that combine different
126 // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
127 // resolved by targets during regbankselect should not be overridden.
128 if (const auto *SubRC = TRI.getCommonSubClass(
129 OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
130 OpRC = SubRC;
131
132 OpRC = TRI.getAllocatableClass(OpRC);
133 }
134
135 if (!OpRC) {
136 assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
137 "Register class constraint is required unless either the "
138 "instruction is target independent or the operand is a use");
139 // FIXME: Just bailing out like this here could be not enough, unless we
140 // expect the users of this function to do the right thing for PHIs and
141 // COPY:
142 // v1 = COPY v0
143 // v2 = COPY v1
144 // v1 here may end up not being constrained at all. Please notice that to
145 // reproduce the issue we likely need a destination pattern of a selection
146 // rule producing such extra copies, not just an input GMIR with them as
147 // every existing target using selectImpl handles copies before calling it
148 // and they never reach this function.
149 return Reg;
150 }
151 return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
152 RegMO);
153}
154
// llvm::constrainSelectedInstRegOperands — NOTE(review): the signature's
// first line and the local MF/MRI declarations (original lines 162-163)
// were dropped by the scrape. Walks every explicit register operand of an
// already-selected instruction, constrains its class via
// constrainOperandRegClass, and ties TIED_TO use operands to their defs.
156 const TargetInstrInfo &TII,
157 const TargetRegisterInfo &TRI,
158 const RegisterBankInfo &RBI) {
159 assert(!isPreISelGenericOpcode(I.getOpcode()) &&
160 "A selected instruction is expected");
161 MachineBasicBlock &MBB = *I.getParent();
164
165 for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
166 MachineOperand &MO = I.getOperand(OpI);
167
168 // There's nothing to be done on non-register operands.
169 if (!MO.isReg())
170 continue;
171
172 LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
173 assert(MO.isReg() && "Unsupported non-reg operand");
174
175 Register Reg = MO.getReg();
176 // Physical registers don't need to be constrained.
177 if (Reg.isPhysical())
178 continue;
179
180 // Register operands with a value of 0 (e.g. predicate operands) don't need
181 // to be constrained.
182 if (Reg == 0)
183 continue;
184
185 // If the operand is a vreg, we should constrain its regclass, and only
186 // insert COPYs if that's impossible.
187 // constrainOperandRegClass does that for us.
188 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
189
190 // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
191 // done.
192 if (MO.isUse()) {
193 int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
194 if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
195 I.tieOperands(DefIdx, OpI);
196 }
197 }
198 return true;
199}
200
// llvm::canReplaceReg — NOTE(review): the signature (original lines
// 201-202) is missing from the scrape. Decides whether SrcReg may replace
// DstReg: both must be virtual, types must match, and DstReg's class/bank
// constraints must be absent, identical, or a bank covering SrcReg's class.
203 // Give up if either DstReg or SrcReg is a physical register.
204 if (DstReg.isPhysical() || SrcReg.isPhysical())
205 return false;
206 // Give up if the types don't match.
207 if (MRI.getType(DstReg) != MRI.getType(SrcReg))
208 return false;
209 // Replace if either DstReg has no constraints or the register
210 // constraints match.
211 const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
212 if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
213 return true;
214
215 // Otherwise match if the Src is already a regclass that is covered by the Dst
216 // RegBank.
217 return DstRBC.is<const RegisterBank *>() && MRI.getRegClassOrNull(SrcReg) &&
218 DstRBC.get<const RegisterBank *>()->covers(
219 *MRI.getRegClassOrNull(SrcReg));
220}
221
// llvm::isTriviallyDead — NOTE(review): the signature's first line is
// missing from the scrape. Returns true when MI has no side effects and all
// of its defs are unused virtual registers, so it can be deleted.
223 const MachineRegisterInfo &MRI) {
224 // FIXME: This logical is mostly duplicated with
225 // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
226 // MachineInstr::isLabel?
227
228 // Don't delete frame allocation labels.
229 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
230 return false;
231 // LIFETIME markers should be preserved even if they seem dead.
232 if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
233 MI.getOpcode() == TargetOpcode::LIFETIME_END)
234 return false;
235
236 // If we can move an instruction, we can remove it. Otherwise, it has
237 // a side-effect of some sort.
238 bool SawStore = false;
239 if (!MI.isSafeToMove(SawStore) && !MI.isPHI())
240 return false;
241
242 // Instructions without side-effects are dead iff they only define dead vregs.
243 for (const auto &MO : MI.all_defs()) {
244 Register Reg = MO.getReg();
245 if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
246 return false;
247 }
248 return true;
249}
250
// reportGISelDiagnostic — NOTE(review): several signature/body lines
// (including the remark-emitter parameter and the abort-enabled check that
// completes the IsFatal expression) were dropped by the scrape. Emits a
// GlobalISel diagnostic: fatal errors become report_fatal_error, everything
// else goes through the optimization-remark emitter.
252 MachineFunction &MF,
253 const TargetPassConfig &TPC,
256 bool IsFatal = Severity == DS_Error &&
258 // Print the function name explicitly if we don't have a debug location (which
259 // makes the diagnostic less useful) or if we're going to emit a raw error.
260 if (!R.getLocation().isValid() || IsFatal)
261 R << (" (in function: " + MF.getName() + ")").str();
262
263 if (IsFatal)
264 report_fatal_error(Twine(R.getMsg()));
265 else
266 MORE.emit(R);
267}
268
273}
274
278 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
279 reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
280}
281
// llvm::reportGISelFailure (message-building overload) — NOTE(review): the
// leading signature lines are missing from the scrape. Builds a
// MachineOptimizationRemarkMissed for MI and forwards to the remark-taking
// reportGISelFailure overload.
284 const char *PassName, StringRef Msg,
285 const MachineInstr &MI) {
286 MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
287 MI.getDebugLoc(), MI.getParent());
288 R << Msg;
289 // Printing MI is expensive; only do it if expensive remarks are enabled.
290 if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
291 R << ": " << ore::MNV("Inst", MI);
292 reportGISelFailure(MF, TPC, MORE, R);
293}
294
295std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
296 const MachineRegisterInfo &MRI) {
297 std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
298 VReg, MRI, /*LookThroughInstrs*/ false);
299 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
300 "Value found while looking through instrs");
301 if (!ValAndVReg)
302 return std::nullopt;
303 return ValAndVReg->Value;
304}
305
// llvm::getIConstantFromReg — NOTE(review): the signature line is missing
// from the scrape. Asserts that Reg is defined by a G_CONSTANT and returns
// its immediate value.
307 MachineInstr *Const = MRI.getVRegDef(Reg);
308 assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&
309 "expected a G_CONSTANT on Reg");
310 return Const->getOperand(1).getCImm()->getValue();
311}
312
313std::optional<int64_t>
// NOTE(review): the line naming this function and its parameters was
// dropped by the scrape. Returns the sign-extended 64-bit value of the
// constant defining VReg, if it is a constant of width <= 64 bits.
315 std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
316 if (Val && Val->getBitWidth() <= 64)
317 return Val->getSExtValue();
318 return std::nullopt;
319}
320
321namespace {
322
323// This function is used in many places, and as such, it has some
324// micro-optimizations to try and make it as fast as it can be.
325//
326// - We use template arguments to avoid an indirect call caused by passing a
327// function_ref/std::function
328// - GetAPCstValue does not return std::optional<APInt> as that's expensive.
329// Instead it returns true/false and places the result in a pre-constructed
330// APInt.
331//
332// Please change this function carefully and benchmark your changes.
// Shared worker for the public *VRegValWithLookThrough helpers: walks the
// def chain of VReg through COPY/G_INTTOPTR (and optionally ext/trunc),
// recording each size-changing opcode, then replays those conversions on
// the constant found at the end of the chain.
333template <bool (*IsConstantOpcode)(const MachineInstr *),
334 bool (*GetAPCstValue)(const MachineInstr *MI, APInt &)>
335std::optional<ValueAndVReg>
336getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
337 bool LookThroughInstrs = true,
338 bool LookThroughAnyExt = false) {
// NOTE(review): the scrape dropped the local declarations here (original
// lines 339-340), presumably `MI` and the `SeenOpcodes` vector of
// (opcode, destination-width) pairs used below — confirm against upstream.
341
342 while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
343 LookThroughInstrs) {
344 switch (MI->getOpcode()) {
345 case TargetOpcode::G_ANYEXT:
346 if (!LookThroughAnyExt)
347 return std::nullopt;
348 [[fallthrough]];
349 case TargetOpcode::G_TRUNC:
350 case TargetOpcode::G_SEXT:
351 case TargetOpcode::G_ZEXT:
// Remember the conversion and its result width so it can be replayed.
352 SeenOpcodes.push_back(std::make_pair(
353 MI->getOpcode(),
354 MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
355 VReg = MI->getOperand(1).getReg();
356 break;
357 case TargetOpcode::COPY:
358 VReg = MI->getOperand(1).getReg();
359 if (VReg.isPhysical())
360 return std::nullopt;
361 break;
362 case TargetOpcode::G_INTTOPTR:
363 VReg = MI->getOperand(1).getReg();
364 break;
365 default:
366 return std::nullopt;
367 }
368 }
369 if (!MI || !IsConstantOpcode(MI))
370 return std::nullopt;
371
372 APInt Val;
373 if (!GetAPCstValue(MI, Val))
374 return std::nullopt;
// Replay the recorded conversions innermost-first to reconstruct the value
// as seen at the original VReg.
375 for (auto &Pair : reverse(SeenOpcodes)) {
376 switch (Pair.first) {
377 case TargetOpcode::G_TRUNC:
378 Val = Val.trunc(Pair.second);
379 break;
380 case TargetOpcode::G_ANYEXT:
381 case TargetOpcode::G_SEXT:
382 Val = Val.sext(Pair.second);
383 break;
384 case TargetOpcode::G_ZEXT:
385 Val = Val.zext(Pair.second);
386 break;
387 }
388 }
389
390 return ValueAndVReg{std::move(Val), VReg};
391}
392
393bool isIConstant(const MachineInstr *MI) {
394 if (!MI)
395 return false;
396 return MI->getOpcode() == TargetOpcode::G_CONSTANT;
397}
398
399bool isFConstant(const MachineInstr *MI) {
400 if (!MI)
401 return false;
402 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
403}
404
405bool isAnyConstant(const MachineInstr *MI) {
406 if (!MI)
407 return false;
408 unsigned Opc = MI->getOpcode();
409 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
410}
411
412bool getCImmAsAPInt(const MachineInstr *MI, APInt &Result) {
413 const MachineOperand &CstVal = MI->getOperand(1);
414 if (!CstVal.isCImm())
415 return false;
416 Result = CstVal.getCImm()->getValue();
417 return true;
418}
419
// Extract operand 1 of MI as an APInt, accepting either an integer (CImm)
// or floating-point (FPImm) immediate. NOTE(review): the body of the FPImm
// branch (original line 425, presumably bit-casting the float payload into
// Result) was dropped by the scrape — confirm against upstream.
420bool getCImmOrFPImmAsAPInt(const MachineInstr *MI, APInt &Result) {
421 const MachineOperand &CstVal = MI->getOperand(1);
422 if (CstVal.isCImm())
423 Result = CstVal.getCImm()->getValue();
424 else if (CstVal.isFPImm())
426 else
427 return false;
428 return true;
429}
430
431} // end anonymous namespace
432
// llvm::getIConstantVRegValWithLookThrough — NOTE(review): the signature
// line is missing from the scrape. Thin wrapper instantiating the shared
// look-through worker for integer (G_CONSTANT) values.
434 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
435 return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
436 VReg, MRI, LookThroughInstrs);
437}
438
// llvm::getAnyConstantVRegValWithLookThrough — NOTE(review): the signature
// line is missing from the scrape. Wrapper accepting either integer or FP
// constants, optionally looking through G_ANYEXT as well.
440 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
441 bool LookThroughAnyExt) {
442 return getConstantVRegValWithLookThrough<isAnyConstant,
443 getCImmOrFPImmAsAPInt>(
444 VReg, MRI, LookThroughInstrs, LookThroughAnyExt);
445}
446
// Look through copies/conversions for a floating-point constant feeding
// VReg and return it together with the register it was found on.
447std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
448 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
449 auto Reg =
450 getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
451 VReg, MRI, LookThroughInstrs);
452 if (!Reg)
453 return std::nullopt;
// NOTE(review): the first line of this return statement (original line
// 454, presumably constructing the FPValueAndVReg from the looked-up
// APInt payload) was dropped by the scrape — confirm against upstream.
455 Reg->VReg};
456}
457
458const ConstantFP *
// NOTE(review): the line naming this function and its parameters was
// dropped by the scrape. Returns the FP immediate of the G_FCONSTANT
// defining VReg, or nullptr when the def is some other opcode.
460 MachineInstr *MI = MRI.getVRegDef(VReg);
461 if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
462 return nullptr;
463 return MI->getOperand(1).getFPImm();
464}
465
466std::optional<DefinitionAndSourceRegister>
// NOTE(review): the line naming this function and its parameters was
// dropped by the scrape. Walks backwards through COPY and optimization-hint
// instructions to find the underlying defining instruction and the last
// register on the chain with a valid type.
468 Register DefSrcReg = Reg;
469 auto *DefMI = MRI.getVRegDef(Reg);
470 auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
471 if (!DstTy.isValid())
472 return std::nullopt;
473 unsigned Opc = DefMI->getOpcode();
474 while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
475 Register SrcReg = DefMI->getOperand(1).getReg();
476 auto SrcTy = MRI.getType(SrcReg);
// Stop at a source with no LLT (e.g. a class-only register) rather than
// walking past it.
477 if (!SrcTy.isValid())
478 break;
479 DefMI = MRI.getVRegDef(SrcReg);
480 DefSrcReg = SrcReg;
481 Opc = DefMI->getOpcode();
482 }
483 return DefinitionAndSourceRegister{DefMI, DefSrcReg};
484}
485
// llvm::getDefIgnoringCopies — NOTE(review): the signature line and the
// call continuation (original lines 486/489) are missing from the scrape.
// Convenience wrapper returning just the defining instruction.
487 const MachineRegisterInfo &MRI) {
488 std::optional<DefinitionAndSourceRegister> DefSrcReg =
490 return DefSrcReg ? DefSrcReg->MI : nullptr;
491}
492
// llvm::getSrcRegIgnoringCopies — NOTE(review): the signature line and the
// call continuation (original lines 493/496) are missing from the scrape.
// Convenience wrapper returning just the underlying source register.
494 const MachineRegisterInfo &MRI) {
495 std::optional<DefinitionAndSourceRegister> DefSrcReg =
497 return DefSrcReg ? DefSrcReg->Reg : Register();
498}
499
// Split Reg into NumParts pieces of type Ty via a single G_UNMERGE_VALUES,
// appending the new vregs to the (scrape-dropped) VRegs output parameter.
// NOTE(review): two signature lines (original 501/503 — the VRegs and MRI
// parameters) are missing from the scrape.
500void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
502 MachineIRBuilder &MIRBuilder,
504 for (int i = 0; i < NumParts; ++i)
505 VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
506 MIRBuilder.buildUnmerge(VRegs, Reg);
507}
508
// Split Reg (of type RegTy) into as many MainTy pieces as fit, placing any
// remainder into LeftoverRegs and reporting its type through LeftoverTy.
// NOTE(review): two signature lines (original 510/513 — the VRegs output
// parameter and the MRI parameter) are missing from the scrape.
509bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
511 SmallVectorImpl<Register> &LeftoverRegs,
512 MachineIRBuilder &MIRBuilder,
514 assert(!LeftoverTy.isValid() && "this is an out argument");
515
516 unsigned RegSize = RegTy.getSizeInBits();
517 unsigned MainSize = MainTy.getSizeInBits();
518 unsigned NumParts = RegSize / MainSize;
519 unsigned LeftoverSize = RegSize - NumParts * MainSize;
520
521 // Use an unmerge when possible.
522 if (LeftoverSize == 0) {
523 for (unsigned I = 0; I < NumParts; ++I)
524 VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
525 MIRBuilder.buildUnmerge(VRegs, Reg);
526 return true;
527 }
528
529 // Try to use unmerge for irregular vector split where possible
530 // For example when splitting a <6 x i32> into <4 x i32> with <2 x i32>
531 // leftover, it becomes:
532 // <2 x i32> %2, <2 x i32>%3, <2 x i32> %4 = G_UNMERGE_VALUE <6 x i32> %1
533 // <4 x i32> %5 = G_CONCAT_VECTOR <2 x i32> %2, <2 x i32> %3
534 if (RegTy.isVector() && MainTy.isVector()) {
535 unsigned RegNumElts = RegTy.getNumElements();
536 unsigned MainNumElts = MainTy.getNumElements();
537 unsigned LeftoverNumElts = RegNumElts % MainNumElts;
538 // If can unmerge to LeftoverTy, do it
539 if (MainNumElts % LeftoverNumElts == 0 &&
540 RegNumElts % LeftoverNumElts == 0 &&
541 RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
542 LeftoverNumElts > 1) {
543 LeftoverTy =
544 LLT::fixed_vector(LeftoverNumElts, RegTy.getScalarSizeInBits());
545
546 // Unmerge the SrcReg to LeftoverTy vectors
547 SmallVector<Register, 4> UnmergeValues;
548 extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
549 MIRBuilder, MRI);
550
551 // Find how many LeftoverTy makes one MainTy
552 unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
553 unsigned NumOfLeftoverVal =
554 ((RegNumElts % MainNumElts) / LeftoverNumElts);
555
556 // Create as many MainTy as possible using unmerged value
557 SmallVector<Register, 4> MergeValues;
558 for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
559 MergeValues.push_back(UnmergeValues[I]);
560 if (MergeValues.size() == LeftoverPerMain) {
561 VRegs.push_back(
562 MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
563 MergeValues.clear();
564 }
565 }
566 // Populate LeftoverRegs with the leftovers
567 for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
568 I < UnmergeValues.size(); I++) {
569 LeftoverRegs.push_back(UnmergeValues[I]);
570 }
571 return true;
572 }
573 }
574 // Perform irregular split. Leftover is last element of RegPieces.
575 if (MainTy.isVector()) {
576 SmallVector<Register, 8> RegPieces;
577 extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
578 MRI);
579 for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
580 VRegs.push_back(RegPieces[i]);
581 LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
582 LeftoverTy = MRI.getType(LeftoverRegs[0]);
583 return true;
584 }
585
586 LeftoverTy = LLT::scalar(LeftoverSize);
587 // For irregular sizes, extract the individual parts.
588 for (unsigned I = 0; I != NumParts; ++I) {
589 Register NewReg = MRI.createGenericVirtualRegister(MainTy);
590 VRegs.push_back(NewReg);
591 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
592 }
593
594 for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
595 Offset += LeftoverSize) {
596 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
597 LeftoverRegs.push_back(NewReg);
598 MIRBuilder.buildExtract(NewReg, Reg, Offset);
599 }
600
601 return true;
602}
603
// Split vector Reg into NumElts-wide sub-vectors (plus a leftover vector
// when the element count does not divide evenly). NOTE(review): the scrape
// dropped several lines here — original 605/607 (the VRegs output and MRI
// parameters) and 625 (presumably the declaration of Elts).
604void llvm::extractVectorParts(Register Reg, unsigned NumElts,
606 MachineIRBuilder &MIRBuilder,
608 LLT RegTy = MRI.getType(Reg);
609 assert(RegTy.isVector() && "Expected a vector type");
610
611 LLT EltTy = RegTy.getElementType();
612 LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
613 unsigned RegNumElts = RegTy.getNumElements();
614 unsigned LeftoverNumElts = RegNumElts % NumElts;
615 unsigned NumNarrowTyPieces = RegNumElts / NumElts;
616
617 // Perfect split without leftover
618 if (LeftoverNumElts == 0)
619 return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
620 MRI);
621
622 // Irregular split. Provide direct access to all elements for artifact
623 // combiner using unmerge to elements. Then build vectors with NumElts
624 // elements. Remaining element(s) will be (used to build vector) Leftover.
626 extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);
627
628 unsigned Offset = 0;
629 // Requested sub-vectors of NarrowTy.
630 for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
631 ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
632 VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
633 }
634
635 // Leftover element(s).
636 if (LeftoverNumElts == 1) {
637 VRegs.push_back(Elts[Offset]);
638 } else {
639 LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
640 ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
641 VRegs.push_back(
642 MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
643 }
644}
645
// llvm::getOpcodeDef — NOTE(review): the signature line and the
// look-through call (original lines 646/648) are missing from the scrape.
// Returns the def only when it has the requested opcode.
647 const MachineRegisterInfo &MRI) {
649 return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
650}
651
652APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
653 if (Size == 32)
654 return APFloat(float(Val));
655 if (Size == 64)
656 return APFloat(Val);
657 if (Size != 16)
658 llvm_unreachable("Unsupported FPConstant size");
659 bool Ignored;
660 APFloat APF(Val);
661 APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
662 return APF;
663}
664
665std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
666 const Register Op1,
667 const Register Op2,
668 const MachineRegisterInfo &MRI) {
669 auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
670 if (!MaybeOp2Cst)
671 return std::nullopt;
672
673 auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
674 if (!MaybeOp1Cst)
675 return std::nullopt;
676
677 const APInt &C1 = MaybeOp1Cst->Value;
678 const APInt &C2 = MaybeOp2Cst->Value;
679 switch (Opcode) {
680 default:
681 break;
682 case TargetOpcode::G_ADD:
683 return C1 + C2;
684 case TargetOpcode::G_PTR_ADD:
685 // Types can be of different width here.
686 // Result needs to be the same width as C1, so trunc or sext C2.
687 return C1 + C2.sextOrTrunc(C1.getBitWidth());
688 case TargetOpcode::G_AND:
689 return C1 & C2;
690 case TargetOpcode::G_ASHR:
691 return C1.ashr(C2);
692 case TargetOpcode::G_LSHR:
693 return C1.lshr(C2);
694 case TargetOpcode::G_MUL:
695 return C1 * C2;
696 case TargetOpcode::G_OR:
697 return C1 | C2;
698 case TargetOpcode::G_SHL:
699 return C1 << C2;
700 case TargetOpcode::G_SUB:
701 return C1 - C2;
702 case TargetOpcode::G_XOR:
703 return C1 ^ C2;
704 case TargetOpcode::G_UDIV:
705 if (!C2.getBoolValue())
706 break;
707 return C1.udiv(C2);
708 case TargetOpcode::G_SDIV:
709 if (!C2.getBoolValue())
710 break;
711 return C1.sdiv(C2);
712 case TargetOpcode::G_UREM:
713 if (!C2.getBoolValue())
714 break;
715 return C1.urem(C2);
716 case TargetOpcode::G_SREM:
717 if (!C2.getBoolValue())
718 break;
719 return C1.srem(C2);
720 case TargetOpcode::G_SMIN:
721 return APIntOps::smin(C1, C2);
722 case TargetOpcode::G_SMAX:
723 return APIntOps::smax(C1, C2);
724 case TargetOpcode::G_UMIN:
725 return APIntOps::umin(C1, C2);
726 case TargetOpcode::G_UMAX:
727 return APIntOps::umax(C1, C2);
728 }
729
730 return std::nullopt;
731}
732
733std::optional<APFloat>
734llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
735 const Register Op2, const MachineRegisterInfo &MRI) {
736 const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
737 if (!Op2Cst)
738 return std::nullopt;
739
740 const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
741 if (!Op1Cst)
742 return std::nullopt;
743
744 APFloat C1 = Op1Cst->getValueAPF();
745 const APFloat &C2 = Op2Cst->getValueAPF();
746 switch (Opcode) {
747 case TargetOpcode::G_FADD:
748 C1.add(C2, APFloat::rmNearestTiesToEven);
749 return C1;
750 case TargetOpcode::G_FSUB:
751 C1.subtract(C2, APFloat::rmNearestTiesToEven);
752 return C1;
753 case TargetOpcode::G_FMUL:
754 C1.multiply(C2, APFloat::rmNearestTiesToEven);
755 return C1;
756 case TargetOpcode::G_FDIV:
757 C1.divide(C2, APFloat::rmNearestTiesToEven);
758 return C1;
759 case TargetOpcode::G_FREM:
760 C1.mod(C2);
761 return C1;
762 case TargetOpcode::G_FCOPYSIGN:
763 C1.copySign(C2);
764 return C1;
765 case TargetOpcode::G_FMINNUM:
766 return minnum(C1, C2);
767 case TargetOpcode::G_FMAXNUM:
768 return maxnum(C1, C2);
769 case TargetOpcode::G_FMINIMUM:
770 return minimum(C1, C2);
771 case TargetOpcode::G_FMAXIMUM:
772 return maximum(C1, C2);
773 case TargetOpcode::G_FMINNUM_IEEE:
774 case TargetOpcode::G_FMAXNUM_IEEE:
775 // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
776 // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
777 // and currently there isn't a nice wrapper in APFloat for the version with
778 // correct snan handling.
779 break;
780 default:
781 break;
782 }
783
784 return std::nullopt;
785}
786
// Element-wise constant fold of a binary opcode over two G_BUILD_VECTOR
// operands; an empty vector signals failure. NOTE(review): the return-type
// line (original 787, presumably SmallVector<APInt>) was dropped by the
// scrape.
788llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
789 const Register Op2,
790 const MachineRegisterInfo &MRI) {
791 auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
792 if (!SrcVec2)
793 return SmallVector<APInt>();
794
795 auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
796 if (!SrcVec1)
797 return SmallVector<APInt>();
798
799 SmallVector<APInt> FoldedElements;
800 for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
// Fold lane-by-lane; any non-constant lane aborts the whole fold.
801 auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
802 SrcVec2->getSourceReg(Idx), MRI);
803 if (!MaybeCst)
804 return SmallVector<APInt>();
805 FoldedElements.push_back(*MaybeCst);
806 }
807 return FoldedElements;
808}
809
// llvm::isKnownNeverNaN — NOTE(review): the signature's first line
// (original 810) and some operand-recursion continuations (original
// 864-866 and 873) were dropped by the scrape. Conservatively determines
// whether Val can never be a NaN (or, with SNaN set, never a signaling
// NaN) by inspecting flags, constants, and the defining opcode.
811 bool SNaN) {
812 const MachineInstr *DefMI = MRI.getVRegDef(Val);
813 if (!DefMI)
814 return false;
815
816 const TargetMachine& TM = DefMI->getMF()->getTarget();
817 if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
818 return true;
819
820 // If the value is a constant, we can obviously see if it is a NaN or not.
821 if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
822 return !FPVal->getValueAPF().isNaN() ||
823 (SNaN && !FPVal->getValueAPF().isSignaling());
824 }
825
826 if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
827 for (const auto &Op : DefMI->uses())
828 if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
829 return false;
830 return true;
831 }
832
833 switch (DefMI->getOpcode()) {
834 default:
835 break;
836 case TargetOpcode::G_FADD:
837 case TargetOpcode::G_FSUB:
838 case TargetOpcode::G_FMUL:
839 case TargetOpcode::G_FDIV:
840 case TargetOpcode::G_FREM:
841 case TargetOpcode::G_FSIN:
842 case TargetOpcode::G_FCOS:
843 case TargetOpcode::G_FTAN:
844 case TargetOpcode::G_FACOS:
845 case TargetOpcode::G_FASIN:
846 case TargetOpcode::G_FATAN:
847 case TargetOpcode::G_FCOSH:
848 case TargetOpcode::G_FSINH:
849 case TargetOpcode::G_FTANH:
850 case TargetOpcode::G_FMA:
851 case TargetOpcode::G_FMAD:
852 if (SNaN)
853 return true;
854
855 // TODO: Need isKnownNeverInfinity
856 return false;
857 case TargetOpcode::G_FMINNUM_IEEE:
858 case TargetOpcode::G_FMAXNUM_IEEE: {
859 if (SNaN)
860 return true;
861 // This can return a NaN if either operand is an sNaN, or if both operands
862 // are NaN.
863 return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
867 }
868 case TargetOpcode::G_FMINNUM:
869 case TargetOpcode::G_FMAXNUM: {
870 // Only one needs to be known not-nan, since it will be returned if the
871 // other ends up being one.
872 return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
874 }
875 }
876
877 if (SNaN) {
878 // FP operations quiet. For now, just handle the ones inserted during
879 // legalization.
880 switch (DefMI->getOpcode()) {
881 case TargetOpcode::G_FPEXT:
882 case TargetOpcode::G_FPTRUNC:
883 case TargetOpcode::G_FCANONICALIZE:
884 return true;
885 default:
886 return false;
887 }
888 }
889
890 return false;
891}
892
// llvm::inferAlignFromPtrInfo — NOTE(review): the signature's first line
// (original 893) is missing from the scrape. Derives the best known
// alignment from a MachinePointerInfo: frame objects use the frame-index
// alignment, IR values use their pointer alignment, otherwise 1.
894 const MachinePointerInfo &MPO) {
895 auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
896 if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
897 MachineFrameInfo &MFI = MF.getFrameInfo();
898 return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
899 MPO.Offset);
900 }
901
902 if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
903 const Module *M = MF.getFunction().getParent();
904 return V->getPointerAlignment(M->getDataLayout());
905 }
906
907 return Align(1);
908}
909
// llvm::getFunctionLiveInPhysReg — NOTE(review): the signature's first line
// (original 910) and the MRI declaration (original 916) were dropped by the
// scrape. Returns the vreg holding the live-in value of PhysReg, adding the
// live-in and/or re-inserting the entry-block COPY if necessary.
911 const TargetInstrInfo &TII,
912 MCRegister PhysReg,
913 const TargetRegisterClass &RC,
914 const DebugLoc &DL, LLT RegTy) {
915 MachineBasicBlock &EntryMBB = MF.front();
917 Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
918 if (LiveIn) {
919 MachineInstr *Def = MRI.getVRegDef(LiveIn);
920 if (Def) {
921 // FIXME: Should the verifier check this is in the entry block?
922 assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
923 return LiveIn;
924 }
925
926 // It's possible the incoming argument register and copy was added during
927 // lowering, but later deleted due to being/becoming dead. If this happens,
928 // re-insert the copy.
929 } else {
930 // The live in register was not present, so add it.
931 LiveIn = MF.addLiveIn(PhysReg, &RC);
932 if (RegTy.isValid())
933 MRI.setType(LiveIn, RegTy);
934 }
935
936 BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
937 .addReg(PhysReg);
938 if (!EntryMBB.isLiveIn(PhysReg))
939 EntryMBB.addLiveIn(PhysReg);
940 return LiveIn;
941}
942
943std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
944 const Register Op1, uint64_t Imm,
945 const MachineRegisterInfo &MRI) {
946 auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
947 if (MaybeOp1Cst) {
948 switch (Opcode) {
949 default:
950 break;
951 case TargetOpcode::G_SEXT_INREG: {
952 LLT Ty = MRI.getType(Op1);
953 return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
954 }
955 }
956 }
957 return std::nullopt;
958}
959
960std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
961 const Register Op0,
962 const MachineRegisterInfo &MRI) {
963 std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
964 if (!Val)
965 return Val;
966
967 const unsigned DstSize = DstTy.getScalarSizeInBits();
968
969 switch (Opcode) {
970 case TargetOpcode::G_SEXT:
971 return Val->sext(DstSize);
972 case TargetOpcode::G_ZEXT:
973 case TargetOpcode::G_ANYEXT:
974 // TODO: DAG considers target preference when constant folding any_extend.
975 return Val->zext(DstSize);
976 default:
977 break;
978 }
979
980 llvm_unreachable("unexpected cast opcode to constant fold");
981}
982
983std::optional<APFloat>
984llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
985 const MachineRegisterInfo &MRI) {
986 assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
987 if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
988 APFloat DstVal(getFltSemanticForLLT(DstTy));
989 DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
990 APFloat::rmNearestTiesToEven);
991 return DstVal;
992 }
993 return std::nullopt;
994}
995
996std::optional<SmallVector<unsigned>>
// NOTE(review): the line naming this function and its leading parameters
// (original 997) was dropped by the scrape. Folds a count-zeros style
// operation by applying CB to each constant scalar/vector element of Src.
998 std::function<unsigned(APInt)> CB) {
999 LLT Ty = MRI.getType(Src);
1000 SmallVector<unsigned> FoldedCTLZs;
1001 auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
1002 auto MaybeCst = getIConstantVRegVal(R, MRI);
1003 if (!MaybeCst)
1004 return std::nullopt;
1005 return CB(*MaybeCst);
1006 };
1007 if (Ty.isVector()) {
1008 // Try to constant fold each element.
1009 auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
1010 if (!BV)
1011 return std::nullopt;
1012 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1013 if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
1014 FoldedCTLZs.emplace_back(*MaybeFold);
1015 continue;
1016 }
// Any non-constant lane makes the whole vector unfoldable.
1017 return std::nullopt;
1018 }
1019 return FoldedCTLZs;
1020 }
1021 if (auto MaybeCst = tryFoldScalar(Src)) {
1022 FoldedCTLZs.emplace_back(*MaybeCst);
1023 return FoldedCTLZs;
1024 }
1025 return std::nullopt;
1026}
1027
1028std::optional<SmallVector<APInt>>
1029llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
1030 const MachineRegisterInfo &MRI) {
1031 LLT Ty = MRI.getType(Op1);
1032 if (Ty != MRI.getType(Op2))
1033 return std::nullopt;
1034
1035 auto TryFoldScalar = [&MRI, Pred](Register LHS,
1036 Register RHS) -> std::optional<APInt> {
1037 auto LHSCst = getIConstantVRegVal(LHS, MRI);
1038 auto RHSCst = getIConstantVRegVal(RHS, MRI);
1039 if (!LHSCst || !RHSCst)
1040 return std::nullopt;
1041
1042 switch (Pred) {
1043 case CmpInst::Predicate::ICMP_EQ:
1044 return APInt(/*numBits=*/1, LHSCst->eq(*RHSCst));
1045 case CmpInst::Predicate::ICMP_NE:
1046 return APInt(/*numBits=*/1, LHSCst->ne(*RHSCst));
1047 case CmpInst::Predicate::ICMP_UGT:
1048 return APInt(/*numBits=*/1, LHSCst->ugt(*RHSCst));
1049 case CmpInst::Predicate::ICMP_UGE:
1050 return APInt(/*numBits=*/1, LHSCst->uge(*RHSCst));
1051 case CmpInst::Predicate::ICMP_ULT:
1052 return APInt(/*numBits=*/1, LHSCst->ult(*RHSCst));
1053 case CmpInst::Predicate::ICMP_ULE:
1054 return APInt(/*numBits=*/1, LHSCst->ule(*RHSCst));
1055 case CmpInst::Predicate::ICMP_SGT:
1056 return APInt(/*numBits=*/1, LHSCst->sgt(*RHSCst));
1057 case CmpInst::Predicate::ICMP_SGE:
1058 return APInt(/*numBits=*/1, LHSCst->sge(*RHSCst));
1059 case CmpInst::Predicate::ICMP_SLT:
1060 return APInt(/*numBits=*/1, LHSCst->slt(*RHSCst));
1061 case CmpInst::Predicate::ICMP_SLE:
1062 return APInt(/*numBits=*/1, LHSCst->sle(*RHSCst));
1063 default:
1064 return std::nullopt;
1065 }
1066 };
1067
1068 SmallVector<APInt> FoldedICmps;
1069
1070 if (Ty.isVector()) {
1071 // Try to constant fold each element.
1072 auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
1073 auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
1074 if (!BV1 || !BV2)
1075 return std::nullopt;
1076 assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
1077 for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
1078 if (auto MaybeFold =
1079 TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
1080 FoldedICmps.emplace_back(*MaybeFold);
1081 continue;
1082 }
1083 return std::nullopt;
1084 }
1085 return FoldedICmps;
1086 }
1087
1088 if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
1089 FoldedICmps.emplace_back(*MaybeCst);
1090 return FoldedICmps;
1091 }
1092
1093 return std::nullopt;
1094}
1095
1097 GISelKnownBits *KB) {
1098 std::optional<DefinitionAndSourceRegister> DefSrcReg =
1100 if (!DefSrcReg)
1101 return false;
1102
1103 const MachineInstr &MI = *DefSrcReg->MI;
1104 const LLT Ty = MRI.getType(Reg);
1105
1106 switch (MI.getOpcode()) {
1107 case TargetOpcode::G_CONSTANT: {
1108 unsigned BitWidth = Ty.getScalarSizeInBits();
1109 const ConstantInt *CI = MI.getOperand(1).getCImm();
1110 return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
1111 }
1112 case TargetOpcode::G_SHL: {
1113 // A left-shift of a constant one will have exactly one bit set because
1114 // shifting the bit off the end is undefined.
1115
1116 // TODO: Constant splat
1117 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1118 if (*ConstLHS == 1)
1119 return true;
1120 }
1121
1122 break;
1123 }
1124 case TargetOpcode::G_LSHR: {
1125 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1126 if (ConstLHS->isSignMask())
1127 return true;
1128 }
1129
1130 break;
1131 }
1132 case TargetOpcode::G_BUILD_VECTOR: {
1133 // TODO: Probably should have a recursion depth guard since you could have
1134 // bitcasted vector elements.
1135 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
1136 if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
1137 return false;
1138
1139 return true;
1140 }
1141 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1142 // Only handle constants since we would need to know if number of leading
1143 // zeros is greater than the truncation amount.
1144 const unsigned BitWidth = Ty.getScalarSizeInBits();
1145 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
1146 auto Const = getIConstantVRegVal(MO.getReg(), MRI);
1147 if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
1148 return false;
1149 }
1150
1151 return true;
1152 }
1153 default:
1154 break;
1155 }
1156
1157 if (!KB)
1158 return false;
1159
1160 // More could be done here, though the above checks are enough
1161 // to handle some common cases.
1162
1163 // Fall back to computeKnownBits to catch other known cases.
1164 KnownBits Known = KB->getKnownBits(Reg);
1165 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
1166}
1167
1170}
1171
1172LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
1173 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1174 return OrigTy;
1175
1176 if (OrigTy.isVector() && TargetTy.isVector()) {
1177 LLT OrigElt = OrigTy.getElementType();
1178 LLT TargetElt = TargetTy.getElementType();
1179
1180 // TODO: The docstring for this function says the intention is to use this
1181 // function to build MERGE/UNMERGE instructions. It won't be the case that
1182 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1183 // could implement getLCMType between the two in the future if there was a
1184 // need, but it is not worth it now as this function should not be used in
1185 // that way.
1186 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1187 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1188 "getLCMType not implemented between fixed and scalable vectors.");
1189
1190 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
1191 int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
1192 TargetTy.getElementCount().getKnownMinValue());
1193 // Prefer the original element type.
1195 TargetTy.getElementCount().getKnownMinValue());
1196 return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
1197 OrigTy.getElementType());
1198 }
1199 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
1200 TargetTy.getSizeInBits().getKnownMinValue());
1201 return LLT::vector(
1202 ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
1203 OrigElt);
1204 }
1205
1206 // One type is scalar, one type is vector
1207 if (OrigTy.isVector() || TargetTy.isVector()) {
1208 LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
1209 LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
1210 LLT EltTy = VecTy.getElementType();
1211 LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;
1212
1213 // Prefer scalar type from OrigTy.
1214 if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
1215 return LLT::vector(VecTy.getElementCount(), OrigEltTy);
1216
1217 // Different size scalars. Create vector with the same total size.
1218 // LCM will take fixed/scalable from VecTy.
1219 unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
1221 ScalarTy.getSizeInBits().getFixedValue());
1222 // Prefer type from OrigTy
1223 return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
1224 VecTy.getElementCount().isScalable()),
1225 OrigEltTy);
1226 }
1227
1228 // At this point, both types are scalars of different size
1229 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
1230 TargetTy.getSizeInBits().getFixedValue());
1231 // Preserve pointer types.
1232 if (LCM == OrigTy.getSizeInBits())
1233 return OrigTy;
1234 if (LCM == TargetTy.getSizeInBits())
1235 return TargetTy;
1236 return LLT::scalar(LCM);
1237}
1238
1239LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
1240
1241 if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
1242 (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
1244 "getCoverTy not implemented between fixed and scalable vectors.");
1245
1246 if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
1247 (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
1248 return getLCMType(OrigTy, TargetTy);
1249
1250 unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
1251 unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
1252 if (OrigTyNumElts % TargetTyNumElts == 0)
1253 return OrigTy;
1254
1255 unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
1257 OrigTy.getElementType());
1258}
1259
1260LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
1261 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1262 return OrigTy;
1263
1264 if (OrigTy.isVector() && TargetTy.isVector()) {
1265 LLT OrigElt = OrigTy.getElementType();
1266
1267 // TODO: The docstring for this function says the intention is to use this
1268 // function to build MERGE/UNMERGE instructions. It won't be the case that
1269 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1270 // could implement getGCDType between the two in the future if there was a
1271 // need, but it is not worth it now as this function should not be used in
1272 // that way.
1273 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1274 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1275 "getGCDType not implemented between fixed and scalable vectors.");
1276
1277 unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
1278 TargetTy.getSizeInBits().getKnownMinValue());
1279 if (GCD == OrigElt.getSizeInBits())
1281 OrigElt);
1282
1283 // Cannot produce original element type, but both have vscale in common.
1284 if (GCD < OrigElt.getSizeInBits())
1286 GCD);
1287
1288 return LLT::vector(
1290 OrigTy.isScalable()),
1291 OrigElt);
1292 }
1293
1294 // If one type is vector and the element size matches the scalar size, then
1295 // the gcd is the scalar type.
1296 if (OrigTy.isVector() &&
1297 OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
1298 return OrigTy.getElementType();
1299 if (TargetTy.isVector() &&
1300 TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
1301 return OrigTy;
1302
1303 // At this point, both types are either scalars of different type or one is a
1304 // vector and one is a scalar. If both types are scalars, the GCD type is the
1305 // GCD between the two scalar sizes. If one is vector and one is scalar, then
1306 // the GCD type is the GCD between the scalar and the vector element size.
1307 LLT OrigScalar = OrigTy.getScalarType();
1308 LLT TargetScalar = TargetTy.getScalarType();
1309 unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
1310 TargetScalar.getSizeInBits().getFixedValue());
1311 return LLT::scalar(GCD);
1312}
1313
1315 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1316 "Only G_SHUFFLE_VECTOR can have a splat index!");
1317 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1318 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1319
1320 // If all elements are undefined, this shuffle can be considered a splat.
1321 // Return 0 for better potential for callers to simplify.
1322 if (FirstDefinedIdx == Mask.end())
1323 return 0;
1324
1325 // Make sure all remaining elements are either undef or the same
1326 // as the first non-undef value.
1327 int SplatValue = *FirstDefinedIdx;
1328 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1329 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1330 return std::nullopt;
1331
1332 return SplatValue;
1333}
1334
1335static bool isBuildVectorOp(unsigned Opcode) {
1336 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1337 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1338}
1339
1340namespace {
1341
1342std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1343 const MachineRegisterInfo &MRI,
1344 bool AllowUndef) {
1346 if (!MI)
1347 return std::nullopt;
1348
1349 bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1350 if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
1351 return std::nullopt;
1352
1353 std::optional<ValueAndVReg> SplatValAndReg;
1354 for (MachineOperand &Op : MI->uses()) {
1355 Register Element = Op.getReg();
1356 // If we have a G_CONCAT_VECTOR, we recursively look into the
1357 // vectors that we're concatenating to see if they're splats.
1358 auto ElementValAndReg =
1359 isConcatVectorsOp
1360 ? getAnyConstantSplat(Element, MRI, AllowUndef)
1362
1363 // If AllowUndef, treat undef as value that will result in a constant splat.
1364 if (!ElementValAndReg) {
1365 if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1366 continue;
1367 return std::nullopt;
1368 }
1369
1370 // Record splat value
1371 if (!SplatValAndReg)
1372 SplatValAndReg = ElementValAndReg;
1373
1374 // Different constant than the one already recorded, not a constant splat.
1375 if (SplatValAndReg->Value != ElementValAndReg->Value)
1376 return std::nullopt;
1377 }
1378
1379 return SplatValAndReg;
1380}
1381
1382} // end anonymous namespace
1383
1385 const MachineRegisterInfo &MRI,
1386 int64_t SplatValue, bool AllowUndef) {
1387 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1388 return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1389 return false;
1390}
1391
1393 const MachineRegisterInfo &MRI,
1394 int64_t SplatValue, bool AllowUndef) {
1395 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1396 AllowUndef);
1397}
1398
1399std::optional<APInt>
1401 if (auto SplatValAndReg =
1402 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1403 if (std::optional<ValueAndVReg> ValAndVReg =
1404 getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1405 return ValAndVReg->Value;
1406 }
1407
1408 return std::nullopt;
1409}
1410
1411std::optional<APInt>
1413 const MachineRegisterInfo &MRI) {
1414 return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1415}
1416
1417std::optional<int64_t>
1419 const MachineRegisterInfo &MRI) {
1420 if (auto SplatValAndReg =
1421 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1422 return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1423 return std::nullopt;
1424}
1425
1426std::optional<int64_t>
1428 const MachineRegisterInfo &MRI) {
1429 return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1430}
1431
1432std::optional<FPValueAndVReg>
1434 bool AllowUndef) {
1435 if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1436 return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1437 return std::nullopt;
1438}
1439
1441 const MachineRegisterInfo &MRI,
1442 bool AllowUndef) {
1443 return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1444}
1445
1447 const MachineRegisterInfo &MRI,
1448 bool AllowUndef) {
1449 return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1450}
1451
1452std::optional<RegOrConstant>
1454 unsigned Opc = MI.getOpcode();
1455 if (!isBuildVectorOp(Opc))
1456 return std::nullopt;
1457 if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1458 return RegOrConstant(*Splat);
1459 auto Reg = MI.getOperand(1).getReg();
1460 if (any_of(drop_begin(MI.operands(), 2),
1461 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1462 return std::nullopt;
1463 return RegOrConstant(Reg);
1464}
1465
1467 const MachineRegisterInfo &MRI,
1468 bool AllowFP = true,
1469 bool AllowOpaqueConstants = true) {
1470 switch (MI.getOpcode()) {
1471 case TargetOpcode::G_CONSTANT:
1472 case TargetOpcode::G_IMPLICIT_DEF:
1473 return true;
1474 case TargetOpcode::G_FCONSTANT:
1475 return AllowFP;
1476 case TargetOpcode::G_GLOBAL_VALUE:
1477 case TargetOpcode::G_FRAME_INDEX:
1478 case TargetOpcode::G_BLOCK_ADDR:
1479 case TargetOpcode::G_JUMP_TABLE:
1480 return AllowOpaqueConstants;
1481 default:
1482 return false;
1483 }
1484}
1485
1487 const MachineRegisterInfo &MRI) {
1488 Register Def = MI.getOperand(0).getReg();
1489 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1490 return true;
1491 GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1492 if (!BV)
1493 return false;
1494 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1496 getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1497 continue;
1498 return false;
1499 }
1500 return true;
1501}
1502
1504 const MachineRegisterInfo &MRI,
1505 bool AllowFP, bool AllowOpaqueConstants) {
1506 if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1507 return true;
1508
1509 if (!isBuildVectorOp(MI.getOpcode()))
1510 return false;
1511
1512 const unsigned NumOps = MI.getNumOperands();
1513 for (unsigned I = 1; I != NumOps; ++I) {
1514 const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1515 if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1516 return false;
1517 }
1518
1519 return true;
1520}
1521
1522std::optional<APInt>
1524 const MachineRegisterInfo &MRI) {
1525 Register Def = MI.getOperand(0).getReg();
1526 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1527 return C->Value;
1528 auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1529 if (!MaybeCst)
1530 return std::nullopt;
1531 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1532 return APInt(ScalarSize, *MaybeCst, true);
1533}
1534
1536 const MachineRegisterInfo &MRI, bool AllowUndefs) {
1537 switch (MI.getOpcode()) {
1538 case TargetOpcode::G_IMPLICIT_DEF:
1539 return AllowUndefs;
1540 case TargetOpcode::G_CONSTANT:
1541 return MI.getOperand(1).getCImm()->isNullValue();
1542 case TargetOpcode::G_FCONSTANT: {
1543 const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1544 return FPImm->isZero() && !FPImm->isNegative();
1545 }
1546 default:
1547 if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1548 return false;
1549 return isBuildVectorAllZeros(MI, MRI);
1550 }
1551}
1552
1554 const MachineRegisterInfo &MRI,
1555 bool AllowUndefs) {
1556 switch (MI.getOpcode()) {
1557 case TargetOpcode::G_IMPLICIT_DEF:
1558 return AllowUndefs;
1559 case TargetOpcode::G_CONSTANT:
1560 return MI.getOperand(1).getCImm()->isAllOnesValue();
1561 default:
1562 if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1563 return false;
1564 return isBuildVectorAllOnes(MI, MRI);
1565 }
1566}
1567
1569 const MachineRegisterInfo &MRI, Register Reg,
1570 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1571
1572 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1573 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1574 return Match(nullptr);
1575
1576 // TODO: Also handle fconstant
1577 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1578 return Match(Def->getOperand(1).getCImm());
1579
1580 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1581 return false;
1582
1583 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1584 Register SrcElt = Def->getOperand(I).getReg();
1585 const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1586 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1587 if (!Match(nullptr))
1588 return false;
1589 continue;
1590 }
1591
1592 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1593 !Match(SrcDef->getOperand(1).getCImm()))
1594 return false;
1595 }
1596
1597 return true;
1598}
1599
1600bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1601 bool IsFP) {
1602 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1603 case TargetLowering::UndefinedBooleanContent:
1604 return Val & 0x1;
1605 case TargetLowering::ZeroOrOneBooleanContent:
1606 return Val == 1;
1607 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1608 return Val == -1;
1609 }
1610 llvm_unreachable("Invalid boolean contents");
1611}
1612
1613bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1614 bool IsVector, bool IsFP) {
1615 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1616 case TargetLowering::UndefinedBooleanContent:
1617 return ~Val & 0x1;
1618 case TargetLowering::ZeroOrOneBooleanContent:
1619 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1620 return Val == 0;
1621 }
1622 llvm_unreachable("Invalid boolean contents");
1623}
1624
1625int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1626 bool IsFP) {
1627 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1628 case TargetLowering::UndefinedBooleanContent:
1629 case TargetLowering::ZeroOrOneBooleanContent:
1630 return 1;
1631 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1632 return -1;
1633 }
1634 llvm_unreachable("Invalid boolean contents");
1635}
1636
1639 const auto &F = MBB.getParent()->getFunction();
1640 return F.hasOptSize() || F.hasMinSize() ||
1642}
1643
1645 LostDebugLocObserver *LocObserver,
1646 SmallInstListTy &DeadInstChain) {
1647 for (MachineOperand &Op : MI.uses()) {
1648 if (Op.isReg() && Op.getReg().isVirtual())
1649 DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1650 }
1651 LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1652 DeadInstChain.remove(&MI);
1653 MI.eraseFromParent();
1654 if (LocObserver)
1655 LocObserver->checkpoint(false);
1656}
1657
1660 LostDebugLocObserver *LocObserver) {
1661 SmallInstListTy DeadInstChain;
1662 for (MachineInstr *MI : DeadInstrs)
1663 saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1664
1665 while (!DeadInstChain.empty()) {
1666 MachineInstr *Inst = DeadInstChain.pop_back_val();
1667 if (!isTriviallyDead(*Inst, MRI))
1668 continue;
1669 saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1670 }
1671}
1672
1674 LostDebugLocObserver *LocObserver) {
1675 return eraseInstrs({&MI}, MRI, LocObserver);
1676}
1677
1679 for (auto &Def : MI.defs()) {
1680 assert(Def.isReg() && "Must be a reg");
1681
1683 for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1684 MachineInstr *DbgValue = MOUse.getParent();
1685 // Ignore partially formed DBG_VALUEs.
1686 if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1687 DbgUsers.push_back(&MOUse);
1688 }
1689 }
1690
1691 if (!DbgUsers.empty()) {
1693 }
1694 }
1695}
1696
1698 switch (Opc) {
1699 case TargetOpcode::G_FABS:
1700 case TargetOpcode::G_FADD:
1701 case TargetOpcode::G_FCANONICALIZE:
1702 case TargetOpcode::G_FCEIL:
1703 case TargetOpcode::G_FCONSTANT:
1704 case TargetOpcode::G_FCOPYSIGN:
1705 case TargetOpcode::G_FCOS:
1706 case TargetOpcode::G_FDIV:
1707 case TargetOpcode::G_FEXP2:
1708 case TargetOpcode::G_FEXP:
1709 case TargetOpcode::G_FFLOOR:
1710 case TargetOpcode::G_FLOG10:
1711 case TargetOpcode::G_FLOG2:
1712 case TargetOpcode::G_FLOG:
1713 case TargetOpcode::G_FMA:
1714 case TargetOpcode::G_FMAD:
1715 case TargetOpcode::G_FMAXIMUM:
1716 case TargetOpcode::G_FMAXNUM:
1717 case TargetOpcode::G_FMAXNUM_IEEE:
1718 case TargetOpcode::G_FMINIMUM:
1719 case TargetOpcode::G_FMINNUM:
1720 case TargetOpcode::G_FMINNUM_IEEE:
1721 case TargetOpcode::G_FMUL:
1722 case TargetOpcode::G_FNEARBYINT:
1723 case TargetOpcode::G_FNEG:
1724 case TargetOpcode::G_FPEXT:
1725 case TargetOpcode::G_FPOW:
1726 case TargetOpcode::G_FPTRUNC:
1727 case TargetOpcode::G_FREM:
1728 case TargetOpcode::G_FRINT:
1729 case TargetOpcode::G_FSIN:
1730 case TargetOpcode::G_FTAN:
1731 case TargetOpcode::G_FACOS:
1732 case TargetOpcode::G_FASIN:
1733 case TargetOpcode::G_FATAN:
1734 case TargetOpcode::G_FCOSH:
1735 case TargetOpcode::G_FSINH:
1736 case TargetOpcode::G_FTANH:
1737 case TargetOpcode::G_FSQRT:
1738 case TargetOpcode::G_FSUB:
1739 case TargetOpcode::G_INTRINSIC_ROUND:
1740 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1741 case TargetOpcode::G_INTRINSIC_TRUNC:
1742 return true;
1743 default:
1744 return false;
1745 }
1746}
1747
1748/// Shifts return poison if shiftwidth is larger than the bitwidth.
1749static bool shiftAmountKnownInRange(Register ShiftAmount,
1750 const MachineRegisterInfo &MRI) {
1751 LLT Ty = MRI.getType(ShiftAmount);
1752
1753 if (Ty.isScalableVector())
1754 return false; // Can't tell, just return false to be safe
1755
1756 if (Ty.isScalar()) {
1757 std::optional<ValueAndVReg> Val =
1759 if (!Val)
1760 return false;
1761 return Val->Value.ult(Ty.getScalarSizeInBits());
1762 }
1763
1764 GBuildVector *BV = getOpcodeDef<GBuildVector>(ShiftAmount, MRI);
1765 if (!BV)
1766 return false;
1767
1768 unsigned Sources = BV->getNumSources();
1769 for (unsigned I = 0; I < Sources; ++I) {
1770 std::optional<ValueAndVReg> Val =
1772 if (!Val)
1773 return false;
1774 if (!Val->Value.ult(Ty.getScalarSizeInBits()))
1775 return false;
1776 }
1777
1778 return true;
1779}
1780
namespace {
/// Bitmask selecting which kinds of "bad" values an analysis cares about.
// NOTE(review): the combined enumerator line was lost in extraction; restored
// as UndefAndPoison per upstream llvm/lib/CodeGen/GlobalISel/Utils.cpp —
// verify against the repository.
enum class UndefPoisonKind {
  PoisonOnly = (1 << 0),
  UndefOnly = (1 << 1),
  UndefAndPoison = PoisonOnly | UndefOnly,
};
}
1788
1790 return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
1791}
1792
1794 return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
1795}
1796
1798 bool ConsiderFlagsAndMetadata,
1799 UndefPoisonKind Kind) {
1800 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1801
1802 if (ConsiderFlagsAndMetadata && includesPoison(Kind))
1803 if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
1804 if (GMI->hasPoisonGeneratingFlags())
1805 return true;
1806
1807 // Check whether opcode is a poison/undef-generating operation.
1808 switch (RegDef->getOpcode()) {
1809 case TargetOpcode::G_BUILD_VECTOR:
1810 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
1811 return false;
1812 case TargetOpcode::G_SHL:
1813 case TargetOpcode::G_ASHR:
1814 case TargetOpcode::G_LSHR:
1815 return includesPoison(Kind) &&
1817 case TargetOpcode::G_FPTOSI:
1818 case TargetOpcode::G_FPTOUI:
1819 // fptosi/ui yields poison if the resulting value does not fit in the
1820 // destination type.
1821 return true;
1822 case TargetOpcode::G_CTLZ:
1823 case TargetOpcode::G_CTTZ:
1824 case TargetOpcode::G_ABS:
1825 case TargetOpcode::G_CTPOP:
1826 case TargetOpcode::G_BSWAP:
1827 case TargetOpcode::G_BITREVERSE:
1828 case TargetOpcode::G_FSHL:
1829 case TargetOpcode::G_FSHR:
1830 case TargetOpcode::G_SMAX:
1831 case TargetOpcode::G_SMIN:
1832 case TargetOpcode::G_UMAX:
1833 case TargetOpcode::G_UMIN:
1834 case TargetOpcode::G_PTRMASK:
1835 case TargetOpcode::G_SADDO:
1836 case TargetOpcode::G_SSUBO:
1837 case TargetOpcode::G_UADDO:
1838 case TargetOpcode::G_USUBO:
1839 case TargetOpcode::G_SMULO:
1840 case TargetOpcode::G_UMULO:
1841 case TargetOpcode::G_SADDSAT:
1842 case TargetOpcode::G_UADDSAT:
1843 case TargetOpcode::G_SSUBSAT:
1844 case TargetOpcode::G_USUBSAT:
1845 return false;
1846 case TargetOpcode::G_SSHLSAT:
1847 case TargetOpcode::G_USHLSAT:
1848 return includesPoison(Kind) &&
1850 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1851 GInsertVectorElement *Insert = cast<GInsertVectorElement>(RegDef);
1852 if (includesPoison(Kind)) {
1853 std::optional<ValueAndVReg> Index =
1854 getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
1855 if (!Index)
1856 return true;
1857 LLT VecTy = MRI.getType(Insert->getVectorReg());
1858 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1859 }
1860 return false;
1861 }
1862 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1863 GExtractVectorElement *Extract = cast<GExtractVectorElement>(RegDef);
1864 if (includesPoison(Kind)) {
1865 std::optional<ValueAndVReg> Index =
1867 if (!Index)
1868 return true;
1869 LLT VecTy = MRI.getType(Extract->getVectorReg());
1870 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1871 }
1872 return false;
1873 }
1874 case TargetOpcode::G_SHUFFLE_VECTOR: {
1875 GShuffleVector *Shuffle = cast<GShuffleVector>(RegDef);
1876 ArrayRef<int> Mask = Shuffle->getMask();
1877 return includesPoison(Kind) && is_contained(Mask, -1);
1878 }
1879 case TargetOpcode::G_FNEG:
1880 case TargetOpcode::G_PHI:
1881 case TargetOpcode::G_SELECT:
1882 case TargetOpcode::G_UREM:
1883 case TargetOpcode::G_SREM:
1884 case TargetOpcode::G_FREEZE:
1885 case TargetOpcode::G_ICMP:
1886 case TargetOpcode::G_FCMP:
1887 case TargetOpcode::G_FADD:
1888 case TargetOpcode::G_FSUB:
1889 case TargetOpcode::G_FMUL:
1890 case TargetOpcode::G_FDIV:
1891 case TargetOpcode::G_FREM:
1892 case TargetOpcode::G_PTR_ADD:
1893 return false;
1894 default:
1895 return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
1896 }
1897}
1898
1900 const MachineRegisterInfo &MRI,
1901 unsigned Depth,
1902 UndefPoisonKind Kind) {
1904 return false;
1905
1906 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1907
1908 switch (RegDef->getOpcode()) {
1909 case TargetOpcode::G_FREEZE:
1910 return true;
1911 case TargetOpcode::G_IMPLICIT_DEF:
1912 return !includesUndef(Kind);
1913 case TargetOpcode::G_CONSTANT:
1914 case TargetOpcode::G_FCONSTANT:
1915 return true;
1916 case TargetOpcode::G_BUILD_VECTOR: {
1917 GBuildVector *BV = cast<GBuildVector>(RegDef);
1918 unsigned NumSources = BV->getNumSources();
1919 for (unsigned I = 0; I < NumSources; ++I)
1921 Depth + 1, Kind))
1922 return false;
1923 return true;
1924 }
1925 case TargetOpcode::G_PHI: {
1926 GPhi *Phi = cast<GPhi>(RegDef);
1927 unsigned NumIncoming = Phi->getNumIncomingValues();
1928 for (unsigned I = 0; I < NumIncoming; ++I)
1929 if (!::isGuaranteedNotToBeUndefOrPoison(Phi->getIncomingValue(I), MRI,
1930 Depth + 1, Kind))
1931 return false;
1932 return true;
1933 }
1934 default: {
1935 auto MOCheck = [&](const MachineOperand &MO) {
1936 if (!MO.isReg())
1937 return true;
1938 return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,
1939 Kind);
1940 };
1941 return !::canCreateUndefOrPoison(Reg, MRI,
1942 /*ConsiderFlagsAndMetadata=*/true, Kind) &&
1943 all_of(RegDef->uses(), MOCheck);
1944 }
1945 }
1946}
1947
1949 bool ConsiderFlagsAndMetadata) {
1950 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
1952}
1953
1955 bool ConsiderFlagsAndMetadata = true) {
1956 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
1958}
1959
1961 const MachineRegisterInfo &MRI,
1962 unsigned Depth) {
1963 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1965}
1966
1968 const MachineRegisterInfo &MRI,
1969 unsigned Depth) {
1970 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1972}
1973
1975 const MachineRegisterInfo &MRI,
1976 unsigned Depth) {
1977 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1979}
1980
1982 if (Ty.isVector())
1983 return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1984 Ty.getElementCount());
1985 return IntegerType::get(C, Ty.getSizeInBits());
1986}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
basic Basic Alias true
static bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, bool ConsiderFlagsAndMetadata, UndefPoisonKind Kind)
Definition: Utils.cpp:1797
static bool isGuaranteedNotToBeUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, unsigned Depth, UndefPoisonKind Kind)
Definition: Utils.cpp:1899
static bool includesPoison(UndefPoisonKind Kind)
Definition: Utils.cpp:1789
static bool includesUndef(UndefPoisonKind Kind)
Definition: Utils.cpp:1793
static void reportGISelDiagnostic(DiagnosticSeverity Severity, MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Definition: Utils.cpp:251
static bool shiftAmountKnownInRange(Register ShiftAmount, const MachineRegisterInfo &MRI)
Shifts return poison if shiftwidth is larger than the bitwidth.
Definition: Utils.cpp:1749
bool canCreatePoison(Register Reg, const MachineRegisterInfo &MRI, bool ConsiderFlagsAndMetadata=true)
Definition: Utils.cpp:1954
static bool isBuildVectorOp(unsigned Opcode)
Definition: Utils.cpp:1335
static bool isConstantScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Definition: Utils.cpp:1466
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Size
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic operations.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Tracks DebugLocs between checkpoints and verifies that they are transferred.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*---===//
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
UndefPoisonKind
static const char PassName[]
Value * RHS
Value * LHS
BinaryOperator * Mul
Class recording the (high level) value of a variable.
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1125
void copySign(const APFloat &RHS)
Definition: APFloat.h:1219
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition: APFloat.cpp:5337
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1107
opStatus add(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1098
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition: APFloat.h:1249
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1116
APInt bitcastToAPInt() const
Definition: APFloat.h:1266
opStatus mod(const APFloat &RHS)
Definition: APFloat.h:1143
Class for arbitrary precision integers.
Definition: APInt.h:78
APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition: APInt.cpp:1543
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:981
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:1002
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:906
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition: APInt.cpp:1636
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1446
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition: APInt.cpp:1614
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:805
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1706
APInt sext(unsigned width) const
Sign extend to a new width.
Definition: APInt.cpp:954
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:418
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:829
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Definition: ArrayRef.h:41
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequencies.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
const APFloat & getValueAPF() const
Definition: Constants.h:312
bool isNegative() const
Return true if the sign bit is set.
Definition: Constants.h:319
bool isZero() const
Return true if the value is positive or negative zero.
Definition: Constants.h:316
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:146
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:311
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition: TypeSize.h:317
Represents a G_BUILD_VECTOR.
Represents an extract vector element.
Abstract class that contains various methods for clients to notify about changes.
KnownBits getKnownBits(Register R)
void insert(MachineInstr *I)
Add the specified instruction to the worklist if it isn't already in it.
Definition: GISelWorkList.h:74
MachineInstr * pop_back_val()
bool empty() const
Definition: GISelWorkList.h:38
void remove(const MachineInstr *I)
Remove I from the worklist if it exists.
Definition: GISelWorkList.h:83
Represents an insert vector element.
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Represents a G_PHI.
Represents a G_SHUFFLE_VECTOR.
ArrayRef< int > getMask() const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isScalar() const
Definition: LowLevelType.h:146
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:64
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:170
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:100
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy)
Definition: LowLevelType.h:124
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFunctionProperties & set(Property P)
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
GISelChangeObserver * getObserver() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual register for it.
Helper class to build MachineInstr.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:346
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:396
iterator_range< mop_iterator > uses()
Returns a range that includes all operands that are register uses.
Definition: MachineInstr.h:733
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:498
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:579
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Diagnostic information for missed-optimization remarks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Analysis providing profile information.
Represents a value which can be a Register or a constant.
Definition: Utils.h:395
Holds all the information related to register banks.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
This class implements the register bank concept.
Definition: RegisterBank.h:28
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
Target-Independent Code Generator Pass Configuration Options.
bool isGlobalISelAbortEnabled() const
Check whether or not GlobalISel should abort on error.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition: TypeSize.h:258
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition: APInt.h:2195
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition: APInt.h:2200
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2205
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition: APInt.h:2210
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
SpecificConstantMatch m_SpecificICst(int64_t RequestedValue)
Matches a constant equal to RequestedValue.
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
DiagnosticInfoMIROptimization::MachineArgument MNV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII, MCRegister PhysReg, const TargetRegisterClass &RC, const DebugLoc &DL, LLT RegTy=LLT())
Return a virtual register corresponding to the incoming argument register PhysReg.
Definition: Utils.cpp:910
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
Definition: Utils.cpp:1440
Type * getTypeForLLT(LLT Ty, LLVMContext &C)
Get the type back from LLT.
Definition: Utils.cpp:1981
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition: Utils.cpp:56
MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
Definition: Utils.cpp:646
const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:459
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition: Utils.cpp:295
std::optional< APFloat > ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:984
std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1400
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition: Utils.cpp:1553
const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:734
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1678
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:155
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
std::optional< SmallVector< unsigned > > ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI, std::function< unsigned(APInt)> CB)
Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on Src.
Definition: Utils.cpp:997
std::optional< APInt > ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:943
std::optional< RegOrConstant > getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1453
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition: APFloat.h:1514
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
Definition: Utils.cpp:1523
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition: Utils.cpp:1535
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition: Utils.cpp:486
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
Definition: Utils.cpp:1568
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
Definition: Utils.cpp:1600
LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition: Utils.cpp:1172
std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition: Utils.cpp:314
std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:665
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
Returns true if the given block should be optimized for size.
Definition: Utils.cpp:1637
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
Definition: APFloat.h:1475
bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
Definition: Utils.cpp:1503
constexpr unsigned MaxAnalysisRecursionDepth
Definition: ValueTracking.h:44
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
Definition: Utils.cpp:201
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver, SmallInstListTy &DeadInstChain)
Definition: Utils.cpp:1644
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:275
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
std::optional< SmallVector< APInt > > ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1029
std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
Definition: Utils.cpp:439
bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition: Utils.cpp:1446
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
SmallVector< APInt > ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Tries to constant fold a vector binop with sources Op1 and Op2.
Definition: Utils.cpp:788
std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
Definition: Utils.cpp:1433
std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:960
void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
Definition: Utils.cpp:500
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition: Utils.cpp:1168
LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
Definition: Utils.cpp:1239
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
Definition: APFloat.h:1461
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
Definition: TargetOpcodes.h:36
APInt getIConstantFromReg(Register VReg, const MachineRegisterInfo &MRI)
VReg is defined by a G_CONSTANT, return the corresponding value.
Definition: Utils.cpp:306
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
Definition: Utils.cpp:447
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Definition: Utils.cpp:1613
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:652
bool isBuildVectorConstantSplat(const Register Reg, const MachineRegisterInfo &MRI, int64_t SplatValue, bool AllowUndef)
Return true if the specified register is defined by G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all ...
Definition: Utils.cpp:1384
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition: Utils.cpp:1673
DiagnosticSeverity
Defines the different supported severity of a diagnostic.
@ DS_Warning
@ DS_Error
Register constrainRegToClass(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, Register Reg, const TargetRegisterClass &RegClass)
Try to constrain Reg to the specified register class.
Definition: Utils.cpp:46
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
Definition: Utils.cpp:1625
std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition: Utils.cpp:433
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition: Utils.cpp:1697
bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI)
Returns true if Val can be assumed to never be a signaling NaN.
Definition: Utils.h:338
std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
Definition: Utils.cpp:467
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1886
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:212
void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition: Utils.cpp:1658
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI, MachineInstr &MI, ArrayRef< MachineOperand * > DbgUsers)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
Definition: Utils.cpp:493
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition: Utils.cpp:1260
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition: APFloat.h:1488
std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1418
void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
Definition: Utils.cpp:604
int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
Definition: Utils.cpp:222
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition: Utils.cpp:893
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel warning as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:269
#define MORE()
Definition: regcomp.c:252
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Simple struct used to hold a Register value and the instruction which defines it.
Definition: Utils.h:228
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition: KnownBits.h:278
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition: KnownBits.h:275
This class contains a discriminated union of information about pointers in memory operands,...
int64_t Offset
Offset - This is an offset from the base Value*.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
Simple struct used to hold a constant integer value and a virtual register.
Definition: Utils.h:187