LLVM 19.0.0git
Utils.cpp
Go to the documentation of this file.
1//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file This file implements the utility functions used by the GlobalISel
9/// pipeline.
10//===----------------------------------------------------------------------===//
11
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
35#include "llvm/IR/Constants.h"
38#include <numeric>
39#include <optional>
40
41#define DEBUG_TYPE "globalisel-utils"
42
43using namespace llvm;
44using namespace MIPatternMatch;
45
47 const TargetInstrInfo &TII,
48 const RegisterBankInfo &RBI, Register Reg,
49 const TargetRegisterClass &RegClass) {
50 if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
51 return MRI.createVirtualRegister(&RegClass);
52
53 return Reg;
54}
55
57 const MachineFunction &MF, const TargetRegisterInfo &TRI,
59 const RegisterBankInfo &RBI, MachineInstr &InsertPt,
60 const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
61 Register Reg = RegMO.getReg();
62 // Assume physical registers are properly constrained.
63 assert(Reg.isVirtual() && "PhysReg not implemented");
64
65 // Save the old register class to check whether
66 // the change notifications will be required.
67 // TODO: A better approach would be to pass
68 // the observers to constrainRegToClass().
69 auto *OldRegClass = MRI.getRegClassOrNull(Reg);
70 Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
71 // If we created a new virtual register because the class is not compatible
72 // then create a copy between the new and the old register.
73 if (ConstrainedReg != Reg) {
74 MachineBasicBlock::iterator InsertIt(&InsertPt);
75 MachineBasicBlock &MBB = *InsertPt.getParent();
76 // FIXME: The copy needs to have the classes constrained for its operands.
77 // Use operand's regbank to get the class for old register (Reg).
78 if (RegMO.isUse()) {
79 BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
80 TII.get(TargetOpcode::COPY), ConstrainedReg)
81 .addReg(Reg);
82 } else {
83 assert(RegMO.isDef() && "Must be a definition");
84 BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
85 TII.get(TargetOpcode::COPY), Reg)
86 .addReg(ConstrainedReg);
87 }
88 if (GISelChangeObserver *Observer = MF.getObserver()) {
89 Observer->changingInstr(*RegMO.getParent());
90 }
91 RegMO.setReg(ConstrainedReg);
92 if (GISelChangeObserver *Observer = MF.getObserver()) {
93 Observer->changedInstr(*RegMO.getParent());
94 }
95 } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
96 if (GISelChangeObserver *Observer = MF.getObserver()) {
97 if (!RegMO.isDef()) {
98 MachineInstr *RegDef = MRI.getVRegDef(Reg);
99 Observer->changedInstr(*RegDef);
100 }
101 Observer->changingAllUsesOfReg(MRI, Reg);
102 Observer->finishedChangingAllUsesOfReg();
103 }
104 }
105 return ConstrainedReg;
106}
107
109 const MachineFunction &MF, const TargetRegisterInfo &TRI,
111 const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
112 MachineOperand &RegMO, unsigned OpIdx) {
113 Register Reg = RegMO.getReg();
114 // Assume physical registers are properly constrained.
115 assert(Reg.isVirtual() && "PhysReg not implemented");
116
117 const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
118 // Some of the target independent instructions, like COPY, may not impose any
119 // register class constraints on some of their operands: If it's a use, we can
120 // skip constraining as the instruction defining the register would constrain
121 // it.
122
123 if (OpRC) {
124 // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
125 // can have multiple regbanks for a superclass that combine different
126 // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
127 // resolved by targets during regbankselect should not be overridden.
128 if (const auto *SubRC = TRI.getCommonSubClass(
129 OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
130 OpRC = SubRC;
131
132 OpRC = TRI.getAllocatableClass(OpRC);
133 }
134
135 if (!OpRC) {
136 assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
137 "Register class constraint is required unless either the "
138 "instruction is target independent or the operand is a use");
139 // FIXME: Just bailing out like this here could be not enough, unless we
140 // expect the users of this function to do the right thing for PHIs and
141 // COPY:
142 // v1 = COPY v0
143 // v2 = COPY v1
144 // v1 here may end up not being constrained at all. Please notice that to
145 // reproduce the issue we likely need a destination pattern of a selection
146 // rule producing such extra copies, not just an input GMIR with them as
147 // every existing target using selectImpl handles copies before calling it
148 // and they never reach this function.
149 return Reg;
150 }
151 return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
152 RegMO);
153}
154
156 const TargetInstrInfo &TII,
157 const TargetRegisterInfo &TRI,
158 const RegisterBankInfo &RBI) {
159 assert(!isPreISelGenericOpcode(I.getOpcode()) &&
160 "A selected instruction is expected");
161 MachineBasicBlock &MBB = *I.getParent();
164
165 for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
166 MachineOperand &MO = I.getOperand(OpI);
167
168 // There's nothing to be done on non-register operands.
169 if (!MO.isReg())
170 continue;
171
172 LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
173 assert(MO.isReg() && "Unsupported non-reg operand");
174
175 Register Reg = MO.getReg();
176 // Physical registers don't need to be constrained.
177 if (Reg.isPhysical())
178 continue;
179
180 // Register operands with a value of 0 (e.g. predicate operands) don't need
181 // to be constrained.
182 if (Reg == 0)
183 continue;
184
185 // If the operand is a vreg, we should constrain its regclass, and only
186 // insert COPYs if that's impossible.
187 // constrainOperandRegClass does that for us.
188 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
189
190 // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
191 // done.
192 if (MO.isUse()) {
193 int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
194 if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
195 I.tieOperands(DefIdx, OpI);
196 }
197 }
198 return true;
199}
200
203 // Give up if either DstReg or SrcReg is a physical register.
204 if (DstReg.isPhysical() || SrcReg.isPhysical())
205 return false;
206 // Give up if the types don't match.
207 if (MRI.getType(DstReg) != MRI.getType(SrcReg))
208 return false;
209 // Replace if either DstReg has no constraints or the register
210 // constraints match.
211 const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
212 if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
213 return true;
214
215 // Otherwise match if the Src is already a regclass that is covered by the Dst
216 // RegBank.
217 return DstRBC.is<const RegisterBank *>() && MRI.getRegClassOrNull(SrcReg) &&
218 DstRBC.get<const RegisterBank *>()->covers(
219 *MRI.getRegClassOrNull(SrcReg));
220}
221
223 const MachineRegisterInfo &MRI) {
224 // FIXME: This logical is mostly duplicated with
225 // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
226 // MachineInstr::isLabel?
227
228 // Don't delete frame allocation labels.
229 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
230 return false;
231 // LIFETIME markers should be preserved even if they seem dead.
232 if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
233 MI.getOpcode() == TargetOpcode::LIFETIME_END)
234 return false;
235
236 // If we can move an instruction, we can remove it. Otherwise, it has
237 // a side-effect of some sort.
238 bool SawStore = false;
239 if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
240 return false;
241
242 // Instructions without side-effects are dead iff they only define dead vregs.
243 for (const auto &MO : MI.all_defs()) {
244 Register Reg = MO.getReg();
245 if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
246 return false;
247 }
248 return true;
249}
250
252 MachineFunction &MF,
253 const TargetPassConfig &TPC,
256 bool IsFatal = Severity == DS_Error &&
258 // Print the function name explicitly if we don't have a debug location (which
259 // makes the diagnostic less useful) or if we're going to emit a raw error.
260 if (!R.getLocation().isValid() || IsFatal)
261 R << (" (in function: " + MF.getName() + ")").str();
262
263 if (IsFatal)
264 report_fatal_error(Twine(R.getMsg()));
265 else
266 MORE.emit(R);
267}
268
273}
274
278 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
279 reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
280}
281
284 const char *PassName, StringRef Msg,
285 const MachineInstr &MI) {
286 MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
287 MI.getDebugLoc(), MI.getParent());
288 R << Msg;
289 // Printing MI is expensive; only do it if expensive remarks are enabled.
290 if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
291 R << ": " << ore::MNV("Inst", MI);
292 reportGISelFailure(MF, TPC, MORE, R);
293}
294
295std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
296 const MachineRegisterInfo &MRI) {
297 std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
298 VReg, MRI, /*LookThroughInstrs*/ false);
299 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
300 "Value found while looking through instrs");
301 if (!ValAndVReg)
302 return std::nullopt;
303 return ValAndVReg->Value;
304}
305
306std::optional<int64_t>
308 std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
309 if (Val && Val->getBitWidth() <= 64)
310 return Val->getSExtValue();
311 return std::nullopt;
312}
313
314namespace {
315
316// This function is used in many places, and as such, it has some
317// micro-optimizations to try and make it as fast as it can be.
318//
319// - We use template arguments to avoid an indirect call caused by passing a
320// function_ref/std::function
321// - GetAPCstValue does not return std::optional<APInt> as that's expensive.
322// Instead it returns true/false and places the result in a pre-constructed
323// APInt.
324//
325// Please change this function carefully and benchmark your changes.
326template <bool (*IsConstantOpcode)(const MachineInstr *),
327 bool (*GetAPCstValue)(const MachineInstr *MI, APInt &)>
328std::optional<ValueAndVReg>
329getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
330 bool LookThroughInstrs = true,
331 bool LookThroughAnyExt = false) {
334
335 while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
336 LookThroughInstrs) {
337 switch (MI->getOpcode()) {
338 case TargetOpcode::G_ANYEXT:
339 if (!LookThroughAnyExt)
340 return std::nullopt;
341 [[fallthrough]];
342 case TargetOpcode::G_TRUNC:
343 case TargetOpcode::G_SEXT:
344 case TargetOpcode::G_ZEXT:
345 SeenOpcodes.push_back(std::make_pair(
346 MI->getOpcode(),
347 MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
348 VReg = MI->getOperand(1).getReg();
349 break;
350 case TargetOpcode::COPY:
351 VReg = MI->getOperand(1).getReg();
352 if (VReg.isPhysical())
353 return std::nullopt;
354 break;
355 case TargetOpcode::G_INTTOPTR:
356 VReg = MI->getOperand(1).getReg();
357 break;
358 default:
359 return std::nullopt;
360 }
361 }
362 if (!MI || !IsConstantOpcode(MI))
363 return std::nullopt;
364
365 APInt Val;
366 if (!GetAPCstValue(MI, Val))
367 return std::nullopt;
368 for (auto &Pair : reverse(SeenOpcodes)) {
369 switch (Pair.first) {
370 case TargetOpcode::G_TRUNC:
371 Val = Val.trunc(Pair.second);
372 break;
373 case TargetOpcode::G_ANYEXT:
374 case TargetOpcode::G_SEXT:
375 Val = Val.sext(Pair.second);
376 break;
377 case TargetOpcode::G_ZEXT:
378 Val = Val.zext(Pair.second);
379 break;
380 }
381 }
382
383 return ValueAndVReg{std::move(Val), VReg};
384}
385
386bool isIConstant(const MachineInstr *MI) {
387 if (!MI)
388 return false;
389 return MI->getOpcode() == TargetOpcode::G_CONSTANT;
390}
391
392bool isFConstant(const MachineInstr *MI) {
393 if (!MI)
394 return false;
395 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
396}
397
398bool isAnyConstant(const MachineInstr *MI) {
399 if (!MI)
400 return false;
401 unsigned Opc = MI->getOpcode();
402 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
403}
404
405bool getCImmAsAPInt(const MachineInstr *MI, APInt &Result) {
406 const MachineOperand &CstVal = MI->getOperand(1);
407 if (!CstVal.isCImm())
408 return false;
409 Result = CstVal.getCImm()->getValue();
410 return true;
411}
412
413bool getCImmOrFPImmAsAPInt(const MachineInstr *MI, APInt &Result) {
414 const MachineOperand &CstVal = MI->getOperand(1);
415 if (CstVal.isCImm())
416 Result = CstVal.getCImm()->getValue();
417 else if (CstVal.isFPImm())
419 else
420 return false;
421 return true;
422}
423
424} // end anonymous namespace
425
427 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
428 return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
429 VReg, MRI, LookThroughInstrs);
430}
431
433 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
434 bool LookThroughAnyExt) {
435 return getConstantVRegValWithLookThrough<isAnyConstant,
436 getCImmOrFPImmAsAPInt>(
437 VReg, MRI, LookThroughInstrs, LookThroughAnyExt);
438}
439
440std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
441 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
442 auto Reg =
443 getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
444 VReg, MRI, LookThroughInstrs);
445 if (!Reg)
446 return std::nullopt;
448 Reg->VReg};
449}
450
451const ConstantFP *
453 MachineInstr *MI = MRI.getVRegDef(VReg);
454 if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
455 return nullptr;
456 return MI->getOperand(1).getFPImm();
457}
458
459std::optional<DefinitionAndSourceRegister>
461 Register DefSrcReg = Reg;
462 auto *DefMI = MRI.getVRegDef(Reg);
463 auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
464 if (!DstTy.isValid())
465 return std::nullopt;
466 unsigned Opc = DefMI->getOpcode();
467 while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
468 Register SrcReg = DefMI->getOperand(1).getReg();
469 auto SrcTy = MRI.getType(SrcReg);
470 if (!SrcTy.isValid())
471 break;
472 DefMI = MRI.getVRegDef(SrcReg);
473 DefSrcReg = SrcReg;
474 Opc = DefMI->getOpcode();
475 }
476 return DefinitionAndSourceRegister{DefMI, DefSrcReg};
477}
478
480 const MachineRegisterInfo &MRI) {
481 std::optional<DefinitionAndSourceRegister> DefSrcReg =
483 return DefSrcReg ? DefSrcReg->MI : nullptr;
484}
485
487 const MachineRegisterInfo &MRI) {
488 std::optional<DefinitionAndSourceRegister> DefSrcReg =
490 return DefSrcReg ? DefSrcReg->Reg : Register();
491}
492
493void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
495 MachineIRBuilder &MIRBuilder,
497 for (int i = 0; i < NumParts; ++i)
498 VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
499 MIRBuilder.buildUnmerge(VRegs, Reg);
500}
501
502bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
504 SmallVectorImpl<Register> &LeftoverRegs,
505 MachineIRBuilder &MIRBuilder,
507 assert(!LeftoverTy.isValid() && "this is an out argument");
508
509 unsigned RegSize = RegTy.getSizeInBits();
510 unsigned MainSize = MainTy.getSizeInBits();
511 unsigned NumParts = RegSize / MainSize;
512 unsigned LeftoverSize = RegSize - NumParts * MainSize;
513
514 // Use an unmerge when possible.
515 if (LeftoverSize == 0) {
516 for (unsigned I = 0; I < NumParts; ++I)
517 VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
518 MIRBuilder.buildUnmerge(VRegs, Reg);
519 return true;
520 }
521
522 // Try to use unmerge for irregular vector split where possible
523 // For example when splitting a <6 x i32> into <4 x i32> with <2 x i32>
524 // leftover, it becomes:
525 // <2 x i32> %2, <2 x i32>%3, <2 x i32> %4 = G_UNMERGE_VALUE <6 x i32> %1
526 // <4 x i32> %5 = G_CONCAT_VECTOR <2 x i32> %2, <2 x i32> %3
527 if (RegTy.isVector() && MainTy.isVector()) {
528 unsigned RegNumElts = RegTy.getNumElements();
529 unsigned MainNumElts = MainTy.getNumElements();
530 unsigned LeftoverNumElts = RegNumElts % MainNumElts;
531 // If can unmerge to LeftoverTy, do it
532 if (MainNumElts % LeftoverNumElts == 0 &&
533 RegNumElts % LeftoverNumElts == 0 &&
534 RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
535 LeftoverNumElts > 1) {
536 LeftoverTy =
537 LLT::fixed_vector(LeftoverNumElts, RegTy.getScalarSizeInBits());
538
539 // Unmerge the SrcReg to LeftoverTy vectors
540 SmallVector<Register, 4> UnmergeValues;
541 extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
542 MIRBuilder, MRI);
543
544 // Find how many LeftoverTy makes one MainTy
545 unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
546 unsigned NumOfLeftoverVal =
547 ((RegNumElts % MainNumElts) / LeftoverNumElts);
548
549 // Create as many MainTy as possible using unmerged value
550 SmallVector<Register, 4> MergeValues;
551 for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
552 MergeValues.push_back(UnmergeValues[I]);
553 if (MergeValues.size() == LeftoverPerMain) {
554 VRegs.push_back(
555 MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
556 MergeValues.clear();
557 }
558 }
559 // Populate LeftoverRegs with the leftovers
560 for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
561 I < UnmergeValues.size(); I++) {
562 LeftoverRegs.push_back(UnmergeValues[I]);
563 }
564 return true;
565 }
566 }
567 // Perform irregular split. Leftover is last element of RegPieces.
568 if (MainTy.isVector()) {
569 SmallVector<Register, 8> RegPieces;
570 extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
571 MRI);
572 for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
573 VRegs.push_back(RegPieces[i]);
574 LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
575 LeftoverTy = MRI.getType(LeftoverRegs[0]);
576 return true;
577 }
578
579 LeftoverTy = LLT::scalar(LeftoverSize);
580 // For irregular sizes, extract the individual parts.
581 for (unsigned I = 0; I != NumParts; ++I) {
582 Register NewReg = MRI.createGenericVirtualRegister(MainTy);
583 VRegs.push_back(NewReg);
584 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
585 }
586
587 for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
588 Offset += LeftoverSize) {
589 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
590 LeftoverRegs.push_back(NewReg);
591 MIRBuilder.buildExtract(NewReg, Reg, Offset);
592 }
593
594 return true;
595}
596
597void llvm::extractVectorParts(Register Reg, unsigned NumElts,
599 MachineIRBuilder &MIRBuilder,
601 LLT RegTy = MRI.getType(Reg);
602 assert(RegTy.isVector() && "Expected a vector type");
603
604 LLT EltTy = RegTy.getElementType();
605 LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
606 unsigned RegNumElts = RegTy.getNumElements();
607 unsigned LeftoverNumElts = RegNumElts % NumElts;
608 unsigned NumNarrowTyPieces = RegNumElts / NumElts;
609
610 // Perfect split without leftover
611 if (LeftoverNumElts == 0)
612 return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
613 MRI);
614
615 // Irregular split. Provide direct access to all elements for artifact
616 // combiner using unmerge to elements. Then build vectors with NumElts
617 // elements. Remaining element(s) will be (used to build vector) Leftover.
619 extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);
620
621 unsigned Offset = 0;
622 // Requested sub-vectors of NarrowTy.
623 for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
624 ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
625 VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
626 }
627
628 // Leftover element(s).
629 if (LeftoverNumElts == 1) {
630 VRegs.push_back(Elts[Offset]);
631 } else {
632 LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
633 ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
634 VRegs.push_back(
635 MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
636 }
637}
638
640 const MachineRegisterInfo &MRI) {
642 return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
643}
644
645APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
646 if (Size == 32)
647 return APFloat(float(Val));
648 if (Size == 64)
649 return APFloat(Val);
650 if (Size != 16)
651 llvm_unreachable("Unsupported FPConstant size");
652 bool Ignored;
653 APFloat APF(Val);
654 APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
655 return APF;
656}
657
658std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
659 const Register Op1,
660 const Register Op2,
661 const MachineRegisterInfo &MRI) {
662 auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
663 if (!MaybeOp2Cst)
664 return std::nullopt;
665
666 auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
667 if (!MaybeOp1Cst)
668 return std::nullopt;
669
670 const APInt &C1 = MaybeOp1Cst->Value;
671 const APInt &C2 = MaybeOp2Cst->Value;
672 switch (Opcode) {
673 default:
674 break;
675 case TargetOpcode::G_ADD:
676 return C1 + C2;
677 case TargetOpcode::G_PTR_ADD:
678 // Types can be of different width here.
679 // Result needs to be the same width as C1, so trunc or sext C2.
680 return C1 + C2.sextOrTrunc(C1.getBitWidth());
681 case TargetOpcode::G_AND:
682 return C1 & C2;
683 case TargetOpcode::G_ASHR:
684 return C1.ashr(C2);
685 case TargetOpcode::G_LSHR:
686 return C1.lshr(C2);
687 case TargetOpcode::G_MUL:
688 return C1 * C2;
689 case TargetOpcode::G_OR:
690 return C1 | C2;
691 case TargetOpcode::G_SHL:
692 return C1 << C2;
693 case TargetOpcode::G_SUB:
694 return C1 - C2;
695 case TargetOpcode::G_XOR:
696 return C1 ^ C2;
697 case TargetOpcode::G_UDIV:
698 if (!C2.getBoolValue())
699 break;
700 return C1.udiv(C2);
701 case TargetOpcode::G_SDIV:
702 if (!C2.getBoolValue())
703 break;
704 return C1.sdiv(C2);
705 case TargetOpcode::G_UREM:
706 if (!C2.getBoolValue())
707 break;
708 return C1.urem(C2);
709 case TargetOpcode::G_SREM:
710 if (!C2.getBoolValue())
711 break;
712 return C1.srem(C2);
713 case TargetOpcode::G_SMIN:
714 return APIntOps::smin(C1, C2);
715 case TargetOpcode::G_SMAX:
716 return APIntOps::smax(C1, C2);
717 case TargetOpcode::G_UMIN:
718 return APIntOps::umin(C1, C2);
719 case TargetOpcode::G_UMAX:
720 return APIntOps::umax(C1, C2);
721 }
722
723 return std::nullopt;
724}
725
726std::optional<APFloat>
727llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
728 const Register Op2, const MachineRegisterInfo &MRI) {
729 const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
730 if (!Op2Cst)
731 return std::nullopt;
732
733 const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
734 if (!Op1Cst)
735 return std::nullopt;
736
737 APFloat C1 = Op1Cst->getValueAPF();
738 const APFloat &C2 = Op2Cst->getValueAPF();
739 switch (Opcode) {
740 case TargetOpcode::G_FADD:
741 C1.add(C2, APFloat::rmNearestTiesToEven);
742 return C1;
743 case TargetOpcode::G_FSUB:
744 C1.subtract(C2, APFloat::rmNearestTiesToEven);
745 return C1;
746 case TargetOpcode::G_FMUL:
747 C1.multiply(C2, APFloat::rmNearestTiesToEven);
748 return C1;
749 case TargetOpcode::G_FDIV:
750 C1.divide(C2, APFloat::rmNearestTiesToEven);
751 return C1;
752 case TargetOpcode::G_FREM:
753 C1.mod(C2);
754 return C1;
755 case TargetOpcode::G_FCOPYSIGN:
756 C1.copySign(C2);
757 return C1;
758 case TargetOpcode::G_FMINNUM:
759 return minnum(C1, C2);
760 case TargetOpcode::G_FMAXNUM:
761 return maxnum(C1, C2);
762 case TargetOpcode::G_FMINIMUM:
763 return minimum(C1, C2);
764 case TargetOpcode::G_FMAXIMUM:
765 return maximum(C1, C2);
766 case TargetOpcode::G_FMINNUM_IEEE:
767 case TargetOpcode::G_FMAXNUM_IEEE:
768 // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
769 // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
770 // and currently there isn't a nice wrapper in APFloat for the version with
771 // correct snan handling.
772 break;
773 default:
774 break;
775 }
776
777 return std::nullopt;
778}
779
781llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
782 const Register Op2,
783 const MachineRegisterInfo &MRI) {
784 auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
785 if (!SrcVec2)
786 return SmallVector<APInt>();
787
788 auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
789 if (!SrcVec1)
790 return SmallVector<APInt>();
791
792 SmallVector<APInt> FoldedElements;
793 for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
794 auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
795 SrcVec2->getSourceReg(Idx), MRI);
796 if (!MaybeCst)
797 return SmallVector<APInt>();
798 FoldedElements.push_back(*MaybeCst);
799 }
800 return FoldedElements;
801}
802
804 bool SNaN) {
805 const MachineInstr *DefMI = MRI.getVRegDef(Val);
806 if (!DefMI)
807 return false;
808
809 const TargetMachine& TM = DefMI->getMF()->getTarget();
810 if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
811 return true;
812
813 // If the value is a constant, we can obviously see if it is a NaN or not.
814 if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
815 return !FPVal->getValueAPF().isNaN() ||
816 (SNaN && !FPVal->getValueAPF().isSignaling());
817 }
818
819 if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
820 for (const auto &Op : DefMI->uses())
821 if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
822 return false;
823 return true;
824 }
825
826 switch (DefMI->getOpcode()) {
827 default:
828 break;
829 case TargetOpcode::G_FADD:
830 case TargetOpcode::G_FSUB:
831 case TargetOpcode::G_FMUL:
832 case TargetOpcode::G_FDIV:
833 case TargetOpcode::G_FREM:
834 case TargetOpcode::G_FSIN:
835 case TargetOpcode::G_FCOS:
836 case TargetOpcode::G_FTAN:
837 case TargetOpcode::G_FACOS:
838 case TargetOpcode::G_FASIN:
839 case TargetOpcode::G_FATAN:
840 case TargetOpcode::G_FCOSH:
841 case TargetOpcode::G_FSINH:
842 case TargetOpcode::G_FTANH:
843 case TargetOpcode::G_FMA:
844 case TargetOpcode::G_FMAD:
845 if (SNaN)
846 return true;
847
848 // TODO: Need isKnownNeverInfinity
849 return false;
850 case TargetOpcode::G_FMINNUM_IEEE:
851 case TargetOpcode::G_FMAXNUM_IEEE: {
852 if (SNaN)
853 return true;
854 // This can return a NaN if either operand is an sNaN, or if both operands
855 // are NaN.
856 return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
860 }
861 case TargetOpcode::G_FMINNUM:
862 case TargetOpcode::G_FMAXNUM: {
863 // Only one needs to be known not-nan, since it will be returned if the
864 // other ends up being one.
865 return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
867 }
868 }
869
870 if (SNaN) {
871 // FP operations quiet. For now, just handle the ones inserted during
872 // legalization.
873 switch (DefMI->getOpcode()) {
874 case TargetOpcode::G_FPEXT:
875 case TargetOpcode::G_FPTRUNC:
876 case TargetOpcode::G_FCANONICALIZE:
877 return true;
878 default:
879 return false;
880 }
881 }
882
883 return false;
884}
885
887 const MachinePointerInfo &MPO) {
888 auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
889 if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
890 MachineFrameInfo &MFI = MF.getFrameInfo();
891 return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
892 MPO.Offset);
893 }
894
895 if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
896 const Module *M = MF.getFunction().getParent();
897 return V->getPointerAlignment(M->getDataLayout());
898 }
899
900 return Align(1);
901}
902
904 const TargetInstrInfo &TII,
905 MCRegister PhysReg,
906 const TargetRegisterClass &RC,
907 const DebugLoc &DL, LLT RegTy) {
908 MachineBasicBlock &EntryMBB = MF.front();
910 Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
911 if (LiveIn) {
912 MachineInstr *Def = MRI.getVRegDef(LiveIn);
913 if (Def) {
914 // FIXME: Should the verifier check this is in the entry block?
915 assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
916 return LiveIn;
917 }
918
919 // It's possible the incoming argument register and copy was added during
920 // lowering, but later deleted due to being/becoming dead. If this happens,
921 // re-insert the copy.
922 } else {
923 // The live in register was not present, so add it.
924 LiveIn = MF.addLiveIn(PhysReg, &RC);
925 if (RegTy.isValid())
926 MRI.setType(LiveIn, RegTy);
927 }
928
929 BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
930 .addReg(PhysReg);
931 if (!EntryMBB.isLiveIn(PhysReg))
932 EntryMBB.addLiveIn(PhysReg);
933 return LiveIn;
934}
935
936std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
937 const Register Op1, uint64_t Imm,
938 const MachineRegisterInfo &MRI) {
939 auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
940 if (MaybeOp1Cst) {
941 switch (Opcode) {
942 default:
943 break;
944 case TargetOpcode::G_SEXT_INREG: {
945 LLT Ty = MRI.getType(Op1);
946 return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
947 }
948 }
949 }
950 return std::nullopt;
951}
952
953std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
954 const Register Op0,
955 const MachineRegisterInfo &MRI) {
956 std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
957 if (!Val)
958 return Val;
959
960 const unsigned DstSize = DstTy.getScalarSizeInBits();
961
962 switch (Opcode) {
963 case TargetOpcode::G_SEXT:
964 return Val->sext(DstSize);
965 case TargetOpcode::G_ZEXT:
966 case TargetOpcode::G_ANYEXT:
967 // TODO: DAG considers target preference when constant folding any_extend.
968 return Val->zext(DstSize);
969 default:
970 break;
971 }
972
973 llvm_unreachable("unexpected cast opcode to constant fold");
974}
975
976std::optional<APFloat>
977llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
978 const MachineRegisterInfo &MRI) {
979 assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
980 if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
981 APFloat DstVal(getFltSemanticForLLT(DstTy));
982 DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
983 APFloat::rmNearestTiesToEven);
984 return DstVal;
985 }
986 return std::nullopt;
987}
988
989std::optional<SmallVector<unsigned>>
991 std::function<unsigned(APInt)> CB) {
992 LLT Ty = MRI.getType(Src);
993 SmallVector<unsigned> FoldedCTLZs;
994 auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
995 auto MaybeCst = getIConstantVRegVal(R, MRI);
996 if (!MaybeCst)
997 return std::nullopt;
998 return CB(*MaybeCst);
999 };
1000 if (Ty.isVector()) {
1001 // Try to constant fold each element.
1002 auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
1003 if (!BV)
1004 return std::nullopt;
1005 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1006 if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
1007 FoldedCTLZs.emplace_back(*MaybeFold);
1008 continue;
1009 }
1010 return std::nullopt;
1011 }
1012 return FoldedCTLZs;
1013 }
1014 if (auto MaybeCst = tryFoldScalar(Src)) {
1015 FoldedCTLZs.emplace_back(*MaybeCst);
1016 return FoldedCTLZs;
1017 }
1018 return std::nullopt;
1019}
1020
1021std::optional<SmallVector<APInt>>
1022llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
1023 const MachineRegisterInfo &MRI) {
1024 LLT Ty = MRI.getType(Op1);
1025 if (Ty != MRI.getType(Op2))
1026 return std::nullopt;
1027
1028 auto TryFoldScalar = [&MRI, Pred](Register LHS,
1029 Register RHS) -> std::optional<APInt> {
1030 auto LHSCst = getIConstantVRegVal(LHS, MRI);
1031 auto RHSCst = getIConstantVRegVal(RHS, MRI);
1032 if (!LHSCst || !RHSCst)
1033 return std::nullopt;
1034
1035 switch (Pred) {
1036 case CmpInst::Predicate::ICMP_EQ:
1037 return APInt(/*numBits=*/1, LHSCst->eq(*RHSCst));
1038 case CmpInst::Predicate::ICMP_NE:
1039 return APInt(/*numBits=*/1, LHSCst->ne(*RHSCst));
1040 case CmpInst::Predicate::ICMP_UGT:
1041 return APInt(/*numBits=*/1, LHSCst->ugt(*RHSCst));
1042 case CmpInst::Predicate::ICMP_UGE:
1043 return APInt(/*numBits=*/1, LHSCst->uge(*RHSCst));
1044 case CmpInst::Predicate::ICMP_ULT:
1045 return APInt(/*numBits=*/1, LHSCst->ult(*RHSCst));
1046 case CmpInst::Predicate::ICMP_ULE:
1047 return APInt(/*numBits=*/1, LHSCst->ule(*RHSCst));
1048 case CmpInst::Predicate::ICMP_SGT:
1049 return APInt(/*numBits=*/1, LHSCst->sgt(*RHSCst));
1050 case CmpInst::Predicate::ICMP_SGE:
1051 return APInt(/*numBits=*/1, LHSCst->sge(*RHSCst));
1052 case CmpInst::Predicate::ICMP_SLT:
1053 return APInt(/*numBits=*/1, LHSCst->slt(*RHSCst));
1054 case CmpInst::Predicate::ICMP_SLE:
1055 return APInt(/*numBits=*/1, LHSCst->sle(*RHSCst));
1056 default:
1057 return std::nullopt;
1058 }
1059 };
1060
1061 SmallVector<APInt> FoldedICmps;
1062
1063 if (Ty.isVector()) {
1064 // Try to constant fold each element.
1065 auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
1066 auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
1067 if (!BV1 || !BV2)
1068 return std::nullopt;
1069 assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
1070 for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
1071 if (auto MaybeFold =
1072 TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
1073 FoldedICmps.emplace_back(*MaybeFold);
1074 continue;
1075 }
1076 return std::nullopt;
1077 }
1078 return FoldedICmps;
1079 }
1080
1081 if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
1082 FoldedICmps.emplace_back(*MaybeCst);
1083 return FoldedICmps;
1084 }
1085
1086 return std::nullopt;
1087}
1088
1090 GISelKnownBits *KB) {
1091 std::optional<DefinitionAndSourceRegister> DefSrcReg =
1093 if (!DefSrcReg)
1094 return false;
1095
1096 const MachineInstr &MI = *DefSrcReg->MI;
1097 const LLT Ty = MRI.getType(Reg);
1098
1099 switch (MI.getOpcode()) {
1100 case TargetOpcode::G_CONSTANT: {
1101 unsigned BitWidth = Ty.getScalarSizeInBits();
1102 const ConstantInt *CI = MI.getOperand(1).getCImm();
1103 return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
1104 }
1105 case TargetOpcode::G_SHL: {
1106 // A left-shift of a constant one will have exactly one bit set because
1107 // shifting the bit off the end is undefined.
1108
1109 // TODO: Constant splat
1110 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1111 if (*ConstLHS == 1)
1112 return true;
1113 }
1114
1115 break;
1116 }
1117 case TargetOpcode::G_LSHR: {
1118 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1119 if (ConstLHS->isSignMask())
1120 return true;
1121 }
1122
1123 break;
1124 }
1125 case TargetOpcode::G_BUILD_VECTOR: {
1126 // TODO: Probably should have a recursion depth guard since you could have
1127 // bitcasted vector elements.
1128 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
1129 if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
1130 return false;
1131
1132 return true;
1133 }
1134 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1135 // Only handle constants since we would need to know if number of leading
1136 // zeros is greater than the truncation amount.
1137 const unsigned BitWidth = Ty.getScalarSizeInBits();
1138 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
1139 auto Const = getIConstantVRegVal(MO.getReg(), MRI);
1140 if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
1141 return false;
1142 }
1143
1144 return true;
1145 }
1146 default:
1147 break;
1148 }
1149
1150 if (!KB)
1151 return false;
1152
1153 // More could be done here, though the above checks are enough
1154 // to handle some common cases.
1155
1156 // Fall back to computeKnownBits to catch other known cases.
1157 KnownBits Known = KB->getKnownBits(Reg);
1158 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
1159}
1160
1163}
1164
1165LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
1166 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1167 return OrigTy;
1168
1169 if (OrigTy.isVector() && TargetTy.isVector()) {
1170 LLT OrigElt = OrigTy.getElementType();
1171 LLT TargetElt = TargetTy.getElementType();
1172
1173 // TODO: The docstring for this function says the intention is to use this
1174 // function to build MERGE/UNMERGE instructions. It won't be the case that
1175 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1176 // could implement getLCMType between the two in the future if there was a
1177 // need, but it is not worth it now as this function should not be used in
1178 // that way.
1179 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1180 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1181 "getLCMType not implemented between fixed and scalable vectors.");
1182
1183 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
1184 int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
1185 TargetTy.getElementCount().getKnownMinValue());
1186 // Prefer the original element type.
1188 TargetTy.getElementCount().getKnownMinValue());
1189 return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
1190 OrigTy.getElementType());
1191 }
1192 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
1193 TargetTy.getSizeInBits().getKnownMinValue());
1194 return LLT::vector(
1195 ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
1196 OrigElt);
1197 }
1198
1199 // One type is scalar, one type is vector
1200 if (OrigTy.isVector() || TargetTy.isVector()) {
1201 LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
1202 LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
1203 LLT EltTy = VecTy.getElementType();
1204 LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;
1205
1206 // Prefer scalar type from OrigTy.
1207 if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
1208 return LLT::vector(VecTy.getElementCount(), OrigEltTy);
1209
1210 // Different size scalars. Create vector with the same total size.
1211 // LCM will take fixed/scalable from VecTy.
1212 unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
1214 ScalarTy.getSizeInBits().getFixedValue());
1215 // Prefer type from OrigTy
1216 return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
1217 VecTy.getElementCount().isScalable()),
1218 OrigEltTy);
1219 }
1220
1221 // At this point, both types are scalars of different size
1222 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
1223 TargetTy.getSizeInBits().getFixedValue());
1224 // Preserve pointer types.
1225 if (LCM == OrigTy.getSizeInBits())
1226 return OrigTy;
1227 if (LCM == TargetTy.getSizeInBits())
1228 return TargetTy;
1229 return LLT::scalar(LCM);
1230}
1231
1232LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
1233
1234 if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
1235 (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
1237 "getCoverTy not implemented between fixed and scalable vectors.");
1238
1239 if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
1240 (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
1241 return getLCMType(OrigTy, TargetTy);
1242
1243 unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
1244 unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
1245 if (OrigTyNumElts % TargetTyNumElts == 0)
1246 return OrigTy;
1247
1248 unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
1250 OrigTy.getElementType());
1251}
1252
1253LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
1254 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1255 return OrigTy;
1256
1257 if (OrigTy.isVector() && TargetTy.isVector()) {
1258 LLT OrigElt = OrigTy.getElementType();
1259
1260 // TODO: The docstring for this function says the intention is to use this
1261 // function to build MERGE/UNMERGE instructions. It won't be the case that
1262 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1263 // could implement getGCDType between the two in the future if there was a
1264 // need, but it is not worth it now as this function should not be used in
1265 // that way.
1266 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1267 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1268 "getGCDType not implemented between fixed and scalable vectors.");
1269
1270 unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
1271 TargetTy.getSizeInBits().getKnownMinValue());
1272 if (GCD == OrigElt.getSizeInBits())
1274 OrigElt);
1275
1276 // Cannot produce original element type, but both have vscale in common.
1277 if (GCD < OrigElt.getSizeInBits())
1279 GCD);
1280
1281 return LLT::vector(
1283 OrigTy.isScalable()),
1284 OrigElt);
1285 }
1286
1287 // If one type is vector and the element size matches the scalar size, then
1288 // the gcd is the scalar type.
1289 if (OrigTy.isVector() &&
1290 OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
1291 return OrigTy.getElementType();
1292 if (TargetTy.isVector() &&
1293 TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
1294 return OrigTy;
1295
1296 // At this point, both types are either scalars of different type or one is a
1297 // vector and one is a scalar. If both types are scalars, the GCD type is the
1298 // GCD between the two scalar sizes. If one is vector and one is scalar, then
1299 // the GCD type is the GCD between the scalar and the vector element size.
1300 LLT OrigScalar = OrigTy.getScalarType();
1301 LLT TargetScalar = TargetTy.getScalarType();
1302 unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
1303 TargetScalar.getSizeInBits().getFixedValue());
1304 return LLT::scalar(GCD);
1305}
1306
1308 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1309 "Only G_SHUFFLE_VECTOR can have a splat index!");
1310 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1311 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1312
1313 // If all elements are undefined, this shuffle can be considered a splat.
1314 // Return 0 for better potential for callers to simplify.
1315 if (FirstDefinedIdx == Mask.end())
1316 return 0;
1317
1318 // Make sure all remaining elements are either undef or the same
1319 // as the first non-undef value.
1320 int SplatValue = *FirstDefinedIdx;
1321 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1322 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1323 return std::nullopt;
1324
1325 return SplatValue;
1326}
1327
1328static bool isBuildVectorOp(unsigned Opcode) {
1329 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1330 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1331}
1332
1333namespace {
1334
1335std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1336 const MachineRegisterInfo &MRI,
1337 bool AllowUndef) {
1339 if (!MI)
1340 return std::nullopt;
1341
1342 bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1343 if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
1344 return std::nullopt;
1345
1346 std::optional<ValueAndVReg> SplatValAndReg;
1347 for (MachineOperand &Op : MI->uses()) {
1348 Register Element = Op.getReg();
1349 // If we have a G_CONCAT_VECTOR, we recursively look into the
1350 // vectors that we're concatenating to see if they're splats.
1351 auto ElementValAndReg =
1352 isConcatVectorsOp
1353 ? getAnyConstantSplat(Element, MRI, AllowUndef)
1355
1356 // If AllowUndef, treat undef as value that will result in a constant splat.
1357 if (!ElementValAndReg) {
1358 if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1359 continue;
1360 return std::nullopt;
1361 }
1362
1363 // Record splat value
1364 if (!SplatValAndReg)
1365 SplatValAndReg = ElementValAndReg;
1366
1367 // Different constant than the one already recorded, not a constant splat.
1368 if (SplatValAndReg->Value != ElementValAndReg->Value)
1369 return std::nullopt;
1370 }
1371
1372 return SplatValAndReg;
1373}
1374
1375} // end anonymous namespace
1376
1378 const MachineRegisterInfo &MRI,
1379 int64_t SplatValue, bool AllowUndef) {
1380 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1381 return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1382 return false;
1383}
1384
1386 const MachineRegisterInfo &MRI,
1387 int64_t SplatValue, bool AllowUndef) {
1388 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1389 AllowUndef);
1390}
1391
1392std::optional<APInt>
1394 if (auto SplatValAndReg =
1395 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1396 if (std::optional<ValueAndVReg> ValAndVReg =
1397 getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1398 return ValAndVReg->Value;
1399 }
1400
1401 return std::nullopt;
1402}
1403
1404std::optional<APInt>
1406 const MachineRegisterInfo &MRI) {
1407 return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1408}
1409
1410std::optional<int64_t>
1412 const MachineRegisterInfo &MRI) {
1413 if (auto SplatValAndReg =
1414 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1415 return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1416 return std::nullopt;
1417}
1418
1419std::optional<int64_t>
1421 const MachineRegisterInfo &MRI) {
1422 return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1423}
1424
1425std::optional<FPValueAndVReg>
1427 bool AllowUndef) {
1428 if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1429 return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1430 return std::nullopt;
1431}
1432
1434 const MachineRegisterInfo &MRI,
1435 bool AllowUndef) {
1436 return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1437}
1438
1440 const MachineRegisterInfo &MRI,
1441 bool AllowUndef) {
1442 return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1443}
1444
1445std::optional<RegOrConstant>
1447 unsigned Opc = MI.getOpcode();
1448 if (!isBuildVectorOp(Opc))
1449 return std::nullopt;
1450 if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1451 return RegOrConstant(*Splat);
1452 auto Reg = MI.getOperand(1).getReg();
1453 if (any_of(drop_begin(MI.operands(), 2),
1454 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1455 return std::nullopt;
1456 return RegOrConstant(Reg);
1457}
1458
1460 const MachineRegisterInfo &MRI,
1461 bool AllowFP = true,
1462 bool AllowOpaqueConstants = true) {
1463 switch (MI.getOpcode()) {
1464 case TargetOpcode::G_CONSTANT:
1465 case TargetOpcode::G_IMPLICIT_DEF:
1466 return true;
1467 case TargetOpcode::G_FCONSTANT:
1468 return AllowFP;
1469 case TargetOpcode::G_GLOBAL_VALUE:
1470 case TargetOpcode::G_FRAME_INDEX:
1471 case TargetOpcode::G_BLOCK_ADDR:
1472 case TargetOpcode::G_JUMP_TABLE:
1473 return AllowOpaqueConstants;
1474 default:
1475 return false;
1476 }
1477}
1478
1480 const MachineRegisterInfo &MRI) {
1481 Register Def = MI.getOperand(0).getReg();
1482 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1483 return true;
1484 GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1485 if (!BV)
1486 return false;
1487 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1489 getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1490 continue;
1491 return false;
1492 }
1493 return true;
1494}
1495
1497 const MachineRegisterInfo &MRI,
1498 bool AllowFP, bool AllowOpaqueConstants) {
1499 if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1500 return true;
1501
1502 if (!isBuildVectorOp(MI.getOpcode()))
1503 return false;
1504
1505 const unsigned NumOps = MI.getNumOperands();
1506 for (unsigned I = 1; I != NumOps; ++I) {
1507 const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1508 if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1509 return false;
1510 }
1511
1512 return true;
1513}
1514
1515std::optional<APInt>
1517 const MachineRegisterInfo &MRI) {
1518 Register Def = MI.getOperand(0).getReg();
1519 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1520 return C->Value;
1521 auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1522 if (!MaybeCst)
1523 return std::nullopt;
1524 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1525 return APInt(ScalarSize, *MaybeCst, true);
1526}
1527
1529 const MachineRegisterInfo &MRI, bool AllowUndefs) {
1530 switch (MI.getOpcode()) {
1531 case TargetOpcode::G_IMPLICIT_DEF:
1532 return AllowUndefs;
1533 case TargetOpcode::G_CONSTANT:
1534 return MI.getOperand(1).getCImm()->isNullValue();
1535 case TargetOpcode::G_FCONSTANT: {
1536 const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1537 return FPImm->isZero() && !FPImm->isNegative();
1538 }
1539 default:
1540 if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1541 return false;
1542 return isBuildVectorAllZeros(MI, MRI);
1543 }
1544}
1545
1547 const MachineRegisterInfo &MRI,
1548 bool AllowUndefs) {
1549 switch (MI.getOpcode()) {
1550 case TargetOpcode::G_IMPLICIT_DEF:
1551 return AllowUndefs;
1552 case TargetOpcode::G_CONSTANT:
1553 return MI.getOperand(1).getCImm()->isAllOnesValue();
1554 default:
1555 if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1556 return false;
1557 return isBuildVectorAllOnes(MI, MRI);
1558 }
1559}
1560
1562 const MachineRegisterInfo &MRI, Register Reg,
1563 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1564
1565 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1566 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1567 return Match(nullptr);
1568
1569 // TODO: Also handle fconstant
1570 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1571 return Match(Def->getOperand(1).getCImm());
1572
1573 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1574 return false;
1575
1576 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1577 Register SrcElt = Def->getOperand(I).getReg();
1578 const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1579 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1580 if (!Match(nullptr))
1581 return false;
1582 continue;
1583 }
1584
1585 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1586 !Match(SrcDef->getOperand(1).getCImm()))
1587 return false;
1588 }
1589
1590 return true;
1591}
1592
1593bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1594 bool IsFP) {
1595 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1596 case TargetLowering::UndefinedBooleanContent:
1597 return Val & 0x1;
1598 case TargetLowering::ZeroOrOneBooleanContent:
1599 return Val == 1;
1600 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1601 return Val == -1;
1602 }
1603 llvm_unreachable("Invalid boolean contents");
1604}
1605
1606bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1607 bool IsVector, bool IsFP) {
1608 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1609 case TargetLowering::UndefinedBooleanContent:
1610 return ~Val & 0x1;
1611 case TargetLowering::ZeroOrOneBooleanContent:
1612 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1613 return Val == 0;
1614 }
1615 llvm_unreachable("Invalid boolean contents");
1616}
1617
1618int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1619 bool IsFP) {
1620 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1621 case TargetLowering::UndefinedBooleanContent:
1622 case TargetLowering::ZeroOrOneBooleanContent:
1623 return 1;
1624 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1625 return -1;
1626 }
1627 llvm_unreachable("Invalid boolean contents");
1628}
1629
1632 const auto &F = MBB.getParent()->getFunction();
1633 return F.hasOptSize() || F.hasMinSize() ||
1635}
1636
1638 LostDebugLocObserver *LocObserver,
1639 SmallInstListTy &DeadInstChain) {
1640 for (MachineOperand &Op : MI.uses()) {
1641 if (Op.isReg() && Op.getReg().isVirtual())
1642 DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1643 }
1644 LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1645 DeadInstChain.remove(&MI);
1646 MI.eraseFromParent();
1647 if (LocObserver)
1648 LocObserver->checkpoint(false);
1649}
1650
1653 LostDebugLocObserver *LocObserver) {
1654 SmallInstListTy DeadInstChain;
1655 for (MachineInstr *MI : DeadInstrs)
1656 saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1657
1658 while (!DeadInstChain.empty()) {
1659 MachineInstr *Inst = DeadInstChain.pop_back_val();
1660 if (!isTriviallyDead(*Inst, MRI))
1661 continue;
1662 saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1663 }
1664}
1665
1667 LostDebugLocObserver *LocObserver) {
1668 return eraseInstrs({&MI}, MRI, LocObserver);
1669}
1670
1672 for (auto &Def : MI.defs()) {
1673 assert(Def.isReg() && "Must be a reg");
1674
1676 for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1677 MachineInstr *DbgValue = MOUse.getParent();
1678 // Ignore partially formed DBG_VALUEs.
1679 if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1680 DbgUsers.push_back(&MOUse);
1681 }
1682 }
1683
1684 if (!DbgUsers.empty()) {
1686 }
1687 }
1688}
1689
1691 switch (Opc) {
1692 case TargetOpcode::G_FABS:
1693 case TargetOpcode::G_FADD:
1694 case TargetOpcode::G_FCANONICALIZE:
1695 case TargetOpcode::G_FCEIL:
1696 case TargetOpcode::G_FCONSTANT:
1697 case TargetOpcode::G_FCOPYSIGN:
1698 case TargetOpcode::G_FCOS:
1699 case TargetOpcode::G_FDIV:
1700 case TargetOpcode::G_FEXP2:
1701 case TargetOpcode::G_FEXP:
1702 case TargetOpcode::G_FFLOOR:
1703 case TargetOpcode::G_FLOG10:
1704 case TargetOpcode::G_FLOG2:
1705 case TargetOpcode::G_FLOG:
1706 case TargetOpcode::G_FMA:
1707 case TargetOpcode::G_FMAD:
1708 case TargetOpcode::G_FMAXIMUM:
1709 case TargetOpcode::G_FMAXNUM:
1710 case TargetOpcode::G_FMAXNUM_IEEE:
1711 case TargetOpcode::G_FMINIMUM:
1712 case TargetOpcode::G_FMINNUM:
1713 case TargetOpcode::G_FMINNUM_IEEE:
1714 case TargetOpcode::G_FMUL:
1715 case TargetOpcode::G_FNEARBYINT:
1716 case TargetOpcode::G_FNEG:
1717 case TargetOpcode::G_FPEXT:
1718 case TargetOpcode::G_FPOW:
1719 case TargetOpcode::G_FPTRUNC:
1720 case TargetOpcode::G_FREM:
1721 case TargetOpcode::G_FRINT:
1722 case TargetOpcode::G_FSIN:
1723 case TargetOpcode::G_FTAN:
1724 case TargetOpcode::G_FACOS:
1725 case TargetOpcode::G_FASIN:
1726 case TargetOpcode::G_FATAN:
1727 case TargetOpcode::G_FCOSH:
1728 case TargetOpcode::G_FSINH:
1729 case TargetOpcode::G_FTANH:
1730 case TargetOpcode::G_FSQRT:
1731 case TargetOpcode::G_FSUB:
1732 case TargetOpcode::G_INTRINSIC_ROUND:
1733 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1734 case TargetOpcode::G_INTRINSIC_TRUNC:
1735 return true;
1736 default:
1737 return false;
1738 }
1739}
1740
1741/// Shifts return poison if shiftwidth is larger than the bitwidth.
1742static bool shiftAmountKnownInRange(Register ShiftAmount,
1743 const MachineRegisterInfo &MRI) {
1744 LLT Ty = MRI.getType(ShiftAmount);
1745
1746 if (Ty.isScalableVector())
1747 return false; // Can't tell, just return false to be safe
1748
1749 if (Ty.isScalar()) {
1750 std::optional<ValueAndVReg> Val =
1752 if (!Val)
1753 return false;
1754 return Val->Value.ult(Ty.getScalarSizeInBits());
1755 }
1756
1757 GBuildVector *BV = getOpcodeDef<GBuildVector>(ShiftAmount, MRI);
1758 if (!BV)
1759 return false;
1760
1761 unsigned Sources = BV->getNumSources();
1762 for (unsigned I = 0; I < Sources; ++I) {
1763 std::optional<ValueAndVReg> Val =
1765 if (!Val)
1766 return false;
1767 if (!Val->Value.ult(Ty.getScalarSizeInBits()))
1768 return false;
1769 }
1770
1771 return true;
1772}
1773
namespace {
/// Bitmask describing which "bad value" kinds an analysis query is about.
enum class UndefPoisonKind {
  PoisonOnly = (1 << 0),
  UndefOnly = (1 << 1),
  UndefAndPoison = (1 << 0) | (1 << 1),
};
} // namespace

/// Whether \p Kind includes the poison bit.
static bool includesPoison(UndefPoisonKind Kind) {
  return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
}

/// Whether \p Kind includes the undef bit.
static bool includesUndef(UndefPoisonKind Kind) {
  return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
}
1789
1791 bool ConsiderFlagsAndMetadata,
1792 UndefPoisonKind Kind) {
1793 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1794
1795 if (ConsiderFlagsAndMetadata && includesPoison(Kind))
1796 if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
1797 if (GMI->hasPoisonGeneratingFlags())
1798 return true;
1799
1800 // Check whether opcode is a poison/undef-generating operation.
1801 switch (RegDef->getOpcode()) {
1802 case TargetOpcode::G_BUILD_VECTOR:
1803 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
1804 return false;
1805 case TargetOpcode::G_SHL:
1806 case TargetOpcode::G_ASHR:
1807 case TargetOpcode::G_LSHR:
1808 return includesPoison(Kind) &&
1810 case TargetOpcode::G_FPTOSI:
1811 case TargetOpcode::G_FPTOUI:
1812 // fptosi/ui yields poison if the resulting value does not fit in the
1813 // destination type.
1814 return true;
1815 case TargetOpcode::G_CTLZ:
1816 case TargetOpcode::G_CTTZ:
1817 case TargetOpcode::G_ABS:
1818 case TargetOpcode::G_CTPOP:
1819 case TargetOpcode::G_BSWAP:
1820 case TargetOpcode::G_BITREVERSE:
1821 case TargetOpcode::G_FSHL:
1822 case TargetOpcode::G_FSHR:
1823 case TargetOpcode::G_SMAX:
1824 case TargetOpcode::G_SMIN:
1825 case TargetOpcode::G_UMAX:
1826 case TargetOpcode::G_UMIN:
1827 case TargetOpcode::G_PTRMASK:
1828 case TargetOpcode::G_SADDO:
1829 case TargetOpcode::G_SSUBO:
1830 case TargetOpcode::G_UADDO:
1831 case TargetOpcode::G_USUBO:
1832 case TargetOpcode::G_SMULO:
1833 case TargetOpcode::G_UMULO:
1834 case TargetOpcode::G_SADDSAT:
1835 case TargetOpcode::G_UADDSAT:
1836 case TargetOpcode::G_SSUBSAT:
1837 case TargetOpcode::G_USUBSAT:
1838 return false;
1839 case TargetOpcode::G_SSHLSAT:
1840 case TargetOpcode::G_USHLSAT:
1841 return includesPoison(Kind) &&
1843 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1844 GInsertVectorElement *Insert = cast<GInsertVectorElement>(RegDef);
1845 if (includesPoison(Kind)) {
1846 std::optional<ValueAndVReg> Index =
1847 getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
1848 if (!Index)
1849 return true;
1850 LLT VecTy = MRI.getType(Insert->getVectorReg());
1851 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1852 }
1853 return false;
1854 }
1855 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1856 GExtractVectorElement *Extract = cast<GExtractVectorElement>(RegDef);
1857 if (includesPoison(Kind)) {
1858 std::optional<ValueAndVReg> Index =
1860 if (!Index)
1861 return true;
1862 LLT VecTy = MRI.getType(Extract->getVectorReg());
1863 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1864 }
1865 return false;
1866 }
1867 case TargetOpcode::G_SHUFFLE_VECTOR: {
1868 GShuffleVector *Shuffle = cast<GShuffleVector>(RegDef);
1869 ArrayRef<int> Mask = Shuffle->getMask();
1870 return includesPoison(Kind) && is_contained(Mask, -1);
1871 }
1872 case TargetOpcode::G_FNEG:
1873 case TargetOpcode::G_PHI:
1874 case TargetOpcode::G_SELECT:
1875 case TargetOpcode::G_UREM:
1876 case TargetOpcode::G_SREM:
1877 case TargetOpcode::G_FREEZE:
1878 case TargetOpcode::G_ICMP:
1879 case TargetOpcode::G_FCMP:
1880 case TargetOpcode::G_FADD:
1881 case TargetOpcode::G_FSUB:
1882 case TargetOpcode::G_FMUL:
1883 case TargetOpcode::G_FDIV:
1884 case TargetOpcode::G_FREM:
1885 case TargetOpcode::G_PTR_ADD:
1886 return false;
1887 default:
1888 return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
1889 }
1890}
1891
1893 const MachineRegisterInfo &MRI,
1894 unsigned Depth,
1895 UndefPoisonKind Kind) {
1897 return false;
1898
1899 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1900
1901 switch (RegDef->getOpcode()) {
1902 case TargetOpcode::G_FREEZE:
1903 return true;
1904 case TargetOpcode::G_IMPLICIT_DEF:
1905 return !includesUndef(Kind);
1906 case TargetOpcode::G_CONSTANT:
1907 case TargetOpcode::G_FCONSTANT:
1908 return true;
1909 case TargetOpcode::G_BUILD_VECTOR: {
1910 GBuildVector *BV = cast<GBuildVector>(RegDef);
1911 unsigned NumSources = BV->getNumSources();
1912 for (unsigned I = 0; I < NumSources; ++I)
1914 Depth + 1, Kind))
1915 return false;
1916 return true;
1917 }
1918 case TargetOpcode::G_PHI: {
1919 GPhi *Phi = cast<GPhi>(RegDef);
1920 unsigned NumIncoming = Phi->getNumIncomingValues();
1921 for (unsigned I = 0; I < NumIncoming; ++I)
1922 if (!::isGuaranteedNotToBeUndefOrPoison(Phi->getIncomingValue(I), MRI,
1923 Depth + 1, Kind))
1924 return false;
1925 return true;
1926 }
1927 default: {
1928 auto MOCheck = [&](const MachineOperand &MO) {
1929 if (!MO.isReg())
1930 return true;
1931 return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,
1932 Kind);
1933 };
1934 return !::canCreateUndefOrPoison(Reg, MRI,
1935 /*ConsiderFlagsAndMetadata=*/true, Kind) &&
1936 all_of(RegDef->uses(), MOCheck);
1937 }
1938 }
1939}
1940
1942 bool ConsiderFlagsAndMetadata) {
1943 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
1945}
1946
1948 bool ConsiderFlagsAndMetadata = true) {
1949 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
1951}
1952
1954 const MachineRegisterInfo &MRI,
1955 unsigned Depth) {
1956 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1958}
1959
1961 const MachineRegisterInfo &MRI,
1962 unsigned Depth) {
1963 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1965}
1966
1968 const MachineRegisterInfo &MRI,
1969 unsigned Depth) {
1970 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1972}
1973
1975 if (Ty.isVector())
1976 return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1977 Ty.getElementCount());
1978 return IntegerType::get(C, Ty.getSizeInBits());
1979}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
basic Basic Alias true
static bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, bool ConsiderFlagsAndMetadata, UndefPoisonKind Kind)
Definition: Utils.cpp:1790
static bool isGuaranteedNotToBeUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, unsigned Depth, UndefPoisonKind Kind)
Definition: Utils.cpp:1892
static bool includesPoison(UndefPoisonKind Kind)
Definition: Utils.cpp:1782
static bool includesUndef(UndefPoisonKind Kind)
Definition: Utils.cpp:1786
static void reportGISelDiagnostic(DiagnosticSeverity Severity, MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Definition: Utils.cpp:251
static bool shiftAmountKnownInRange(Register ShiftAmount, const MachineRegisterInfo &MRI)
Shifts return poison if shiftwidth is larger than the bitwidth.
Definition: Utils.cpp:1742
bool canCreatePoison(Register Reg, const MachineRegisterInfo &MRI, bool ConsiderFlagsAndMetadata=true)
Definition: Utils.cpp:1947
static bool isBuildVectorOp(unsigned Opcode)
Definition: Utils.cpp:1328
static bool isConstantScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Definition: Utils.cpp:1459
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Size
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Tracks DebugLocs between checkpoints and verifies that they are transferred.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*-—===//
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
UndefPoisonKind
static const char PassName[]
Value * RHS
Value * LHS
BinaryOperator * Mul
Class recording the (high level) value of a variable.
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1119
void copySign(const APFloat &RHS)
Definition: APFloat.h:1213
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition: APFloat.cpp:5317
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1101
opStatus add(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1092
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition: APFloat.h:1243
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1110
APInt bitcastToAPInt() const
Definition: APFloat.h:1260
opStatus mod(const APFloat &RHS)
Definition: APFloat.h:1137
Class for arbitrary precision integers.
Definition: APInt.h:78
APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition: APInt.cpp:1543
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:981
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:1002
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:906
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition: APInt.cpp:1636
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1448
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition: APInt.cpp:1614
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:807
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1706
APInt sext(unsigned width) const
Sign extend to a new width.
Definition: APInt.cpp:954
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:420
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:831
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
const APFloat & getValueAPF() const
Definition: Constants.h:312
bool isNegative() const
Return true if the sign bit is set.
Definition: Constants.h:319
bool isZero() const
Return true if the value is positive or negative zero.
Definition: Constants.h:316
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:146
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:311
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition: TypeSize.h:317
Represents a G_BUILD_VECTOR.
Represents an extract vector element.
Abstract class that contains various methods for clients to notify about changes.
KnownBits getKnownBits(Register R)
void insert(MachineInstr *I)
Add the specified instruction to the worklist if it isn't already in it.
Definition: GISelWorkList.h:74
MachineInstr * pop_back_val()
bool empty() const
Definition: GISelWorkList.h:38
void remove(const MachineInstr *I)
Remove I from the worklist if it exists.
Definition: GISelWorkList.h:83
Represents an insert vector element.
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Represents a G_PHI.
Represents a G_SHUFFLE_VECTOR.
ArrayRef< int > getMask() const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isScalar() const
Definition: LowLevelType.h:146
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:64
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:170
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:100
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy)
Definition: LowLevelType.h:124
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFunctionProperties & set(Property P)
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
GISelChangeObserver * getObserver() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
Helper class to build MachineInstr.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:346
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:396
iterator_range< mop_iterator > uses()
Returns a range that includes all operands that are register uses.
Definition: MachineInstr.h:733
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:498
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:579
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Diagnostic information for missed-optimization remarks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Analysis providing profile information.
Represents a value which can be a Register or a constant.
Definition: Utils.h:391
Holds all the information related to register banks.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
This class implements the register bank concept.
Definition: RegisterBank.h:28
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
Target-Independent Code Generator Pass Configuration Options.
bool isGlobalISelAbortEnabled() const
Check whether or not GlobalISel should abort on error.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition: TypeSize.h:258
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition: APInt.h:2197
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition: APInt.h:2202
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2207
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition: APInt.h:2212
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
SpecificConstantMatch m_SpecificICst(int64_t RequestedValue)
Matches a constant equal to RequestedValue.
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
DiagnosticInfoMIROptimization::MachineArgument MNV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII, MCRegister PhysReg, const TargetRegisterClass &RC, const DebugLoc &DL, LLT RegTy=LLT())
Return a virtual register corresponding to the incoming argument register PhysReg.
Definition: Utils.cpp:903
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition: Utils.cpp:1433
Type * getTypeForLLT(LLT Ty, LLVMContext &C)
Get the type back from LLT.
Definition: Utils.cpp:1974
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition: Utils.cpp:56
MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
Definition: Utils.cpp:639
const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:452
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition: Utils.cpp:295
std::optional< APFloat > ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:977
std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1393
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition: Utils.cpp:1546
const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:727
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1671
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:155
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
std::optional< SmallVector< unsigned > > ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI, std::function< unsigned(APInt)> CB)
Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on Src.
Definition: Utils.cpp:990
std::optional< APInt > ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:936
std::optional< RegOrConstant > getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1446
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition: APFloat.h:1508
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
Definition: Utils.cpp:1516
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition: Utils.cpp:1528
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition: Utils.cpp:479
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
Definition: Utils.cpp:1561
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
Definition: Utils.cpp:1593
LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition: Utils.cpp:1165
std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition: Utils.cpp:307
std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:658
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
Returns true if the given block should be optimized for size.
Definition: Utils.cpp:1630
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
Definition: APFloat.h:1469
bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
Definition: Utils.cpp:1496
constexpr unsigned MaxAnalysisRecursionDepth
Definition: ValueTracking.h:48
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
Definition: Utils.cpp:201
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver, SmallInstListTy &DeadInstChain)
Definition: Utils.cpp:1637
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:275
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
std::optional< SmallVector< APInt > > ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1022
std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
Definition: Utils.cpp:432
bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition: Utils.cpp:1439
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
SmallVector< APInt > ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Tries to constant fold a vector binop with sources Op1 and Op2.
Definition: Utils.cpp:781
std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
Definition: Utils.cpp:1426
std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:953
void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
Definition: Utils.cpp:493
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition: Utils.cpp:1161
LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
Definition: Utils.cpp:1232
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
Definition: APFloat.h:1455
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
Definition: TargetOpcodes.h:36
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
Definition: Utils.cpp:440
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Definition: Utils.cpp:1606
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:645
bool isBuildVectorConstantSplat(const Register Reg, const MachineRegisterInfo &MRI, int64_t SplatValue, bool AllowUndef)
Return true if the specified register is defined by G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all ...
Definition: Utils.cpp:1377
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition: Utils.cpp:1666
DiagnosticSeverity
Defines the different supported severity of a diagnostic.
@ DS_Warning
@ DS_Error
Register constrainRegToClass(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, Register Reg, const TargetRegisterClass &RegClass)
Try to constrain Reg to the specified register class.
Definition: Utils.cpp:46
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
Definition: Utils.cpp:1618
std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition: Utils.cpp:426
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition: Utils.cpp:1690
bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI)
Returns true if Val can be assumed to never be a signaling NaN.
Definition: Utils.h:334
std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
Definition: Utils.cpp:460
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:212
void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition: Utils.cpp:1651
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI, MachineInstr &MI, ArrayRef< MachineOperand * > DbgUsers)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
Definition: Utils.cpp:486
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition: Utils.cpp:1253
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition: APFloat.h:1482
std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1411
void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
Definition: Utils.cpp:597
int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
Definition: Utils.cpp:222
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition: Utils.cpp:886
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel warning as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:269
#define MORE()
Definition: regcomp.c:252
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Simple struct used to hold a Register value and the instruction which defines it.
Definition: Utils.h:224
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition: KnownBits.h:278
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition: KnownBits.h:275
This class contains a discriminated union of information about pointers in memory operands,...
int64_t Offset
Offset - This is an offset from the base Value*.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
Simple struct used to hold a constant integer value and a virtual register.
Definition: Utils.h:183