LLVM 19.0.0git
Utils.cpp
Go to the documentation of this file.
1//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file This file implements the utility functions used by the GlobalISel
9/// pipeline.
10//===----------------------------------------------------------------------===//
11
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
33#include "llvm/IR/Constants.h"
36#include <numeric>
37#include <optional>
38
39#define DEBUG_TYPE "globalisel-utils"
40
41using namespace llvm;
42using namespace MIPatternMatch;
43
45 const TargetInstrInfo &TII,
46 const RegisterBankInfo &RBI, Register Reg,
47 const TargetRegisterClass &RegClass) {
48 if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
49 return MRI.createVirtualRegister(&RegClass);
50
51 return Reg;
52}
53
55 const MachineFunction &MF, const TargetRegisterInfo &TRI,
57 const RegisterBankInfo &RBI, MachineInstr &InsertPt,
58 const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
59 Register Reg = RegMO.getReg();
60 // Assume physical registers are properly constrained.
61 assert(Reg.isVirtual() && "PhysReg not implemented");
62
63 // Save the old register class to check whether
64 // the change notifications will be required.
65 // TODO: A better approach would be to pass
66 // the observers to constrainRegToClass().
67 auto *OldRegClass = MRI.getRegClassOrNull(Reg);
68 Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
69 // If we created a new virtual register because the class is not compatible
70 // then create a copy between the new and the old register.
71 if (ConstrainedReg != Reg) {
72 MachineBasicBlock::iterator InsertIt(&InsertPt);
73 MachineBasicBlock &MBB = *InsertPt.getParent();
74 // FIXME: The copy needs to have the classes constrained for its operands.
75 // Use operand's regbank to get the class for old register (Reg).
76 if (RegMO.isUse()) {
77 BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
78 TII.get(TargetOpcode::COPY), ConstrainedReg)
79 .addReg(Reg);
80 } else {
81 assert(RegMO.isDef() && "Must be a definition");
82 BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
83 TII.get(TargetOpcode::COPY), Reg)
84 .addReg(ConstrainedReg);
85 }
86 if (GISelChangeObserver *Observer = MF.getObserver()) {
87 Observer->changingInstr(*RegMO.getParent());
88 }
89 RegMO.setReg(ConstrainedReg);
90 if (GISelChangeObserver *Observer = MF.getObserver()) {
91 Observer->changedInstr(*RegMO.getParent());
92 }
93 } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
94 if (GISelChangeObserver *Observer = MF.getObserver()) {
95 if (!RegMO.isDef()) {
96 MachineInstr *RegDef = MRI.getVRegDef(Reg);
97 Observer->changedInstr(*RegDef);
98 }
99 Observer->changingAllUsesOfReg(MRI, Reg);
100 Observer->finishedChangingAllUsesOfReg();
101 }
102 }
103 return ConstrainedReg;
104}
105
107 const MachineFunction &MF, const TargetRegisterInfo &TRI,
109 const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
110 MachineOperand &RegMO, unsigned OpIdx) {
111 Register Reg = RegMO.getReg();
112 // Assume physical registers are properly constrained.
113 assert(Reg.isVirtual() && "PhysReg not implemented");
114
115 const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
116 // Some of the target independent instructions, like COPY, may not impose any
117 // register class constraints on some of their operands: If it's a use, we can
118 // skip constraining as the instruction defining the register would constrain
119 // it.
120
121 if (OpRC) {
122 // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
123 // can have multiple regbanks for a superclass that combine different
124 // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
125 // resolved by targets during regbankselect should not be overridden.
126 if (const auto *SubRC = TRI.getCommonSubClass(
127 OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
128 OpRC = SubRC;
129
130 OpRC = TRI.getAllocatableClass(OpRC);
131 }
132
133 if (!OpRC) {
134 assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
135 "Register class constraint is required unless either the "
136 "instruction is target independent or the operand is a use");
137 // FIXME: Just bailing out like this here could be not enough, unless we
138 // expect the users of this function to do the right thing for PHIs and
139 // COPY:
140 // v1 = COPY v0
141 // v2 = COPY v1
142 // v1 here may end up not being constrained at all. Please notice that to
143 // reproduce the issue we likely need a destination pattern of a selection
144 // rule producing such extra copies, not just an input GMIR with them as
145 // every existing target using selectImpl handles copies before calling it
146 // and they never reach this function.
147 return Reg;
148 }
149 return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
150 RegMO);
151}
152
154 const TargetInstrInfo &TII,
155 const TargetRegisterInfo &TRI,
156 const RegisterBankInfo &RBI) {
157 assert(!isPreISelGenericOpcode(I.getOpcode()) &&
158 "A selected instruction is expected");
159 MachineBasicBlock &MBB = *I.getParent();
162
163 for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
164 MachineOperand &MO = I.getOperand(OpI);
165
166 // There's nothing to be done on non-register operands.
167 if (!MO.isReg())
168 continue;
169
170 LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
171 assert(MO.isReg() && "Unsupported non-reg operand");
172
173 Register Reg = MO.getReg();
174 // Physical registers don't need to be constrained.
175 if (Reg.isPhysical())
176 continue;
177
178 // Register operands with a value of 0 (e.g. predicate operands) don't need
179 // to be constrained.
180 if (Reg == 0)
181 continue;
182
183 // If the operand is a vreg, we should constrain its regclass, and only
184 // insert COPYs if that's impossible.
185 // constrainOperandRegClass does that for us.
186 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
187
188 // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
189 // done.
190 if (MO.isUse()) {
191 int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
192 if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
193 I.tieOperands(DefIdx, OpI);
194 }
195 }
196 return true;
197}
198
201 // Give up if either DstReg or SrcReg is a physical register.
202 if (DstReg.isPhysical() || SrcReg.isPhysical())
203 return false;
204 // Give up if the types don't match.
205 if (MRI.getType(DstReg) != MRI.getType(SrcReg))
206 return false;
207 // Replace if either DstReg has no constraints or the register
208 // constraints match.
209 const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
210 if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
211 return true;
212
213 // Otherwise match if the Src is already a regclass that is covered by the Dst
214 // RegBank.
215 return DstRBC.is<const RegisterBank *>() && MRI.getRegClassOrNull(SrcReg) &&
216 DstRBC.get<const RegisterBank *>()->covers(
217 *MRI.getRegClassOrNull(SrcReg));
218}
219
221 const MachineRegisterInfo &MRI) {
222 // FIXME: This logical is mostly duplicated with
223 // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
224 // MachineInstr::isLabel?
225
226 // Don't delete frame allocation labels.
227 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
228 return false;
229 // LIFETIME markers should be preserved even if they seem dead.
230 if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
231 MI.getOpcode() == TargetOpcode::LIFETIME_END)
232 return false;
233
234 // If we can move an instruction, we can remove it. Otherwise, it has
235 // a side-effect of some sort.
236 bool SawStore = false;
237 if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
238 return false;
239
240 // Instructions without side-effects are dead iff they only define dead vregs.
241 for (const auto &MO : MI.all_defs()) {
242 Register Reg = MO.getReg();
243 if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
244 return false;
245 }
246 return true;
247}
248
250 MachineFunction &MF,
251 const TargetPassConfig &TPC,
254 bool IsFatal = Severity == DS_Error &&
256 // Print the function name explicitly if we don't have a debug location (which
257 // makes the diagnostic less useful) or if we're going to emit a raw error.
258 if (!R.getLocation().isValid() || IsFatal)
259 R << (" (in function: " + MF.getName() + ")").str();
260
261 if (IsFatal)
262 report_fatal_error(Twine(R.getMsg()));
263 else
264 MORE.emit(R);
265}
266
271}
272
276 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
277 reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
278}
279
282 const char *PassName, StringRef Msg,
283 const MachineInstr &MI) {
284 MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
285 MI.getDebugLoc(), MI.getParent());
286 R << Msg;
287 // Printing MI is expensive; only do it if expensive remarks are enabled.
288 if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
289 R << ": " << ore::MNV("Inst", MI);
290 reportGISelFailure(MF, TPC, MORE, R);
291}
292
293std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
294 const MachineRegisterInfo &MRI) {
295 std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
296 VReg, MRI, /*LookThroughInstrs*/ false);
297 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
298 "Value found while looking through instrs");
299 if (!ValAndVReg)
300 return std::nullopt;
301 return ValAndVReg->Value;
302}
303
304std::optional<int64_t>
306 std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
307 if (Val && Val->getBitWidth() <= 64)
308 return Val->getSExtValue();
309 return std::nullopt;
310}
311
312namespace {
313
314typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
315typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
316
317std::optional<ValueAndVReg> getConstantVRegValWithLookThrough(
318 Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
319 GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
320 bool LookThroughAnyExt = false) {
323
324 while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
325 LookThroughInstrs) {
326 switch (MI->getOpcode()) {
327 case TargetOpcode::G_ANYEXT:
328 if (!LookThroughAnyExt)
329 return std::nullopt;
330 [[fallthrough]];
331 case TargetOpcode::G_TRUNC:
332 case TargetOpcode::G_SEXT:
333 case TargetOpcode::G_ZEXT:
334 SeenOpcodes.push_back(std::make_pair(
335 MI->getOpcode(),
336 MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
337 VReg = MI->getOperand(1).getReg();
338 break;
339 case TargetOpcode::COPY:
340 VReg = MI->getOperand(1).getReg();
341 if (VReg.isPhysical())
342 return std::nullopt;
343 break;
344 case TargetOpcode::G_INTTOPTR:
345 VReg = MI->getOperand(1).getReg();
346 break;
347 default:
348 return std::nullopt;
349 }
350 }
351 if (!MI || !IsConstantOpcode(MI))
352 return std::nullopt;
353
354 std::optional<APInt> MaybeVal = getAPCstValue(MI);
355 if (!MaybeVal)
356 return std::nullopt;
357 APInt &Val = *MaybeVal;
358 for (auto [Opcode, Size] : reverse(SeenOpcodes)) {
359 switch (Opcode) {
360 case TargetOpcode::G_TRUNC:
361 Val = Val.trunc(Size);
362 break;
363 case TargetOpcode::G_ANYEXT:
364 case TargetOpcode::G_SEXT:
365 Val = Val.sext(Size);
366 break;
367 case TargetOpcode::G_ZEXT:
368 Val = Val.zext(Size);
369 break;
370 }
371 }
372
373 return ValueAndVReg{Val, VReg};
374}
375
376bool isIConstant(const MachineInstr *MI) {
377 if (!MI)
378 return false;
379 return MI->getOpcode() == TargetOpcode::G_CONSTANT;
380}
381
382bool isFConstant(const MachineInstr *MI) {
383 if (!MI)
384 return false;
385 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
386}
387
388bool isAnyConstant(const MachineInstr *MI) {
389 if (!MI)
390 return false;
391 unsigned Opc = MI->getOpcode();
392 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
393}
394
395std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
396 const MachineOperand &CstVal = MI->getOperand(1);
397 if (CstVal.isCImm())
398 return CstVal.getCImm()->getValue();
399 return std::nullopt;
400}
401
402std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
403 const MachineOperand &CstVal = MI->getOperand(1);
404 if (CstVal.isCImm())
405 return CstVal.getCImm()->getValue();
406 if (CstVal.isFPImm())
407 return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
408 return std::nullopt;
409}
410
411} // end anonymous namespace
412
414 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
415 return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
416 getCImmAsAPInt, LookThroughInstrs);
417}
418
420 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
421 bool LookThroughAnyExt) {
422 return getConstantVRegValWithLookThrough(
423 VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
424 LookThroughAnyExt);
425}
426
427std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
428 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
429 auto Reg = getConstantVRegValWithLookThrough(
430 VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
431 if (!Reg)
432 return std::nullopt;
434 Reg->VReg};
435}
436
437const ConstantFP *
439 MachineInstr *MI = MRI.getVRegDef(VReg);
440 if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
441 return nullptr;
442 return MI->getOperand(1).getFPImm();
443}
444
445std::optional<DefinitionAndSourceRegister>
447 Register DefSrcReg = Reg;
448 auto *DefMI = MRI.getVRegDef(Reg);
449 auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
450 if (!DstTy.isValid())
451 return std::nullopt;
452 unsigned Opc = DefMI->getOpcode();
453 while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
454 Register SrcReg = DefMI->getOperand(1).getReg();
455 auto SrcTy = MRI.getType(SrcReg);
456 if (!SrcTy.isValid())
457 break;
458 DefMI = MRI.getVRegDef(SrcReg);
459 DefSrcReg = SrcReg;
460 Opc = DefMI->getOpcode();
461 }
462 return DefinitionAndSourceRegister{DefMI, DefSrcReg};
463}
464
466 const MachineRegisterInfo &MRI) {
467 std::optional<DefinitionAndSourceRegister> DefSrcReg =
469 return DefSrcReg ? DefSrcReg->MI : nullptr;
470}
471
473 const MachineRegisterInfo &MRI) {
474 std::optional<DefinitionAndSourceRegister> DefSrcReg =
476 return DefSrcReg ? DefSrcReg->Reg : Register();
477}
478
479void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
481 MachineIRBuilder &MIRBuilder,
483 for (int i = 0; i < NumParts; ++i)
484 VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
485 MIRBuilder.buildUnmerge(VRegs, Reg);
486}
487
488bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
490 SmallVectorImpl<Register> &LeftoverRegs,
491 MachineIRBuilder &MIRBuilder,
493 assert(!LeftoverTy.isValid() && "this is an out argument");
494
495 unsigned RegSize = RegTy.getSizeInBits();
496 unsigned MainSize = MainTy.getSizeInBits();
497 unsigned NumParts = RegSize / MainSize;
498 unsigned LeftoverSize = RegSize - NumParts * MainSize;
499
500 // Use an unmerge when possible.
501 if (LeftoverSize == 0) {
502 for (unsigned I = 0; I < NumParts; ++I)
503 VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
504 MIRBuilder.buildUnmerge(VRegs, Reg);
505 return true;
506 }
507
508 // Try to use unmerge for irregular vector split where possible
509 // For example when splitting a <6 x i32> into <4 x i32> with <2 x i32>
510 // leftover, it becomes:
511 // <2 x i32> %2, <2 x i32>%3, <2 x i32> %4 = G_UNMERGE_VALUE <6 x i32> %1
512 // <4 x i32> %5 = G_CONCAT_VECTOR <2 x i32> %2, <2 x i32> %3
513 if (RegTy.isVector() && MainTy.isVector()) {
514 unsigned RegNumElts = RegTy.getNumElements();
515 unsigned MainNumElts = MainTy.getNumElements();
516 unsigned LeftoverNumElts = RegNumElts % MainNumElts;
517 // If can unmerge to LeftoverTy, do it
518 if (MainNumElts % LeftoverNumElts == 0 &&
519 RegNumElts % LeftoverNumElts == 0 &&
520 RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
521 LeftoverNumElts > 1) {
522 LeftoverTy =
523 LLT::fixed_vector(LeftoverNumElts, RegTy.getScalarSizeInBits());
524
525 // Unmerge the SrcReg to LeftoverTy vectors
526 SmallVector<Register, 4> UnmergeValues;
527 extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
528 MIRBuilder, MRI);
529
530 // Find how many LeftoverTy makes one MainTy
531 unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
532 unsigned NumOfLeftoverVal =
533 ((RegNumElts % MainNumElts) / LeftoverNumElts);
534
535 // Create as many MainTy as possible using unmerged value
536 SmallVector<Register, 4> MergeValues;
537 for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
538 MergeValues.push_back(UnmergeValues[I]);
539 if (MergeValues.size() == LeftoverPerMain) {
540 VRegs.push_back(
541 MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
542 MergeValues.clear();
543 }
544 }
545 // Populate LeftoverRegs with the leftovers
546 for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
547 I < UnmergeValues.size(); I++) {
548 LeftoverRegs.push_back(UnmergeValues[I]);
549 }
550 return true;
551 }
552 }
553 // Perform irregular split. Leftover is last element of RegPieces.
554 if (MainTy.isVector()) {
555 SmallVector<Register, 8> RegPieces;
556 extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
557 MRI);
558 for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
559 VRegs.push_back(RegPieces[i]);
560 LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
561 LeftoverTy = MRI.getType(LeftoverRegs[0]);
562 return true;
563 }
564
565 LeftoverTy = LLT::scalar(LeftoverSize);
566 // For irregular sizes, extract the individual parts.
567 for (unsigned I = 0; I != NumParts; ++I) {
568 Register NewReg = MRI.createGenericVirtualRegister(MainTy);
569 VRegs.push_back(NewReg);
570 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
571 }
572
573 for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
574 Offset += LeftoverSize) {
575 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
576 LeftoverRegs.push_back(NewReg);
577 MIRBuilder.buildExtract(NewReg, Reg, Offset);
578 }
579
580 return true;
581}
582
583void llvm::extractVectorParts(Register Reg, unsigned NumElts,
585 MachineIRBuilder &MIRBuilder,
587 LLT RegTy = MRI.getType(Reg);
588 assert(RegTy.isVector() && "Expected a vector type");
589
590 LLT EltTy = RegTy.getElementType();
591 LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
592 unsigned RegNumElts = RegTy.getNumElements();
593 unsigned LeftoverNumElts = RegNumElts % NumElts;
594 unsigned NumNarrowTyPieces = RegNumElts / NumElts;
595
596 // Perfect split without leftover
597 if (LeftoverNumElts == 0)
598 return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
599 MRI);
600
601 // Irregular split. Provide direct access to all elements for artifact
602 // combiner using unmerge to elements. Then build vectors with NumElts
603 // elements. Remaining element(s) will be (used to build vector) Leftover.
605 extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);
606
607 unsigned Offset = 0;
608 // Requested sub-vectors of NarrowTy.
609 for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
610 ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
611 VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
612 }
613
614 // Leftover element(s).
615 if (LeftoverNumElts == 1) {
616 VRegs.push_back(Elts[Offset]);
617 } else {
618 LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
619 ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
620 VRegs.push_back(
621 MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
622 }
623}
624
626 const MachineRegisterInfo &MRI) {
628 return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
629}
630
631APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
632 if (Size == 32)
633 return APFloat(float(Val));
634 if (Size == 64)
635 return APFloat(Val);
636 if (Size != 16)
637 llvm_unreachable("Unsupported FPConstant size");
638 bool Ignored;
639 APFloat APF(Val);
640 APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
641 return APF;
642}
643
644std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
645 const Register Op1,
646 const Register Op2,
647 const MachineRegisterInfo &MRI) {
648 auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
649 if (!MaybeOp2Cst)
650 return std::nullopt;
651
652 auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
653 if (!MaybeOp1Cst)
654 return std::nullopt;
655
656 const APInt &C1 = MaybeOp1Cst->Value;
657 const APInt &C2 = MaybeOp2Cst->Value;
658 switch (Opcode) {
659 default:
660 break;
661 case TargetOpcode::G_ADD:
662 return C1 + C2;
663 case TargetOpcode::G_PTR_ADD:
664 // Types can be of different width here.
665 // Result needs to be the same width as C1, so trunc or sext C2.
666 return C1 + C2.sextOrTrunc(C1.getBitWidth());
667 case TargetOpcode::G_AND:
668 return C1 & C2;
669 case TargetOpcode::G_ASHR:
670 return C1.ashr(C2);
671 case TargetOpcode::G_LSHR:
672 return C1.lshr(C2);
673 case TargetOpcode::G_MUL:
674 return C1 * C2;
675 case TargetOpcode::G_OR:
676 return C1 | C2;
677 case TargetOpcode::G_SHL:
678 return C1 << C2;
679 case TargetOpcode::G_SUB:
680 return C1 - C2;
681 case TargetOpcode::G_XOR:
682 return C1 ^ C2;
683 case TargetOpcode::G_UDIV:
684 if (!C2.getBoolValue())
685 break;
686 return C1.udiv(C2);
687 case TargetOpcode::G_SDIV:
688 if (!C2.getBoolValue())
689 break;
690 return C1.sdiv(C2);
691 case TargetOpcode::G_UREM:
692 if (!C2.getBoolValue())
693 break;
694 return C1.urem(C2);
695 case TargetOpcode::G_SREM:
696 if (!C2.getBoolValue())
697 break;
698 return C1.srem(C2);
699 case TargetOpcode::G_SMIN:
700 return APIntOps::smin(C1, C2);
701 case TargetOpcode::G_SMAX:
702 return APIntOps::smax(C1, C2);
703 case TargetOpcode::G_UMIN:
704 return APIntOps::umin(C1, C2);
705 case TargetOpcode::G_UMAX:
706 return APIntOps::umax(C1, C2);
707 }
708
709 return std::nullopt;
710}
711
712std::optional<APFloat>
713llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
714 const Register Op2, const MachineRegisterInfo &MRI) {
715 const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
716 if (!Op2Cst)
717 return std::nullopt;
718
719 const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
720 if (!Op1Cst)
721 return std::nullopt;
722
723 APFloat C1 = Op1Cst->getValueAPF();
724 const APFloat &C2 = Op2Cst->getValueAPF();
725 switch (Opcode) {
726 case TargetOpcode::G_FADD:
727 C1.add(C2, APFloat::rmNearestTiesToEven);
728 return C1;
729 case TargetOpcode::G_FSUB:
730 C1.subtract(C2, APFloat::rmNearestTiesToEven);
731 return C1;
732 case TargetOpcode::G_FMUL:
733 C1.multiply(C2, APFloat::rmNearestTiesToEven);
734 return C1;
735 case TargetOpcode::G_FDIV:
736 C1.divide(C2, APFloat::rmNearestTiesToEven);
737 return C1;
738 case TargetOpcode::G_FREM:
739 C1.mod(C2);
740 return C1;
741 case TargetOpcode::G_FCOPYSIGN:
742 C1.copySign(C2);
743 return C1;
744 case TargetOpcode::G_FMINNUM:
745 return minnum(C1, C2);
746 case TargetOpcode::G_FMAXNUM:
747 return maxnum(C1, C2);
748 case TargetOpcode::G_FMINIMUM:
749 return minimum(C1, C2);
750 case TargetOpcode::G_FMAXIMUM:
751 return maximum(C1, C2);
752 case TargetOpcode::G_FMINNUM_IEEE:
753 case TargetOpcode::G_FMAXNUM_IEEE:
754 // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
755 // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
756 // and currently there isn't a nice wrapper in APFloat for the version with
757 // correct snan handling.
758 break;
759 default:
760 break;
761 }
762
763 return std::nullopt;
764}
765
767llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
768 const Register Op2,
769 const MachineRegisterInfo &MRI) {
770 auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
771 if (!SrcVec2)
772 return SmallVector<APInt>();
773
774 auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
775 if (!SrcVec1)
776 return SmallVector<APInt>();
777
778 SmallVector<APInt> FoldedElements;
779 for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
780 auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
781 SrcVec2->getSourceReg(Idx), MRI);
782 if (!MaybeCst)
783 return SmallVector<APInt>();
784 FoldedElements.push_back(*MaybeCst);
785 }
786 return FoldedElements;
787}
788
790 bool SNaN) {
791 const MachineInstr *DefMI = MRI.getVRegDef(Val);
792 if (!DefMI)
793 return false;
794
795 const TargetMachine& TM = DefMI->getMF()->getTarget();
796 if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
797 return true;
798
799 // If the value is a constant, we can obviously see if it is a NaN or not.
800 if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
801 return !FPVal->getValueAPF().isNaN() ||
802 (SNaN && !FPVal->getValueAPF().isSignaling());
803 }
804
805 if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
806 for (const auto &Op : DefMI->uses())
807 if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
808 return false;
809 return true;
810 }
811
812 switch (DefMI->getOpcode()) {
813 default:
814 break;
815 case TargetOpcode::G_FADD:
816 case TargetOpcode::G_FSUB:
817 case TargetOpcode::G_FMUL:
818 case TargetOpcode::G_FDIV:
819 case TargetOpcode::G_FREM:
820 case TargetOpcode::G_FSIN:
821 case TargetOpcode::G_FCOS:
822 case TargetOpcode::G_FMA:
823 case TargetOpcode::G_FMAD:
824 if (SNaN)
825 return true;
826
827 // TODO: Need isKnownNeverInfinity
828 return false;
829 case TargetOpcode::G_FMINNUM_IEEE:
830 case TargetOpcode::G_FMAXNUM_IEEE: {
831 if (SNaN)
832 return true;
833 // This can return a NaN if either operand is an sNaN, or if both operands
834 // are NaN.
835 return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
839 }
840 case TargetOpcode::G_FMINNUM:
841 case TargetOpcode::G_FMAXNUM: {
842 // Only one needs to be known not-nan, since it will be returned if the
843 // other ends up being one.
844 return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
846 }
847 }
848
849 if (SNaN) {
850 // FP operations quiet. For now, just handle the ones inserted during
851 // legalization.
852 switch (DefMI->getOpcode()) {
853 case TargetOpcode::G_FPEXT:
854 case TargetOpcode::G_FPTRUNC:
855 case TargetOpcode::G_FCANONICALIZE:
856 return true;
857 default:
858 return false;
859 }
860 }
861
862 return false;
863}
864
866 const MachinePointerInfo &MPO) {
867 auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
868 if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
869 MachineFrameInfo &MFI = MF.getFrameInfo();
870 return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
871 MPO.Offset);
872 }
873
874 if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
875 const Module *M = MF.getFunction().getParent();
876 return V->getPointerAlignment(M->getDataLayout());
877 }
878
879 return Align(1);
880}
881
883 const TargetInstrInfo &TII,
884 MCRegister PhysReg,
885 const TargetRegisterClass &RC,
886 const DebugLoc &DL, LLT RegTy) {
887 MachineBasicBlock &EntryMBB = MF.front();
889 Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
890 if (LiveIn) {
891 MachineInstr *Def = MRI.getVRegDef(LiveIn);
892 if (Def) {
893 // FIXME: Should the verifier check this is in the entry block?
894 assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
895 return LiveIn;
896 }
897
898 // It's possible the incoming argument register and copy was added during
899 // lowering, but later deleted due to being/becoming dead. If this happens,
900 // re-insert the copy.
901 } else {
902 // The live in register was not present, so add it.
903 LiveIn = MF.addLiveIn(PhysReg, &RC);
904 if (RegTy.isValid())
905 MRI.setType(LiveIn, RegTy);
906 }
907
908 BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
909 .addReg(PhysReg);
910 if (!EntryMBB.isLiveIn(PhysReg))
911 EntryMBB.addLiveIn(PhysReg);
912 return LiveIn;
913}
914
915std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
916 const Register Op1, uint64_t Imm,
917 const MachineRegisterInfo &MRI) {
918 auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
919 if (MaybeOp1Cst) {
920 switch (Opcode) {
921 default:
922 break;
923 case TargetOpcode::G_SEXT_INREG: {
924 LLT Ty = MRI.getType(Op1);
925 return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
926 }
927 }
928 }
929 return std::nullopt;
930}
931
932std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
933 const Register Op0,
934 const MachineRegisterInfo &MRI) {
935 std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
936 if (!Val)
937 return Val;
938
939 const unsigned DstSize = DstTy.getScalarSizeInBits();
940
941 switch (Opcode) {
942 case TargetOpcode::G_SEXT:
943 return Val->sext(DstSize);
944 case TargetOpcode::G_ZEXT:
945 case TargetOpcode::G_ANYEXT:
946 // TODO: DAG considers target preference when constant folding any_extend.
947 return Val->zext(DstSize);
948 default:
949 break;
950 }
951
952 llvm_unreachable("unexpected cast opcode to constant fold");
953}
954
955std::optional<APFloat>
956llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
957 const MachineRegisterInfo &MRI) {
958 assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
959 if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
960 APFloat DstVal(getFltSemanticForLLT(DstTy));
961 DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
962 APFloat::rmNearestTiesToEven);
963 return DstVal;
964 }
965 return std::nullopt;
966}
967
968std::optional<SmallVector<unsigned>>
970 std::function<unsigned(APInt)> CB) {
971 LLT Ty = MRI.getType(Src);
972 SmallVector<unsigned> FoldedCTLZs;
973 auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
974 auto MaybeCst = getIConstantVRegVal(R, MRI);
975 if (!MaybeCst)
976 return std::nullopt;
977 return CB(*MaybeCst);
978 };
979 if (Ty.isVector()) {
980 // Try to constant fold each element.
981 auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
982 if (!BV)
983 return std::nullopt;
984 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
985 if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
986 FoldedCTLZs.emplace_back(*MaybeFold);
987 continue;
988 }
989 return std::nullopt;
990 }
991 return FoldedCTLZs;
992 }
993 if (auto MaybeCst = tryFoldScalar(Src)) {
994 FoldedCTLZs.emplace_back(*MaybeCst);
995 return FoldedCTLZs;
996 }
997 return std::nullopt;
998}
999
1000std::optional<SmallVector<APInt>>
1001llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
1002 const MachineRegisterInfo &MRI) {
1003 LLT Ty = MRI.getType(Op1);
1004 if (Ty != MRI.getType(Op2))
1005 return std::nullopt;
1006
1007 auto TryFoldScalar = [&MRI, Pred](Register LHS,
1008 Register RHS) -> std::optional<APInt> {
1009 auto LHSCst = getIConstantVRegVal(LHS, MRI);
1010 auto RHSCst = getIConstantVRegVal(RHS, MRI);
1011 if (!LHSCst || !RHSCst)
1012 return std::nullopt;
1013
1014 switch (Pred) {
1015 case CmpInst::Predicate::ICMP_EQ:
1016 return APInt(/*numBits=*/1, LHSCst->eq(*RHSCst));
1017 case CmpInst::Predicate::ICMP_NE:
1018 return APInt(/*numBits=*/1, LHSCst->ne(*RHSCst));
1019 case CmpInst::Predicate::ICMP_UGT:
1020 return APInt(/*numBits=*/1, LHSCst->ugt(*RHSCst));
1021 case CmpInst::Predicate::ICMP_UGE:
1022 return APInt(/*numBits=*/1, LHSCst->uge(*RHSCst));
1023 case CmpInst::Predicate::ICMP_ULT:
1024 return APInt(/*numBits=*/1, LHSCst->ult(*RHSCst));
1025 case CmpInst::Predicate::ICMP_ULE:
1026 return APInt(/*numBits=*/1, LHSCst->ule(*RHSCst));
1027 case CmpInst::Predicate::ICMP_SGT:
1028 return APInt(/*numBits=*/1, LHSCst->sgt(*RHSCst));
1029 case CmpInst::Predicate::ICMP_SGE:
1030 return APInt(/*numBits=*/1, LHSCst->sge(*RHSCst));
1031 case CmpInst::Predicate::ICMP_SLT:
1032 return APInt(/*numBits=*/1, LHSCst->slt(*RHSCst));
1033 case CmpInst::Predicate::ICMP_SLE:
1034 return APInt(/*numBits=*/1, LHSCst->sle(*RHSCst));
1035 default:
1036 return std::nullopt;
1037 }
1038 };
1039
1040 SmallVector<APInt> FoldedICmps;
1041
1042 if (Ty.isVector()) {
1043 // Try to constant fold each element.
1044 auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
1045 auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
1046 if (!BV1 || !BV2)
1047 return std::nullopt;
1048 assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
1049 for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
1050 if (auto MaybeFold =
1051 TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
1052 FoldedICmps.emplace_back(*MaybeFold);
1053 continue;
1054 }
1055 return std::nullopt;
1056 }
1057 return FoldedICmps;
1058 }
1059
1060 if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
1061 FoldedICmps.emplace_back(*MaybeCst);
1062 return FoldedICmps;
1063 }
1064
1065 return std::nullopt;
1066}
1067
1069 GISelKnownBits *KB) {
1070 std::optional<DefinitionAndSourceRegister> DefSrcReg =
1072 if (!DefSrcReg)
1073 return false;
1074
1075 const MachineInstr &MI = *DefSrcReg->MI;
1076 const LLT Ty = MRI.getType(Reg);
1077
1078 switch (MI.getOpcode()) {
1079 case TargetOpcode::G_CONSTANT: {
1080 unsigned BitWidth = Ty.getScalarSizeInBits();
1081 const ConstantInt *CI = MI.getOperand(1).getCImm();
1082 return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
1083 }
1084 case TargetOpcode::G_SHL: {
1085 // A left-shift of a constant one will have exactly one bit set because
1086 // shifting the bit off the end is undefined.
1087
1088 // TODO: Constant splat
1089 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1090 if (*ConstLHS == 1)
1091 return true;
1092 }
1093
1094 break;
1095 }
1096 case TargetOpcode::G_LSHR: {
1097 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1098 if (ConstLHS->isSignMask())
1099 return true;
1100 }
1101
1102 break;
1103 }
1104 case TargetOpcode::G_BUILD_VECTOR: {
1105 // TODO: Probably should have a recursion depth guard since you could have
1106 // bitcasted vector elements.
1107 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
1108 if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
1109 return false;
1110
1111 return true;
1112 }
1113 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1114 // Only handle constants since we would need to know if number of leading
1115 // zeros is greater than the truncation amount.
1116 const unsigned BitWidth = Ty.getScalarSizeInBits();
1117 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
1118 auto Const = getIConstantVRegVal(MO.getReg(), MRI);
1119 if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
1120 return false;
1121 }
1122
1123 return true;
1124 }
1125 default:
1126 break;
1127 }
1128
1129 if (!KB)
1130 return false;
1131
1132 // More could be done here, though the above checks are enough
1133 // to handle some common cases.
1134
1135 // Fall back to computeKnownBits to catch other known cases.
1136 KnownBits Known = KB->getKnownBits(Reg);
1137 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
1138}
1139
1142}
1143
1144LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
1145 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1146 return OrigTy;
1147
1148 if (OrigTy.isVector() && TargetTy.isVector()) {
1149 LLT OrigElt = OrigTy.getElementType();
1150 LLT TargetElt = TargetTy.getElementType();
1151
1152 // TODO: The docstring for this function says the intention is to use this
1153 // function to build MERGE/UNMERGE instructions. It won't be the case that
1154 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1155 // could implement getLCMType between the two in the future if there was a
1156 // need, but it is not worth it now as this function should not be used in
1157 // that way.
1158 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1159 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1160 "getLCMType not implemented between fixed and scalable vectors.");
1161
1162 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
1163 int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
1164 TargetTy.getElementCount().getKnownMinValue());
1165 // Prefer the original element type.
1167 TargetTy.getElementCount().getKnownMinValue());
1168 return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
1169 OrigTy.getElementType());
1170 }
1171 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
1172 TargetTy.getSizeInBits().getKnownMinValue());
1173 return LLT::vector(
1174 ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
1175 OrigElt);
1176 }
1177
1178 // One type is scalar, one type is vector
1179 if (OrigTy.isVector() || TargetTy.isVector()) {
1180 LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
1181 LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
1182 LLT EltTy = VecTy.getElementType();
1183 LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;
1184
1185 // Prefer scalar type from OrigTy.
1186 if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
1187 return LLT::vector(VecTy.getElementCount(), OrigEltTy);
1188
1189 // Different size scalars. Create vector with the same total size.
1190 // LCM will take fixed/scalable from VecTy.
1191 unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
1193 ScalarTy.getSizeInBits().getFixedValue());
1194 // Prefer type from OrigTy
1195 return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
1196 VecTy.getElementCount().isScalable()),
1197 OrigEltTy);
1198 }
1199
1200 // At this point, both types are scalars of different size
1201 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
1202 TargetTy.getSizeInBits().getFixedValue());
1203 // Preserve pointer types.
1204 if (LCM == OrigTy.getSizeInBits())
1205 return OrigTy;
1206 if (LCM == TargetTy.getSizeInBits())
1207 return TargetTy;
1208 return LLT::scalar(LCM);
1209}
1210
1211LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
1212
1213 if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
1214 (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
1216 "getCoverTy not implemented between fixed and scalable vectors.");
1217
1218 if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
1219 (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
1220 return getLCMType(OrigTy, TargetTy);
1221
1222 unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
1223 unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
1224 if (OrigTyNumElts % TargetTyNumElts == 0)
1225 return OrigTy;
1226
1227 unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
1229 OrigTy.getElementType());
1230}
1231
1232LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
1233 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1234 return OrigTy;
1235
1236 if (OrigTy.isVector() && TargetTy.isVector()) {
1237 LLT OrigElt = OrigTy.getElementType();
1238
1239 // TODO: The docstring for this function says the intention is to use this
1240 // function to build MERGE/UNMERGE instructions. It won't be the case that
1241 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1242 // could implement getGCDType between the two in the future if there was a
1243 // need, but it is not worth it now as this function should not be used in
1244 // that way.
1245 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1246 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1247 "getGCDType not implemented between fixed and scalable vectors.");
1248
1249 unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
1250 TargetTy.getSizeInBits().getKnownMinValue());
1251 if (GCD == OrigElt.getSizeInBits())
1253 OrigElt);
1254
1255 // Cannot produce original element type, but both have vscale in common.
1256 if (GCD < OrigElt.getSizeInBits())
1258 GCD);
1259
1260 return LLT::vector(
1262 OrigTy.isScalable()),
1263 OrigElt);
1264 }
1265
1266 // If one type is vector and the element size matches the scalar size, then
1267 // the gcd is the scalar type.
1268 if (OrigTy.isVector() &&
1269 OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
1270 return OrigTy.getElementType();
1271 if (TargetTy.isVector() &&
1272 TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
1273 return OrigTy;
1274
1275 // At this point, both types are either scalars of different type or one is a
1276 // vector and one is a scalar. If both types are scalars, the GCD type is the
1277 // GCD between the two scalar sizes. If one is vector and one is scalar, then
1278 // the GCD type is the GCD between the scalar and the vector element size.
1279 LLT OrigScalar = OrigTy.getScalarType();
1280 LLT TargetScalar = TargetTy.getScalarType();
1281 unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
1282 TargetScalar.getSizeInBits().getFixedValue());
1283 return LLT::scalar(GCD);
1284}
1285
1287 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1288 "Only G_SHUFFLE_VECTOR can have a splat index!");
1289 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1290 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1291
1292 // If all elements are undefined, this shuffle can be considered a splat.
1293 // Return 0 for better potential for callers to simplify.
1294 if (FirstDefinedIdx == Mask.end())
1295 return 0;
1296
1297 // Make sure all remaining elements are either undef or the same
1298 // as the first non-undef value.
1299 int SplatValue = *FirstDefinedIdx;
1300 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1301 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1302 return std::nullopt;
1303
1304 return SplatValue;
1305}
1306
1307static bool isBuildVectorOp(unsigned Opcode) {
1308 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1309 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1310}
1311
1312namespace {
1313
1314std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1315 const MachineRegisterInfo &MRI,
1316 bool AllowUndef) {
1318 if (!MI)
1319 return std::nullopt;
1320
1321 bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1322 if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
1323 return std::nullopt;
1324
1325 std::optional<ValueAndVReg> SplatValAndReg;
1326 for (MachineOperand &Op : MI->uses()) {
1327 Register Element = Op.getReg();
1328 // If we have a G_CONCAT_VECTOR, we recursively look into the
1329 // vectors that we're concatenating to see if they're splats.
1330 auto ElementValAndReg =
1331 isConcatVectorsOp
1332 ? getAnyConstantSplat(Element, MRI, AllowUndef)
1334
1335 // If AllowUndef, treat undef as value that will result in a constant splat.
1336 if (!ElementValAndReg) {
1337 if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1338 continue;
1339 return std::nullopt;
1340 }
1341
1342 // Record splat value
1343 if (!SplatValAndReg)
1344 SplatValAndReg = ElementValAndReg;
1345
1346 // Different constant than the one already recorded, not a constant splat.
1347 if (SplatValAndReg->Value != ElementValAndReg->Value)
1348 return std::nullopt;
1349 }
1350
1351 return SplatValAndReg;
1352}
1353
1354} // end anonymous namespace
1355
1357 const MachineRegisterInfo &MRI,
1358 int64_t SplatValue, bool AllowUndef) {
1359 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1360 return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1361 return false;
1362}
1363
1365 const MachineRegisterInfo &MRI,
1366 int64_t SplatValue, bool AllowUndef) {
1367 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1368 AllowUndef);
1369}
1370
1371std::optional<APInt>
1373 if (auto SplatValAndReg =
1374 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1375 if (std::optional<ValueAndVReg> ValAndVReg =
1376 getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1377 return ValAndVReg->Value;
1378 }
1379
1380 return std::nullopt;
1381}
1382
1383std::optional<APInt>
1385 const MachineRegisterInfo &MRI) {
1386 return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1387}
1388
1389std::optional<int64_t>
1391 const MachineRegisterInfo &MRI) {
1392 if (auto SplatValAndReg =
1393 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1394 return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1395 return std::nullopt;
1396}
1397
1398std::optional<int64_t>
1400 const MachineRegisterInfo &MRI) {
1401 return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1402}
1403
1404std::optional<FPValueAndVReg>
1406 bool AllowUndef) {
1407 if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1408 return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1409 return std::nullopt;
1410}
1411
1413 const MachineRegisterInfo &MRI,
1414 bool AllowUndef) {
1415 return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1416}
1417
1419 const MachineRegisterInfo &MRI,
1420 bool AllowUndef) {
1421 return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1422}
1423
1424std::optional<RegOrConstant>
1426 unsigned Opc = MI.getOpcode();
1427 if (!isBuildVectorOp(Opc))
1428 return std::nullopt;
1429 if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1430 return RegOrConstant(*Splat);
1431 auto Reg = MI.getOperand(1).getReg();
1432 if (any_of(drop_begin(MI.operands(), 2),
1433 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1434 return std::nullopt;
1435 return RegOrConstant(Reg);
1436}
1437
1439 const MachineRegisterInfo &MRI,
1440 bool AllowFP = true,
1441 bool AllowOpaqueConstants = true) {
1442 switch (MI.getOpcode()) {
1443 case TargetOpcode::G_CONSTANT:
1444 case TargetOpcode::G_IMPLICIT_DEF:
1445 return true;
1446 case TargetOpcode::G_FCONSTANT:
1447 return AllowFP;
1448 case TargetOpcode::G_GLOBAL_VALUE:
1449 case TargetOpcode::G_FRAME_INDEX:
1450 case TargetOpcode::G_BLOCK_ADDR:
1451 case TargetOpcode::G_JUMP_TABLE:
1452 return AllowOpaqueConstants;
1453 default:
1454 return false;
1455 }
1456}
1457
1459 const MachineRegisterInfo &MRI) {
1460 Register Def = MI.getOperand(0).getReg();
1461 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1462 return true;
1463 GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1464 if (!BV)
1465 return false;
1466 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1468 getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1469 continue;
1470 return false;
1471 }
1472 return true;
1473}
1474
1476 const MachineRegisterInfo &MRI,
1477 bool AllowFP, bool AllowOpaqueConstants) {
1478 if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1479 return true;
1480
1481 if (!isBuildVectorOp(MI.getOpcode()))
1482 return false;
1483
1484 const unsigned NumOps = MI.getNumOperands();
1485 for (unsigned I = 1; I != NumOps; ++I) {
1486 const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1487 if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1488 return false;
1489 }
1490
1491 return true;
1492}
1493
1494std::optional<APInt>
1496 const MachineRegisterInfo &MRI) {
1497 Register Def = MI.getOperand(0).getReg();
1498 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1499 return C->Value;
1500 auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1501 if (!MaybeCst)
1502 return std::nullopt;
1503 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1504 return APInt(ScalarSize, *MaybeCst, true);
1505}
1506
1508 const MachineRegisterInfo &MRI, bool AllowUndefs) {
1509 switch (MI.getOpcode()) {
1510 case TargetOpcode::G_IMPLICIT_DEF:
1511 return AllowUndefs;
1512 case TargetOpcode::G_CONSTANT:
1513 return MI.getOperand(1).getCImm()->isNullValue();
1514 case TargetOpcode::G_FCONSTANT: {
1515 const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1516 return FPImm->isZero() && !FPImm->isNegative();
1517 }
1518 default:
1519 if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1520 return false;
1521 return isBuildVectorAllZeros(MI, MRI);
1522 }
1523}
1524
1526 const MachineRegisterInfo &MRI,
1527 bool AllowUndefs) {
1528 switch (MI.getOpcode()) {
1529 case TargetOpcode::G_IMPLICIT_DEF:
1530 return AllowUndefs;
1531 case TargetOpcode::G_CONSTANT:
1532 return MI.getOperand(1).getCImm()->isAllOnesValue();
1533 default:
1534 if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1535 return false;
1536 return isBuildVectorAllOnes(MI, MRI);
1537 }
1538}
1539
1541 const MachineRegisterInfo &MRI, Register Reg,
1542 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1543
1544 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1545 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1546 return Match(nullptr);
1547
1548 // TODO: Also handle fconstant
1549 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1550 return Match(Def->getOperand(1).getCImm());
1551
1552 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1553 return false;
1554
1555 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1556 Register SrcElt = Def->getOperand(I).getReg();
1557 const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1558 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1559 if (!Match(nullptr))
1560 return false;
1561 continue;
1562 }
1563
1564 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1565 !Match(SrcDef->getOperand(1).getCImm()))
1566 return false;
1567 }
1568
1569 return true;
1570}
1571
1572bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1573 bool IsFP) {
1574 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1575 case TargetLowering::UndefinedBooleanContent:
1576 return Val & 0x1;
1577 case TargetLowering::ZeroOrOneBooleanContent:
1578 return Val == 1;
1579 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1580 return Val == -1;
1581 }
1582 llvm_unreachable("Invalid boolean contents");
1583}
1584
1585bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1586 bool IsVector, bool IsFP) {
1587 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1588 case TargetLowering::UndefinedBooleanContent:
1589 return ~Val & 0x1;
1590 case TargetLowering::ZeroOrOneBooleanContent:
1591 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1592 return Val == 0;
1593 }
1594 llvm_unreachable("Invalid boolean contents");
1595}
1596
1597int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1598 bool IsFP) {
1599 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1600 case TargetLowering::UndefinedBooleanContent:
1601 case TargetLowering::ZeroOrOneBooleanContent:
1602 return 1;
1603 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1604 return -1;
1605 }
1606 llvm_unreachable("Invalid boolean contents");
1607}
1608
1611 const auto &F = MBB.getParent()->getFunction();
1612 return F.hasOptSize() || F.hasMinSize() ||
1614}
1615
1617 LostDebugLocObserver *LocObserver,
1618 SmallInstListTy &DeadInstChain) {
1619 for (MachineOperand &Op : MI.uses()) {
1620 if (Op.isReg() && Op.getReg().isVirtual())
1621 DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1622 }
1623 LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1624 DeadInstChain.remove(&MI);
1625 MI.eraseFromParent();
1626 if (LocObserver)
1627 LocObserver->checkpoint(false);
1628}
1629
1632 LostDebugLocObserver *LocObserver) {
1633 SmallInstListTy DeadInstChain;
1634 for (MachineInstr *MI : DeadInstrs)
1635 saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1636
1637 while (!DeadInstChain.empty()) {
1638 MachineInstr *Inst = DeadInstChain.pop_back_val();
1639 if (!isTriviallyDead(*Inst, MRI))
1640 continue;
1641 saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1642 }
1643}
1644
1646 LostDebugLocObserver *LocObserver) {
1647 return eraseInstrs({&MI}, MRI, LocObserver);
1648}
1649
1651 for (auto &Def : MI.defs()) {
1652 assert(Def.isReg() && "Must be a reg");
1653
1655 for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1656 MachineInstr *DbgValue = MOUse.getParent();
1657 // Ignore partially formed DBG_VALUEs.
1658 if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1659 DbgUsers.push_back(&MOUse);
1660 }
1661 }
1662
1663 if (!DbgUsers.empty()) {
1665 }
1666 }
1667}
1668
1670 switch (Opc) {
1671 case TargetOpcode::G_FABS:
1672 case TargetOpcode::G_FADD:
1673 case TargetOpcode::G_FCANONICALIZE:
1674 case TargetOpcode::G_FCEIL:
1675 case TargetOpcode::G_FCONSTANT:
1676 case TargetOpcode::G_FCOPYSIGN:
1677 case TargetOpcode::G_FCOS:
1678 case TargetOpcode::G_FDIV:
1679 case TargetOpcode::G_FEXP2:
1680 case TargetOpcode::G_FEXP:
1681 case TargetOpcode::G_FFLOOR:
1682 case TargetOpcode::G_FLOG10:
1683 case TargetOpcode::G_FLOG2:
1684 case TargetOpcode::G_FLOG:
1685 case TargetOpcode::G_FMA:
1686 case TargetOpcode::G_FMAD:
1687 case TargetOpcode::G_FMAXIMUM:
1688 case TargetOpcode::G_FMAXNUM:
1689 case TargetOpcode::G_FMAXNUM_IEEE:
1690 case TargetOpcode::G_FMINIMUM:
1691 case TargetOpcode::G_FMINNUM:
1692 case TargetOpcode::G_FMINNUM_IEEE:
1693 case TargetOpcode::G_FMUL:
1694 case TargetOpcode::G_FNEARBYINT:
1695 case TargetOpcode::G_FNEG:
1696 case TargetOpcode::G_FPEXT:
1697 case TargetOpcode::G_FPOW:
1698 case TargetOpcode::G_FPTRUNC:
1699 case TargetOpcode::G_FREM:
1700 case TargetOpcode::G_FRINT:
1701 case TargetOpcode::G_FSIN:
1702 case TargetOpcode::G_FSQRT:
1703 case TargetOpcode::G_FSUB:
1704 case TargetOpcode::G_INTRINSIC_ROUND:
1705 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1706 case TargetOpcode::G_INTRINSIC_TRUNC:
1707 return true;
1708 default:
1709 return false;
1710 }
1711}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
basic Basic Alias true
static void reportGISelDiagnostic(DiagnosticSeverity Severity, MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Definition: Utils.cpp:249
static bool isBuildVectorOp(unsigned Opcode)
Definition: Utils.cpp:1307
static bool isConstantScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Definition: Utils.cpp:1438
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Size
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Tracks DebugLocs between checkpoints and verifies that they are transferred.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*-—===//
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
static const char PassName[]
Value * RHS
Value * LHS
BinaryOperator * Mul
Class recording the (high level) value of a variable.
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1069
void copySign(const APFloat &RHS)
Definition: APFloat.h:1163
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition: APFloat.cpp:5196
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1051
opStatus add(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1042
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition: APFloat.h:1193
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1060
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
opStatus mod(const APFloat &RHS)
Definition: APFloat.h:1087
Class for arbitrary precision integers.
Definition: APInt.h:76
APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition: APInt.cpp:1543
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:981
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:1002
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:906
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition: APInt.cpp:1636
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1439
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition: APInt.cpp:1614
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:805
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1706
APInt sext(unsigned width) const
Sign extend to a new width.
Definition: APInt.cpp:954
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:418
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:829
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
const APFloat & getValueAPF() const
Definition: Constants.h:311
bool isNegative() const
Return true if the sign bit is set.
Definition: Constants.h:318
bool isZero() const
Return true if the value is positive or negative zero.
Definition: Constants.h:315
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:145
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:296
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition: TypeSize.h:302
Represents a G_BUILD_VECTOR.
Abstract class that contains various methods for clients to notify about changes.
KnownBits getKnownBits(Register R)
void insert(MachineInstr *I)
Add the specified instruction to the worklist if it isn't already in it.
Definition: GISelWorkList.h:74
MachineInstr * pop_back_val()
bool empty() const
Definition: GISelWorkList.h:38
void remove(const MachineInstr *I)
Remove I from the worklist if it exists.
Definition: GISelWorkList.h:83
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:64
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:170
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:100
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy)
Definition: LowLevelType.h:124
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFunctionProperties & set(Property P)
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
GISelChangeObserver * getObserver() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
Helper class to build MachineInstr.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:546
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:329
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:379
iterator_range< mop_iterator > uses()
Returns a range that includes all operands that are register uses.
Definition: MachineInstr.h:710
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:475
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:556
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Diagnostic information for missed-optimization remarks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Analysis providing profile information.
Represents a value which can be a Register or a constant.
Definition: Utils.h:391
Holds all the information related to register banks.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
This class implements the register bank concept.
Definition: RegisterBank.h:28
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:76
Target-Independent Code Generator Pass Configuration Options.
bool isGlobalISelAbortEnabled() const
Check whether or not GlobalISel should abort on error.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
LLVM Value Representation.
Definition: Value.h:74
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:187
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition: TypeSize.h:243
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition: APInt.h:2178
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition: APInt.h:2183
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2188
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition: APInt.h:2193
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
SpecificConstantMatch m_SpecificICst(int64_t RequestedValue)
Matches a constant equal to RequestedValue.
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
DiagnosticInfoMIROptimization::MachineArgument MNV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII, MCRegister PhysReg, const TargetRegisterClass &RC, const DebugLoc &DL, LLT RegTy=LLT())
Return a virtual register corresponding to the incoming argument register PhysReg.
Definition: Utils.cpp:882
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:456
bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition: Utils.cpp:1412
Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition: Utils.cpp:54
MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by a single def instruction that is Opcode.
Definition: Utils.cpp:625
const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:438
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition: Utils.cpp:293
std::optional< APFloat > ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:956
std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1372
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition: Utils.cpp:1525
const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:713
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1650
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:153
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
std::optional< SmallVector< unsigned > > ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI, std::function< unsigned(APInt)> CB)
Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on Src.
Definition: Utils.cpp:969
std::optional< APInt > ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:915
std::optional< RegOrConstant > getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1425
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition: APFloat.h:1436
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
Definition: Utils.cpp:1495
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition: Utils.cpp:1507
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition: Utils.cpp:465
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
Definition: Utils.cpp:1540
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
Definition: Utils.cpp:1572
LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition: Utils.cpp:1144
std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition: Utils.cpp:305
std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:644
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
Returns true if the given block should be optimized for size.
Definition: Utils.cpp:1609
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
Definition: APFloat.h:1410
bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
Definition: Utils.cpp:1475
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
Definition: Utils.cpp:199
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver, SmallInstListTy &DeadInstChain)
Definition: Utils.cpp:1616
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:273
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
std::optional< SmallVector< APInt > > ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1001
std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
Definition: Utils.cpp:419
bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition: Utils.cpp:1418
SmallVector< APInt > ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Tries to constant fold a vector binop with sources Op1 and Op2.
Definition: Utils.cpp:767
std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
Definition: Utils.cpp:1405
std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:932
void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
Definition: Utils.cpp:479
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition: Utils.cpp:1140
LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
Definition: Utils.cpp:1211
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
Definition: APFloat.h:1396
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
Definition: TargetOpcodes.h:36
std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
Definition: Utils.cpp:427
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Definition: Utils.cpp:1585
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:631
bool isBuildVectorConstantSplat(const Register Reg, const MachineRegisterInfo &MRI, int64_t SplatValue, bool AllowUndef)
Return true if the specified register is defined by G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all ...
Definition: Utils.cpp:1356
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition: Utils.cpp:1645
DiagnosticSeverity
Defines the different supported severity of a diagnostic.
@ DS_Warning
@ DS_Error
Register constrainRegToClass(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, Register Reg, const TargetRegisterClass &RegClass)
Try to constrain Reg to the specified register class.
Definition: Utils.cpp:44
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
Definition: Utils.cpp:1597
std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition: Utils.cpp:413
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition: Utils.cpp:1669
bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI)
Returns true if Val can be assumed to never be a signaling NaN.
Definition: Utils.h:334
std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
Definition: Utils.cpp:446
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:212
void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition: Utils.cpp:1630
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI, MachineInstr &MI, ArrayRef< MachineOperand * > DbgUsers)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
Definition: Utils.cpp:472
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition: Utils.cpp:1232
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition: APFloat.h:1423
std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1390
void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
Definition: Utils.cpp:583
int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
Definition: Utils.cpp:220
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition: Utils.cpp:865
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel warning as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:267
#define MORE()
Definition: regcomp.c:252
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Simple struct used to hold a Register value and the instruction which defines it.
Definition: Utils.h:224
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition: KnownBits.h:285
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition: KnownBits.h:282
This class contains a discriminated union of information about pointers in memory operands,...
int64_t Offset
Offset - This is an offset from the base Value*.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
Simple struct used to hold a constant integer value and a virtual register.
Definition: Utils.h:183