LLVM 23.0.0git
Utils.cpp
Go to the documentation of this file.
1//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file This file implements the utility functions used by the GlobalISel
9/// pipeline.
10//===----------------------------------------------------------------------===//
11
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
35#include "llvm/IR/Constants.h"
38#include <numeric>
39#include <optional>
40
41#define DEBUG_TYPE "globalisel-utils"
42
43using namespace llvm;
44using namespace MIPatternMatch;
45
47 const TargetInstrInfo &TII,
48 const RegisterBankInfo &RBI, Register Reg,
49 const TargetRegisterClass &RegClass) {
50 if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
51 return MRI.createVirtualRegister(&RegClass);
52
53 return Reg;
54}
55
57 const MachineFunction &MF, const TargetRegisterInfo &TRI,
59 const RegisterBankInfo &RBI, MachineInstr &InsertPt,
60 const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
61 Register Reg = RegMO.getReg();
62 // Assume physical registers are properly constrained.
63 assert(Reg.isVirtual() && "PhysReg not implemented");
64
65 // Save the old register class to check whether
66 // the change notifications will be required.
67 // TODO: A better approach would be to pass
68 // the observers to constrainRegToClass().
69 auto *OldRegClass = MRI.getRegClassOrNull(Reg);
70 Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
71 // If we created a new virtual register because the class is not compatible
72 // then create a copy between the new and the old register.
73 if (ConstrainedReg != Reg) {
74 MachineBasicBlock::iterator InsertIt(&InsertPt);
75 MachineBasicBlock &MBB = *InsertPt.getParent();
76 // FIXME: The copy needs to have the classes constrained for its operands.
77 // Use operand's regbank to get the class for old register (Reg).
78 if (RegMO.isUse()) {
79 BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
80 TII.get(TargetOpcode::COPY), ConstrainedReg)
81 .addReg(Reg);
82 } else {
83 assert(RegMO.isDef() && "Must be a definition");
84 BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
85 TII.get(TargetOpcode::COPY), Reg)
86 .addReg(ConstrainedReg);
87 }
88 if (GISelChangeObserver *Observer = MF.getObserver()) {
89 Observer->changingInstr(*RegMO.getParent());
90 }
91 RegMO.setReg(ConstrainedReg);
92 if (GISelChangeObserver *Observer = MF.getObserver()) {
93 Observer->changedInstr(*RegMO.getParent());
94 }
95 } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
96 if (GISelChangeObserver *Observer = MF.getObserver()) {
97 if (!RegMO.isDef()) {
98 MachineInstr *RegDef = MRI.getVRegDef(Reg);
99 Observer->changedInstr(*RegDef);
100 }
101 Observer->changingAllUsesOfReg(MRI, Reg);
102 Observer->finishedChangingAllUsesOfReg();
103 }
104 }
105 return ConstrainedReg;
106}
107
109 const MachineFunction &MF, const TargetRegisterInfo &TRI,
111 const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
112 MachineOperand &RegMO, unsigned OpIdx) {
113 Register Reg = RegMO.getReg();
114 // Assume physical registers are properly constrained.
115 assert(Reg.isVirtual() && "PhysReg not implemented");
116
117 const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx);
118 // Some of the target independent instructions, like COPY, may not impose any
119 // register class constraints on some of their operands: If it's a use, we can
120 // skip constraining as the instruction defining the register would constrain
121 // it.
122
123 if (OpRC) {
124 // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
125 // can have multiple regbanks for a superclass that combine different
126 // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
127 // resolved by targets during regbankselect should not be overridden.
128 if (const auto *SubRC = TRI.getCommonSubClass(
129 OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
130 OpRC = SubRC;
131
132 OpRC = TRI.getAllocatableClass(OpRC);
133 }
134
135 if (!OpRC) {
136 assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
137 "Register class constraint is required unless either the "
138 "instruction is target independent or the operand is a use");
139 // FIXME: Just bailing out like this here could be not enough, unless we
140 // expect the users of this function to do the right thing for PHIs and
141 // COPY:
142 // v1 = COPY v0
143 // v2 = COPY v1
144 // v1 here may end up not being constrained at all. Please notice that to
145 // reproduce the issue we likely need a destination pattern of a selection
146 // rule producing such extra copies, not just an input GMIR with them as
147 // every existing target using selectImpl handles copies before calling it
148 // and they never reach this function.
149 return Reg;
150 }
151 return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
152 RegMO);
153}
154
156 const TargetInstrInfo &TII,
157 const TargetRegisterInfo &TRI,
158 const RegisterBankInfo &RBI) {
159 assert(!isPreISelGenericOpcode(I.getOpcode()) &&
160 "A selected instruction is expected");
161 MachineBasicBlock &MBB = *I.getParent();
162 MachineFunction &MF = *MBB.getParent();
164
165 for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
166 MachineOperand &MO = I.getOperand(OpI);
167
168 // There's nothing to be done on non-register operands.
169 if (!MO.isReg())
170 continue;
171
172 LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
173
174 Register Reg = MO.getReg();
175 // Physical registers don't need to be constrained.
176 if (Reg.isPhysical())
177 continue;
178
179 // Register operands with a value of 0 (e.g. predicate operands) don't need
180 // to be constrained.
181 if (Reg == 0)
182 continue;
183
184 // If the operand is a vreg, we should constrain its regclass, and only
185 // insert COPYs if that's impossible.
186 // constrainOperandRegClass does that for us.
187 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
188
189 // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
190 // done.
191 if (MO.isUse()) {
192 int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
193 if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
194 I.tieOperands(DefIdx, OpI);
195 }
196 }
197}
198
200 MachineRegisterInfo &MRI) {
201 // Give up if either DstReg or SrcReg is a physical register.
202 if (DstReg.isPhysical() || SrcReg.isPhysical())
203 return false;
204 // Give up if the types don't match.
205 if (MRI.getType(DstReg) != MRI.getType(SrcReg))
206 return false;
207 // Replace if either DstReg has no constraints or the register
208 // constraints match.
209 const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
210 if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
211 return true;
212
213 // Otherwise match if the Src is already a regclass that is covered by the Dst
214 // RegBank.
215 return isa<const RegisterBank *>(DstRBC) && MRI.getRegClassOrNull(SrcReg) &&
216 cast<const RegisterBank *>(DstRBC)->covers(
217 *MRI.getRegClassOrNull(SrcReg));
218}
219
221 const MachineRegisterInfo &MRI) {
222 // Instructions without side-effects are dead iff they only define dead regs.
223 // This function is hot and this loop returns early in the common case,
224 // so only perform additional checks before this if absolutely necessary.
225 for (const auto &MO : MI.all_defs()) {
226 Register Reg = MO.getReg();
227 if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
228 return false;
229 }
230 return MI.wouldBeTriviallyDead();
231}
232
// reportGISelDiagnostic: shared helper that either raises a fatal usage error
// (when Severity is DS_Error and GlobalISel abort-on-failure is enabled) or
// routes the remark R through the MachineOptimizationRemarkEmitter.
// NOTE(review): this extracted listing is missing several original lines — the
// function signature (lines 233, 235-236) and the right-hand side of the
// IsGlobalISelAbortEnabled initializer (line 238) were stripped; restore them
// from upstream before compiling. The initializer presumably queries the
// target's GlobalISel abort mode — TODO confirm against upstream.
234 MachineFunction &MF,
237 bool IsGlobalISelAbortEnabled =
239 bool IsFatal = Severity == DS_Error && IsGlobalISelAbortEnabled;
240 // Print the function name explicitly if we don't have a debug location (which
241 // makes the diagnostic less useful) or if we're going to emit a raw error.
242 if (!R.getLocation().isValid() || IsFatal)
243 R << (" (in function: " + MF.getName() + ")").str();
244
245 if (IsFatal)
246 reportFatalUsageError(Twine(R.getMsg()));
247 else
248 MORE.emit(R);
249}
250
256
263
// reportGISelFailure (message + instruction form): builds a "GISelFailure"
// remark from Msg, optionally appends the offending MachineInstr, and defers
// to the remark-based reportGISelFailure overload.
// NOTE(review): this extracted listing is missing original lines 264-265 (the
// start of the signature) and line 272 (the first half of the condition whose
// second half is `MORE.allowExtraAnalysis(PassName))`); restore them from
// upstream before compiling.
266 const char *PassName, StringRef Msg,
267 const MachineInstr &MI) {
268 MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
269 MI.getDebugLoc(), MI.getParent());
270 R << Msg;
271 // Printing MI is expensive; only do it if expensive remarks are enabled.
273 MORE.allowExtraAnalysis(PassName))
274 R << ": " << ore::MNV("Inst", MI);
275 reportGISelFailure(MF, MORE, R);
276}
277
278unsigned llvm::getInverseGMinMaxOpcode(unsigned MinMaxOpc) {
279 switch (MinMaxOpc) {
280 case TargetOpcode::G_SMIN:
281 return TargetOpcode::G_SMAX;
282 case TargetOpcode::G_SMAX:
283 return TargetOpcode::G_SMIN;
284 case TargetOpcode::G_UMIN:
285 return TargetOpcode::G_UMAX;
286 case TargetOpcode::G_UMAX:
287 return TargetOpcode::G_UMIN;
288 default:
289 llvm_unreachable("unrecognized opcode");
290 }
291}
292
293std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
294 const MachineRegisterInfo &MRI) {
295 std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
296 VReg, MRI, /*LookThroughInstrs*/ false);
297 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
298 "Value found while looking through instrs");
299 if (!ValAndVReg)
300 return std::nullopt;
301 return ValAndVReg->Value;
302}
303
305 const MachineRegisterInfo &MRI) {
306 MachineInstr *Const = MRI.getVRegDef(Reg);
307 assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&
308 "expected a G_CONSTANT on Reg");
309 return Const->getOperand(1).getCImm()->getValue();
310}
311
312std::optional<int64_t>
314 std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
315 if (Val && Val->getBitWidth() <= 64)
316 return Val->getSExtValue();
317 return std::nullopt;
318}
319
320namespace {
321
322// This function is used in many places, and as such, it has some
323// micro-optimizations to try and make it as fast as it can be.
324//
325// - We use template arguments to avoid an indirect call caused by passing a
326// function_ref/std::function
327// - GetAPCstValue does not return std::optional<APInt> as that's expensive.
328// Instead it returns true/false and places the result in a pre-constructed
329// APInt.
330//
331// Please change this function carefully and benchmark your changes.
332template <bool (*IsConstantOpcode)(const MachineInstr *),
333 bool (*GetAPCstValue)(const MachineInstr *MI, APInt &)>
334std::optional<ValueAndVReg>
335getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
336 bool LookThroughInstrs = true,
337 bool LookThroughAnyExt = false) {
340
341 while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
342 LookThroughInstrs) {
343 switch (MI->getOpcode()) {
344 case TargetOpcode::G_ANYEXT:
345 if (!LookThroughAnyExt)
346 return std::nullopt;
347 [[fallthrough]];
348 case TargetOpcode::G_TRUNC:
349 case TargetOpcode::G_SEXT:
350 case TargetOpcode::G_ZEXT:
351 SeenOpcodes.push_back(std::make_pair(
352 MI->getOpcode(),
353 MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
354 VReg = MI->getOperand(1).getReg();
355 break;
356 case TargetOpcode::COPY:
357 VReg = MI->getOperand(1).getReg();
358 if (VReg.isPhysical())
359 return std::nullopt;
360 break;
361 case TargetOpcode::G_INTTOPTR:
362 VReg = MI->getOperand(1).getReg();
363 break;
364 default:
365 return std::nullopt;
366 }
367 }
368 if (!MI || !IsConstantOpcode(MI))
369 return std::nullopt;
370
371 APInt Val;
372 if (!GetAPCstValue(MI, Val))
373 return std::nullopt;
374 for (auto &Pair : reverse(SeenOpcodes)) {
375 switch (Pair.first) {
376 case TargetOpcode::G_TRUNC:
377 Val = Val.trunc(Pair.second);
378 break;
379 case TargetOpcode::G_ANYEXT:
380 case TargetOpcode::G_SEXT:
381 Val = Val.sext(Pair.second);
382 break;
383 case TargetOpcode::G_ZEXT:
384 Val = Val.zext(Pair.second);
385 break;
386 }
387 }
388
389 return ValueAndVReg{std::move(Val), VReg};
390}
391
392bool isIConstant(const MachineInstr *MI) {
393 if (!MI)
394 return false;
395 return MI->getOpcode() == TargetOpcode::G_CONSTANT;
396}
397
398bool isFConstant(const MachineInstr *MI) {
399 if (!MI)
400 return false;
401 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
402}
403
404bool isAnyConstant(const MachineInstr *MI) {
405 if (!MI)
406 return false;
407 unsigned Opc = MI->getOpcode();
408 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
409}
410
411bool getCImmAsAPInt(const MachineInstr *MI, APInt &Result) {
412 const MachineOperand &CstVal = MI->getOperand(1);
413 if (!CstVal.isCImm())
414 return false;
415 Result = CstVal.getCImm()->getValue();
416 return true;
417}
418
419bool getCImmOrFPImmAsAPInt(const MachineInstr *MI, APInt &Result) {
420 const MachineOperand &CstVal = MI->getOperand(1);
421 if (CstVal.isCImm())
422 Result = CstVal.getCImm()->getValue();
423 else if (CstVal.isFPImm())
425 else
426 return false;
427 return true;
428}
429
430} // end anonymous namespace
431
433 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
434 return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
435 VReg, MRI, LookThroughInstrs);
436}
437
439 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
440 bool LookThroughAnyExt) {
441 return getConstantVRegValWithLookThrough<isAnyConstant,
442 getCImmOrFPImmAsAPInt>(
443 VReg, MRI, LookThroughInstrs, LookThroughAnyExt);
444}
445
446std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
447 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
448 auto Reg =
449 getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
450 VReg, MRI, LookThroughInstrs);
451 if (!Reg)
452 return std::nullopt;
453
454 APFloat FloatVal(getFltSemanticForLLT(LLT::scalar(Reg->Value.getBitWidth())),
455 Reg->Value);
456 return FPValueAndVReg{FloatVal, Reg->VReg};
457}
458
459const ConstantFP *
461 MachineInstr *MI = MRI.getVRegDef(VReg);
462 if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
463 return nullptr;
464 return MI->getOperand(1).getFPImm();
465}
466
467std::optional<DefinitionAndSourceRegister>
469 Register DefSrcReg = Reg;
470 // This assumes that the code is in SSA form, so there should only be one
471 // definition.
472 auto DefIt = MRI.def_begin(Reg);
473 if (DefIt == MRI.def_end())
474 return {};
475 MachineOperand &DefOpnd = *DefIt;
476 MachineInstr *DefMI = DefOpnd.getParent();
477 auto DstTy = MRI.getType(DefOpnd.getReg());
478 if (!DstTy.isValid())
479 return std::nullopt;
480 unsigned Opc = DefMI->getOpcode();
481 while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
482 Register SrcReg = DefMI->getOperand(1).getReg();
483 auto SrcTy = MRI.getType(SrcReg);
484 if (!SrcTy.isValid())
485 break;
486 DefMI = MRI.getVRegDef(SrcReg);
487 DefSrcReg = SrcReg;
488 Opc = DefMI->getOpcode();
489 }
490 return DefinitionAndSourceRegister{DefMI, DefSrcReg};
491}
492
494 const MachineRegisterInfo &MRI) {
495 std::optional<DefinitionAndSourceRegister> DefSrcReg =
497 return DefSrcReg ? DefSrcReg->MI : nullptr;
498}
499
501 const MachineRegisterInfo &MRI) {
502 std::optional<DefinitionAndSourceRegister> DefSrcReg =
504 return DefSrcReg ? DefSrcReg->Reg : Register();
505}
506
507void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
509 MachineIRBuilder &MIRBuilder,
510 MachineRegisterInfo &MRI) {
511 for (int i = 0; i < NumParts; ++i)
513 MIRBuilder.buildUnmerge(VRegs, Reg);
514}
515
516bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
518 SmallVectorImpl<Register> &LeftoverRegs,
519 MachineIRBuilder &MIRBuilder,
520 MachineRegisterInfo &MRI) {
521 assert(!LeftoverTy.isValid() && "this is an out argument");
522
523 unsigned RegSize = RegTy.getSizeInBits();
524 unsigned MainSize = MainTy.getSizeInBits();
525 unsigned NumParts = RegSize / MainSize;
526 unsigned LeftoverSize = RegSize - NumParts * MainSize;
527
528 // Use an unmerge when possible.
529 if (LeftoverSize == 0) {
530 for (unsigned I = 0; I < NumParts; ++I)
531 VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
532 MIRBuilder.buildUnmerge(VRegs, Reg);
533 return true;
534 }
535
536 // Try to use unmerge for irregular vector split where possible
537 // For example when splitting a <6 x i32> into <4 x i32> with <2 x i32>
538 // leftover, it becomes:
539 // <2 x i32> %2, <2 x i32>%3, <2 x i32> %4 = G_UNMERGE_VALUE <6 x i32> %1
540 // <4 x i32> %5 = G_CONCAT_VECTOR <2 x i32> %2, <2 x i32> %3
541 if (RegTy.isVector() && MainTy.isVector()) {
542 unsigned RegNumElts = RegTy.getNumElements();
543 unsigned MainNumElts = MainTy.getNumElements();
544 unsigned LeftoverNumElts = RegNumElts % MainNumElts;
545 // If can unmerge to LeftoverTy, do it
546 if (MainNumElts % LeftoverNumElts == 0 &&
547 RegNumElts % LeftoverNumElts == 0 &&
548 RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
549 LeftoverNumElts > 1) {
550 LeftoverTy = LLT::fixed_vector(LeftoverNumElts, RegTy.getElementType());
551
552 // Unmerge the SrcReg to LeftoverTy vectors
553 SmallVector<Register, 4> UnmergeValues;
554 extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
555 MIRBuilder, MRI);
556
557 // Find how many LeftoverTy makes one MainTy
558 unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
559 unsigned NumOfLeftoverVal =
560 ((RegNumElts % MainNumElts) / LeftoverNumElts);
561
562 // Create as many MainTy as possible using unmerged value
563 SmallVector<Register, 4> MergeValues;
564 for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
565 MergeValues.push_back(UnmergeValues[I]);
566 if (MergeValues.size() == LeftoverPerMain) {
567 VRegs.push_back(
568 MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
569 MergeValues.clear();
570 }
571 }
572 // Populate LeftoverRegs with the leftovers
573 for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
574 I < UnmergeValues.size(); I++) {
575 LeftoverRegs.push_back(UnmergeValues[I]);
576 }
577 return true;
578 }
579 }
580 // Perform irregular split. Leftover is last element of RegPieces.
581 if (MainTy.isVector()) {
582 SmallVector<Register, 8> RegPieces;
583 extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
584 MRI);
585 for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
586 VRegs.push_back(RegPieces[i]);
587 LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
588 LeftoverTy = MRI.getType(LeftoverRegs[0]);
589 return true;
590 }
591
592 LeftoverTy = LLT::scalar(LeftoverSize);
593 // For irregular sizes, extract the individual parts.
594 for (unsigned I = 0; I != NumParts; ++I) {
595 Register NewReg = MRI.createGenericVirtualRegister(MainTy);
596 VRegs.push_back(NewReg);
597 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
598 }
599
600 for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
601 Offset += LeftoverSize) {
602 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
603 LeftoverRegs.push_back(NewReg);
604 MIRBuilder.buildExtract(NewReg, Reg, Offset);
605 }
606
607 return true;
608}
609
610void llvm::extractVectorParts(Register Reg, unsigned NumElts,
612 MachineIRBuilder &MIRBuilder,
613 MachineRegisterInfo &MRI) {
614 LLT RegTy = MRI.getType(Reg);
615 assert(RegTy.isVector() && "Expected a vector type");
616
617 LLT EltTy = RegTy.getElementType();
618 LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
619 unsigned RegNumElts = RegTy.getNumElements();
620 unsigned LeftoverNumElts = RegNumElts % NumElts;
621 unsigned NumNarrowTyPieces = RegNumElts / NumElts;
622
623 // Perfect split without leftover
624 if (LeftoverNumElts == 0)
625 return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
626 MRI);
627
628 // Irregular split. Provide direct access to all elements for artifact
629 // combiner using unmerge to elements. Then build vectors with NumElts
630 // elements. Remaining element(s) will be (used to build vector) Leftover.
632 extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);
633
634 unsigned Offset = 0;
635 // Requested sub-vectors of NarrowTy.
636 for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
637 ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
638 VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
639 }
640
641 // Leftover element(s).
642 if (LeftoverNumElts == 1) {
643 VRegs.push_back(Elts[Offset]);
644 } else {
645 LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
646 ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
647 VRegs.push_back(
648 MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
649 }
650}
651
653 const MachineRegisterInfo &MRI) {
655 return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
656}
657
658APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
659 if (Size == 32)
660 return APFloat(float(Val));
661 if (Size == 64)
662 return APFloat(Val);
663 if (Size != 16)
664 llvm_unreachable("Unsupported FPConstant size");
665 bool Ignored;
666 APFloat APF(Val);
668 return APF;
669}
670
671std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
672 const Register Op1,
673 const Register Op2,
674 const MachineRegisterInfo &MRI) {
675 auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
676 if (!MaybeOp2Cst)
677 return std::nullopt;
678
679 auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
680 if (!MaybeOp1Cst)
681 return std::nullopt;
682
683 const APInt &C1 = MaybeOp1Cst->Value;
684 const APInt &C2 = MaybeOp2Cst->Value;
685 switch (Opcode) {
686 default:
687 break;
688 case TargetOpcode::G_ADD:
689 return C1 + C2;
690 case TargetOpcode::G_PTR_ADD:
691 // Types can be of different width here.
692 // Result needs to be the same width as C1, so trunc or sext C2.
693 return C1 + C2.sextOrTrunc(C1.getBitWidth());
694 case TargetOpcode::G_AND:
695 return C1 & C2;
696 case TargetOpcode::G_ASHR:
697 return C1.ashr(C2);
698 case TargetOpcode::G_LSHR:
699 return C1.lshr(C2);
700 case TargetOpcode::G_MUL:
701 return C1 * C2;
702 case TargetOpcode::G_OR:
703 return C1 | C2;
704 case TargetOpcode::G_SHL:
705 return C1 << C2;
706 case TargetOpcode::G_SUB:
707 return C1 - C2;
708 case TargetOpcode::G_XOR:
709 return C1 ^ C2;
710 case TargetOpcode::G_UDIV:
711 if (!C2.getBoolValue())
712 break;
713 return C1.udiv(C2);
714 case TargetOpcode::G_SDIV:
715 if (!C2.getBoolValue())
716 break;
717 return C1.sdiv(C2);
718 case TargetOpcode::G_UREM:
719 if (!C2.getBoolValue())
720 break;
721 return C1.urem(C2);
722 case TargetOpcode::G_SREM:
723 if (!C2.getBoolValue())
724 break;
725 return C1.srem(C2);
726 case TargetOpcode::G_SMIN:
727 return APIntOps::smin(C1, C2);
728 case TargetOpcode::G_SMAX:
729 return APIntOps::smax(C1, C2);
730 case TargetOpcode::G_UMIN:
731 return APIntOps::umin(C1, C2);
732 case TargetOpcode::G_UMAX:
733 return APIntOps::umax(C1, C2);
734 }
735
736 return std::nullopt;
737}
738
739std::optional<APFloat>
740llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
741 const Register Op2, const MachineRegisterInfo &MRI) {
742 const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
743 if (!Op2Cst)
744 return std::nullopt;
745
746 const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
747 if (!Op1Cst)
748 return std::nullopt;
749
750 APFloat C1 = Op1Cst->getValueAPF();
751 const APFloat &C2 = Op2Cst->getValueAPF();
752 switch (Opcode) {
753 case TargetOpcode::G_FADD:
755 return C1;
756 case TargetOpcode::G_FSUB:
758 return C1;
759 case TargetOpcode::G_FMUL:
761 return C1;
762 case TargetOpcode::G_FDIV:
764 return C1;
765 case TargetOpcode::G_FREM:
766 C1.mod(C2);
767 return C1;
768 case TargetOpcode::G_FCOPYSIGN:
769 C1.copySign(C2);
770 return C1;
771 case TargetOpcode::G_FMINNUM:
772 return minnum(C1, C2);
773 case TargetOpcode::G_FMAXNUM:
774 return maxnum(C1, C2);
775 case TargetOpcode::G_FMINIMUM:
776 return minimum(C1, C2);
777 case TargetOpcode::G_FMAXIMUM:
778 return maximum(C1, C2);
779 case TargetOpcode::G_FMINIMUMNUM:
780 return minimumnum(C1, C2);
781 case TargetOpcode::G_FMAXIMUMNUM:
782 return maximumnum(C1, C2);
783 case TargetOpcode::G_FMINNUM_IEEE:
784 case TargetOpcode::G_FMAXNUM_IEEE:
785 // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
786 // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
787 // and currently there isn't a nice wrapper in APFloat for the version with
788 // correct snan handling.
789 break;
790 default:
791 break;
792 }
793
794 return std::nullopt;
795}
796
798 const MachineRegisterInfo &MRI) {
799 if (auto *BV = getOpcodeDef<GBuildVector>(Reg, MRI))
800 return BV;
801
802 auto *Bitcast = getOpcodeDef(TargetOpcode::G_BITCAST, Reg, MRI);
803 if (!Bitcast)
804 return nullptr;
805
806 auto [Dst, DstTy, Src, SrcTy] = Bitcast->getFirst2RegLLTs();
807 if (!SrcTy.isVector() || !DstTy.isVector())
808 return nullptr;
809 if (SrcTy.getElementCount() != DstTy.getElementCount())
810 return nullptr;
811 if (SrcTy.getScalarSizeInBits() != DstTy.getScalarSizeInBits())
812 return nullptr;
813
814 return getOpcodeDef<GBuildVector>(Src, MRI);
815}
816
818llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
819 const Register Op2,
820 const MachineRegisterInfo &MRI) {
821 auto *SrcVec2 = getBuildVectorLikeDef(Op2, MRI);
822 if (!SrcVec2)
823 return SmallVector<APInt>();
824
825 auto *SrcVec1 = getBuildVectorLikeDef(Op1, MRI);
826 if (!SrcVec1)
827 return SmallVector<APInt>();
828
829 SmallVector<APInt> FoldedElements;
830 for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
831 auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
832 SrcVec2->getSourceReg(Idx), MRI);
833 if (!MaybeCst)
834 return SmallVector<APInt>();
835 FoldedElements.push_back(*MaybeCst);
836 }
837 return FoldedElements;
838}
839
841 bool SNaN) {
842 const MachineInstr *DefMI = MRI.getVRegDef(Val);
843 if (!DefMI)
844 return false;
845
846 if (DefMI->getFlag(MachineInstr::FmNoNans))
847 return true;
848
849 // If the value is a constant, we can obviously see if it is a NaN or not.
850 if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
851 return !FPVal->getValueAPF().isNaN() ||
852 (SNaN && !FPVal->getValueAPF().isSignaling());
853 }
854
855 if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
856 for (const auto &Op : DefMI->uses())
857 if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
858 return false;
859 return true;
860 }
861
862 switch (DefMI->getOpcode()) {
863 default:
864 break;
865 case TargetOpcode::G_FADD:
866 case TargetOpcode::G_FSUB:
867 case TargetOpcode::G_FMUL:
868 case TargetOpcode::G_FDIV:
869 case TargetOpcode::G_FREM:
870 case TargetOpcode::G_FSIN:
871 case TargetOpcode::G_FCOS:
872 case TargetOpcode::G_FTAN:
873 case TargetOpcode::G_FACOS:
874 case TargetOpcode::G_FASIN:
875 case TargetOpcode::G_FATAN:
876 case TargetOpcode::G_FATAN2:
877 case TargetOpcode::G_FCOSH:
878 case TargetOpcode::G_FSINH:
879 case TargetOpcode::G_FTANH:
880 case TargetOpcode::G_FMA:
881 case TargetOpcode::G_FMAD:
882 if (SNaN)
883 return true;
884
885 // TODO: Need isKnownNeverInfinity
886 return false;
887 case TargetOpcode::G_FMINNUM_IEEE:
888 case TargetOpcode::G_FMAXNUM_IEEE: {
889 if (SNaN)
890 return true;
891 // This can return a NaN if either operand is an sNaN, or if both operands
892 // are NaN.
893 return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
894 isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
895 (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
896 isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
897 }
898 case TargetOpcode::G_FMINNUM:
899 case TargetOpcode::G_FMAXNUM: {
900 // Only one needs to be known not-nan, since it will be returned if the
901 // other ends up being one.
902 return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
903 isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
904 }
905 }
906
907 if (SNaN) {
908 // FP operations quiet. For now, just handle the ones inserted during
909 // legalization.
910 switch (DefMI->getOpcode()) {
911 case TargetOpcode::G_FPEXT:
912 case TargetOpcode::G_FPTRUNC:
913 case TargetOpcode::G_FCANONICALIZE:
914 return true;
915 default:
916 return false;
917 }
918 }
919
920 return false;
921}
922
924 const MachinePointerInfo &MPO) {
927 MachineFrameInfo &MFI = MF.getFrameInfo();
928 return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
929 MPO.Offset);
930 }
931
932 if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
933 const Module *M = MF.getFunction().getParent();
934 return V->getPointerAlignment(M->getDataLayout());
935 }
936
937 return Align(1);
938}
939
941 const TargetInstrInfo &TII,
942 MCRegister PhysReg,
943 const TargetRegisterClass &RC,
944 const DebugLoc &DL, LLT RegTy) {
945 MachineBasicBlock &EntryMBB = MF.front();
947 Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
948 if (LiveIn) {
949 MachineInstr *Def = MRI.getVRegDef(LiveIn);
950 if (Def) {
951 // FIXME: Should the verifier check this is in the entry block?
952 assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
953 return LiveIn;
954 }
955
956 // It's possible the incoming argument register and copy was added during
957 // lowering, but later deleted due to being/becoming dead. If this happens,
958 // re-insert the copy.
959 } else {
960 // The live in register was not present, so add it.
961 LiveIn = MF.addLiveIn(PhysReg, &RC);
962 if (RegTy.isValid())
963 MRI.setType(LiveIn, RegTy);
964 }
965
966 BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
967 .addReg(PhysReg);
968 if (!EntryMBB.isLiveIn(PhysReg))
969 EntryMBB.addLiveIn(PhysReg);
970 return LiveIn;
971}
972
973std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
974 const Register Op1, uint64_t Imm,
975 const MachineRegisterInfo &MRI) {
976 auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
977 if (MaybeOp1Cst) {
978 switch (Opcode) {
979 default:
980 break;
981 case TargetOpcode::G_SEXT_INREG: {
982 LLT Ty = MRI.getType(Op1);
983 return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
984 }
985 }
986 }
987 return std::nullopt;
988}
989
990std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
991 const Register Op0,
992 const MachineRegisterInfo &MRI) {
993 std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
994 if (!Val)
995 return Val;
996
997 const unsigned DstSize = DstTy.getScalarSizeInBits();
998
999 switch (Opcode) {
1000 case TargetOpcode::G_SEXT:
1001 return Val->sext(DstSize);
1002 case TargetOpcode::G_ZEXT:
1003 case TargetOpcode::G_ANYEXT:
1004 // TODO: DAG considers target preference when constant folding any_extend.
1005 return Val->zext(DstSize);
1006 default:
1007 break;
1008 }
1009
1010 llvm_unreachable("unexpected cast opcode to constant fold");
1011}
1012
1013std::optional<APFloat>
1014llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
1015 const MachineRegisterInfo &MRI) {
1016 assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
1017 if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
1018 APFloat DstVal(getFltSemanticForLLT(DstTy));
1019 DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
1021 return DstVal;
1022 }
1023 return std::nullopt;
1024}
1025
1026std::optional<SmallVector<unsigned>>
1028 std::function<unsigned(APInt)> CB) {
1029 LLT Ty = MRI.getType(Src);
1030 SmallVector<unsigned> FoldedCTLZs;
1031 auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
1032 auto MaybeCst = getIConstantVRegVal(R, MRI);
1033 if (!MaybeCst)
1034 return std::nullopt;
1035 return CB(*MaybeCst);
1036 };
1037 if (Ty.isVector()) {
1038 // Try to constant fold each element.
1039 auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
1040 if (!BV)
1041 return std::nullopt;
1042 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1043 if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
1044 FoldedCTLZs.emplace_back(*MaybeFold);
1045 continue;
1046 }
1047 return std::nullopt;
1048 }
1049 return FoldedCTLZs;
1050 }
1051 if (auto MaybeCst = tryFoldScalar(Src)) {
1052 FoldedCTLZs.emplace_back(*MaybeCst);
1053 return FoldedCTLZs;
1054 }
1055 return std::nullopt;
1056}
1057
1058std::optional<SmallVector<APInt>>
1059llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
1060 unsigned DstScalarSizeInBits, unsigned ExtOp,
1061 const MachineRegisterInfo &MRI) {
1062 assert(ExtOp == TargetOpcode::G_SEXT || ExtOp == TargetOpcode::G_ZEXT ||
1063 ExtOp == TargetOpcode::G_ANYEXT);
1064
1065 const LLT Ty = MRI.getType(Op1);
1066
1067 auto GetICmpResultCst = [&](bool IsTrue) {
1068 if (IsTrue)
1069 return ExtOp == TargetOpcode::G_SEXT
1070 ? APInt::getAllOnes(DstScalarSizeInBits)
1071 : APInt::getOneBitSet(DstScalarSizeInBits, 0);
1072 return APInt::getZero(DstScalarSizeInBits);
1073 };
1074
1075 auto TryFoldScalar = [&](Register LHS, Register RHS) -> std::optional<APInt> {
1076 auto RHSCst = getIConstantVRegVal(RHS, MRI);
1077 if (!RHSCst)
1078 return std::nullopt;
1079 auto LHSCst = getIConstantVRegVal(LHS, MRI);
1080 if (!LHSCst)
1081 return std::nullopt;
1082
1083 switch (Pred) {
1085 return GetICmpResultCst(LHSCst->eq(*RHSCst));
1087 return GetICmpResultCst(LHSCst->ne(*RHSCst));
1089 return GetICmpResultCst(LHSCst->ugt(*RHSCst));
1091 return GetICmpResultCst(LHSCst->uge(*RHSCst));
1093 return GetICmpResultCst(LHSCst->ult(*RHSCst));
1095 return GetICmpResultCst(LHSCst->ule(*RHSCst));
1097 return GetICmpResultCst(LHSCst->sgt(*RHSCst));
1099 return GetICmpResultCst(LHSCst->sge(*RHSCst));
1101 return GetICmpResultCst(LHSCst->slt(*RHSCst));
1103 return GetICmpResultCst(LHSCst->sle(*RHSCst));
1104 default:
1105 return std::nullopt;
1106 }
1107 };
1108
1109 SmallVector<APInt> FoldedICmps;
1110
1111 if (Ty.isVector()) {
1112 // Try to constant fold each element.
1113 auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
1114 auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
1115 if (!BV1 || !BV2)
1116 return std::nullopt;
1117 assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
1118 for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
1119 if (auto MaybeFold =
1120 TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
1121 FoldedICmps.emplace_back(*MaybeFold);
1122 continue;
1123 }
1124 return std::nullopt;
1125 }
1126 return FoldedICmps;
1127 }
1128
1129 if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
1130 FoldedICmps.emplace_back(*MaybeCst);
1131 return FoldedICmps;
1132 }
1133
1134 return std::nullopt;
1135}
1136
1138 GISelValueTracking *VT) {
1139 std::optional<DefinitionAndSourceRegister> DefSrcReg =
1141 if (!DefSrcReg)
1142 return false;
1143
1144 const MachineInstr &MI = *DefSrcReg->MI;
1145 const LLT Ty = MRI.getType(Reg);
1146
1147 switch (MI.getOpcode()) {
1148 case TargetOpcode::G_CONSTANT: {
1149 unsigned BitWidth = Ty.getScalarSizeInBits();
1150 const ConstantInt *CI = MI.getOperand(1).getCImm();
1151 return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
1152 }
1153 case TargetOpcode::G_SHL: {
1154 // A left-shift of a constant one will have exactly one bit set because
1155 // shifting the bit off the end is undefined.
1156
1157 // TODO: Constant splat
1158 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1159 if (*ConstLHS == 1)
1160 return true;
1161 }
1162
1163 break;
1164 }
1165 case TargetOpcode::G_LSHR: {
1166 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1167 if (ConstLHS->isSignMask())
1168 return true;
1169 }
1170
1171 break;
1172 }
1173 case TargetOpcode::G_BUILD_VECTOR: {
1174 // TODO: Probably should have a recursion depth guard since you could have
1175 // bitcasted vector elements.
1176 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
1177 if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, VT))
1178 return false;
1179
1180 return true;
1181 }
1182 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1183 // Only handle constants since we would need to know if number of leading
1184 // zeros is greater than the truncation amount.
1185 const unsigned BitWidth = Ty.getScalarSizeInBits();
1186 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
1187 auto Const = getIConstantVRegVal(MO.getReg(), MRI);
1188 if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
1189 return false;
1190 }
1191
1192 return true;
1193 }
1194 default:
1195 break;
1196 }
1197
1198 if (!VT)
1199 return false;
1200
1201 // More could be done here, though the above checks are enough
1202 // to handle some common cases.
1203
1204 // Fall back to computeKnownBits to catch other known cases.
1205 KnownBits Known = VT->getKnownBits(Reg);
1206 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
1207}
1208
1212
1213LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
1214 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1215 return OrigTy;
1216
1217 if (OrigTy.isVector() && TargetTy.isVector()) {
1218 LLT OrigElt = OrigTy.getElementType();
1219 LLT TargetElt = TargetTy.getElementType();
1220
1221 // TODO: The docstring for this function says the intention is to use this
1222 // function to build MERGE/UNMERGE instructions. It won't be the case that
1223 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1224 // could implement getLCMType between the two in the future if there was a
1225 // need, but it is not worth it now as this function should not be used in
1226 // that way.
1227 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1228 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1229 "getLCMType not implemented between fixed and scalable vectors.");
1230
1231 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
1232 int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
1233 TargetTy.getElementCount().getKnownMinValue());
1234 // Prefer the original element type.
1236 TargetTy.getElementCount().getKnownMinValue());
1237 return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
1238 OrigTy.getElementType());
1239 }
1240 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
1241 TargetTy.getSizeInBits().getKnownMinValue());
1242 return LLT::vector(
1243 ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
1244 OrigElt);
1245 }
1246
1247 // One type is scalar, one type is vector
1248 if (OrigTy.isVector() || TargetTy.isVector()) {
1249 LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
1250 LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
1251 LLT EltTy = VecTy.getElementType();
1252 LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;
1253
1254 // Prefer scalar type from OrigTy.
1255 if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
1256 return LLT::vector(VecTy.getElementCount(), OrigEltTy);
1257
1258 // Different size scalars. Create vector with the same total size.
1259 // LCM will take fixed/scalable from VecTy.
1260 unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
1262 ScalarTy.getSizeInBits().getFixedValue());
1263 // Prefer type from OrigTy
1264 return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
1265 VecTy.getElementCount().isScalable()),
1266 OrigEltTy);
1267 }
1268
1269 // At this point, both types are scalars of different size
1270 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
1271 TargetTy.getSizeInBits().getFixedValue());
1272 // Preserve pointer types.
1273 if (LCM == OrigTy.getSizeInBits())
1274 return OrigTy;
1275 if (LCM == TargetTy.getSizeInBits())
1276 return TargetTy;
1277 return LLT::scalar(LCM);
1278}
1279
1280LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
1281
1282 if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
1283 (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
1285 "getCoverTy not implemented between fixed and scalable vectors.");
1286
1287 if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
1288 (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
1289 return getLCMType(OrigTy, TargetTy);
1290
1291 unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
1292 unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
1293 if (OrigTyNumElts % TargetTyNumElts == 0)
1294 return OrigTy;
1295
1296 unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
1298 OrigTy.getElementType());
1299}
1300
1301LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
1302 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1303 return OrigTy;
1304
1305 if (OrigTy.isVector() && TargetTy.isVector()) {
1306 LLT OrigElt = OrigTy.getElementType();
1307
1308 // TODO: The docstring for this function says the intention is to use this
1309 // function to build MERGE/UNMERGE instructions. It won't be the case that
1310 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1311 // could implement getGCDType between the two in the future if there was a
1312 // need, but it is not worth it now as this function should not be used in
1313 // that way.
1314 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1315 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1316 "getGCDType not implemented between fixed and scalable vectors.");
1317
1318 unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
1319 TargetTy.getSizeInBits().getKnownMinValue());
1320 if (GCD == OrigElt.getSizeInBits())
1322 OrigElt);
1323
1324 // Cannot produce original element type, but both have vscale in common.
1325 if (GCD < OrigElt.getSizeInBits())
1327 GCD);
1328
1329 return LLT::vector(
1331 OrigTy.isScalable()),
1332 OrigElt);
1333 }
1334
1335 // If one type is vector and the element size matches the scalar size, then
1336 // the gcd is the scalar type.
1337 if (OrigTy.isVector() &&
1338 OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
1339 return OrigTy.getElementType();
1340 if (TargetTy.isVector() &&
1341 TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
1342 return OrigTy;
1343
1344 // At this point, both types are either scalars of different type or one is a
1345 // vector and one is a scalar. If both types are scalars, the GCD type is the
1346 // GCD between the two scalar sizes. If one is vector and one is scalar, then
1347 // the GCD type is the GCD between the scalar and the vector element size.
1348 LLT OrigScalar = OrigTy.getScalarType();
1349 LLT TargetScalar = TargetTy.getScalarType();
1350 unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
1351 TargetScalar.getSizeInBits().getFixedValue());
1352 return LLT::scalar(GCD);
1353}
1354
1356 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1357 "Only G_SHUFFLE_VECTOR can have a splat index!");
1358 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1359 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1360
1361 // If all elements are undefined, this shuffle can be considered a splat.
1362 // Return 0 for better potential for callers to simplify.
1363 if (FirstDefinedIdx == Mask.end())
1364 return 0;
1365
1366 // Make sure all remaining elements are either undef or the same
1367 // as the first non-undef value.
1368 int SplatValue = *FirstDefinedIdx;
1369 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1370 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1371 return std::nullopt;
1372
1373 return SplatValue;
1374}
1375
1376static bool isBuildVectorOp(unsigned Opcode) {
1377 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1378 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1379}
1380
1381namespace {
1382
1383std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1384 const MachineRegisterInfo &MRI,
1385 bool AllowUndef) {
1386 MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
1387 if (!MI)
1388 return std::nullopt;
1389
1390 bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1391 if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
1392 return std::nullopt;
1393
1394 std::optional<ValueAndVReg> SplatValAndReg;
1395 for (MachineOperand &Op : MI->uses()) {
1396 Register Element = Op.getReg();
1397 // If we have a G_CONCAT_VECTOR, we recursively look into the
1398 // vectors that we're concatenating to see if they're splats.
1399 auto ElementValAndReg =
1400 isConcatVectorsOp
1401 ? getAnyConstantSplat(Element, MRI, AllowUndef)
1403
1404 // If AllowUndef, treat undef as value that will result in a constant splat.
1405 if (!ElementValAndReg) {
1406 if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1407 continue;
1408 return std::nullopt;
1409 }
1410
1411 // Record splat value
1412 if (!SplatValAndReg)
1413 SplatValAndReg = ElementValAndReg;
1414
1415 // Different constant than the one already recorded, not a constant splat.
1416 if (SplatValAndReg->Value != ElementValAndReg->Value)
1417 return std::nullopt;
1418 }
1419
1420 return SplatValAndReg;
1421}
1422
1423} // end anonymous namespace
1424
1426 const MachineRegisterInfo &MRI,
1427 int64_t SplatValue, bool AllowUndef) {
1428 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1429 return SplatValAndReg->Value.getSExtValue() == SplatValue;
1430
1431 return false;
1432}
1433
1435 const MachineRegisterInfo &MRI,
1436 const APInt &SplatValue,
1437 bool AllowUndef) {
1438 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef)) {
1439 if (SplatValAndReg->Value.getBitWidth() < SplatValue.getBitWidth())
1440 return APInt::isSameValue(
1441 SplatValAndReg->Value.sext(SplatValue.getBitWidth()), SplatValue);
1442 return APInt::isSameValue(
1443 SplatValAndReg->Value,
1444 SplatValue.sext(SplatValAndReg->Value.getBitWidth()));
1445 }
1446
1447 return false;
1448}
1449
1451 const MachineRegisterInfo &MRI,
1452 int64_t SplatValue, bool AllowUndef) {
1453 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1454 AllowUndef);
1455}
1456
1458 const MachineRegisterInfo &MRI,
1459 const APInt &SplatValue,
1460 bool AllowUndef) {
1461 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1462 AllowUndef);
1463}
1464
1465std::optional<APInt>
1467 if (auto SplatValAndReg =
1468 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1469 if (std::optional<ValueAndVReg> ValAndVReg =
1470 getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1471 return ValAndVReg->Value;
1472 }
1473
1474 return std::nullopt;
1475}
1476
1477std::optional<APInt>
1479 const MachineRegisterInfo &MRI) {
1480 return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1481}
1482
1483std::optional<int64_t>
1485 const MachineRegisterInfo &MRI) {
1486 if (auto SplatValAndReg =
1487 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1488 return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1489 return std::nullopt;
1490}
1491
1492std::optional<int64_t>
1494 const MachineRegisterInfo &MRI) {
1495 return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1496}
1497
1498std::optional<FPValueAndVReg>
1500 bool AllowUndef) {
1501 if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1502 return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1503 return std::nullopt;
1504}
1505
1507 const MachineRegisterInfo &MRI,
1508 bool AllowUndef) {
1509 return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1510}
1511
1513 const MachineRegisterInfo &MRI,
1514 bool AllowUndef) {
1515 return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1516}
1517
1518std::optional<RegOrConstant>
1520 unsigned Opc = MI.getOpcode();
1521 if (!isBuildVectorOp(Opc))
1522 return std::nullopt;
1523 if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1524 return RegOrConstant(*Splat);
1525 auto Reg = MI.getOperand(1).getReg();
1526 if (any_of(drop_begin(MI.operands(), 2),
1527 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1528 return std::nullopt;
1529 return RegOrConstant(Reg);
1530}
1531
1533 const MachineRegisterInfo &MRI,
1534 bool AllowFP = true,
1535 bool AllowOpaqueConstants = true) {
1536 switch (MI.getOpcode()) {
1537 case TargetOpcode::G_CONSTANT:
1538 case TargetOpcode::G_IMPLICIT_DEF:
1539 return true;
1540 case TargetOpcode::G_FCONSTANT:
1541 return AllowFP;
1542 case TargetOpcode::G_GLOBAL_VALUE:
1543 case TargetOpcode::G_FRAME_INDEX:
1544 case TargetOpcode::G_BLOCK_ADDR:
1545 case TargetOpcode::G_JUMP_TABLE:
1546 return AllowOpaqueConstants;
1547 default:
1548 return false;
1549 }
1550}
1551
1553 const MachineRegisterInfo &MRI) {
1554 Register Def = MI.getOperand(0).getReg();
1555 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1556 return true;
1558 if (!BV)
1559 return false;
1560 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1561 if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
1562 getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1563 continue;
1564 return false;
1565 }
1566 return true;
1567}
1568
1570 const MachineRegisterInfo &MRI,
1571 bool AllowFP, bool AllowOpaqueConstants) {
1572 if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1573 return true;
1574
1575 if (!isBuildVectorOp(MI.getOpcode()))
1576 return false;
1577
1578 const unsigned NumOps = MI.getNumOperands();
1579 for (unsigned I = 1; I != NumOps; ++I) {
1580 const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1581 if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1582 return false;
1583 }
1584
1585 return true;
1586}
1587
1588std::optional<APInt>
1590 const MachineRegisterInfo &MRI) {
1591 Register Def = MI.getOperand(0).getReg();
1592 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1593 return C->Value;
1594 auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1595 if (!MaybeCst)
1596 return std::nullopt;
1597 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1598 return APInt(ScalarSize, *MaybeCst, true);
1599}
1600
1601std::optional<APFloat>
1603 const MachineRegisterInfo &MRI) {
1604 Register Def = MI.getOperand(0).getReg();
1605 if (auto FpConst = getFConstantVRegValWithLookThrough(Def, MRI))
1606 return FpConst->Value;
1607 auto MaybeCstFP = getFConstantSplat(Def, MRI, /*allowUndef=*/false);
1608 if (!MaybeCstFP)
1609 return std::nullopt;
1610 return MaybeCstFP->Value;
1611}
1612
1614 const MachineRegisterInfo &MRI, bool AllowUndefs) {
1615 switch (MI.getOpcode()) {
1616 case TargetOpcode::G_IMPLICIT_DEF:
1617 return AllowUndefs;
1618 case TargetOpcode::G_CONSTANT:
1619 return MI.getOperand(1).getCImm()->isNullValue();
1620 case TargetOpcode::G_FCONSTANT: {
1621 const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1622 return FPImm->isZero() && !FPImm->isNegative();
1623 }
1624 default:
1625 if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1626 return false;
1627 return isBuildVectorAllZeros(MI, MRI);
1628 }
1629}
1630
1632 const MachineRegisterInfo &MRI,
1633 bool AllowUndefs) {
1634 switch (MI.getOpcode()) {
1635 case TargetOpcode::G_IMPLICIT_DEF:
1636 return AllowUndefs;
1637 case TargetOpcode::G_CONSTANT:
1638 return MI.getOperand(1).getCImm()->isAllOnesValue();
1639 default:
1640 if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1641 return false;
1642 return isBuildVectorAllOnes(MI, MRI);
1643 }
1644}
1645
1647 const MachineRegisterInfo &MRI, Register Reg,
1648 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1649
1650 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1651 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1652 return Match(nullptr);
1653
1654 // TODO: Also handle fconstant
1655 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1656 return Match(Def->getOperand(1).getCImm());
1657
1658 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1659 return false;
1660
1661 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1662 Register SrcElt = Def->getOperand(I).getReg();
1663 const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1664 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1665 if (!Match(nullptr))
1666 return false;
1667 continue;
1668 }
1669
1670 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1671 !Match(SrcDef->getOperand(1).getCImm()))
1672 return false;
1673 }
1674
1675 return true;
1676}
1677
1678bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1679 bool IsFP) {
1680 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1682 return Val & 0x1;
1684 return Val == 1;
1686 return Val == -1;
1687 }
1688 llvm_unreachable("Invalid boolean contents");
1689}
1690
1691bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1692 bool IsVector, bool IsFP) {
1693 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1695 return ~Val & 0x1;
1698 return Val == 0;
1699 }
1700 llvm_unreachable("Invalid boolean contents");
1701}
1702
1703int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1704 bool IsFP) {
1705 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1708 return 1;
1710 return -1;
1711 }
1712 llvm_unreachable("Invalid boolean contents");
1713}
1714
1716 LostDebugLocObserver *LocObserver,
1717 SmallInstListTy &DeadInstChain) {
1718 for (MachineOperand &Op : MI.uses()) {
1719 if (Op.isReg() && Op.getReg().isVirtual())
1720 DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1721 }
1722 LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1723 DeadInstChain.remove(&MI);
1724 MI.eraseFromParent();
1725 if (LocObserver)
1726 LocObserver->checkpoint(false);
1727}
1728
1731 LostDebugLocObserver *LocObserver) {
1732 SmallInstListTy DeadInstChain;
1733 for (MachineInstr *MI : DeadInstrs)
1734 saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1735
1736 while (!DeadInstChain.empty()) {
1737 MachineInstr *Inst = DeadInstChain.pop_back_val();
1738 if (!isTriviallyDead(*Inst, MRI))
1739 continue;
1740 saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1741 }
1742}
1743
1745 LostDebugLocObserver *LocObserver) {
1746 return eraseInstrs({&MI}, MRI, LocObserver);
1747}
1748
1750 for (auto &Def : MI.defs()) {
1751 assert(Def.isReg() && "Must be a reg");
1752
1754 for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1755 MachineInstr *DbgValue = MOUse.getParent();
1756 // Ignore partially formed DBG_VALUEs.
1757 if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1758 DbgUsers.push_back(&MOUse);
1759 }
1760 }
1761
1762 if (!DbgUsers.empty()) {
1763 salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
1764 }
1765 }
1766}
1767
1769 switch (Opc) {
1770 case TargetOpcode::G_FABS:
1771 case TargetOpcode::G_FADD:
1772 case TargetOpcode::G_FCANONICALIZE:
1773 case TargetOpcode::G_FCEIL:
1774 case TargetOpcode::G_FCONSTANT:
1775 case TargetOpcode::G_FCOPYSIGN:
1776 case TargetOpcode::G_FCOS:
1777 case TargetOpcode::G_FDIV:
1778 case TargetOpcode::G_FEXP2:
1779 case TargetOpcode::G_FEXP:
1780 case TargetOpcode::G_FFLOOR:
1781 case TargetOpcode::G_FLOG10:
1782 case TargetOpcode::G_FLOG2:
1783 case TargetOpcode::G_FLOG:
1784 case TargetOpcode::G_FMA:
1785 case TargetOpcode::G_FMAD:
1786 case TargetOpcode::G_FMAXIMUM:
1787 case TargetOpcode::G_FMAXIMUMNUM:
1788 case TargetOpcode::G_FMAXNUM:
1789 case TargetOpcode::G_FMAXNUM_IEEE:
1790 case TargetOpcode::G_FMINIMUM:
1791 case TargetOpcode::G_FMINIMUMNUM:
1792 case TargetOpcode::G_FMINNUM:
1793 case TargetOpcode::G_FMINNUM_IEEE:
1794 case TargetOpcode::G_FMUL:
1795 case TargetOpcode::G_FNEARBYINT:
1796 case TargetOpcode::G_FNEG:
1797 case TargetOpcode::G_FPEXT:
1798 case TargetOpcode::G_FPOW:
1799 case TargetOpcode::G_FPTRUNC:
1800 case TargetOpcode::G_FREM:
1801 case TargetOpcode::G_FRINT:
1802 case TargetOpcode::G_FSIN:
1803 case TargetOpcode::G_FTAN:
1804 case TargetOpcode::G_FACOS:
1805 case TargetOpcode::G_FASIN:
1806 case TargetOpcode::G_FATAN:
1807 case TargetOpcode::G_FATAN2:
1808 case TargetOpcode::G_FCOSH:
1809 case TargetOpcode::G_FSINH:
1810 case TargetOpcode::G_FTANH:
1811 case TargetOpcode::G_FSQRT:
1812 case TargetOpcode::G_FSUB:
1813 case TargetOpcode::G_INTRINSIC_ROUND:
1814 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1815 case TargetOpcode::G_INTRINSIC_TRUNC:
1816 return true;
1817 default:
1818 return false;
1819 }
1820}
1821
1822/// Shifts return poison if shiftwidth is larger than the bitwidth.
1823static bool shiftAmountKnownInRange(Register ShiftAmount,
1824 const MachineRegisterInfo &MRI) {
1825 LLT Ty = MRI.getType(ShiftAmount);
1826
1827 if (Ty.isScalableVector())
1828 return false; // Can't tell, just return false to be safe
1829
1830 if (Ty.isScalar()) {
1831 std::optional<ValueAndVReg> Val =
1832 getIConstantVRegValWithLookThrough(ShiftAmount, MRI);
1833 if (!Val)
1834 return false;
1835 return Val->Value.ult(Ty.getScalarSizeInBits());
1836 }
1837
1838 GBuildVector *BV = getOpcodeDef<GBuildVector>(ShiftAmount, MRI);
1839 if (!BV)
1840 return false;
1841
1842 unsigned Sources = BV->getNumSources();
1843 for (unsigned I = 0; I < Sources; ++I) {
1844 std::optional<ValueAndVReg> Val =
1846 if (!Val)
1847 return false;
1848 if (!Val->Value.ult(Ty.getScalarSizeInBits()))
1849 return false;
1850 }
1851
1852 return true;
1853}
1854
namespace {
/// Bitmask selecting whether an analysis query is about undef, poison, or
/// both. (Dropped combined enumerator restored.)
enum class UndefPoisonKind {
  PoisonOnly = (1 << 0),
  UndefOnly = (1 << 1),
  PoisonAndUndef = PoisonOnly | UndefOnly,
};
}
1862
1864 return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
1865}
1866
1868 return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
1869}
1870
1872 bool ConsiderFlagsAndMetadata,
1873 UndefPoisonKind Kind) {
1874 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1875
1876 if (ConsiderFlagsAndMetadata && includesPoison(Kind))
1877 if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
1878 if (GMI->hasPoisonGeneratingFlags())
1879 return true;
1880
1881 // Check whether opcode is a poison/undef-generating operation.
1882 switch (RegDef->getOpcode()) {
1883 case TargetOpcode::G_BUILD_VECTOR:
1884 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
1885 return false;
1886 case TargetOpcode::G_SHL:
1887 case TargetOpcode::G_ASHR:
1888 case TargetOpcode::G_LSHR:
1889 return includesPoison(Kind) &&
1890 !shiftAmountKnownInRange(RegDef->getOperand(2).getReg(), MRI);
1891 case TargetOpcode::G_FPTOSI:
1892 case TargetOpcode::G_FPTOUI:
1893 // fptosi/ui yields poison if the resulting value does not fit in the
1894 // destination type.
1895 return true;
1896 case TargetOpcode::G_CTLZ:
1897 case TargetOpcode::G_CTTZ:
1898 case TargetOpcode::G_CTLS:
1899 case TargetOpcode::G_ABS:
1900 case TargetOpcode::G_CTPOP:
1901 case TargetOpcode::G_BSWAP:
1902 case TargetOpcode::G_BITREVERSE:
1903 case TargetOpcode::G_FSHL:
1904 case TargetOpcode::G_FSHR:
1905 case TargetOpcode::G_SMAX:
1906 case TargetOpcode::G_SMIN:
1907 case TargetOpcode::G_SCMP:
1908 case TargetOpcode::G_UMAX:
1909 case TargetOpcode::G_UMIN:
1910 case TargetOpcode::G_UCMP:
1911 case TargetOpcode::G_PTRMASK:
1912 case TargetOpcode::G_SADDO:
1913 case TargetOpcode::G_SSUBO:
1914 case TargetOpcode::G_UADDO:
1915 case TargetOpcode::G_USUBO:
1916 case TargetOpcode::G_SMULO:
1917 case TargetOpcode::G_UMULO:
1918 case TargetOpcode::G_SADDSAT:
1919 case TargetOpcode::G_UADDSAT:
1920 case TargetOpcode::G_SSUBSAT:
1921 case TargetOpcode::G_USUBSAT:
1922 case TargetOpcode::G_SBFX:
1923 case TargetOpcode::G_UBFX:
1924 return false;
1925 case TargetOpcode::G_SSHLSAT:
1926 case TargetOpcode::G_USHLSAT:
1927 return includesPoison(Kind) &&
1928 !shiftAmountKnownInRange(RegDef->getOperand(2).getReg(), MRI);
1929 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1931 if (includesPoison(Kind)) {
1932 std::optional<ValueAndVReg> Index =
1933 getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
1934 if (!Index)
1935 return true;
1936 LLT VecTy = MRI.getType(Insert->getVectorReg());
1937 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1938 }
1939 return false;
1940 }
1941 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1943 if (includesPoison(Kind)) {
1944 std::optional<ValueAndVReg> Index =
1946 if (!Index)
1947 return true;
1948 LLT VecTy = MRI.getType(Extract->getVectorReg());
1949 return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1950 }
1951 return false;
1952 }
1953 case TargetOpcode::G_SHUFFLE_VECTOR: {
1954 GShuffleVector *Shuffle = cast<GShuffleVector>(RegDef);
1955 ArrayRef<int> Mask = Shuffle->getMask();
1956 return includesPoison(Kind) && is_contained(Mask, -1);
1957 }
1958 case TargetOpcode::G_FNEG:
1959 case TargetOpcode::G_PHI:
1960 case TargetOpcode::G_SELECT:
1961 case TargetOpcode::G_UREM:
1962 case TargetOpcode::G_SREM:
1963 case TargetOpcode::G_FREEZE:
1964 case TargetOpcode::G_ICMP:
1965 case TargetOpcode::G_FCMP:
1966 case TargetOpcode::G_FADD:
1967 case TargetOpcode::G_FSUB:
1968 case TargetOpcode::G_FMUL:
1969 case TargetOpcode::G_FDIV:
1970 case TargetOpcode::G_FREM:
1971 case TargetOpcode::G_PTR_ADD:
1972 return false;
1973 default:
1974 return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
1975 }
1976}
1977
1979 const MachineRegisterInfo &MRI,
1980 unsigned Depth,
1981 UndefPoisonKind Kind) {
1983 return false;
1984
1985 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1986
1987 switch (RegDef->getOpcode()) {
1988 case TargetOpcode::G_FREEZE:
1989 return true;
1990 case TargetOpcode::G_IMPLICIT_DEF:
1991 return !includesUndef(Kind);
1992 case TargetOpcode::G_CONSTANT:
1993 case TargetOpcode::G_FCONSTANT:
1994 return true;
1995 case TargetOpcode::G_BUILD_VECTOR: {
1996 GBuildVector *BV = cast<GBuildVector>(RegDef);
1997 unsigned NumSources = BV->getNumSources();
1998 for (unsigned I = 0; I < NumSources; ++I)
2000 Depth + 1, Kind))
2001 return false;
2002 return true;
2003 }
2004 case TargetOpcode::G_PHI: {
2005 GPhi *Phi = cast<GPhi>(RegDef);
2006 unsigned NumIncoming = Phi->getNumIncomingValues();
2007 for (unsigned I = 0; I < NumIncoming; ++I)
2008 if (!::isGuaranteedNotToBeUndefOrPoison(Phi->getIncomingValue(I), MRI,
2009 Depth + 1, Kind))
2010 return false;
2011 return true;
2012 }
2013 default: {
2014 auto MOCheck = [&](const MachineOperand &MO) {
2015 if (!MO.isReg())
2016 return true;
2017 return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,
2018 Kind);
2019 };
2020 return !::canCreateUndefOrPoison(Reg, MRI,
2021 /*ConsiderFlagsAndMetadata=*/true, Kind) &&
2022 all_of(RegDef->uses(), MOCheck);
2023 }
2024 }
2025}
2026
2028 bool ConsiderFlagsAndMetadata) {
2029 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
2031}
2032
2034 bool ConsiderFlagsAndMetadata = true) {
2035 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
2037}
2038
2040 const MachineRegisterInfo &MRI,
2041 unsigned Depth) {
2042 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2044}
2045
2047 const MachineRegisterInfo &MRI,
2048 unsigned Depth) {
2049 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2051}
2052
2054 const MachineRegisterInfo &MRI,
2055 unsigned Depth) {
2056 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
2058}
2059
2061 if (Ty.isVector())
2062 return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
2063 Ty.getElementCount());
2064 return IntegerType::get(C, Ty.getSizeInBits());
2065}
2066
2068 switch (MI.getOpcode()) {
2069 default:
2070 return false;
2071 case TargetOpcode::G_ASSERT_ALIGN:
2072 case TargetOpcode::G_ASSERT_SEXT:
2073 case TargetOpcode::G_ASSERT_ZEXT:
2074 return true;
2075 }
2076}
2077
2079 assert(Kind == GIConstantKind::Scalar && "Expected scalar constant");
2080
2081 return Value;
2082}
2083
2084std::optional<GIConstant>
2087
2089 std::optional<ValueAndVReg> MayBeConstant =
2090 getIConstantVRegValWithLookThrough(Splat->getScalarReg(), MRI);
2091 if (!MayBeConstant)
2092 return std::nullopt;
2093 return GIConstant(MayBeConstant->Value, GIConstantKind::ScalableVector);
2094 }
2095
2097 SmallVector<APInt> Values;
2098 unsigned NumSources = Build->getNumSources();
2099 for (unsigned I = 0; I < NumSources; ++I) {
2100 Register SrcReg = Build->getSourceReg(I);
2101 std::optional<ValueAndVReg> MayBeConstant =
2103 if (!MayBeConstant)
2104 return std::nullopt;
2105 Values.push_back(MayBeConstant->Value);
2106 }
2107 return GIConstant(Values);
2108 }
2109
2110 std::optional<ValueAndVReg> MayBeConstant =
2112 if (!MayBeConstant)
2113 return std::nullopt;
2114
2115 return GIConstant(MayBeConstant->Value, GIConstantKind::Scalar);
2116}
2117
2119 assert(Kind == GFConstantKind::Scalar && "Expected scalar constant");
2120
2121 return Values[0];
2122}
2123
2124std::optional<GFConstant>
2127
2129 std::optional<FPValueAndVReg> MayBeConstant =
2130 getFConstantVRegValWithLookThrough(Splat->getScalarReg(), MRI);
2131 if (!MayBeConstant)
2132 return std::nullopt;
2133 return GFConstant(MayBeConstant->Value, GFConstantKind::ScalableVector);
2134 }
2135
2137 SmallVector<APFloat> Values;
2138 unsigned NumSources = Build->getNumSources();
2139 for (unsigned I = 0; I < NumSources; ++I) {
2140 Register SrcReg = Build->getSourceReg(I);
2141 std::optional<FPValueAndVReg> MayBeConstant =
2143 if (!MayBeConstant)
2144 return std::nullopt;
2145 Values.push_back(MayBeConstant->Value);
2146 }
2147 return GFConstant(Values);
2148 }
2149
2150 std::optional<FPValueAndVReg> MayBeConstant =
2152 if (!MayBeConstant)
2153 return std::nullopt;
2154
2155 return GFConstant(MayBeConstant->Value, GFConstantKind::Scalar);
2156}
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void reportGISelDiagnostic(DiagnosticSeverity Severity, MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Definition Utils.cpp:233
static bool includesPoison(UndefPoisonKind Kind)
Definition Utils.cpp:1863
static bool includesUndef(UndefPoisonKind Kind)
Definition Utils.cpp:1867
static bool shiftAmountKnownInRange(Register ShiftAmount, const MachineRegisterInfo &MRI)
Shifts return poison if shiftwidth is larger than the bitwidth.
Definition Utils.cpp:1823
static bool isBuildVectorOp(unsigned Opcode)
Definition Utils.cpp:1376
static bool isConstantScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Definition Utils.cpp:1532
static GBuildVector * getBuildVectorLikeDef(Register Reg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:797
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Tracks DebugLocs between checkpoints and verifies that they are transferred.
#define I(x, y, z)
Definition MD5.cpp:57
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
//===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*-===//
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define LLVM_DEBUG(...)
Definition Debug.h:114
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
UndefPoisonKind
static const char PassName[]
Class recording the (high level) value of a variable.
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static const fltSemantics & IEEEhalf()
Definition APFloat.h:294
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1263
void copySign(const APFloat &RHS)
Definition APFloat.h:1357
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition APFloat.cpp:5890
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1245
opStatus add(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1236
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition APFloat.h:1402
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1254
APInt bitcastToAPInt() const
Definition APFloat.h:1408
opStatus mod(const APFloat &RHS)
Definition APFloat.h:1281
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition APInt.cpp:1604
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1043
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1064
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:956
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1697
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1503
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition APInt.cpp:1675
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1072
static bool isSameValue(const APInt &I1, const APInt &I2, bool SignedCompare=false)
Determine if two APInts have the same value, after zero-extending or sign-extending (if SignedCompare...
Definition APInt.h:555
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition APInt.cpp:1776
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:1016
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:858
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValueAPF() const
Definition Constants.h:463
bool isNegative() const
Return true if the sign bit is set.
Definition Constants.h:470
bool isZero() const
Return true if the value is positive or negative zero.
Definition Constants.h:467
This is the shared class of boolean and integer constants.
Definition Constants.h:87
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
This is an important base class in LLVM.
Definition Constant.h:43
A debug info location.
Definition DebugLoc.h:123
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
Represents a G_BUILD_VECTOR.
Represents an extract vector element.
static LLVM_ABI std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
Definition Utils.cpp:2125
GFConstant(ArrayRef< APFloat > Values)
Definition Utils.h:698
LLVM_ABI APFloat getScalarValue() const
Returns the value, if this constant is a scalar.
Definition Utils.cpp:2118
LLVM_ABI APInt getScalarValue() const
Returns the value, if this constant is a scalar.
Definition Utils.cpp:2078
static LLVM_ABI std::optional< GIConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
Definition Utils.cpp:2085
GIConstant(ArrayRef< APInt > Values)
Definition Utils.h:657
Abstract class that contains various methods for clients to notify about changes.
KnownBits getKnownBits(Register R)
void insert(MachineInstr *I)
Add the specified instruction to the worklist if it isn't already in it.
MachineInstr * pop_back_val()
void remove(const MachineInstr *I)
Remove I from the worklist if it exists.
Represents an insert vector element.
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Represents a G_PHI.
Represents a G_SHUFFLE_VECTOR.
ArrayRef< int > getMask() const
Represents a splat vector.
Module * getParent()
Get the module that this global value is contained inside of...
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
LLT getScalarType() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr ElementCount getElementCount() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
Describe properties that are true of each instruction in the target description file.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
GISelChangeObserver * getObserver() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Helper class to build MachineInstr.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
mop_range uses()
Returns all operands which may be register uses.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Diagnostic information for missed-optimization remarks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
const RegClassOrRegBank & getRegClassOrRegBank(Register Reg) const
Return the register bank or register class of Reg.
def_iterator def_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
LLVM_ABI Register getLiveInVirtReg(MCRegister PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in virtual r...
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
static def_iterator def_end()
iterator_range< use_iterator > use_operands(Register Reg) const
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Represents a value which can be a Register or a constant.
Definition Utils.h:402
Holds all the information related to register banks.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
TargetInstrInfo - Interface to description of machine instruction set.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
TargetOptions Options
GlobalISelAbortMode GlobalISelAbort
EnableGlobalISelAbort - Control abort behaviour when global instruction selection fails to lower/sele...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM Value Representation.
Definition Value.h:75
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition APInt.h:2266
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition APInt.h:2271
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition APInt.h:2276
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition APInt.h:2281
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
DiagnosticInfoMIROptimization::MachineArgument MNV
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII, MCRegister PhysReg, const TargetRegisterClass &RC, const DebugLoc &DL, LLT RegTy=LLT())
Return a virtual register corresponding to the incoming argument register PhysReg.
Definition Utils.cpp:940
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
LLVM_ABI std::optional< SmallVector< APInt > > ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2, unsigned DstScalarSizeInBits, unsigned ExtOp, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1059
@ Offset
Definition DWP.cpp:532
LLVM_ABI bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition Utils.cpp:1506
LLVM_ABI Type * getTypeForLLT(LLT Ty, LLVMContext &C)
Get the type back from LLT.
Definition Utils.cpp:2060
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition Utils.cpp:56
LLVM_ABI MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
Definition Utils.cpp:652
LLVM_ABI const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:460
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:293
LLVM_ABI std::optional< APFloat > ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1014
LLVM_ABI std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1466
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition Utils.cpp:1631
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
LLVM_ABI std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition Utils.cpp:740
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1749
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:155
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI std::optional< SmallVector< unsigned > > ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI, std::function< unsigned(APInt)> CB)
Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on Src.
Definition Utils.cpp:1027
LLVM_ABI std::optional< APInt > ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI)
Definition Utils.cpp:973
LLVM_ABI std::optional< RegOrConstant > getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1519
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition APFloat.h:1710
GISelWorkList< 4 > SmallInstListTy
Definition Utils.h:577
LLVM_ABI std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
Definition Utils.cpp:1589
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition Utils.cpp:1613
LLVM_ABI MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition Utils.cpp:493
LLVM_ABI bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
Definition Utils.cpp:1646
bool isPreISelGenericOptimizationHint(unsigned Opcode)
LLVM_ABI void reportGISelWarning(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel warning as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:251
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
Definition Utils.cpp:1678
LLVM_ABI LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition Utils.cpp:1213
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:313
LLVM_ABI std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition Utils.cpp:671
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI const APInt & getIConstantFromReg(Register VReg, const MachineRegisterInfo &MRI)
VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:304
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
Definition APFloat.h:1665
LLVM_ABI bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
Definition Utils.cpp:1569
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
Definition Utils.cpp:199
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
Definition APFloat.h:1696
LLVM_ABI void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver, SmallInstListTy &DeadInstChain)
Definition Utils.cpp:1715
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:257
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
LLVM_ABI std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
Definition Utils.cpp:438
LLVM_ABI bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition Utils.cpp:1512
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
LLVM_ABI SmallVector< APInt > ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Tries to constant fold a vector binop with sources Op1 and Op2.
Definition Utils.cpp:818
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
Definition Utils.cpp:1499
LLVM_ABI std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
Definition Utils.cpp:990
LLVM_ABI void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
Definition Utils.cpp:507
LLVM_ABI void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition Utils.cpp:1209
LLVM_ABI LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
Definition Utils.cpp:1280
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
Definition APFloat.h:1646
LLVM_ABI unsigned getInverseGMinMaxOpcode(unsigned MinMaxOpc)
Returns the inverse opcode of MinMaxOpc, which is a generic min/max opcode like G_SMIN.
Definition Utils.cpp:278
@ Mul
Product of integers.
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
LLVM_ABI std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
Definition Utils.cpp:446
LLVM_ABI bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Definition Utils.cpp:1691
LLVM_ABI std::optional< APFloat > isConstantOrConstantSplatVectorFP(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a float constant integer or a splat vector of float constant integers.
Definition Utils.cpp:1602
constexpr unsigned BitWidth
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition Utils.cpp:658
LLVM_ABI bool isBuildVectorConstantSplat(const Register Reg, const MachineRegisterInfo &MRI, int64_t SplatValue, bool AllowUndef)
Return true if the specified register is defined by G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all ...
Definition Utils.cpp:1425
LLVM_ABI void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition Utils.cpp:1744
DiagnosticSeverity
Defines the different supported severity of a diagnostic.
LLVM_ABI Register constrainRegToClass(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, Register Reg, const TargetRegisterClass &RegClass)
Try to constrain Reg to the specified register class.
Definition Utils.cpp:46
LLVM_ABI int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
Definition Utils.cpp:1703
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition Utils.cpp:432
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
LLVM_ABI bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition Utils.cpp:1768
bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI)
Returns true if Val can be assumed to never be a signaling NaN.
Definition Utils.h:347
LLVM_ABI std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
Definition Utils.cpp:468
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition Utils.cpp:1729
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI, MachineInstr &MI, ArrayRef< MachineOperand * > DbgUsers)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
Definition Utils.cpp:500
LLVM_ABI LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition Utils.cpp:1301
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition APFloat.h:1683
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
Definition APFloat.h:1723
LLVM_ABI std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition Utils.cpp:1484
LLVM_ABI bool isAssertMI(const MachineInstr &MI)
Returns true if the instruction MI is one of the assert instructions.
Definition Utils.cpp:2067
LLVM_ABI void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
Definition Utils.cpp:610
LLVM_ABI int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
Definition Utils.cpp:220
LLVM_ABI Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition Utils.cpp:923
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
#define MORE()
Definition regcomp.c:246
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Simple struct used to hold a Register value and the instruction which defines it.
Definition Utils.h:229
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:305
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition KnownBits.h:302
This class contains a discriminated union of information about pointers in memory operands,...
int64_t Offset
Offset - This is an offset from the base Value*.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
Simple struct used to hold a constant integer value and a virtual register.
Definition Utils.h:188