Bug Summary

File: llvm/lib/CodeGen/GlobalISel/Utils.cpp
Warning: line 785, column 32
Division by zero
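
The flagged expression is the element-count computation at line 785, LCMSize / OrigElt.getSizeInBits(), which the analyzer's path (steps 1-17 in the annotated source below) reaches with an element size of zero. The stand-alone sketch below only mirrors the shape of that arithmetic; the names OrigSize, TargetSize, and OrigEltSize are stand-ins, and the zero element size is the analyzer's assumption from path step 15, not a value demonstrated at runtime.

// Hypothetical, self-contained sketch of the arithmetic flagged at line 785.
// OrigEltSize stands in for OrigElt.getSizeInBits(); the zero value is the
// analyzer's assumption (path step 15), not a demonstrated runtime fact.
#include <cstdio>

static unsigned gcdSize(unsigned A, unsigned B) {
  while (B != 0) { unsigned T = A % B; A = B; B = T; } // Euclid's algorithm
  return A;
}

static unsigned lcmSize(unsigned OrigSize, unsigned TargetSize) {
  // Same shape as getLCMSize() in Utils.cpp: (a * b) / gcd(a, b).
  return (OrigSize * TargetSize) / gcdSize(OrigSize, TargetSize);
}

int main() {
  unsigned OrigSize = 64, TargetSize = 32; // step 1: the two sizes differ
  unsigned OrigEltSize = 0;                // step 15: element size returns zero
  unsigned LCMSize = lcmSize(OrigSize, TargetSize);
  if (OrigEltSize == 0) {                  // without this check, the division below is UB
    std::printf("LCMSize / OrigEltSize would divide by zero\n");
    return 1;
  }
  std::printf("number of elements: %u\n", LCMSize / OrigEltSize); // line-785 analogue
  return 0;
}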

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name Utils.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/CodeGen/GlobalISel -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/CodeGen/GlobalISel -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/CodeGen/GlobalISel -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/CodeGen/GlobalISel -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-21-164211-33944-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/CodeGen/GlobalISel/Utils.cpp

/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/CodeGen/GlobalISel/Utils.cpp

1//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file This file implements the utility functions used by the GlobalISel
9/// pipeline.
10//===----------------------------------------------------------------------===//
11
12#include "llvm/CodeGen/GlobalISel/Utils.h"
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
15#include "llvm/ADT/Optional.h"
16#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
17#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
18#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
19#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
20#include "llvm/CodeGen/MachineInstr.h"
21#include "llvm/CodeGen/MachineInstrBuilder.h"
22#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
23#include "llvm/CodeGen/MachineSizeOpts.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/StackProtector.h"
26#include "llvm/CodeGen/TargetInstrInfo.h"
27#include "llvm/CodeGen/TargetLowering.h"
28#include "llvm/CodeGen/TargetPassConfig.h"
29#include "llvm/CodeGen/TargetRegisterInfo.h"
30#include "llvm/IR/Constants.h"
31#include "llvm/Target/TargetMachine.h"
32
33#define DEBUG_TYPE "globalisel-utils"
34
35using namespace llvm;
36using namespace MIPatternMatch;
37
38Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
39 const TargetInstrInfo &TII,
40 const RegisterBankInfo &RBI, Register Reg,
41 const TargetRegisterClass &RegClass) {
42 if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
43 return MRI.createVirtualRegister(&RegClass);
44
45 return Reg;
46}
47
48Register llvm::constrainOperandRegClass(
49 const MachineFunction &MF, const TargetRegisterInfo &TRI,
50 MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
51 const RegisterBankInfo &RBI, MachineInstr &InsertPt,
52 const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
53 Register Reg = RegMO.getReg();
54 // Assume physical registers are properly constrained.
55 assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
56
57 Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
58 // If we created a new virtual register because the class is not compatible
59 // then create a copy between the new and the old register.
60 if (ConstrainedReg != Reg) {
61 MachineBasicBlock::iterator InsertIt(&InsertPt);
62 MachineBasicBlock &MBB = *InsertPt.getParent();
63 if (RegMO.isUse()) {
64 BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
65 TII.get(TargetOpcode::COPY), ConstrainedReg)
66 .addReg(Reg);
67 } else {
68 assert(RegMO.isDef() && "Must be a definition");
69 BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
70 TII.get(TargetOpcode::COPY), Reg)
71 .addReg(ConstrainedReg);
72 }
73 if (GISelChangeObserver *Observer = MF.getObserver()) {
74 Observer->changingInstr(*RegMO.getParent());
75 }
76 RegMO.setReg(ConstrainedReg);
77 if (GISelChangeObserver *Observer = MF.getObserver()) {
78 Observer->changedInstr(*RegMO.getParent());
79 }
80 } else {
81 if (GISelChangeObserver *Observer = MF.getObserver()) {
82 if (!RegMO.isDef()) {
83 MachineInstr *RegDef = MRI.getVRegDef(Reg);
84 Observer->changedInstr(*RegDef);
85 }
86 Observer->changingAllUsesOfReg(MRI, Reg);
87 Observer->finishedChangingAllUsesOfReg();
88 }
89 }
90 return ConstrainedReg;
91}
92
93Register llvm::constrainOperandRegClass(
94 const MachineFunction &MF, const TargetRegisterInfo &TRI,
95 MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
96 const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
97 MachineOperand &RegMO, unsigned OpIdx) {
98 Register Reg = RegMO.getReg();
99 // Assume physical registers are properly constrained.
100 assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
101
102 const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
103 // Some of the target independent instructions, like COPY, may not impose any
104 // register class constraints on some of their operands: If it's a use, we can
105 // skip constraining as the instruction defining the register would constrain
106 // it.
107
108 // We can't constrain unallocatable register classes, because we can't create
109 // virtual registers for these classes, so we need to let targets handle this
110 // case.
111 if (RegClass && !RegClass->isAllocatable())
112 RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);
113
114 if (!RegClass) {
115 assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
116 "Register class constraint is required unless either the "
117 "instruction is target independent or the operand is a use");
118 // FIXME: Just bailing out like this here could be not enough, unless we
119 // expect the users of this function to do the right thing for PHIs and
120 // COPY:
121 // v1 = COPY v0
122 // v2 = COPY v1
123 // v1 here may end up not being constrained at all. Please notice that to
124 // reproduce the issue we likely need a destination pattern of a selection
125 // rule producing such extra copies, not just an input GMIR with them as
126 // every existing target using selectImpl handles copies before calling it
127 // and they never reach this function.
128 return Reg;
129 }
130 return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
131 RegMO);
132}
133
134bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
135 const TargetInstrInfo &TII,
136 const TargetRegisterInfo &TRI,
137 const RegisterBankInfo &RBI) {
138 assert(!isPreISelGenericOpcode(I.getOpcode()) &&
139 "A selected instruction is expected");
140 MachineBasicBlock &MBB = *I.getParent();
141 MachineFunction &MF = *MBB.getParent();
142 MachineRegisterInfo &MRI = MF.getRegInfo();
143
144 for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
145 MachineOperand &MO = I.getOperand(OpI);
146
147 // There's nothing to be done on non-register operands.
148 if (!MO.isReg())
149 continue;
150
151 LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
152 assert(MO.isReg() && "Unsupported non-reg operand");
153
154 Register Reg = MO.getReg();
155 // Physical registers don't need to be constrained.
156 if (Register::isPhysicalRegister(Reg))
157 continue;
158
159 // Register operands with a value of 0 (e.g. predicate operands) don't need
160 // to be constrained.
161 if (Reg == 0)
162 continue;
163
164 // If the operand is a vreg, we should constrain its regclass, and only
165 // insert COPYs if that's impossible.
166 // constrainOperandRegClass does that for us.
167 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
168
169 // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
170 // done.
171 if (MO.isUse()) {
172 int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
173 if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
174 I.tieOperands(DefIdx, OpI);
175 }
176 }
177 return true;
178}
179
180bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
181 MachineRegisterInfo &MRI) {
182 // Give up if either DstReg or SrcReg is a physical register.
183 if (DstReg.isPhysical() || SrcReg.isPhysical())
184 return false;
185 // Give up if the types don't match.
186 if (MRI.getType(DstReg) != MRI.getType(SrcReg))
187 return false;
188 // Replace if either DstReg has no constraints or the register
189 // constraints match.
190 return !MRI.getRegClassOrRegBank(DstReg) ||
191 MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
192}
193
194bool llvm::isTriviallyDead(const MachineInstr &MI,
195 const MachineRegisterInfo &MRI) {
196 // FIXME: This logic is mostly duplicated with
197 // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
198 // MachineInstr::isLabel?
199
200 // Don't delete frame allocation labels.
201 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
202 return false;
203 // LIFETIME markers should be preserved even if they seem dead.
204 if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
205 MI.getOpcode() == TargetOpcode::LIFETIME_END)
206 return false;
207
208 // If we can move an instruction, we can remove it. Otherwise, it has
209 // a side-effect of some sort.
210 bool SawStore = false;
211 if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
212 return false;
213
214 // Instructions without side-effects are dead iff they only define dead vregs.
215 for (auto &MO : MI.operands()) {
216 if (!MO.isReg() || !MO.isDef())
217 continue;
218
219 Register Reg = MO.getReg();
220 if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
221 return false;
222 }
223 return true;
224}
225
226static void reportGISelDiagnostic(DiagnosticSeverity Severity,
227 MachineFunction &MF,
228 const TargetPassConfig &TPC,
229 MachineOptimizationRemarkEmitter &MORE,
230 MachineOptimizationRemarkMissed &R) {
231 bool IsFatal = Severity == DS_Error &&
232 TPC.isGlobalISelAbortEnabled();
233 // Print the function name explicitly if we don't have a debug location (which
234 // makes the diagnostic less useful) or if we're going to emit a raw error.
235 if (!R.getLocation().isValid() || IsFatal)
236 R << (" (in function: " + MF.getName() + ")").str();
237
238 if (IsFatal)
239 report_fatal_error(R.getMsg());
240 else
241 MORE.emit(R);
242}
243
244void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
245 MachineOptimizationRemarkEmitter &MORE,
246 MachineOptimizationRemarkMissed &R) {
247 reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
248}
249
250void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
251 MachineOptimizationRemarkEmitter &MORE,
252 MachineOptimizationRemarkMissed &R) {
253 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
254 reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
255}
256
257void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
258 MachineOptimizationRemarkEmitter &MORE,
259 const char *PassName, StringRef Msg,
260 const MachineInstr &MI) {
261 MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
262 MI.getDebugLoc(), MI.getParent());
263 R << Msg;
264 // Printing MI is expensive; only do it if expensive remarks are enabled.
265 if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
266 R << ": " << ore::MNV("Inst", MI);
267 reportGISelFailure(MF, TPC, MORE, R);
268}
269
270Optional<APInt> llvm::getConstantVRegVal(Register VReg,
271 const MachineRegisterInfo &MRI) {
272 Optional<ValueAndVReg> ValAndVReg =
273 getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
274 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
275 "Value found while looking through instrs");
276 if (!ValAndVReg)
277 return None;
278 return ValAndVReg->Value;
279}
280
281Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
282 const MachineRegisterInfo &MRI) {
283 Optional<APInt> Val = getConstantVRegVal(VReg, MRI);
284 if (Val && Val->getBitWidth() <= 64)
285 return Val->getSExtValue();
286 return None;
287}
288
289Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
290 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
291 bool HandleFConstant, bool LookThroughAnyExt) {
292 SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
293 MachineInstr *MI;
294 auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
295 return Opcode == TargetOpcode::G_CONSTANT ||
296 (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
297 };
298 auto GetImmediateValue = [HandleFConstant,
299 &MRI](const MachineInstr &MI) -> Optional<APInt> {
300 const MachineOperand &CstVal = MI.getOperand(1);
301 if (!CstVal.isImm() && !CstVal.isCImm() &&
302 (!HandleFConstant || !CstVal.isFPImm()))
303 return None;
304 if (!CstVal.isFPImm()) {
305 unsigned BitWidth =
306 MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
307 APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
308 : CstVal.getCImm()->getValue();
309 assert(Val.getBitWidth() == BitWidth &&
310 "Value bitwidth doesn't match definition type");
311 return Val;
312 }
313 return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
314 };
315 while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
316 LookThroughInstrs) {
317 switch (MI->getOpcode()) {
318 case TargetOpcode::G_ANYEXT:
319 if (!LookThroughAnyExt)
320 return None;
321 LLVM_FALLTHROUGH;
322 case TargetOpcode::G_TRUNC:
323 case TargetOpcode::G_SEXT:
324 case TargetOpcode::G_ZEXT:
325 SeenOpcodes.push_back(std::make_pair(
326 MI->getOpcode(),
327 MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
328 VReg = MI->getOperand(1).getReg();
329 break;
330 case TargetOpcode::COPY:
331 VReg = MI->getOperand(1).getReg();
332 if (Register::isPhysicalRegister(VReg))
333 return None;
334 break;
335 case TargetOpcode::G_INTTOPTR:
336 VReg = MI->getOperand(1).getReg();
337 break;
338 default:
339 return None;
340 }
341 }
342 if (!MI || !IsConstantOpcode(MI->getOpcode()))
343 return None;
344
345 Optional<APInt> MaybeVal = GetImmediateValue(*MI);
346 if (!MaybeVal)
347 return None;
348 APInt &Val = *MaybeVal;
349 while (!SeenOpcodes.empty()) {
350 std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
351 switch (OpcodeAndSize.first) {
352 case TargetOpcode::G_TRUNC:
353 Val = Val.trunc(OpcodeAndSize.second);
354 break;
355 case TargetOpcode::G_ANYEXT:
356 case TargetOpcode::G_SEXT:
357 Val = Val.sext(OpcodeAndSize.second);
358 break;
359 case TargetOpcode::G_ZEXT:
360 Val = Val.zext(OpcodeAndSize.second);
361 break;
362 }
363 }
364
365 return ValueAndVReg{Val, VReg};
366}
367
368const ConstantInt *llvm::getConstantIntVRegVal(Register VReg,
369 const MachineRegisterInfo &MRI) {
370 MachineInstr *MI = MRI.getVRegDef(VReg);
371 if (MI->getOpcode() != TargetOpcode::G_CONSTANT)
372 return nullptr;
373 return MI->getOperand(1).getCImm();
374}
375
376const ConstantFP *
377llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
378 MachineInstr *MI = MRI.getVRegDef(VReg);
379 if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
380 return nullptr;
381 return MI->getOperand(1).getFPImm();
382}
383
384Optional<DefinitionAndSourceRegister>
385llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
386 Register DefSrcReg = Reg;
387 auto *DefMI = MRI.getVRegDef(Reg);
388 auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
389 if (!DstTy.isValid())
390 return None;
391 unsigned Opc = DefMI->getOpcode();
392 while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
393 Register SrcReg = DefMI->getOperand(1).getReg();
394 auto SrcTy = MRI.getType(SrcReg);
395 if (!SrcTy.isValid())
396 break;
397 DefMI = MRI.getVRegDef(SrcReg);
398 DefSrcReg = SrcReg;
399 Opc = DefMI->getOpcode();
400 }
401 return DefinitionAndSourceRegister{DefMI, DefSrcReg};
402}
403
404MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
405 const MachineRegisterInfo &MRI) {
406 Optional<DefinitionAndSourceRegister> DefSrcReg =
407 getDefSrcRegIgnoringCopies(Reg, MRI);
408 return DefSrcReg ? DefSrcReg->MI : nullptr;
409}
410
411Register llvm::getSrcRegIgnoringCopies(Register Reg,
412 const MachineRegisterInfo &MRI) {
413 Optional<DefinitionAndSourceRegister> DefSrcReg =
414 getDefSrcRegIgnoringCopies(Reg, MRI);
415 return DefSrcReg ? DefSrcReg->Reg : Register();
416}
417
418MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
419 const MachineRegisterInfo &MRI) {
420 MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
421 return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
422}
423
424APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
425 if (Size == 32)
426 return APFloat(float(Val));
427 if (Size == 64)
428 return APFloat(Val);
429 if (Size != 16)
430 llvm_unreachable("Unsupported FPConstant size");
431 bool Ignored;
432 APFloat APF(Val);
433 APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
434 return APF;
435}
436
437Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
438 const Register Op2,
439 const MachineRegisterInfo &MRI) {
440 auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
441 if (!MaybeOp2Cst)
442 return None;
443
444 auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
445 if (!MaybeOp1Cst)
446 return None;
447
448 const APInt &C1 = *MaybeOp1Cst;
449 const APInt &C2 = *MaybeOp2Cst;
450 switch (Opcode) {
451 default:
452 break;
453 case TargetOpcode::G_ADD:
454 return C1 + C2;
455 case TargetOpcode::G_AND:
456 return C1 & C2;
457 case TargetOpcode::G_ASHR:
458 return C1.ashr(C2);
459 case TargetOpcode::G_LSHR:
460 return C1.lshr(C2);
461 case TargetOpcode::G_MUL:
462 return C1 * C2;
463 case TargetOpcode::G_OR:
464 return C1 | C2;
465 case TargetOpcode::G_SHL:
466 return C1 << C2;
467 case TargetOpcode::G_SUB:
468 return C1 - C2;
469 case TargetOpcode::G_XOR:
470 return C1 ^ C2;
471 case TargetOpcode::G_UDIV:
472 if (!C2.getBoolValue())
473 break;
474 return C1.udiv(C2);
475 case TargetOpcode::G_SDIV:
476 if (!C2.getBoolValue())
477 break;
478 return C1.sdiv(C2);
479 case TargetOpcode::G_UREM:
480 if (!C2.getBoolValue())
481 break;
482 return C1.urem(C2);
483 case TargetOpcode::G_SREM:
484 if (!C2.getBoolValue())
485 break;
486 return C1.srem(C2);
487 }
488
489 return None;
490}
491
492Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
493 const Register Op2,
494 const MachineRegisterInfo &MRI) {
495 const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
496 if (!Op2Cst)
497 return None;
498
499 const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
500 if (!Op1Cst)
501 return None;
502
503 APFloat C1 = Op1Cst->getValueAPF();
504 const APFloat &C2 = Op2Cst->getValueAPF();
505 switch (Opcode) {
506 case TargetOpcode::G_FADD:
507 C1.add(C2, APFloat::rmNearestTiesToEven);
508 return C1;
509 case TargetOpcode::G_FSUB:
510 C1.subtract(C2, APFloat::rmNearestTiesToEven);
511 return C1;
512 case TargetOpcode::G_FMUL:
513 C1.multiply(C2, APFloat::rmNearestTiesToEven);
514 return C1;
515 case TargetOpcode::G_FDIV:
516 C1.divide(C2, APFloat::rmNearestTiesToEven);
517 return C1;
518 case TargetOpcode::G_FREM:
519 C1.mod(C2);
520 return C1;
521 case TargetOpcode::G_FCOPYSIGN:
522 C1.copySign(C2);
523 return C1;
524 case TargetOpcode::G_FMINNUM:
525 return minnum(C1, C2);
526 case TargetOpcode::G_FMAXNUM:
527 return maxnum(C1, C2);
528 case TargetOpcode::G_FMINIMUM:
529 return minimum(C1, C2);
530 case TargetOpcode::G_FMAXIMUM:
531 return maximum(C1, C2);
532 case TargetOpcode::G_FMINNUM_IEEE:
533 case TargetOpcode::G_FMAXNUM_IEEE:
534 // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
535 // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
536 // and currently there isn't a nice wrapper in APFloat for the version with
537 // correct snan handling.
538 break;
539 default:
540 break;
541 }
542
543 return None;
544}
545
546bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
547 bool SNaN) {
548 const MachineInstr *DefMI = MRI.getVRegDef(Val);
549 if (!DefMI)
550 return false;
551
552 const TargetMachine& TM = DefMI->getMF()->getTarget();
553 if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
554 return true;
555
556 // If the value is a constant, we can obviously see if it is a NaN or not.
557 if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
558 return !FPVal->getValueAPF().isNaN() ||
559 (SNaN && !FPVal->getValueAPF().isSignaling());
560 }
561
562 if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
563 for (const auto &Op : DefMI->uses())
564 if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
565 return false;
566 return true;
567 }
568
569 switch (DefMI->getOpcode()) {
570 default:
571 break;
572 case TargetOpcode::G_FMINNUM_IEEE:
573 case TargetOpcode::G_FMAXNUM_IEEE: {
574 if (SNaN)
575 return true;
576 // This can return a NaN if either operand is an sNaN, or if both operands
577 // are NaN.
578 return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
579 isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
580 (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
581 isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
582 }
583 case TargetOpcode::G_FMINNUM:
584 case TargetOpcode::G_FMAXNUM: {
585 // Only one needs to be known not-nan, since it will be returned if the
586 // other ends up being one.
587 return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
588 isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
589 }
590 }
591
592 if (SNaN) {
593 // FP operations quiet. For now, just handle the ones inserted during
594 // legalization.
595 switch (DefMI->getOpcode()) {
596 case TargetOpcode::G_FPEXT:
597 case TargetOpcode::G_FPTRUNC:
598 case TargetOpcode::G_FCANONICALIZE:
599 return true;
600 default:
601 return false;
602 }
603 }
604
605 return false;
606}
607
608Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
609 const MachinePointerInfo &MPO) {
610 auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
611 if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
612 MachineFrameInfo &MFI = MF.getFrameInfo();
613 return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
614 MPO.Offset);
615 }
616
617 if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
618 const Module *M = MF.getFunction().getParent();
619 return V->getPointerAlignment(M->getDataLayout());
620 }
621
622 return Align(1);
623}
624
625Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
626 const TargetInstrInfo &TII,
627 MCRegister PhysReg,
628 const TargetRegisterClass &RC,
629 LLT RegTy) {
630 DebugLoc DL; // FIXME: Is no location the right choice?
631 MachineBasicBlock &EntryMBB = MF.front();
632 MachineRegisterInfo &MRI = MF.getRegInfo();
633 Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
634 if (LiveIn) {
635 MachineInstr *Def = MRI.getVRegDef(LiveIn);
636 if (Def) {
637 // FIXME: Should the verifier check this is in the entry block?
638 assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
639 return LiveIn;
640 }
641
642 // It's possible the incoming argument register and copy was added during
643 // lowering, but later deleted due to being/becoming dead. If this happens,
644 // re-insert the copy.
645 } else {
646 // The live in register was not present, so add it.
647 LiveIn = MF.addLiveIn(PhysReg, &RC);
648 if (RegTy.isValid())
649 MRI.setType(LiveIn, RegTy);
650 }
651
652 BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
653 .addReg(PhysReg);
654 if (!EntryMBB.isLiveIn(PhysReg))
655 EntryMBB.addLiveIn(PhysReg);
656 return LiveIn;
657}
658
659Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
660 uint64_t Imm,
661 const MachineRegisterInfo &MRI) {
662 auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
663 if (MaybeOp1Cst) {
664 switch (Opcode) {
665 default:
666 break;
667 case TargetOpcode::G_SEXT_INREG: {
668 LLT Ty = MRI.getType(Op1);
669 return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
670 }
671 }
672 }
673 return None;
674}
675
676bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
677 GISelKnownBits *KB) {
678 Optional<DefinitionAndSourceRegister> DefSrcReg =
679 getDefSrcRegIgnoringCopies(Reg, MRI);
680 if (!DefSrcReg)
681 return false;
682
683 const MachineInstr &MI = *DefSrcReg->MI;
684 const LLT Ty = MRI.getType(Reg);
685
686 switch (MI.getOpcode()) {
687 case TargetOpcode::G_CONSTANT: {
688 unsigned BitWidth = Ty.getScalarSizeInBits();
689 const ConstantInt *CI = MI.getOperand(1).getCImm();
690 return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
691 }
692 case TargetOpcode::G_SHL: {
693 // A left-shift of a constant one will have exactly one bit set because
694 // shifting the bit off the end is undefined.
695
696 // TODO: Constant splat
697 if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
698 if (*ConstLHS == 1)
699 return true;
700 }
701
702 break;
703 }
704 case TargetOpcode::G_LSHR: {
705 if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
706 if (ConstLHS->isSignMask())
707 return true;
708 }
709
710 break;
711 }
712 case TargetOpcode::G_BUILD_VECTOR: {
713 // TODO: Probably should have a recursion depth guard since you could have
714 // bitcasted vector elements.
715 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
716 if (!isKnownToBeAPowerOfTwo(MI.getOperand(I).getReg(), MRI, KB))
717 return false;
718 }
719
720 return true;
721 }
722 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
723 // Only handle constants since we would need to know if number of leading
724 // zeros is greater than the truncation amount.
725 const unsigned BitWidth = Ty.getScalarSizeInBits();
726 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
727 auto Const = getConstantVRegVal(MI.getOperand(I).getReg(), MRI);
728 if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
729 return false;
730 }
731
732 return true;
733 }
734 default:
735 break;
736 }
737
738 if (!KB)
739 return false;
740
741 // More could be done here, though the above checks are enough
742 // to handle some common cases.
743
744 // Fall back to computeKnownBits to catch other known cases.
745 KnownBits Known = KB->getKnownBits(Reg);
746 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
747}
748
749void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
750 AU.addPreserved<StackProtector>();
751}
752
753static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
754 unsigned Mul = OrigSize * TargetSize;
755 unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
756 return Mul / GCDSize;
757}
758
759LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
760 const unsigned OrigSize = OrigTy.getSizeInBits();
761 const unsigned TargetSize = TargetTy.getSizeInBits();
762
763 if (OrigSize == TargetSize)
1. Assuming 'OrigSize' is not equal to 'TargetSize'
2. Taking false branch
764 return OrigTy;
765
766 if (OrigTy.isVector()) {
3. Calling 'LLT::isVector'
5. Returning from 'LLT::isVector'
6. Taking true branch
767 const LLT OrigElt = OrigTy.getElementType();
768
769 if (TargetTy.isVector()) {
7. Calling 'LLT::isVector'
9. Returning from 'LLT::isVector'
10. Taking false branch
770 const LLT TargetElt = TargetTy.getElementType();
771
772 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
773 int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
774 TargetTy.getNumElements());
775 // Prefer the original element type.
776 int Mul = OrigTy.getNumElements() * TargetTy.getNumElements();
777 return LLT::vector(Mul / GCDElts, OrigTy.getElementType());
778 }
779 } else {
780 if (OrigElt.getSizeInBits() == TargetSize)
11. Assuming the condition is false
12. Taking false branch
781 return OrigTy;
782 }
783
784 unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
785 return LLT::vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
13. Calling 'LLT::getSizeInBits'
16. Returning from 'LLT::getSizeInBits'
17. Division by zero
786 }
787
788 if (TargetTy.isVector()) {
789 unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
790 return LLT::vector(LCMSize / OrigSize, OrigTy);
791 }
792
793 unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
794
795 // Preserve pointer types.
796 if (LCMSize == OrigSize)
797 return OrigTy;
798 if (LCMSize == TargetSize)
799 return TargetTy;
800
801 return LLT::scalar(LCMSize);
802}
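
If the zero element size assumed on the path above were genuinely reachable, one defensive shape would be to check the divisor before computing the element count, as in the sketch below. This is only an illustration in plain C++, not the change made in LLVM; guardedNumElements is a hypothetical helper name.

#include <cassert>

// Hypothetical guard for the division at line 785 (illustration only, not the
// upstream fix): assert the element size is non-zero before dividing.
static unsigned guardedNumElements(unsigned LCMSize, unsigned OrigEltSize) {
  assert(OrigEltSize != 0 && "vector element size must be non-zero");
  return LCMSize / OrigEltSize;
}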
803
804LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
805 const unsigned OrigSize = OrigTy.getSizeInBits();
806 const unsigned TargetSize = TargetTy.getSizeInBits();
807
808 if (OrigSize == TargetSize)
809 return OrigTy;
810
811 if (OrigTy.isVector()) {
812 LLT OrigElt = OrigTy.getElementType();
813 if (TargetTy.isVector()) {
814 LLT TargetElt = TargetTy.getElementType();
815 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
816 int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
817 TargetTy.getNumElements());
818 return LLT::scalarOrVector(GCD, OrigElt);
819 }
820 } else {
821 // If the source is a vector of pointers, return a pointer element.
822 if (OrigElt.getSizeInBits() == TargetSize)
823 return OrigElt;
824 }
825
826 unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
827 if (GCD == OrigElt.getSizeInBits())
828 return OrigElt;
829
830 // If we can't produce the original element type, we have to use a smaller
831 // scalar.
832 if (GCD < OrigElt.getSizeInBits())
833 return LLT::scalar(GCD);
834 return LLT::vector(GCD / OrigElt.getSizeInBits(), OrigElt);
835 }
836
837 if (TargetTy.isVector()) {
838 // Try to preserve the original element type.
839 LLT TargetElt = TargetTy.getElementType();
840 if (TargetElt.getSizeInBits() == OrigSize)
841 return OrigTy;
842 }
843
844 unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
845 return LLT::scalar(GCD);
846}
847
848Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
849 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
850 "Only G_SHUFFLE_VECTOR can have a splat index!");
851 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
852 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
853
854 // If all elements are undefined, this shuffle can be considered a splat.
855 // Return 0 for better potential for callers to simplify.
856 if (FirstDefinedIdx == Mask.end())
857 return 0;
858
859 // Make sure all remaining elements are either undef or the same
860 // as the first non-undef value.
861 int SplatValue = *FirstDefinedIdx;
862 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
863 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
864 return None;
865
866 return SplatValue;
867}
868
869static bool isBuildVectorOp(unsigned Opcode) {
870 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
871 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
872}
873
874// TODO: Handle mixed undef elements.
875static bool isBuildVectorConstantSplat(const MachineInstr &MI,
876 const MachineRegisterInfo &MRI,
877 int64_t SplatValue) {
878 if (!isBuildVectorOp(MI.getOpcode()))
879 return false;
880
881 const unsigned NumOps = MI.getNumOperands();
882 for (unsigned I = 1; I != NumOps; ++I) {
883 Register Element = MI.getOperand(I).getReg();
884 if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
885 return false;
886 }
887
888 return true;
889}
890
891Optional<int64_t>
892llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
893 const MachineRegisterInfo &MRI) {
894 if (!isBuildVectorOp(MI.getOpcode()))
895 return None;
896
897 const unsigned NumOps = MI.getNumOperands();
898 Optional<int64_t> Scalar;
899 for (unsigned I = 1; I != NumOps; ++I) {
900 Register Element = MI.getOperand(I).getReg();
901 int64_t ElementValue;
902 if (!mi_match(Element, MRI, m_ICst(ElementValue)))
903 return None;
904 if (!Scalar)
905 Scalar = ElementValue;
906 else if (*Scalar != ElementValue)
907 return None;
908 }
909
910 return Scalar;
911}
912
913bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
914 const MachineRegisterInfo &MRI) {
915 return isBuildVectorConstantSplat(MI, MRI, 0);
916}
917
918bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
919 const MachineRegisterInfo &MRI) {
920 return isBuildVectorConstantSplat(MI, MRI, -1);
921}
922
923Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
924 const MachineRegisterInfo &MRI) {
925 unsigned Opc = MI.getOpcode();
926 if (!isBuildVectorOp(Opc))
927 return None;
928 if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
929 return RegOrConstant(*Splat);
930 auto Reg = MI.getOperand(1).getReg();
931 if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
932 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
933 return None;
934 return RegOrConstant(Reg);
935}
936
937bool llvm::matchUnaryPredicate(
938 const MachineRegisterInfo &MRI, Register Reg,
939 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
940
941 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
942 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
943 return Match(nullptr);
944
945 // TODO: Also handle fconstant
946 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
947 return Match(Def->getOperand(1).getCImm());
948
949 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
950 return false;
951
952 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
953 Register SrcElt = Def->getOperand(I).getReg();
954 const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
955 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
956 if (!Match(nullptr))
957 return false;
958 continue;
959 }
960
961 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
962 !Match(SrcDef->getOperand(1).getCImm()))
963 return false;
964 }
965
966 return true;
967}
968
969bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
970 bool IsFP) {
971 switch (TLI.getBooleanContents(IsVector, IsFP)) {
972 case TargetLowering::UndefinedBooleanContent:
973 return Val & 0x1;
974 case TargetLowering::ZeroOrOneBooleanContent:
975 return Val == 1;
976 case TargetLowering::ZeroOrNegativeOneBooleanContent:
977 return Val == -1;
978 }
979 llvm_unreachable("Invalid boolean contents");
980}
981
982int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
983 bool IsFP) {
984 switch (TLI.getBooleanContents(IsVector, IsFP)) {
985 case TargetLowering::UndefinedBooleanContent:
986 case TargetLowering::ZeroOrOneBooleanContent:
987 return 1;
988 case TargetLowering::ZeroOrNegativeOneBooleanContent:
989 return -1;
990 }
991 llvm_unreachable("Invalid boolean contents");
992}
993
994bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
995 ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
996 const auto &F = MBB.getParent()->getFunction();
997 return F.hasOptSize() || F.hasMinSize() ||
998 llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
999}
1000
1001unsigned llvm::getIntrinsicID(const MachineInstr &MI) {
1002#ifndef NDEBUG
1003 unsigned Opc = MI.getOpcode();
1004 assert(Opc == TargetOpcode::G_INTRINSIC ||
1005 Opc == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
1006#endif
1007 return MI.getOperand(MI.getNumExplicitDefs()).getIntrinsicID();
1008}

/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/Support/LowLevelTypeImpl.h

1//== llvm/Support/LowLevelTypeImpl.h --------------------------- -*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// Implement a low-level type suitable for MachineInstr level instruction
10/// selection.
11///
12/// For a type attached to a MachineInstr, we only care about 2 details: total
13/// size and the number of vector lanes (if any). Accordingly, there are 4
14/// possible valid type-kinds:
15///
16/// * `sN` for scalars and aggregates
17/// * `<N x sM>` for vectors, which must have at least 2 elements.
18/// * `pN` for pointers
19///
20/// Other information required for correct selection is expected to be carried
21/// by the opcode, or non-type flags. For example the distinction between G_ADD
22/// and G_FADD for int/float or fast-math flags.
23///
24//===----------------------------------------------------------------------===//
25
26#ifndef LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
27#define LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
28
29#include "llvm/ADT/DenseMapInfo.h"
30#include "llvm/Support/Debug.h"
31#include "llvm/Support/MachineValueType.h"
32#include <cassert>
33
34namespace llvm {
35
36class DataLayout;
37class Type;
38class raw_ostream;
39
40class LLT {
41public:
42 /// Get a low-level scalar or aggregate "bag of bits".
43 static LLT scalar(unsigned SizeInBits) {
44 assert(SizeInBits > 0 && "invalid scalar size");
45 return LLT{/*isPointer=*/false, /*isVector=*/false, /*NumElements=*/0,
46 SizeInBits, /*AddressSpace=*/0};
47 }
48
49 /// Get a low-level pointer in the given address space.
50 static LLT pointer(unsigned AddressSpace, unsigned SizeInBits) {
51 assert(SizeInBits > 0 && "invalid pointer size");
52 return LLT{/*isPointer=*/true, /*isVector=*/false, /*NumElements=*/0,
53 SizeInBits, AddressSpace};
54 }
55
56 /// Get a low-level vector of some number of elements and element width.
57 /// \p NumElements must be at least 2.
58 static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) {
59 assert(NumElements > 1 && "invalid number of vector elements");
60 assert(ScalarSizeInBits > 0 && "invalid vector element size");
61 return LLT{/*isPointer=*/false, /*isVector=*/true, NumElements,
62 ScalarSizeInBits, /*AddressSpace=*/0};
63 }
64
65 /// Get a low-level vector of some number of elements and element type.
66 static LLT vector(uint16_t NumElements, LLT ScalarTy) {
67 assert(NumElements > 1 && "invalid number of vector elements");
68 assert(!ScalarTy.isVector() && "invalid vector element type");
69 return LLT{ScalarTy.isPointer(), /*isVector=*/true, NumElements,
70 ScalarTy.getSizeInBits(),
71 ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
72 }
73
74 static LLT scalarOrVector(uint16_t NumElements, LLT ScalarTy) {
75 return NumElements == 1 ? ScalarTy : LLT::vector(NumElements, ScalarTy);
76 }
77
78 static LLT scalarOrVector(uint16_t NumElements, unsigned ScalarSize) {
79 return scalarOrVector(NumElements, LLT::scalar(ScalarSize));
80 }
81
82 explicit LLT(bool isPointer, bool isVector, uint16_t NumElements,
83 unsigned SizeInBits, unsigned AddressSpace) {
84 init(isPointer, isVector, NumElements, SizeInBits, AddressSpace);
85 }
86 explicit LLT() : IsPointer(false), IsVector(false), RawData(0) {}
87
88 explicit LLT(MVT VT);
89
90 bool isValid() const { return RawData != 0; }
91
92 bool isScalar() const { return isValid() && !IsPointer && !IsVector; }
93
94 bool isPointer() const { return isValid() && IsPointer && !IsVector; }
95
96 bool isVector() const { return isValid() && IsVector; }
4. Returning value, which participates in a condition later
8. Returning zero, which participates in a condition later
97
98 /// Returns the number of elements in a vector LLT. Must only be called on
99 /// vector types.
100 uint16_t getNumElements() const {
101 assert(IsVector && "cannot get number of elements on scalar/aggregate");
102 if (!IsPointer)
103 return getFieldValue(VectorElementsFieldInfo);
104 else
105 return getFieldValue(PointerVectorElementsFieldInfo);
106 }
107
108 /// Returns the total size of the type. Must only be called on sized types.
109 unsigned getSizeInBits() const {
110 if (isPointer() || isScalar())
14. Taking false branch
111 return getScalarSizeInBits();
112 return getScalarSizeInBits() * getNumElements();
15. Returning zero
113 }
114
115 /// Returns the total size of the type in bytes, i.e. number of whole bytes
116 /// needed to represent the size in bits. Must only be called on sized types.
117 unsigned getSizeInBytes() const {
118 return (getSizeInBits() + 7) / 8;
119 }
120
121 LLT getScalarType() const {
122 return isVector() ? getElementType() : *this;
123 }
124
125 /// If this type is a vector, return a vector with the same number of elements
126 /// but the new element type. Otherwise, return the new element type.
127 LLT changeElementType(LLT NewEltTy) const {
128 return isVector() ? LLT::vector(getNumElements(), NewEltTy) : NewEltTy;
129 }
130
131 /// If this type is a vector, return a vector with the same number of elements
132 /// but the new element size. Otherwise, return the new element type. Invalid
133 /// for pointer types. For pointer types, use changeElementType.
134 LLT changeElementSize(unsigned NewEltSize) const {
135 assert(!getScalarType().isPointer() &&
136 "invalid to directly change element size for pointers");
137 return isVector() ? LLT::vector(getNumElements(), NewEltSize)
138 : LLT::scalar(NewEltSize);
139 }
140
141 /// Return a vector or scalar with the same element type and the new number of
142 /// elements.
143 LLT changeNumElements(unsigned NewNumElts) const {
144 return LLT::scalarOrVector(NewNumElts, getScalarType());
145 }
146
147 /// Return a type that is \p Factor times smaller. Reduces the number of
148 /// elements if this is a vector, or the bitwidth for scalar/pointers. Does
149 /// not attempt to handle cases that aren't evenly divisible.
150 LLT divide(int Factor) const {
151 assert(Factor != 1);
152 if (isVector()) {
153 assert(getNumElements() % Factor == 0);
154 return scalarOrVector(getNumElements() / Factor, getElementType());
155 }
156
157 assert(getSizeInBits() % Factor == 0);
158 return scalar(getSizeInBits() / Factor);
159 }
160
161 bool isByteSized() const { return (getSizeInBits() & 7) == 0; }
162
163 unsigned getScalarSizeInBits() const {
164 assert(RawData != 0 && "Invalid Type");
165 if (!IsVector) {
166 if (!IsPointer)
167 return getFieldValue(ScalarSizeFieldInfo);
168 else
169 return getFieldValue(PointerSizeFieldInfo);
170 } else {
171 if (!IsPointer)
172 return getFieldValue(VectorSizeFieldInfo);
173 else
174 return getFieldValue(PointerVectorSizeFieldInfo);
175 }
176 }
177
178 unsigned getAddressSpace() const {
179 assert(RawData != 0 && "Invalid Type");
180 assert(IsPointer && "cannot get address space of non-pointer type");
181 if (!IsVector)
182 return getFieldValue(PointerAddressSpaceFieldInfo);
183 else
184 return getFieldValue(PointerVectorAddressSpaceFieldInfo);
185 }
186
187 /// Returns the vector's element type. Only valid for vector types.
188 LLT getElementType() const {
189 assert(isVector() && "cannot get element type of scalar/aggregate");
190 if (IsPointer)
191 return pointer(getAddressSpace(), getScalarSizeInBits());
192 else
193 return scalar(getScalarSizeInBits());
194 }
195
196 void print(raw_ostream &OS) const;
197
198#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
199 LLVM_DUMP_METHOD void dump() const {
200 print(dbgs());
201 dbgs() << '\n';
202 }
203#endif
204
205 bool operator==(const LLT &RHS) const {
206 return IsPointer == RHS.IsPointer && IsVector == RHS.IsVector &&
207 RHS.RawData == RawData;
208 }
209
210 bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
211
212 friend struct DenseMapInfo<LLT>;
213 friend class GISelInstProfileBuilder;
214
215private:
216 /// LLT is packed into 64 bits as follows:
217 /// isPointer : 1
218 /// isVector : 1
219 /// with 62 bits remaining for Kind-specific data, packed in bitfields
220 /// as described below. As there isn't a simple portable way to pack bits
221 /// into bitfields, here the different fields in the packed structure is
222 /// described in static const *Field variables. Each of these variables
223 /// is a 2-element array, with the first element describing the bitfield size
224 /// and the second element describing the bitfield offset.
225 typedef int BitFieldInfo[2];
226 ///
227 /// This is how the bitfields are packed per Kind:
228 /// * Invalid:
229 /// gets encoded as RawData == 0, as that is an invalid encoding, since for
230 /// valid encodings, SizeInBits/SizeOfElement must be larger than 0.
231 /// * Non-pointer scalar (isPointer == 0 && isVector == 0):
232 /// SizeInBits: 32;
233 static const constexpr BitFieldInfo ScalarSizeFieldInfo{32, 0};
234 /// * Pointer (isPointer == 1 && isVector == 0):
235 /// SizeInBits: 16;
236 /// AddressSpace: 24;
237 static const constexpr BitFieldInfo PointerSizeFieldInfo{16, 0};
238 static const constexpr BitFieldInfo PointerAddressSpaceFieldInfo{
239 24, PointerSizeFieldInfo[0] + PointerSizeFieldInfo[1]};
240 /// * Vector-of-non-pointer (isPointer == 0 && isVector == 1):
241 /// NumElements: 16;
242 /// SizeOfElement: 32;
243 static const constexpr BitFieldInfo VectorElementsFieldInfo{16, 0};
244 static const constexpr BitFieldInfo VectorSizeFieldInfo{
245 32, VectorElementsFieldInfo[0] + VectorElementsFieldInfo[1]};
246 /// * Vector-of-pointer (isPointer == 1 && isVector == 1):
247 /// NumElements: 16;
248 /// SizeOfElement: 16;
249 /// AddressSpace: 24;
250 static const constexpr BitFieldInfo PointerVectorElementsFieldInfo{16, 0};
251 static const constexpr BitFieldInfo PointerVectorSizeFieldInfo{
252 16,
253 PointerVectorElementsFieldInfo[1] + PointerVectorElementsFieldInfo[0]};
254 static const constexpr BitFieldInfo PointerVectorAddressSpaceFieldInfo{
255 24, PointerVectorSizeFieldInfo[1] + PointerVectorSizeFieldInfo[0]};
256
257 uint64_t IsPointer : 1;
258 uint64_t IsVector : 1;
259 uint64_t RawData : 62;
260
261 static uint64_t getMask(const BitFieldInfo FieldInfo) {
262 const int FieldSizeInBits = FieldInfo[0];
263 return (((uint64_t)1) << FieldSizeInBits) - 1;
264 }
265 static uint64_t maskAndShift(uint64_t Val, uint64_t Mask, uint8_t Shift) {
266 assert(Val <= Mask && "Value too large for field");
267 return (Val & Mask) << Shift;
268 }
269 static uint64_t maskAndShift(uint64_t Val, const BitFieldInfo FieldInfo) {
270 return maskAndShift(Val, getMask(FieldInfo), FieldInfo[1]);
271 }
272 uint64_t getFieldValue(const BitFieldInfo FieldInfo) const {
273 return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
274 }
275
276 void init(bool IsPointer, bool IsVector, uint16_t NumElements,
277 unsigned SizeInBits, unsigned AddressSpace) {
278 this->IsPointer = IsPointer;
279 this->IsVector = IsVector;
280 if (!IsVector) {
281 if (!IsPointer)
282 RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
283 else
284 RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
285 maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
286 } else {
287 assert(NumElements > 1 && "invalid number of vector elements");
288 if (!IsPointer)
289 RawData = maskAndShift(NumElements, VectorElementsFieldInfo) |
290 maskAndShift(SizeInBits, VectorSizeFieldInfo);
291 else
292 RawData =
293 maskAndShift(NumElements, PointerVectorElementsFieldInfo) |
294 maskAndShift(SizeInBits, PointerVectorSizeFieldInfo) |
295 maskAndShift(AddressSpace, PointerVectorAddressSpaceFieldInfo);
296 }
297 }
298
299 uint64_t getUniqueRAWLLTData() const {
300 return ((uint64_t)RawData) << 2 | ((uint64_t)IsPointer) << 1 |
301 ((uint64_t)IsVector);
302 }
303};
304
305inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
306 Ty.print(OS);
307 return OS;
308}
309
310template<> struct DenseMapInfo<LLT> {
311 static inline LLT getEmptyKey() {
312 LLT Invalid;
313 Invalid.IsPointer = true;
314 return Invalid;
315 }
316 static inline LLT getTombstoneKey() {
317 LLT Invalid;
318 Invalid.IsVector = true;
319 return Invalid;
320 }
321 static inline unsigned getHashValue(const LLT &Ty) {
322 uint64_t Val = Ty.getUniqueRAWLLTData();
323 return DenseMapInfo<uint64_t>::getHashValue(Val);
324 }
325 static bool isEqual(const LLT &LHS, const LLT &RHS) {
326 return LHS == RHS;
327 }
328};
329
330}
331
332#endif // LLVM_SUPPORT_LOWLEVELTYPEIMPL_H