Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1149, column 10
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SelectionDAGBuilder.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-11/lib/clang/11.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward 
-internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-11/lib/clang/11.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/CodeGen/SelectionDAG -fdebug-prefix-map=/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-03-09-184146-41876-1 -x c++ /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

1//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements routines for translating from LLVM IR into SelectionDAG IR.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SelectionDAGBuilder.h"
14#include "SDNodeDbgValue.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/BitVector.h"
19#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/Optional.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallSet.h"
25#include "llvm/ADT/SmallVector.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Triple.h"
28#include "llvm/ADT/Twine.h"
29#include "llvm/Analysis/AliasAnalysis.h"
30#include "llvm/Analysis/BlockFrequencyInfo.h"
31#include "llvm/Analysis/BranchProbabilityInfo.h"
32#include "llvm/Analysis/ConstantFolding.h"
33#include "llvm/Analysis/EHPersonalities.h"
34#include "llvm/Analysis/Loads.h"
35#include "llvm/Analysis/MemoryLocation.h"
36#include "llvm/Analysis/ProfileSummaryInfo.h"
37#include "llvm/Analysis/TargetLibraryInfo.h"
38#include "llvm/Analysis/ValueTracking.h"
39#include "llvm/Analysis/VectorUtils.h"
40#include "llvm/CodeGen/Analysis.h"
41#include "llvm/CodeGen/FunctionLoweringInfo.h"
42#include "llvm/CodeGen/GCMetadata.h"
43#include "llvm/CodeGen/ISDOpcodes.h"
44#include "llvm/CodeGen/MachineBasicBlock.h"
45#include "llvm/CodeGen/MachineFrameInfo.h"
46#include "llvm/CodeGen/MachineFunction.h"
47#include "llvm/CodeGen/MachineInstr.h"
48#include "llvm/CodeGen/MachineInstrBuilder.h"
49#include "llvm/CodeGen/MachineJumpTableInfo.h"
50#include "llvm/CodeGen/MachineMemOperand.h"
51#include "llvm/CodeGen/MachineModuleInfo.h"
52#include "llvm/CodeGen/MachineOperand.h"
53#include "llvm/CodeGen/MachineRegisterInfo.h"
54#include "llvm/CodeGen/RuntimeLibcalls.h"
55#include "llvm/CodeGen/SelectionDAG.h"
56#include "llvm/CodeGen/SelectionDAGNodes.h"
57#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
58#include "llvm/CodeGen/StackMaps.h"
59#include "llvm/CodeGen/SwiftErrorValueTracking.h"
60#include "llvm/CodeGen/TargetFrameLowering.h"
61#include "llvm/CodeGen/TargetInstrInfo.h"
62#include "llvm/CodeGen/TargetLowering.h"
63#include "llvm/CodeGen/TargetOpcodes.h"
64#include "llvm/CodeGen/TargetRegisterInfo.h"
65#include "llvm/CodeGen/TargetSubtargetInfo.h"
66#include "llvm/CodeGen/ValueTypes.h"
67#include "llvm/CodeGen/WinEHFuncInfo.h"
68#include "llvm/IR/Argument.h"
69#include "llvm/IR/Attributes.h"
70#include "llvm/IR/BasicBlock.h"
71#include "llvm/IR/CFG.h"
72#include "llvm/IR/CallSite.h"
73#include "llvm/IR/CallingConv.h"
74#include "llvm/IR/Constant.h"
75#include "llvm/IR/ConstantRange.h"
76#include "llvm/IR/Constants.h"
77#include "llvm/IR/DataLayout.h"
78#include "llvm/IR/DebugInfoMetadata.h"
79#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/DerivedTypes.h"
81#include "llvm/IR/Function.h"
82#include "llvm/IR/GetElementPtrTypeIterator.h"
83#include "llvm/IR/InlineAsm.h"
84#include "llvm/IR/InstrTypes.h"
85#include "llvm/IR/Instruction.h"
86#include "llvm/IR/Instructions.h"
87#include "llvm/IR/IntrinsicInst.h"
88#include "llvm/IR/Intrinsics.h"
89#include "llvm/IR/IntrinsicsAArch64.h"
90#include "llvm/IR/IntrinsicsWebAssembly.h"
91#include "llvm/IR/LLVMContext.h"
92#include "llvm/IR/Metadata.h"
93#include "llvm/IR/Module.h"
94#include "llvm/IR/Operator.h"
95#include "llvm/IR/PatternMatch.h"
96#include "llvm/IR/Statepoint.h"
97#include "llvm/IR/Type.h"
98#include "llvm/IR/User.h"
99#include "llvm/IR/Value.h"
100#include "llvm/MC/MCContext.h"
101#include "llvm/MC/MCSymbol.h"
102#include "llvm/Support/AtomicOrdering.h"
103#include "llvm/Support/BranchProbability.h"
104#include "llvm/Support/Casting.h"
105#include "llvm/Support/CodeGen.h"
106#include "llvm/Support/CommandLine.h"
107#include "llvm/Support/Compiler.h"
108#include "llvm/Support/Debug.h"
109#include "llvm/Support/ErrorHandling.h"
110#include "llvm/Support/MachineValueType.h"
111#include "llvm/Support/MathExtras.h"
112#include "llvm/Support/raw_ostream.h"
113#include "llvm/Target/TargetIntrinsicInfo.h"
114#include "llvm/Target/TargetMachine.h"
115#include "llvm/Target/TargetOptions.h"
116#include "llvm/Transforms/Utils/Local.h"
117#include <algorithm>
118#include <cassert>
119#include <cstddef>
120#include <cstdint>
121#include <cstring>
122#include <iterator>
123#include <limits>
124#include <numeric>
125#include <tuple>
126#include <utility>
127#include <vector>
128
129using namespace llvm;
130using namespace PatternMatch;
131using namespace SwitchCG;
132
133#define DEBUG_TYPE"isel" "isel"
134
135/// LimitFloatPrecision - Generate low-precision inline sequences for
136/// some float libcalls (6, 8 or 12 bits).
137static unsigned LimitFloatPrecision;
138
139static cl::opt<unsigned, true>
140 LimitFPPrecision("limit-float-precision",
141 cl::desc("Generate low-precision inline sequences "
142 "for some float libcalls"),
143 cl::location(LimitFloatPrecision), cl::Hidden,
144 cl::init(0));
145
146static cl::opt<unsigned> SwitchPeelThreshold(
147 "switch-peel-threshold", cl::Hidden, cl::init(66),
148 cl::desc("Set the case probability threshold for peeling the case from a "
149 "switch statement. A value greater than 100 will void this "
150 "optimization"));
151
152// Limit the width of DAG chains. This is important in general to prevent
153// DAG-based analysis from blowing up. For example, alias analysis and
154// load clustering may not complete in reasonable time. It is difficult to
155// recognize and avoid this situation within each individual analysis, and
156// future analyses are likely to have the same behavior. Limiting DAG width is
157// the safe approach and will be especially important with global DAGs.
158//
159// MaxParallelChains default is arbitrarily high to avoid affecting
160// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
161// sequence over this should have been converted to llvm.memcpy by the
162// frontend. It is easy to induce this behavior with .ll code such as:
163// %buffer = alloca [4096 x i8]
164// %data = load [4096 x i8]* %argPtr
165// store [4096 x i8] %data, [4096 x i8]* %buffer
166static const unsigned MaxParallelChains = 64;
167
168// Return the calling convention if the Value passed requires ABI mangling as it
169// is a parameter to a function or a return value from a function which is not
170// an intrinsic.
171static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
172 if (auto *R = dyn_cast<ReturnInst>(V))
173 return R->getParent()->getParent()->getCallingConv();
174
175 if (auto *CI = dyn_cast<CallInst>(V)) {
176 const bool IsInlineAsm = CI->isInlineAsm();
177 const bool IsIndirectFunctionCall =
178 !IsInlineAsm && !CI->getCalledFunction();
179
180 // It is possible that the call instruction is an inline asm statement or an
181 // indirect function call in which case the return value of
182 // getCalledFunction() would be nullptr.
183 const bool IsInstrinsicCall =
184 !IsInlineAsm && !IsIndirectFunctionCall &&
185 CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;
186
187 if (!IsInlineAsm && !IsInstrinsicCall)
188 return CI->getCallingConv();
189 }
190
191 return None;
192}
193
194static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
195 const SDValue *Parts, unsigned NumParts,
196 MVT PartVT, EVT ValueVT, const Value *V,
197 Optional<CallingConv::ID> CC);
198
199/// getCopyFromParts - Create a value that contains the specified legal parts
200/// combined into the value they represent. If the parts combine to a type
201/// larger than ValueVT then AssertOp can be used to specify whether the extra
202/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
203/// (ISD::AssertSext).
204static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
205 const SDValue *Parts, unsigned NumParts,
206 MVT PartVT, EVT ValueVT, const Value *V,
207 Optional<CallingConv::ID> CC = None,
208 Optional<ISD::NodeType> AssertOp = None) {
209 if (ValueVT.isVector())
210 return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
211 CC);
212
213 assert(NumParts > 0 && "No parts to assemble!")((NumParts > 0 && "No parts to assemble!") ? static_cast
<void> (0) : __assert_fail ("NumParts > 0 && \"No parts to assemble!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 213, __PRETTY_FUNCTION__))
;
214 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
215 SDValue Val = Parts[0];
216
217 if (NumParts > 1) {
218 // Assemble the value from multiple parts.
219 if (ValueVT.isInteger()) {
220 unsigned PartBits = PartVT.getSizeInBits();
221 unsigned ValueBits = ValueVT.getSizeInBits();
222
223 // Assemble the power of 2 part.
224 unsigned RoundParts =
225 (NumParts & (NumParts - 1)) ? 1 << Log2_32(NumParts) : NumParts;
226 unsigned RoundBits = PartBits * RoundParts;
227 EVT RoundVT = RoundBits == ValueBits ?
228 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
229 SDValue Lo, Hi;
230
231 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
232
233 if (RoundParts > 2) {
234 Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
235 PartVT, HalfVT, V);
236 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
237 RoundParts / 2, PartVT, HalfVT, V);
238 } else {
239 Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
240 Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
241 }
242
243 if (DAG.getDataLayout().isBigEndian())
244 std::swap(Lo, Hi);
245
246 Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
247
248 if (RoundParts < NumParts) {
249 // Assemble the trailing non-power-of-2 part.
250 unsigned OddParts = NumParts - RoundParts;
251 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
252 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
253 OddVT, V, CC);
254
255 // Combine the round and odd parts.
256 Lo = Val;
257 if (DAG.getDataLayout().isBigEndian())
258 std::swap(Lo, Hi);
259 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
260 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
261 Hi =
262 DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
263 DAG.getConstant(Lo.getValueSizeInBits(), DL,
264 TLI.getPointerTy(DAG.getDataLayout())));
265 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
266 Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
267 }
268 } else if (PartVT.isFloatingPoint()) {
269 // FP split into multiple FP parts (for ppcf128)
270 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&((ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
"Unexpected split") ? static_cast<void> (0) : __assert_fail
("ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 && \"Unexpected split\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 271, __PRETTY_FUNCTION__))
271 "Unexpected split")((ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
"Unexpected split") ? static_cast<void> (0) : __assert_fail
("ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 && \"Unexpected split\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 271, __PRETTY_FUNCTION__))
;
272 SDValue Lo, Hi;
273 Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
274 Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
275 if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
276 std::swap(Lo, Hi);
277 Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
278 } else {
279 // FP split into integer parts (soft fp)
280 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&((ValueVT.isFloatingPoint() && PartVT.isInteger() &&
!PartVT.isVector() && "Unexpected split") ? static_cast
<void> (0) : __assert_fail ("ValueVT.isFloatingPoint() && PartVT.isInteger() && !PartVT.isVector() && \"Unexpected split\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 281, __PRETTY_FUNCTION__))
281 !PartVT.isVector() && "Unexpected split")((ValueVT.isFloatingPoint() && PartVT.isInteger() &&
!PartVT.isVector() && "Unexpected split") ? static_cast
<void> (0) : __assert_fail ("ValueVT.isFloatingPoint() && PartVT.isInteger() && !PartVT.isVector() && \"Unexpected split\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 281, __PRETTY_FUNCTION__))
;
282 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
283 Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
284 }
285 }
286
287 // There is now one part, held in Val. Correct it to match ValueVT.
288 // PartEVT is the type of the register class that holds the value.
289 // ValueVT is the type of the inline asm operation.
290 EVT PartEVT = Val.getValueType();
291
292 if (PartEVT == ValueVT)
293 return Val;
294
295 if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
296 ValueVT.bitsLT(PartEVT)) {
297 // For an FP value in an integer part, we need to truncate to the right
298 // width first.
299 PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
300 Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
301 }
302
303 // Handle types that have the same size.
304 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
305 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
306
307 // Handle types with different sizes.
308 if (PartEVT.isInteger() && ValueVT.isInteger()) {
309 if (ValueVT.bitsLT(PartEVT)) {
310 // For a truncate, see if we have any information to
311 // indicate whether the truncated bits will always be
312 // zero or sign-extension.
313 if (AssertOp.hasValue())
314 Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
315 DAG.getValueType(ValueVT));
316 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
317 }
318 return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
319 }
320
321 if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
322 // FP_ROUND's are always exact here.
323 if (ValueVT.bitsLT(Val.getValueType()))
324 return DAG.getNode(
325 ISD::FP_ROUND, DL, ValueVT, Val,
326 DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
327
328 return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
329 }
330
331 // Handle MMX to a narrower integer type by bitcasting MMX to integer and
332 // then truncating.
333 if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
334 ValueVT.bitsLT(PartEVT)) {
335 Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
336 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
337 }
338
339 report_fatal_error("Unknown mismatch in getCopyFromParts!");
340}
341
342static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
343 const Twine &ErrMsg) {
344 const Instruction *I = dyn_cast_or_null<Instruction>(V);
345 if (!V)
346 return Ctx.emitError(ErrMsg);
347
348 const char *AsmError = ", possible invalid constraint for vector type";
349 if (const CallInst *CI = dyn_cast<CallInst>(I))
350 if (isa<InlineAsm>(CI->getCalledValue()))
351 return Ctx.emitError(I, ErrMsg + AsmError);
352
353 return Ctx.emitError(I, ErrMsg);
354}
355
356/// getCopyFromPartsVector - Create a value that contains the specified legal
357/// parts combined into the value they represent. If the parts combine to a
358/// type larger than ValueVT then AssertOp can be used to specify whether the
359/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
360/// ValueVT (ISD::AssertSext).
361static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
362 const SDValue *Parts, unsigned NumParts,
363 MVT PartVT, EVT ValueVT, const Value *V,
364 Optional<CallingConv::ID> CallConv) {
365 assert(ValueVT.isVector() && "Not a vector value")((ValueVT.isVector() && "Not a vector value") ? static_cast
<void> (0) : __assert_fail ("ValueVT.isVector() && \"Not a vector value\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 365, __PRETTY_FUNCTION__))
;
366 assert(NumParts > 0 && "No parts to assemble!")((NumParts > 0 && "No parts to assemble!") ? static_cast
<void> (0) : __assert_fail ("NumParts > 0 && \"No parts to assemble!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 366, __PRETTY_FUNCTION__))
;
367 const bool IsABIRegCopy = CallConv.hasValue();
368
369 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
370 SDValue Val = Parts[0];
371
372 // Handle a multi-element vector.
373 if (NumParts > 1) {
374 EVT IntermediateVT;
375 MVT RegisterVT;
376 unsigned NumIntermediates;
377 unsigned NumRegs;
378
379 if (IsABIRegCopy) {
380 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
381 *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
382 NumIntermediates, RegisterVT);
383 } else {
384 NumRegs =
385 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
386 NumIntermediates, RegisterVT);
387 }
388
389 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!")((NumRegs == NumParts && "Part count doesn't match vector breakdown!"
) ? static_cast<void> (0) : __assert_fail ("NumRegs == NumParts && \"Part count doesn't match vector breakdown!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 389, __PRETTY_FUNCTION__))
;
390 NumParts = NumRegs; // Silence a compiler warning.
391 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!")((RegisterVT == PartVT && "Part type doesn't match vector breakdown!"
) ? static_cast<void> (0) : __assert_fail ("RegisterVT == PartVT && \"Part type doesn't match vector breakdown!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 391, __PRETTY_FUNCTION__))
;
392 assert(RegisterVT.getSizeInBits() ==((RegisterVT.getSizeInBits() == Parts[0].getSimpleValueType()
.getSizeInBits() && "Part type sizes don't match!") ?
static_cast<void> (0) : __assert_fail ("RegisterVT.getSizeInBits() == Parts[0].getSimpleValueType().getSizeInBits() && \"Part type sizes don't match!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 394, __PRETTY_FUNCTION__))
393 Parts[0].getSimpleValueType().getSizeInBits() &&((RegisterVT.getSizeInBits() == Parts[0].getSimpleValueType()
.getSizeInBits() && "Part type sizes don't match!") ?
static_cast<void> (0) : __assert_fail ("RegisterVT.getSizeInBits() == Parts[0].getSimpleValueType().getSizeInBits() && \"Part type sizes don't match!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 394, __PRETTY_FUNCTION__))
394 "Part type sizes don't match!")((RegisterVT.getSizeInBits() == Parts[0].getSimpleValueType()
.getSizeInBits() && "Part type sizes don't match!") ?
static_cast<void> (0) : __assert_fail ("RegisterVT.getSizeInBits() == Parts[0].getSimpleValueType().getSizeInBits() && \"Part type sizes don't match!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 394, __PRETTY_FUNCTION__))
;
395
396 // Assemble the parts into intermediate operands.
397 SmallVector<SDValue, 8> Ops(NumIntermediates);
398 if (NumIntermediates == NumParts) {
399 // If the register was not expanded, truncate or copy the value,
400 // as appropriate.
401 for (unsigned i = 0; i != NumParts; ++i)
402 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
403 PartVT, IntermediateVT, V);
404 } else if (NumParts > 0) {
405 // If the intermediate type was expanded, build the intermediate
406 // operands from the parts.
407 assert(NumParts % NumIntermediates == 0 &&((NumParts % NumIntermediates == 0 && "Must expand into a divisible number of parts!"
) ? static_cast<void> (0) : __assert_fail ("NumParts % NumIntermediates == 0 && \"Must expand into a divisible number of parts!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 408, __PRETTY_FUNCTION__))
408 "Must expand into a divisible number of parts!")((NumParts % NumIntermediates == 0 && "Must expand into a divisible number of parts!"
) ? static_cast<void> (0) : __assert_fail ("NumParts % NumIntermediates == 0 && \"Must expand into a divisible number of parts!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 408, __PRETTY_FUNCTION__))
;
409 unsigned Factor = NumParts / NumIntermediates;
410 for (unsigned i = 0; i != NumIntermediates; ++i)
411 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
412 PartVT, IntermediateVT, V);
413 }
414
415 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
416 // intermediate operands.
417 EVT BuiltVectorTy =
418 EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
419 (IntermediateVT.isVector()
420 ? IntermediateVT.getVectorNumElements() * NumParts
421 : NumIntermediates));
422 Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
423 : ISD::BUILD_VECTOR,
424 DL, BuiltVectorTy, Ops);
425 }
426
427 // There is now one part, held in Val. Correct it to match ValueVT.
428 EVT PartEVT = Val.getValueType();
429
430 if (PartEVT == ValueVT)
431 return Val;
432
433 if (PartEVT.isVector()) {
434 // If the element type of the source/dest vectors are the same, but the
435 // parts vector has more elements than the value vector, then we have a
436 // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
437 // elements we want.
438 if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
439 assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&((PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements
() && "Cannot narrow, it would be a lossy transformation"
) ? static_cast<void> (0) : __assert_fail ("PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() && \"Cannot narrow, it would be a lossy transformation\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 440, __PRETTY_FUNCTION__))
440 "Cannot narrow, it would be a lossy transformation")((PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements
() && "Cannot narrow, it would be a lossy transformation"
) ? static_cast<void> (0) : __assert_fail ("PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() && \"Cannot narrow, it would be a lossy transformation\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 440, __PRETTY_FUNCTION__))
;
441 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
442 DAG.getVectorIdxConstant(0, DL));
443 }
444
445 // Vector/Vector bitcast.
446 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
447 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
448
449 assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&((PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements
() && "Cannot handle this kind of promotion") ? static_cast
<void> (0) : __assert_fail ("PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() && \"Cannot handle this kind of promotion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 450, __PRETTY_FUNCTION__))
450 "Cannot handle this kind of promotion")((PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements
() && "Cannot handle this kind of promotion") ? static_cast
<void> (0) : __assert_fail ("PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() && \"Cannot handle this kind of promotion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 450, __PRETTY_FUNCTION__))
;
451 // Promoted vector extract
452 return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
453
454 }
455
456 // Trivial bitcast if the types are the same size and the destination
457 // vector type is legal.
458 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
459 TLI.isTypeLegal(ValueVT))
460 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
461
462 if (ValueVT.getVectorNumElements() != 1) {
463 // Certain ABIs require that vectors are passed as integers. For vectors
464 // are the same size, this is an obvious bitcast.
465 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
466 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
467 } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
468 // Bitcast Val back the original type and extract the corresponding
469 // vector we want.
470 unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
471 EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
472 ValueVT.getVectorElementType(), Elts);
473 Val = DAG.getBitcast(WiderVecType, Val);
474 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
475 DAG.getVectorIdxConstant(0, DL));
476 }
477
478 diagnosePossiblyInvalidConstraint(
479 *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
480 return DAG.getUNDEF(ValueVT);
481 }
482
483 // Handle cases such as i8 -> <1 x i1>
484 EVT ValueSVT = ValueVT.getVectorElementType();
485 if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
486 if (ValueSVT.getSizeInBits() == PartEVT.getSizeInBits())
487 Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
488 else
489 Val = ValueVT.isFloatingPoint()
490 ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
491 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
492 }
493
494 return DAG.getBuildVector(ValueVT, DL, Val);
495}
496
497static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
498 SDValue Val, SDValue *Parts, unsigned NumParts,
499 MVT PartVT, const Value *V,
500 Optional<CallingConv::ID> CallConv);
501
502/// getCopyToParts - Create a series of nodes that contain the specified value
503/// split into legal parts. If the parts contain more bits than Val, then, for
504/// integers, ExtendKind can be used to specify how to generate the extra bits.
505static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
506 SDValue *Parts, unsigned NumParts, MVT PartVT,
507 const Value *V,
508 Optional<CallingConv::ID> CallConv = None,
509 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
510 EVT ValueVT = Val.getValueType();
511
512 // Handle the vector case separately.
513 if (ValueVT.isVector())
514 return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
515 CallConv);
516
517 unsigned PartBits = PartVT.getSizeInBits();
518 unsigned OrigNumParts = NumParts;
519 assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&((DAG.getTargetLoweringInfo().isTypeLegal(PartVT) && "Copying to an illegal type!"
) ? static_cast<void> (0) : __assert_fail ("DAG.getTargetLoweringInfo().isTypeLegal(PartVT) && \"Copying to an illegal type!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 520, __PRETTY_FUNCTION__))
520 "Copying to an illegal type!")((DAG.getTargetLoweringInfo().isTypeLegal(PartVT) && "Copying to an illegal type!"
) ? static_cast<void> (0) : __assert_fail ("DAG.getTargetLoweringInfo().isTypeLegal(PartVT) && \"Copying to an illegal type!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 520, __PRETTY_FUNCTION__))
;
521
522 if (NumParts == 0)
523 return;
524
525 assert(!ValueVT.isVector() && "Vector case handled elsewhere")((!ValueVT.isVector() && "Vector case handled elsewhere"
) ? static_cast<void> (0) : __assert_fail ("!ValueVT.isVector() && \"Vector case handled elsewhere\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 525, __PRETTY_FUNCTION__))
;
526 EVT PartEVT = PartVT;
527 if (PartEVT == ValueVT) {
528 assert(NumParts == 1 && "No-op copy with multiple parts!")((NumParts == 1 && "No-op copy with multiple parts!")
? static_cast<void> (0) : __assert_fail ("NumParts == 1 && \"No-op copy with multiple parts!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 528, __PRETTY_FUNCTION__))
;
529 Parts[0] = Val;
530 return;
531 }
532
533 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
534 // If the parts cover more bits than the value has, promote the value.
535 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
536 assert(NumParts == 1 && "Do not know what to promote to!")((NumParts == 1 && "Do not know what to promote to!")
? static_cast<void> (0) : __assert_fail ("NumParts == 1 && \"Do not know what to promote to!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 536, __PRETTY_FUNCTION__))
;
537 Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
538 } else {
539 if (ValueVT.isFloatingPoint()) {
540 // FP values need to be bitcast, then extended if they are being put
541 // into a larger container.
542 ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
543 Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
544 }
545 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&(((PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT
.isInteger() && "Unknown mismatch!") ? static_cast<
void> (0) : __assert_fail ("(PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT.isInteger() && \"Unknown mismatch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 547, __PRETTY_FUNCTION__))
546 ValueVT.isInteger() &&(((PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT
.isInteger() && "Unknown mismatch!") ? static_cast<
void> (0) : __assert_fail ("(PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT.isInteger() && \"Unknown mismatch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 547, __PRETTY_FUNCTION__))
547 "Unknown mismatch!")(((PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT
.isInteger() && "Unknown mismatch!") ? static_cast<
void> (0) : __assert_fail ("(PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT.isInteger() && \"Unknown mismatch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 547, __PRETTY_FUNCTION__))
;
548 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
549 Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
550 if (PartVT == MVT::x86mmx)
551 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
552 }
553 } else if (PartBits == ValueVT.getSizeInBits()) {
554 // Different types of the same size.
555 assert(NumParts == 1 && PartEVT != ValueVT)((NumParts == 1 && PartEVT != ValueVT) ? static_cast<
void> (0) : __assert_fail ("NumParts == 1 && PartEVT != ValueVT"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 555, __PRETTY_FUNCTION__))
;
556 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
557 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
558 // If the parts cover less bits than value has, truncate the value.
559 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&(((PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT
.isInteger() && "Unknown mismatch!") ? static_cast<
void> (0) : __assert_fail ("(PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT.isInteger() && \"Unknown mismatch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 561, __PRETTY_FUNCTION__))
560 ValueVT.isInteger() &&(((PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT
.isInteger() && "Unknown mismatch!") ? static_cast<
void> (0) : __assert_fail ("(PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT.isInteger() && \"Unknown mismatch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 561, __PRETTY_FUNCTION__))
561 "Unknown mismatch!")(((PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT
.isInteger() && "Unknown mismatch!") ? static_cast<
void> (0) : __assert_fail ("(PartVT.isInteger() || PartVT == MVT::x86mmx) && ValueVT.isInteger() && \"Unknown mismatch!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 561, __PRETTY_FUNCTION__))
;
562 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
563 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
564 if (PartVT == MVT::x86mmx)
565 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
566 }
567
568 // The value may have changed - recompute ValueVT.
569 ValueVT = Val.getValueType();
570 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&((NumParts * PartBits == ValueVT.getSizeInBits() && "Failed to tile the value with PartVT!"
) ? static_cast<void> (0) : __assert_fail ("NumParts * PartBits == ValueVT.getSizeInBits() && \"Failed to tile the value with PartVT!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 571, __PRETTY_FUNCTION__))
571 "Failed to tile the value with PartVT!")((NumParts * PartBits == ValueVT.getSizeInBits() && "Failed to tile the value with PartVT!"
) ? static_cast<void> (0) : __assert_fail ("NumParts * PartBits == ValueVT.getSizeInBits() && \"Failed to tile the value with PartVT!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 571, __PRETTY_FUNCTION__))
;
572
573 if (NumParts == 1) {
574 if (PartEVT != ValueVT) {
575 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
576 "scalar-to-vector conversion failed");
577 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
578 }
579
580 Parts[0] = Val;
581 return;
582 }
583
584 // Expand the value into multiple parts.
585 if (NumParts & (NumParts - 1)) {
586 // The number of parts is not a power of 2. Split off and copy the tail.
587 assert(PartVT.isInteger() && ValueVT.isInteger() &&((PartVT.isInteger() && ValueVT.isInteger() &&
"Do not know what to expand to!") ? static_cast<void> (
0) : __assert_fail ("PartVT.isInteger() && ValueVT.isInteger() && \"Do not know what to expand to!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 588, __PRETTY_FUNCTION__))
588 "Do not know what to expand to!")((PartVT.isInteger() && ValueVT.isInteger() &&
"Do not know what to expand to!") ? static_cast<void> (
0) : __assert_fail ("PartVT.isInteger() && ValueVT.isInteger() && \"Do not know what to expand to!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 588, __PRETTY_FUNCTION__))
;
589 unsigned RoundParts = 1 << Log2_32(NumParts);
590 unsigned RoundBits = RoundParts * PartBits;
591 unsigned OddParts = NumParts - RoundParts;
592 SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
593 DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));
594
595 getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
596 CallConv);
597
598 if (DAG.getDataLayout().isBigEndian())
599 // The odd parts were reversed by getCopyToParts - unreverse them.
600 std::reverse(Parts + RoundParts, Parts + NumParts);
601
602 NumParts = RoundParts;
603 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
604 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
605 }
606
607 // The number of parts is a power of 2. Repeatedly bisect the value using
608 // EXTRACT_ELEMENT.
609 Parts[0] = DAG.getNode(ISD::BITCAST, DL,
610 EVT::getIntegerVT(*DAG.getContext(),
611 ValueVT.getSizeInBits()),
612 Val);
613
614 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
615 for (unsigned i = 0; i < NumParts; i += StepSize) {
616 unsigned ThisBits = StepSize * PartBits / 2;
617 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
618 SDValue &Part0 = Parts[i];
619 SDValue &Part1 = Parts[i+StepSize/2];
620
621 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
622 ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
623 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
624 ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
625
626 if (ThisBits == PartBits && ThisVT != PartVT) {
627 Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
628 Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
629 }
630 }
631 }
632
633 if (DAG.getDataLayout().isBigEndian())
634 std::reverse(Parts, Parts + OrigNumParts);
635}
636
637static SDValue widenVectorToPartType(SelectionDAG &DAG,
638 SDValue Val, const SDLoc &DL, EVT PartVT) {
639 if (!PartVT.isVector())
640 return SDValue();
641
642 EVT ValueVT = Val.getValueType();
643 unsigned PartNumElts = PartVT.getVectorNumElements();
644 unsigned ValueNumElts = ValueVT.getVectorNumElements();
645 if (PartNumElts > ValueNumElts &&
646 PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
647 EVT ElementVT = PartVT.getVectorElementType();
648 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
649 // undef elements.
650 SmallVector<SDValue, 16> Ops;
651 DAG.ExtractVectorElements(Val, Ops);
652 SDValue EltUndef = DAG.getUNDEF(ElementVT);
653 for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
654 Ops.push_back(EltUndef);
655
656 // FIXME: Use CONCAT for 2x -> 4x.
657 return DAG.getBuildVector(PartVT, DL, Ops);
658 }
659
660 return SDValue();
661}
662
663/// getCopyToPartsVector - Create a series of nodes that contain the specified
664/// value split into legal parts.
665static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
666 SDValue Val, SDValue *Parts, unsigned NumParts,
667 MVT PartVT, const Value *V,
668 Optional<CallingConv::ID> CallConv) {
669 EVT ValueVT = Val.getValueType();
670 assert(ValueVT.isVector() && "Not a vector")((ValueVT.isVector() && "Not a vector") ? static_cast
<void> (0) : __assert_fail ("ValueVT.isVector() && \"Not a vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 670, __PRETTY_FUNCTION__))
;
671 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
672 const bool IsABIRegCopy = CallConv.hasValue();
673
674 if (NumParts == 1) {
675 EVT PartEVT = PartVT;
676 if (PartEVT == ValueVT) {
677 // Nothing to do.
678 } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
679 // Bitconvert vector->vector case.
680 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
681 } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
682 Val = Widened;
683 } else if (PartVT.isVector() &&
684 PartEVT.getVectorElementType().bitsGE(
685 ValueVT.getVectorElementType()) &&
686 PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
687
688 // Promoted vector extract
689 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
690 } else {
691 if (ValueVT.getVectorNumElements() == 1) {
692 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
693 DAG.getVectorIdxConstant(0, DL));
694 } else {
695 assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&((PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
"lossy conversion of vector to scalar type") ? static_cast<
void> (0) : __assert_fail ("PartVT.getSizeInBits() > ValueVT.getSizeInBits() && \"lossy conversion of vector to scalar type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 696, __PRETTY_FUNCTION__))
696 "lossy conversion of vector to scalar type")((PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
"lossy conversion of vector to scalar type") ? static_cast<
void> (0) : __assert_fail ("PartVT.getSizeInBits() > ValueVT.getSizeInBits() && \"lossy conversion of vector to scalar type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 696, __PRETTY_FUNCTION__))
;
697 EVT IntermediateType =
698 EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
699 Val = DAG.getBitcast(IntermediateType, Val);
700 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
701 }
702 }
703
704 assert(Val.getValueType() == PartVT && "Unexpected vector part value type")((Val.getValueType() == PartVT && "Unexpected vector part value type"
) ? static_cast<void> (0) : __assert_fail ("Val.getValueType() == PartVT && \"Unexpected vector part value type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 704, __PRETTY_FUNCTION__))
;
705 Parts[0] = Val;
706 return;
707 }
708
709 // Handle a multi-element vector.
710 EVT IntermediateVT;
711 MVT RegisterVT;
712 unsigned NumIntermediates;
713 unsigned NumRegs;
714 if (IsABIRegCopy) {
715 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
716 *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
717 NumIntermediates, RegisterVT);
718 } else {
719 NumRegs =
720 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
721 NumIntermediates, RegisterVT);
722 }
723
724 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!")((NumRegs == NumParts && "Part count doesn't match vector breakdown!"
) ? static_cast<void> (0) : __assert_fail ("NumRegs == NumParts && \"Part count doesn't match vector breakdown!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 724, __PRETTY_FUNCTION__))
;
725 NumParts = NumRegs; // Silence a compiler warning.
726 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!")((RegisterVT == PartVT && "Part type doesn't match vector breakdown!"
) ? static_cast<void> (0) : __assert_fail ("RegisterVT == PartVT && \"Part type doesn't match vector breakdown!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 726, __PRETTY_FUNCTION__))
;
727
728 unsigned IntermediateNumElts = IntermediateVT.isVector() ?
729 IntermediateVT.getVectorNumElements() : 1;
730
731 // Convert the vector to the appropriate type if necessary.
732 unsigned DestVectorNoElts = NumIntermediates * IntermediateNumElts;
733
734 EVT BuiltVectorTy = EVT::getVectorVT(
735 *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
736 if (ValueVT != BuiltVectorTy) {
737 if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
738 Val = Widened;
739
740 Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
741 }
742
743 // Split the vector into intermediate operands.
744 SmallVector<SDValue, 8> Ops(NumIntermediates);
745 for (unsigned i = 0; i != NumIntermediates; ++i) {
746 if (IntermediateVT.isVector()) {
747 Ops[i] =
748 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
749 DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
750 } else {
751 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
752 DAG.getVectorIdxConstant(i, DL));
753 }
754 }
755
756 // Split the intermediate operands into legal parts.
757 if (NumParts == NumIntermediates) {
758 // If the register was not expanded, promote or copy the value,
759 // as appropriate.
760 for (unsigned i = 0; i != NumParts; ++i)
761 getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
762 } else if (NumParts > 0) {
763 // If the intermediate type was expanded, split each the value into
764 // legal parts.
765 assert(NumIntermediates != 0 && "division by zero")((NumIntermediates != 0 && "division by zero") ? static_cast
<void> (0) : __assert_fail ("NumIntermediates != 0 && \"division by zero\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 765, __PRETTY_FUNCTION__))
;
766 assert(NumParts % NumIntermediates == 0 &&((NumParts % NumIntermediates == 0 && "Must expand into a divisible number of parts!"
) ? static_cast<void> (0) : __assert_fail ("NumParts % NumIntermediates == 0 && \"Must expand into a divisible number of parts!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 767, __PRETTY_FUNCTION__))
767 "Must expand into a divisible number of parts!")((NumParts % NumIntermediates == 0 && "Must expand into a divisible number of parts!"
) ? static_cast<void> (0) : __assert_fail ("NumParts % NumIntermediates == 0 && \"Must expand into a divisible number of parts!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 767, __PRETTY_FUNCTION__))
;
768 unsigned Factor = NumParts / NumIntermediates;
769 for (unsigned i = 0; i != NumIntermediates; ++i)
770 getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
771 CallConv);
772 }
773}
774
775RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
776 EVT valuevt, Optional<CallingConv::ID> CC)
777 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
778 RegCount(1, regs.size()), CallConv(CC) {}
779
780RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
781 const DataLayout &DL, unsigned Reg, Type *Ty,
782 Optional<CallingConv::ID> CC) {
783 ComputeValueVTs(TLI, DL, Ty, ValueVTs);
784
785 CallConv = CC;
786
787 for (EVT ValueVT : ValueVTs) {
788 unsigned NumRegs =
789 isABIMangled()
790 ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
791 : TLI.getNumRegisters(Context, ValueVT);
792 MVT RegisterVT =
793 isABIMangled()
794 ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
795 : TLI.getRegisterType(Context, ValueVT);
796 for (unsigned i = 0; i != NumRegs; ++i)
797 Regs.push_back(Reg + i);
798 RegVTs.push_back(RegisterVT);
799 RegCount.push_back(NumRegs);
800 Reg += NumRegs;
801 }
802}
803
804SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
805 FunctionLoweringInfo &FuncInfo,
806 const SDLoc &dl, SDValue &Chain,
807 SDValue *Flag, const Value *V) const {
808 // A Value with type {} or [0 x %t] needs no registers.
809 if (ValueVTs.empty())
810 return SDValue();
811
812 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
813
814 // Assemble the legal parts into the final values.
815 SmallVector<SDValue, 4> Values(ValueVTs.size());
816 SmallVector<SDValue, 8> Parts;
817 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
818 // Copy the legal parts from the registers.
819 EVT ValueVT = ValueVTs[Value];
820 unsigned NumRegs = RegCount[Value];
821 MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
822 *DAG.getContext(),
823 CallConv.getValue(), RegVTs[Value])
824 : RegVTs[Value];
825
826 Parts.resize(NumRegs);
827 for (unsigned i = 0; i != NumRegs; ++i) {
828 SDValue P;
829 if (!Flag) {
830 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
831 } else {
832 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
833 *Flag = P.getValue(2);
834 }
835
836 Chain = P.getValue(1);
837 Parts[i] = P;
838
839 // If the source register was virtual and if we know something about it,
840 // add an assert node.
841 if (!Register::isVirtualRegister(Regs[Part + i]) ||
842 !RegisterVT.isInteger())
843 continue;
844
845 const FunctionLoweringInfo::LiveOutInfo *LOI =
846 FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
847 if (!LOI)
848 continue;
849
850 unsigned RegSize = RegisterVT.getScalarSizeInBits();
851 unsigned NumSignBits = LOI->NumSignBits;
852 unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
853
854 if (NumZeroBits == RegSize) {
855 // The current value is a zero.
856 // Explicitly express that as it would be easier for
857 // optimizations to kick in.
858 Parts[i] = DAG.getConstant(0, dl, RegisterVT);
859 continue;
860 }
861
862 // FIXME: We capture more information than the dag can represent. For
863 // now, just use the tightest assertzext/assertsext possible.
864 bool isSExt;
865 EVT FromVT(MVT::Other);
866 if (NumZeroBits) {
867 FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
868 isSExt = false;
869 } else if (NumSignBits > 1) {
870 FromVT =
871 EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
872 isSExt = true;
873 } else {
874 continue;
875 }
876 // Add an assertion node.
877 assert(FromVT != MVT::Other)((FromVT != MVT::Other) ? static_cast<void> (0) : __assert_fail
("FromVT != MVT::Other", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 877, __PRETTY_FUNCTION__))
;
878 Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
879 RegisterVT, P, DAG.getValueType(FromVT));
880 }
881
882 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
883 RegisterVT, ValueVT, V, CallConv);
884 Part += NumRegs;
885 Parts.clear();
886 }
887
888 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
889}
890
891void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
892 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
893 const Value *V,
894 ISD::NodeType PreferredExtendType) const {
895 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
896 ISD::NodeType ExtendKind = PreferredExtendType;
897
898 // Get the list of the values's legal parts.
899 unsigned NumRegs = Regs.size();
900 SmallVector<SDValue, 8> Parts(NumRegs);
901 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
902 unsigned NumParts = RegCount[Value];
903
904 MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
905 *DAG.getContext(),
906 CallConv.getValue(), RegVTs[Value])
907 : RegVTs[Value];
908
909 if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
910 ExtendKind = ISD::ZERO_EXTEND;
911
912 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
913 NumParts, RegisterVT, V, CallConv, ExtendKind);
914 Part += NumParts;
915 }
916
917 // Copy the parts into the registers.
918 SmallVector<SDValue, 8> Chains(NumRegs);
919 for (unsigned i = 0; i != NumRegs; ++i) {
920 SDValue Part;
921 if (!Flag) {
922 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
923 } else {
924 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
925 *Flag = Part.getValue(1);
926 }
927
928 Chains[i] = Part.getValue(0);
929 }
930
931 if (NumRegs == 1 || Flag)
932 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
933 // flagged to it. That is the CopyToReg nodes and the user are considered
934 // a single scheduling unit. If we create a TokenFactor and return it as
935 // chain, then the TokenFactor is both a predecessor (operand) of the
936 // user as well as a successor (the TF operands are flagged to the user).
937 // c1, f1 = CopyToReg
938 // c2, f2 = CopyToReg
939 // c3 = TokenFactor c1, c2
940 // ...
941 // = op c3, ..., f2
942 Chain = Chains[NumRegs-1];
943 else
944 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
945}
946
947void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
948 unsigned MatchingIdx, const SDLoc &dl,
949 SelectionDAG &DAG,
950 std::vector<SDValue> &Ops) const {
951 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
952
953 unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
954 if (HasMatching)
955 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
956 else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
957 // Put the register class of the virtual registers in the flag word. That
958 // way, later passes can recompute register class constraints for inline
959 // assembly as well as normal instructions.
960 // Don't do this for tied operands that can use the regclass information
961 // from the def.
962 const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
963 const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
964 Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
965 }
966
967 SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
968 Ops.push_back(Res);
969
970 if (Code == InlineAsm::Kind_Clobber) {
971 // Clobbers should always have a 1:1 mapping with registers, and may
972 // reference registers that have illegal (e.g. vector) types. Hence, we
973 // shouldn't try to apply any sort of splitting logic to them.
974 assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&((Regs.size() == RegVTs.size() && Regs.size() == ValueVTs
.size() && "No 1:1 mapping from clobbers to regs?") ?
static_cast<void> (0) : __assert_fail ("Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() && \"No 1:1 mapping from clobbers to regs?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 975, __PRETTY_FUNCTION__))
975 "No 1:1 mapping from clobbers to regs?")((Regs.size() == RegVTs.size() && Regs.size() == ValueVTs
.size() && "No 1:1 mapping from clobbers to regs?") ?
static_cast<void> (0) : __assert_fail ("Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() && \"No 1:1 mapping from clobbers to regs?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 975, __PRETTY_FUNCTION__))
;
976 unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
977 (void)SP;
978 for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
979 Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
980 assert((((Regs[I] != SP || DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment
()) && "If we clobbered the stack pointer, MFI should know about it."
) ? static_cast<void> (0) : __assert_fail ("(Regs[I] != SP || DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) && \"If we clobbered the stack pointer, MFI should know about it.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 983, __PRETTY_FUNCTION__))
981 (Regs[I] != SP ||(((Regs[I] != SP || DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment
()) && "If we clobbered the stack pointer, MFI should know about it."
) ? static_cast<void> (0) : __assert_fail ("(Regs[I] != SP || DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) && \"If we clobbered the stack pointer, MFI should know about it.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 983, __PRETTY_FUNCTION__))
982 DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&(((Regs[I] != SP || DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment
()) && "If we clobbered the stack pointer, MFI should know about it."
) ? static_cast<void> (0) : __assert_fail ("(Regs[I] != SP || DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) && \"If we clobbered the stack pointer, MFI should know about it.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 983, __PRETTY_FUNCTION__))
983 "If we clobbered the stack pointer, MFI should know about it.")(((Regs[I] != SP || DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment
()) && "If we clobbered the stack pointer, MFI should know about it."
) ? static_cast<void> (0) : __assert_fail ("(Regs[I] != SP || DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) && \"If we clobbered the stack pointer, MFI should know about it.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 983, __PRETTY_FUNCTION__))
;
984 }
985 return;
986 }
987
988 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
989 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
990 MVT RegisterVT = RegVTs[Value];
991 for (unsigned i = 0; i != NumRegs; ++i) {
992 assert(Reg < Regs.size() && "Mismatch in # registers expected")((Reg < Regs.size() && "Mismatch in # registers expected"
) ? static_cast<void> (0) : __assert_fail ("Reg < Regs.size() && \"Mismatch in # registers expected\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 992, __PRETTY_FUNCTION__))
;
993 unsigned TheReg = Regs[Reg++];
994 Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
995 }
996 }
997}
998
999SmallVector<std::pair<unsigned, unsigned>, 4>
1000RegsForValue::getRegsAndSizes() const {
1001 SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
1002 unsigned I = 0;
1003 for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
1004 unsigned RegCount = std::get<0>(CountAndVT);
1005 MVT RegisterVT = std::get<1>(CountAndVT);
1006 unsigned RegisterSize = RegisterVT.getSizeInBits();
1007 for (unsigned E = I + RegCount; I != E; ++I)
1008 OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
1009 }
1010 return OutVec;
1011}
1012
1013void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
1014 const TargetLibraryInfo *li) {
1015 AA = aa;
1016 GFI = gfi;
1017 LibInfo = li;
1018 DL = &DAG.getDataLayout();
1019 Context = DAG.getContext();
1020 LPadToCallSiteMap.clear();
1021 SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
1022}
1023
1024void SelectionDAGBuilder::clear() {
1025 NodeMap.clear();
1026 UnusedArgNodeMap.clear();
1027 PendingLoads.clear();
1028 PendingExports.clear();
1029 PendingConstrainedFP.clear();
1030 PendingConstrainedFPStrict.clear();
1031 CurInst = nullptr;
1032 HasTailCall = false;
1033 SDNodeOrder = LowestSDNodeOrder;
1034 StatepointLowering.clear();
1035}
1036
1037void SelectionDAGBuilder::clearDanglingDebugInfo() {
1038 DanglingDebugInfoMap.clear();
1039}
1040
1041// Update DAG root to include dependencies on Pending chains.
1042SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
1043 SDValue Root = DAG.getRoot();
1044
1045 if (Pending.empty())
1046 return Root;
1047
1048 // Add current root to PendingChains, unless we already indirectly
1049 // depend on it.
1050 if (Root.getOpcode() != ISD::EntryToken) {
1051 unsigned i = 0, e = Pending.size();
1052 for (; i != e; ++i) {
1053 assert(Pending[i].getNode()->getNumOperands() > 1)((Pending[i].getNode()->getNumOperands() > 1) ? static_cast
<void> (0) : __assert_fail ("Pending[i].getNode()->getNumOperands() > 1"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1053, __PRETTY_FUNCTION__))
;
1054 if (Pending[i].getNode()->getOperand(0) == Root)
1055 break; // Don't add the root if we already indirectly depend on it.
1056 }
1057
1058 if (i == e)
1059 Pending.push_back(Root);
1060 }
1061
1062 if (Pending.size() == 1)
1063 Root = Pending[0];
1064 else
1065 Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
1066
1067 DAG.setRoot(Root);
1068 Pending.clear();
1069 return Root;
1070}
1071
1072SDValue SelectionDAGBuilder::getMemoryRoot() {
1073 return updateRoot(PendingLoads);
1074}
1075
1076SDValue SelectionDAGBuilder::getRoot() {
1077 // Chain up all pending constrained intrinsics together with all
1078 // pending loads, by simply appending them to PendingLoads and
1079 // then calling getMemoryRoot().
1080 PendingLoads.reserve(PendingLoads.size() +
1081 PendingConstrainedFP.size() +
1082 PendingConstrainedFPStrict.size());
1083 PendingLoads.append(PendingConstrainedFP.begin(),
1084 PendingConstrainedFP.end());
1085 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1086 PendingConstrainedFPStrict.end());
1087 PendingConstrainedFP.clear();
1088 PendingConstrainedFPStrict.clear();
1089 return getMemoryRoot();
1090}
1091
1092SDValue SelectionDAGBuilder::getControlRoot() {
1093 // We need to emit pending fpexcept.strict constrained intrinsics,
1094 // so append them to the PendingExports list.
1095 PendingExports.append(PendingConstrainedFPStrict.begin(),
1096 PendingConstrainedFPStrict.end());
1097 PendingConstrainedFPStrict.clear();
1098 return updateRoot(PendingExports);
1099}
1100
1101void SelectionDAGBuilder::visit(const Instruction &I) {
1102 // Set up outgoing PHI node register values before emitting the terminator.
1103 if (I.isTerminator()) {
1104 HandlePHINodesInSuccessorBlocks(I.getParent());
1105 }
1106
1107 // Increase the SDNodeOrder if dealing with a non-debug instruction.
1108 if (!isa<DbgInfoIntrinsic>(I))
1109 ++SDNodeOrder;
1110
1111 CurInst = &I;
1112
1113 visit(I.getOpcode(), I);
1114
1115 if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
1116 // Propagate the fast-math-flags of this IR instruction to the DAG node that
1117 // maps to this instruction.
1118 // TODO: We could handle all flags (nsw, etc) here.
1119 // TODO: If an IR instruction maps to >1 node, only the final node will have
1120 // flags set.
1121 if (SDNode *Node = getNodeForIRValue(&I)) {
1122 SDNodeFlags IncomingFlags;
1123 IncomingFlags.copyFMF(*FPMO);
1124 if (!Node->getFlags().isDefined())
1125 Node->setFlags(IncomingFlags);
1126 else
1127 Node->intersectFlagsWith(IncomingFlags);
1128 }
1129 }
1130 // Constrained FP intrinsics with fpexcept.ignore should also get
1131 // the NoFPExcept flag.
1132 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(&I))
1133 if (FPI->getExceptionBehavior() == fp::ExceptionBehavior::ebIgnore)
1134 if (SDNode *Node = getNodeForIRValue(&I)) {
1135 SDNodeFlags Flags = Node->getFlags();
1136 Flags.setNoFPExcept(true);
1137 Node->setFlags(Flags);
1138 }
1139
1140 if (!I.isTerminator() && !HasTailCall &&
1141 !isStatepoint(&I)) // statepoints handle their exports internally
1142 CopyToExportRegsIfNeeded(&I);
1143
1144 CurInst = nullptr;
1145}
1146
1147void SelectionDAGBuilder::visitPHI(const PHINode &) {
1148 llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!")::llvm::llvm_unreachable_internal("SelectionDAGBuilder shouldn't visit PHI nodes!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1148)
;
1149}
1150
1151void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1152 // Note: this doesn't use InstVisitor, because it has to work with
1153 // ConstantExpr's in addition to instructions.
1154 switch (Opcode) {
1155 default: llvm_unreachable("Unknown instruction type encountered!")::llvm::llvm_unreachable_internal("Unknown instruction type encountered!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1155)
;
1156 // Build the switch statement using the Instruction.def file.
1157#define HANDLE_INST(NUM, OPCODE, CLASS) \
1158 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1159#include "llvm/IR/Instruction.def"
1160 }
1161}
1162
1163void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1164 const DIExpression *Expr) {
1165 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1166 const DbgValueInst *DI = DDI.getDI();
1167 DIVariable *DanglingVariable = DI->getVariable();
1168 DIExpression *DanglingExpr = DI->getExpression();
1169 if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1170 LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << "Dropping dangling debug info for "
<< *DI << "\n"; } } while (false)
;
1171 return true;
1172 }
1173 return false;
1174 };
1175
1176 for (auto &DDIMI : DanglingDebugInfoMap) {
1177 DanglingDebugInfoVector &DDIV = DDIMI.second;
1178
1179 // If debug info is to be dropped, run it through final checks to see
1180 // whether it can be salvaged.
1181 for (auto &DDI : DDIV)
1182 if (isMatchingDbgValue(DDI))
1183 salvageUnresolvedDbgValue(DDI);
1184
1185 DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
1186 }
1187}
1188
1189// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1190// generate the debug data structures now that we've seen its definition.
1191void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1192 SDValue Val) {
1193 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1194 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1195 return;
1196
1197 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1198 for (auto &DDI : DDIV) {
1199 const DbgValueInst *DI = DDI.getDI();
1200 assert(DI && "Ill-formed DanglingDebugInfo")((DI && "Ill-formed DanglingDebugInfo") ? static_cast
<void> (0) : __assert_fail ("DI && \"Ill-formed DanglingDebugInfo\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1200, __PRETTY_FUNCTION__))
;
1201 DebugLoc dl = DDI.getdl();
1202 unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1203 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1204 DILocalVariable *Variable = DI->getVariable();
1205 DIExpression *Expr = DI->getExpression();
1206 assert(Variable->isValidLocationForIntrinsic(dl) &&((Variable->isValidLocationForIntrinsic(dl) && "Expected inlined-at fields to agree"
) ? static_cast<void> (0) : __assert_fail ("Variable->isValidLocationForIntrinsic(dl) && \"Expected inlined-at fields to agree\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1207, __PRETTY_FUNCTION__))
1207 "Expected inlined-at fields to agree")((Variable->isValidLocationForIntrinsic(dl) && "Expected inlined-at fields to agree"
) ? static_cast<void> (0) : __assert_fail ("Variable->isValidLocationForIntrinsic(dl) && \"Expected inlined-at fields to agree\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1207, __PRETTY_FUNCTION__))
;
1208 SDDbgValue *SDV;
1209 if (Val.getNode()) {
1210 // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1211 // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1212 // we couldn't resolve it directly when examining the DbgValue intrinsic
1213 // in the first place we should not be more successful here). Unless we
1214 // have some test case that prove this to be correct we should avoid
1215 // calling EmitFuncArgumentDbgValue here.
1216 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
1217 LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << "Resolve dangling debug info [order="
<< DbgSDNodeOrder << "] for:\n " << *DI <<
"\n"; } } while (false)
1218 << DbgSDNodeOrder << "] for:\n " << *DI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << "Resolve dangling debug info [order="
<< DbgSDNodeOrder << "] for:\n " << *DI <<
"\n"; } } while (false)
;
1219 LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << " By mapping to:\n "; Val.dump
(); } } while (false)
;
1220 // Increase the SDNodeOrder for the DbgValue here to make sure it is
1221 // inserted after the definition of Val when emitting the instructions
1222 // after ISel. An alternative could be to teach
1223 // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1224 LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { if (ValSDNodeOrder > DbgSDNodeOrder) dbgs() <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to " << ValSDNodeOrder << "\n"; } } while (false
)
1225 << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { if (ValSDNodeOrder > DbgSDNodeOrder) dbgs() <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to " << ValSDNodeOrder << "\n"; } } while (false
)
1226 << ValSDNodeOrder << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { if (ValSDNodeOrder > DbgSDNodeOrder) dbgs() <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to " << ValSDNodeOrder << "\n"; } } while (false
)
;
1227 SDV = getDbgValue(Val, Variable, Expr, dl,
1228 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1229 DAG.AddDbgValue(SDV, Val.getNode(), false);
1230 } else
1231 LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DIdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << "Resolved dangling debug info for "
<< *DI << "in EmitFuncArgumentDbgValue\n"; } } while
(false)
1232 << "in EmitFuncArgumentDbgValue\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << "Resolved dangling debug info for "
<< *DI << "in EmitFuncArgumentDbgValue\n"; } } while
(false)
;
1233 } else {
1234 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << "Dropping debug info for " <<
*DI << "\n"; } } while (false)
;
1235 auto Undef =
1236 UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
1237 auto SDV =
1238 DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
1239 DAG.AddDbgValue(SDV, nullptr, false);
1240 }
1241 }
1242 DDIV.clear();
1243}
1244
1245void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
1246 Value *V = DDI.getDI()->getValue();
1247 DILocalVariable *Var = DDI.getDI()->getVariable();
1248 DIExpression *Expr = DDI.getDI()->getExpression();
1249 DebugLoc DL = DDI.getdl();
1250 DebugLoc InstDL = DDI.getDI()->getDebugLoc();
1251 unsigned SDOrder = DDI.getSDNodeOrder();
1252
1253 // Currently we consider only dbg.value intrinsics -- we tell the salvager
1254 // that DW_OP_stack_value is desired.
1255 assert(isa<DbgValueInst>(DDI.getDI()))((isa<DbgValueInst>(DDI.getDI())) ? static_cast<void
> (0) : __assert_fail ("isa<DbgValueInst>(DDI.getDI())"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1255, __PRETTY_FUNCTION__))
;
1256 bool StackValue = true;
1257
1258 // Can this Value can be encoded without any further work?
1259 if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
1260 return;
1261
1262 // Attempt to salvage back through as many instructions as possible. Bail if
1263 // a non-instruction is seen, such as a constant expression or global
1264 // variable. FIXME: Further work could recover those too.
1265 while (isa<Instruction>(V)) {
1266 Instruction &VAsInst = *cast<Instruction>(V);
1267 DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);
1268
1269 // If we cannot salvage any further, and haven't yet found a suitable debug
1270 // expression, bail out.
1271 if (!NewExpr)
1272 break;
1273
1274 // New value and expr now represent this debuginfo.
1275 V = VAsInst.getOperand(0);
1276 Expr = NewExpr;
1277
1278 // Some kind of simplification occurred: check whether the operand of the
1279 // salvaged debug expression can be encoded in this DAG.
1280 if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
1281 LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << "Salvaged debug location info for:\n "
<< DDI.getDI() << "\nBy stripping back to:\n " <<
V; } } while (false)
1282 << DDI.getDI() << "\nBy stripping back to:\n " << V)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << "Salvaged debug location info for:\n "
<< DDI.getDI() << "\nBy stripping back to:\n " <<
V; } } while (false)
;
1283 return;
1284 }
1285 }
1286
1287 // This was the final opportunity to salvage this debug information, and it
1288 // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1289 // any earlier variable location.
1290 auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
1291 auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1292 DAG.AddDbgValue(SDV, nullptr, false);
1293
1294 LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n " << DDI.getDI()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << "Dropping debug value info for:\n "
<< DDI.getDI() << "\n"; } } while (false)
1295 << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << "Dropping debug value info for:\n "
<< DDI.getDI() << "\n"; } } while (false)
;
1296 LLVM_DEBUG(dbgs() << " Last seen at:\n " << *DDI.getDI()->getOperand(0)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << " Last seen at:\n " << *
DDI.getDI()->getOperand(0) << "\n"; } } while (false
)
1297 << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("isel")) { dbgs() << " Last seen at:\n " << *
DDI.getDI()->getOperand(0) << "\n"; } } while (false
)
;
1298}
1299
1300bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
1301 DIExpression *Expr, DebugLoc dl,
1302 DebugLoc InstDL, unsigned Order) {
1303 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1304 SDDbgValue *SDV;
1305 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1306 isa<ConstantPointerNull>(V)) {
1307 SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
1308 DAG.AddDbgValue(SDV, nullptr, false);
1309 return true;
1310 }
1311
1312 // If the Value is a frame index, we can create a FrameIndex debug value
1313 // without relying on the DAG at all.
1314 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1315 auto SI = FuncInfo.StaticAllocaMap.find(AI);
1316 if (SI != FuncInfo.StaticAllocaMap.end()) {
1317 auto SDV =
1318 DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
1319 /*IsIndirect*/ false, dl, SDNodeOrder);
1320 // Do not attach the SDNodeDbgValue to an SDNode: this variable location
1321 // is still available even if the SDNode gets optimized out.
1322 DAG.AddDbgValue(SDV, nullptr, false);
1323 return true;
1324 }
1325 }
1326
1327 // Do not use getValue() in here; we don't want to generate code at
1328 // this point if it hasn't been done yet.
1329 SDValue N = NodeMap[V];
1330 if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1331 N = UnusedArgNodeMap[V];
1332 if (N.getNode()) {
1333 if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
1334 return true;
1335 SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
1336 DAG.AddDbgValue(SDV, N.getNode(), false);
1337 return true;
1338 }
1339
1340 // Special rules apply for the first dbg.values of parameter variables in a
1341 // function. Identify them by the fact they reference Argument Values, that
1342 // they're parameters, and they are parameters of the current function. We
1343 // need to let them dangle until they get an SDNode.
1344 bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
1345 !InstDL.getInlinedAt();
1346 if (!IsParamOfFunc) {
1347 // The value is not used in this block yet (or it would have an SDNode).
1348 // We still want the value to appear for the user if possible -- if it has
1349 // an associated VReg, we can refer to that instead.
1350 auto VMI = FuncInfo.ValueMap.find(V);
1351 if (VMI != FuncInfo.ValueMap.end()) {
1352 unsigned Reg = VMI->second;
1353 // If this is a PHI node, it may be split up into several MI PHI nodes
1354 // (in FunctionLoweringInfo::set).
1355 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1356 V->getType(), None);
1357 if (RFV.occupiesMultipleRegs()) {
1358 unsigned Offset = 0;
1359 unsigned BitsToDescribe = 0;
1360 if (auto VarSize = Var->getSizeInBits())
1361 BitsToDescribe = *VarSize;
1362 if (auto Fragment = Expr->getFragmentInfo())
1363 BitsToDescribe = Fragment->SizeInBits;
1364 for (auto RegAndSize : RFV.getRegsAndSizes()) {
1365 unsigned RegisterSize = RegAndSize.second;
1366 // Bail out if all bits are described already.
1367 if (Offset >= BitsToDescribe)
1368 break;
1369 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1370 ? BitsToDescribe - Offset
1371 : RegisterSize;
1372 auto FragmentExpr = DIExpression::createFragmentExpression(
1373 Expr, Offset, FragmentSize);
1374 if (!FragmentExpr)
1375 continue;
1376 SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
1377 false, dl, SDNodeOrder);
1378 DAG.AddDbgValue(SDV, nullptr, false);
1379 Offset += RegisterSize;
1380 }
1381 } else {
1382 SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
1383 DAG.AddDbgValue(SDV, nullptr, false);
1384 }
1385 return true;
1386 }
1387 }
1388
1389 return false;
1390}
1391
1392void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1393 // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1394 for (auto &Pair : DanglingDebugInfoMap)
1395 for (auto &DDI : Pair.second)
1396 salvageUnresolvedDbgValue(DDI);
1397 clearDanglingDebugInfo();
1398}
1399
1400/// getCopyFromRegs - If there was virtual register allocated for the value V
1401/// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
1402SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1403 DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
1404 SDValue Result;
1405
1406 if (It != FuncInfo.ValueMap.end()) {
1407 unsigned InReg = It->second;
1408
1409 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1410 DAG.getDataLayout(), InReg, Ty,
1411 None); // This is not an ABI copy.
1412 SDValue Chain = DAG.getEntryNode();
1413 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1414 V);
1415 resolveDanglingDebugInfo(V, Result);
1416 }
1417
1418 return Result;
1419}
1420
1421/// getValue - Return an SDValue for the given Value.
1422SDValue SelectionDAGBuilder::getValue(const Value *V) {
1423 // If we already have an SDValue for this value, use it. It's important
1424 // to do this first, so that we don't create a CopyFromReg if we already
1425 // have a regular SDValue.
1426 SDValue &N = NodeMap[V];
1427 if (N.getNode()) return N;
1428
1429 // If there's a virtual register allocated and initialized for this
1430 // value, use it.
1431 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1432 return copyFromReg;
1433
1434 // Otherwise create a new SDValue and remember it.
1435 SDValue Val = getValueImpl(V);
1436 NodeMap[V] = Val;
1437 resolveDanglingDebugInfo(V, Val);
1438 return Val;
1439}
1440
1441// Return true if SDValue exists for the given Value
1442bool SelectionDAGBuilder::findValue(const Value *V) const {
1443 return (NodeMap.find(V) != NodeMap.end()) ||
1444 (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
1445}
1446
1447/// getNonRegisterValue - Return an SDValue for the given Value, but
1448/// don't look in FuncInfo.ValueMap for a virtual register.
1449SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1450 // If we already have an SDValue for this value, use it.
1451 SDValue &N = NodeMap[V];
1452 if (N.getNode()) {
1453 if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1454 // Remove the debug location from the node as the node is about to be used
1455 // in a location which may differ from the original debug location. This
1456 // is relevant to Constant and ConstantFP nodes because they can appear
1457 // as constant expressions inside PHI nodes.
1458 N->setDebugLoc(DebugLoc());
1459 }
1460 return N;
1461 }
1462
1463 // Otherwise create a new SDValue and remember it.
1464 SDValue Val = getValueImpl(V);
1465 NodeMap[V] = Val;
1466 resolveDanglingDebugInfo(V, Val);
1467 return Val;
1468}
1469
1470/// getValueImpl - Helper function for getValue and getNonRegisterValue.
1471/// Create an SDValue for the given value.
1472SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1473 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1474
1475 if (const Constant *C = dyn_cast<Constant>(V)) {
1476 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1477
1478 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1479 return DAG.getConstant(*CI, getCurSDLoc(), VT);
1480
1481 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1482 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1483
1484 if (isa<ConstantPointerNull>(C)) {
1485 unsigned AS = V->getType()->getPointerAddressSpace();
1486 return DAG.getConstant(0, getCurSDLoc(),
1487 TLI.getPointerTy(DAG.getDataLayout(), AS));
1488 }
1489
1490 if (match(C, m_VScale(DAG.getDataLayout())))
1491 return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1492
1493 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1494 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1495
1496 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1497 return DAG.getUNDEF(VT);
1498
1499 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1500 visit(CE->getOpcode(), *CE);
1501 SDValue N1 = NodeMap[V];
1502 assert(N1.getNode() && "visit didn't populate the NodeMap!")((N1.getNode() && "visit didn't populate the NodeMap!"
) ? static_cast<void> (0) : __assert_fail ("N1.getNode() && \"visit didn't populate the NodeMap!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1502, __PRETTY_FUNCTION__))
;
1503 return N1;
1504 }
1505
1506 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1507 SmallVector<SDValue, 4> Constants;
1508 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1509 OI != OE; ++OI) {
1510 SDNode *Val = getValue(*OI).getNode();
1511 // If the operand is an empty aggregate, there are no values.
1512 if (!Val) continue;
1513 // Add each leaf value from the operand to the Constants list
1514 // to form a flattened list of all the values.
1515 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1516 Constants.push_back(SDValue(Val, i));
1517 }
1518
1519 return DAG.getMergeValues(Constants, getCurSDLoc());
1520 }
1521
1522 if (const ConstantDataSequential *CDS =
1523 dyn_cast<ConstantDataSequential>(C)) {
1524 SmallVector<SDValue, 4> Ops;
1525 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1526 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1527 // Add each leaf value from the operand to the Constants list
1528 // to form a flattened list of all the values.
1529 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1530 Ops.push_back(SDValue(Val, i));
1531 }
1532
1533 if (isa<ArrayType>(CDS->getType()))
1534 return DAG.getMergeValues(Ops, getCurSDLoc());
1535 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1536 }
1537
1538 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1539 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&(((isa<ConstantAggregateZero>(C) || isa<UndefValue>
(C)) && "Unknown struct or array constant!") ? static_cast
<void> (0) : __assert_fail ("(isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) && \"Unknown struct or array constant!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1540, __PRETTY_FUNCTION__))
1540 "Unknown struct or array constant!")(((isa<ConstantAggregateZero>(C) || isa<UndefValue>
(C)) && "Unknown struct or array constant!") ? static_cast
<void> (0) : __assert_fail ("(isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) && \"Unknown struct or array constant!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1540, __PRETTY_FUNCTION__))
;
1541
1542 SmallVector<EVT, 4> ValueVTs;
1543 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1544 unsigned NumElts = ValueVTs.size();
1545 if (NumElts == 0)
1546 return SDValue(); // empty struct
1547 SmallVector<SDValue, 4> Constants(NumElts);
1548 for (unsigned i = 0; i != NumElts; ++i) {
1549 EVT EltVT = ValueVTs[i];
1550 if (isa<UndefValue>(C))
1551 Constants[i] = DAG.getUNDEF(EltVT);
1552 else if (EltVT.isFloatingPoint())
1553 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1554 else
1555 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1556 }
1557
1558 return DAG.getMergeValues(Constants, getCurSDLoc());
1559 }
1560
1561 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1562 return DAG.getBlockAddress(BA, VT);
1563
1564 VectorType *VecTy = cast<VectorType>(V->getType());
1565 unsigned NumElements = VecTy->getNumElements();
1566
1567 // Now that we know the number and type of the elements, get that number of
1568 // elements into the Ops array based on what kind of constant it is.
1569 SmallVector<SDValue, 16> Ops;
1570 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1571 for (unsigned i = 0; i != NumElements; ++i)
1572 Ops.push_back(getValue(CV->getOperand(i)));
1573 } else {
1574 assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!")((isa<ConstantAggregateZero>(C) && "Unknown vector constant!"
) ? static_cast<void> (0) : __assert_fail ("isa<ConstantAggregateZero>(C) && \"Unknown vector constant!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1574, __PRETTY_FUNCTION__))
;
1575 EVT EltVT =
1576 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1577
1578 SDValue Op;
1579 if (EltVT.isFloatingPoint())
1580 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1581 else
1582 Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1583 Ops.assign(NumElements, Op);
1584 }
1585
1586 // Create a BUILD_VECTOR node.
1587 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1588 }
1589
1590 // If this is a static alloca, generate it as the frameindex instead of
1591 // computation.
1592 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1593 DenseMap<const AllocaInst*, int>::iterator SI =
1594 FuncInfo.StaticAllocaMap.find(AI);
1595 if (SI != FuncInfo.StaticAllocaMap.end())
1596 return DAG.getFrameIndex(SI->second,
1597 TLI.getFrameIndexTy(DAG.getDataLayout()));
1598 }
1599
1600 // If this is an instruction which fast-isel has deferred, select it now.
1601 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1602 unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1603
1604 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1605 Inst->getType(), getABIRegCopyCC(V));
1606 SDValue Chain = DAG.getEntryNode();
1607 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1608 }
1609
1610 llvm_unreachable("Can't get register for value!")::llvm::llvm_unreachable_internal("Can't get register for value!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1610)
;
1611}
1612
1613void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1614 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1615 bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1616 bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1617 bool IsSEH = isAsynchronousEHPersonality(Pers);
1618 MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1619 if (!IsSEH)
1620 CatchPadMBB->setIsEHScopeEntry();
1621 // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1622 if (IsMSVCCXX || IsCoreCLR)
1623 CatchPadMBB->setIsEHFuncletEntry();
1624}
1625
1626void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1627 // Update machine-CFG edge.
1628 MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1629 FuncInfo.MBB->addSuccessor(TargetMBB);
1630
1631 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1632 bool IsSEH = isAsynchronousEHPersonality(Pers);
1633 if (IsSEH) {
1634 // If this is not a fall-through branch or optimizations are switched off,
1635 // emit the branch.
1636 if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1637 TM.getOptLevel() == CodeGenOpt::None)
1638 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1639 getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1640 return;
1641 }
1642
1643 // Figure out the funclet membership for the catchret's successor.
1644 // This will be used by the FuncletLayout pass to determine how to order the
1645 // BB's.
1646 // A 'catchret' returns to the outer scope's color.
1647 Value *ParentPad = I.getCatchSwitchParentPad();
1648 const BasicBlock *SuccessorColor;
1649 if (isa<ConstantTokenNone>(ParentPad))
1650 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1651 else
1652 SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1653 assert(SuccessorColor && "No parent funclet for catchret!")((SuccessorColor && "No parent funclet for catchret!"
) ? static_cast<void> (0) : __assert_fail ("SuccessorColor && \"No parent funclet for catchret!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1653, __PRETTY_FUNCTION__))
;
1654 MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1655 assert(SuccessorColorMBB && "No MBB for SuccessorColor!")((SuccessorColorMBB && "No MBB for SuccessorColor!") ?
static_cast<void> (0) : __assert_fail ("SuccessorColorMBB && \"No MBB for SuccessorColor!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1655, __PRETTY_FUNCTION__))
;
1656
1657 // Create the terminator node.
1658 SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1659 getControlRoot(), DAG.getBasicBlock(TargetMBB),
1660 DAG.getBasicBlock(SuccessorColorMBB));
1661 DAG.setRoot(Ret);
1662}
1663
1664void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1665 // Don't emit any special code for the cleanuppad instruction. It just marks
1666 // the start of an EH scope/funclet.
1667 FuncInfo.MBB->setIsEHScopeEntry();
1668 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1669 if (Pers != EHPersonality::Wasm_CXX) {
1670 FuncInfo.MBB->setIsEHFuncletEntry();
1671 FuncInfo.MBB->setIsCleanupFuncletEntry();
1672 }
1673}
1674
1675// For wasm, there's alwyas a single catch pad attached to a catchswitch, and
1676// the control flow always stops at the single catch pad, as it does for a
1677// cleanup pad. In case the exception caught is not of the types the catch pad
1678// catches, it will be rethrown by a rethrow.
1679static void findWasmUnwindDestinations(
1680 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1681 BranchProbability Prob,
1682 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1683 &UnwindDests) {
1684 while (EHPadBB) {
1685 const Instruction *Pad = EHPadBB->getFirstNonPHI();
1686 if (isa<CleanupPadInst>(Pad)) {
1687 // Stop on cleanup pads.
1688 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1689 UnwindDests.back().first->setIsEHScopeEntry();
1690 break;
1691 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1692 // Add the catchpad handlers to the possible destinations. We don't
1693 // continue to the unwind destination of the catchswitch for wasm.
1694 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1695 UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1696 UnwindDests.back().first->setIsEHScopeEntry();
1697 }
1698 break;
1699 } else {
1700 continue;
1701 }
1702 }
1703}
1704
1705/// When an invoke or a cleanupret unwinds to the next EH pad, there are
1706/// many places it could ultimately go. In the IR, we have a single unwind
1707/// destination, but in the machine CFG, we enumerate all the possible blocks.
1708/// This function skips over imaginary basic blocks that hold catchswitch
1709/// instructions, and finds all the "real" machine
1710/// basic block destinations. As those destinations may not be successors of
1711/// EHPadBB, here we also calculate the edge probability to those destinations.
1712/// The passed-in Prob is the edge probability to EHPadBB.
1713static void findUnwindDestinations(
1714 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1715 BranchProbability Prob,
1716 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1717 &UnwindDests) {
1718 EHPersonality Personality =
1719 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1720 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1721 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1722 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1723 bool IsSEH = isAsynchronousEHPersonality(Personality);
1724
1725 if (IsWasmCXX) {
1726 findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
1727 assert(UnwindDests.size() <= 1 &&((UnwindDests.size() <= 1 && "There should be at most one unwind destination for wasm"
) ? static_cast<void> (0) : __assert_fail ("UnwindDests.size() <= 1 && \"There should be at most one unwind destination for wasm\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1728, __PRETTY_FUNCTION__))
1728 "There should be at most one unwind destination for wasm")((UnwindDests.size() <= 1 && "There should be at most one unwind destination for wasm"
) ? static_cast<void> (0) : __assert_fail ("UnwindDests.size() <= 1 && \"There should be at most one unwind destination for wasm\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1728, __PRETTY_FUNCTION__))
;
1729 return;
1730 }
1731
1732 while (EHPadBB) {
1733 const Instruction *Pad = EHPadBB->getFirstNonPHI();
1734 BasicBlock *NewEHPadBB = nullptr;
1735 if (isa<LandingPadInst>(Pad)) {
1736 // Stop on landingpads. They are not funclets.
1737 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1738 break;
1739 } else if (isa<CleanupPadInst>(Pad)) {
1740 // Stop on cleanup pads. Cleanups are always funclet entries for all known
1741 // personalities.
1742 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1743 UnwindDests.back().first->setIsEHScopeEntry();
1744 UnwindDests.back().first->setIsEHFuncletEntry();
1745 break;
1746 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1747 // Add the catchpad handlers to the possible destinations.
1748 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1749 UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1750 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1751 if (IsMSVCCXX || IsCoreCLR)
1752 UnwindDests.back().first->setIsEHFuncletEntry();
1753 if (!IsSEH)
1754 UnwindDests.back().first->setIsEHScopeEntry();
1755 }
1756 NewEHPadBB = CatchSwitch->getUnwindDest();
1757 } else {
1758 continue;
1759 }
1760
1761 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1762 if (BPI && NewEHPadBB)
1763 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1764 EHPadBB = NewEHPadBB;
1765 }
1766}
1767
1768void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1769 // Update successor info.
1770 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1771 auto UnwindDest = I.getUnwindDest();
1772 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1773 BranchProbability UnwindDestProb =
1774 (BPI && UnwindDest)
1775 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1776 : BranchProbability::getZero();
1777 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1778 for (auto &UnwindDest : UnwindDests) {
1779 UnwindDest.first->setIsEHPad();
1780 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1781 }
1782 FuncInfo.MBB->normalizeSuccProbs();
1783
1784 // Create the terminator node.
1785 SDValue Ret =
1786 DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1787 DAG.setRoot(Ret);
1788}
1789
1790void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1791 report_fatal_error("visitCatchSwitch not yet implemented!");
1792}
1793
1794void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1795 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1796 auto &DL = DAG.getDataLayout();
1797 SDValue Chain = getControlRoot();
1798 SmallVector<ISD::OutputArg, 8> Outs;
1799 SmallVector<SDValue, 8> OutVals;
1800
1801 // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1802 // lower
1803 //
1804 // %val = call <ty> @llvm.experimental.deoptimize()
1805 // ret <ty> %val
1806 //
1807 // differently.
1808 if (I.getParent()->getTerminatingDeoptimizeCall()) {
1809 LowerDeoptimizingReturn();
1810 return;
1811 }
1812
1813 if (!FuncInfo.CanLowerReturn) {
1814 unsigned DemoteReg = FuncInfo.DemoteRegister;
1815 const Function *F = I.getParent()->getParent();
1816
1817 // Emit a store of the return value through the virtual register.
1818 // Leave Outs empty so that LowerReturn won't try to load return
1819 // registers the usual way.
1820 SmallVector<EVT, 1> PtrValueVTs;
1821 ComputeValueVTs(TLI, DL,
1822 F->getReturnType()->getPointerTo(
1823 DAG.getDataLayout().getAllocaAddrSpace()),
1824 PtrValueVTs);
1825
1826 SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1827 DemoteReg, PtrValueVTs[0]);
1828 SDValue RetOp = getValue(I.getOperand(0));
1829
1830 SmallVector<EVT, 4> ValueVTs, MemVTs;
1831 SmallVector<uint64_t, 4> Offsets;
1832 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
1833 &Offsets);
1834 unsigned NumValues = ValueVTs.size();
1835
1836 SmallVector<SDValue, 4> Chains(NumValues);
1837 for (unsigned i = 0; i != NumValues; ++i) {
1838 // An aggregate return value cannot wrap around the address space, so
1839 // offsets to its parts don't wrap either.
1840 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);
1841
1842 SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
1843 if (MemVTs[i] != ValueVTs[i])
1844 Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
1845 Chains[i] = DAG.getStore(Chain, getCurSDLoc(), Val,
1846 // FIXME: better loc info would be nice.
1847 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
1848 }
1849
1850 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1851 MVT::Other, Chains);
1852 } else if (I.getNumOperands() != 0) {
1853 SmallVector<EVT, 4> ValueVTs;
1854 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1855 unsigned NumValues = ValueVTs.size();
1856 if (NumValues) {
1857 SDValue RetOp = getValue(I.getOperand(0));
1858
1859 const Function *F = I.getParent()->getParent();
1860
1861 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
1862 I.getOperand(0)->getType(), F->getCallingConv(),
1863 /*IsVarArg*/ false);
1864
1865 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1866 if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1867 Attribute::SExt))
1868 ExtendKind = ISD::SIGN_EXTEND;
1869 else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1870 Attribute::ZExt))
1871 ExtendKind = ISD::ZERO_EXTEND;
1872
1873 LLVMContext &Context = F->getContext();
1874 bool RetInReg = F->getAttributes().hasAttribute(
1875 AttributeList::ReturnIndex, Attribute::InReg);
1876
1877 for (unsigned j = 0; j != NumValues; ++j) {
1878 EVT VT = ValueVTs[j];
1879
1880 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1881 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1882
1883 CallingConv::ID CC = F->getCallingConv();
1884
1885 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1886 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1887 SmallVector<SDValue, 4> Parts(NumParts);
1888 getCopyToParts(DAG, getCurSDLoc(),
1889 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1890 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1891
1892 // 'inreg' on function refers to return value
1893 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1894 if (RetInReg)
1895 Flags.setInReg();
1896
1897 if (I.getOperand(0)->getType()->isPointerTy()) {
1898 Flags.setPointer();
1899 Flags.setPointerAddrSpace(
1900 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
1901 }
1902
1903 if (NeedsRegBlock) {
1904 Flags.setInConsecutiveRegs();
1905 if (j == NumValues - 1)
1906 Flags.setInConsecutiveRegsLast();
1907 }
1908
1909 // Propagate extension type if any
1910 if (ExtendKind == ISD::SIGN_EXTEND)
1911 Flags.setSExt();
1912 else if (ExtendKind == ISD::ZERO_EXTEND)
1913 Flags.setZExt();
1914
1915 for (unsigned i = 0; i < NumParts; ++i) {
1916 Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1917 VT, /*isfixed=*/true, 0, 0));
1918 OutVals.push_back(Parts[i]);
1919 }
1920 }
1921 }
1922 }
1923
1924 // Push in swifterror virtual register as the last element of Outs. This makes
1925 // sure swifterror virtual register will be returned in the swifterror
1926 // physical register.
1927 const Function *F = I.getParent()->getParent();
1928 if (TLI.supportSwiftError() &&
1929 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1930 assert(SwiftError.getFunctionArg() && "Need a swift error argument")((SwiftError.getFunctionArg() && "Need a swift error argument"
) ? static_cast<void> (0) : __assert_fail ("SwiftError.getFunctionArg() && \"Need a swift error argument\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1930, __PRETTY_FUNCTION__))
;
1931 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1932 Flags.setSwiftError();
1933 Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1934 EVT(TLI.getPointerTy(DL)) /*argvt*/,
1935 true /*isfixed*/, 1 /*origidx*/,
1936 0 /*partOffs*/));
1937 // Create SDNode for the swifterror virtual register.
1938 OutVals.push_back(
1939 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
1940 &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
1941 EVT(TLI.getPointerTy(DL))));
1942 }
1943
1944 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
1945 CallingConv::ID CallConv =
1946 DAG.getMachineFunction().getFunction().getCallingConv();
1947 Chain = DAG.getTargetLoweringInfo().LowerReturn(
1948 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1949
1950 // Verify that the target's LowerReturn behaved as expected.
1951 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&((Chain.getNode() && Chain.getValueType() == MVT::Other
&& "LowerReturn didn't return a valid chain!") ? static_cast
<void> (0) : __assert_fail ("Chain.getNode() && Chain.getValueType() == MVT::Other && \"LowerReturn didn't return a valid chain!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1952, __PRETTY_FUNCTION__))
1952 "LowerReturn didn't return a valid chain!")((Chain.getNode() && Chain.getValueType() == MVT::Other
&& "LowerReturn didn't return a valid chain!") ? static_cast
<void> (0) : __assert_fail ("Chain.getNode() && Chain.getValueType() == MVT::Other && \"LowerReturn didn't return a valid chain!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1952, __PRETTY_FUNCTION__))
;
1953
1954 // Update the DAG with the new chain value resulting from return lowering.
1955 DAG.setRoot(Chain);
1956}
1957
1958/// CopyToExportRegsIfNeeded - If the given value has virtual registers
1959/// created for it, emit nodes to copy the value into the virtual
1960/// registers.
1961void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1962 // Skip empty types
1963 if (V->getType()->isEmptyTy())
1964 return;
1965
1966 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1967 if (VMI != FuncInfo.ValueMap.end()) {
1968 assert(!V->use_empty() && "Unused value assigned virtual registers!")((!V->use_empty() && "Unused value assigned virtual registers!"
) ? static_cast<void> (0) : __assert_fail ("!V->use_empty() && \"Unused value assigned virtual registers!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 1968, __PRETTY_FUNCTION__))
;
1969 CopyValueToVirtualRegister(V, VMI->second);
1970 }
1971}
1972
1973/// ExportFromCurrentBlock - If this condition isn't known to be exported from
1974/// the current basic block, add it to ValueMap now so that we'll get a
1975/// CopyTo/FromReg.
1976void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1977 // No need to export constants.
1978 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1979
1980 // Already exported?
1981 if (FuncInfo.isExportedInst(V)) return;
1982
1983 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1984 CopyValueToVirtualRegister(V, Reg);
1985}
1986
1987bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1988 const BasicBlock *FromBB) {
1989 // The operands of the setcc have to be in this block. We don't know
1990 // how to export them from some other block.
1991 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1992 // Can export from current BB.
1993 if (VI->getParent() == FromBB)
1994 return true;
1995
1996 // Is already exported, noop.
1997 return FuncInfo.isExportedInst(V);
1998 }
1999
2000 // If this is an argument, we can export it if the BB is the entry block or
2001 // if it is already exported.
2002 if (isa<Argument>(V)) {
2003 if (FromBB == &FromBB->getParent()->getEntryBlock())
2004 return true;
2005
2006 // Otherwise, can only export this if it is already exported.
2007 return FuncInfo.isExportedInst(V);
2008 }
2009
2010 // Otherwise, constants can always be exported.
2011 return true;
2012}
2013
2014/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2015BranchProbability
2016SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2017 const MachineBasicBlock *Dst) const {
2018 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2019 const BasicBlock *SrcBB = Src->getBasicBlock();
2020 const BasicBlock *DstBB = Dst->getBasicBlock();
2021 if (!BPI) {
2022 // If BPI is not available, set the default probability as 1 / N, where N is
2023 // the number of successors.
2024 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2025 return BranchProbability(1, SuccSize);
2026 }
2027 return BPI->getEdgeProbability(SrcBB, DstBB);
2028}
2029
2030void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2031 MachineBasicBlock *Dst,
2032 BranchProbability Prob) {
2033 if (!FuncInfo.BPI)
2034 Src->addSuccessorWithoutProb(Dst);
2035 else {
2036 if (Prob.isUnknown())
2037 Prob = getEdgeProbability(Src, Dst);
2038 Src->addSuccessor(Dst, Prob);
2039 }
2040}
2041
2042static bool InBlock(const Value *V, const BasicBlock *BB) {
2043 if (const Instruction *I = dyn_cast<Instruction>(V))
2044 return I->getParent() == BB;
2045 return true;
2046}
2047
2048/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2049/// This function emits a branch and is used at the leaves of an OR or an
2050/// AND operator tree.
2051void
2052SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2053 MachineBasicBlock *TBB,
2054 MachineBasicBlock *FBB,
2055 MachineBasicBlock *CurBB,
2056 MachineBasicBlock *SwitchBB,
2057 BranchProbability TProb,
2058 BranchProbability FProb,
2059 bool InvertCond) {
2060 const BasicBlock *BB = CurBB->getBasicBlock();
2061
2062 // If the leaf of the tree is a comparison, merge the condition into
2063 // the caseblock.
2064 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2065 // The operands of the cmp have to be in this block. We don't know
2066 // how to export them from some other block. If this is the first block
2067 // of the sequence, no exporting is needed.
2068 if (CurBB == SwitchBB ||
2069 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2070 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2071 ISD::CondCode Condition;
2072 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2073 ICmpInst::Predicate Pred =
2074 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2075 Condition = getICmpCondCode(Pred);
2076 } else {
2077 const FCmpInst *FC = cast<FCmpInst>(Cond);
2078 FCmpInst::Predicate Pred =
2079 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2080 Condition = getFCmpCondCode(Pred);
2081 if (TM.Options.NoNaNsFPMath)
2082 Condition = getFCmpCodeWithoutNaN(Condition);
2083 }
2084
2085 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2086 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2087 SL->SwitchCases.push_back(CB);
2088 return;
2089 }
2090 }
2091
2092 // Create a CaseBlock record representing this branch.
2093 ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2094 CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2095 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2096 SL->SwitchCases.push_back(CB);
2097}
2098
2099void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2100 MachineBasicBlock *TBB,
2101 MachineBasicBlock *FBB,
2102 MachineBasicBlock *CurBB,
2103 MachineBasicBlock *SwitchBB,
2104 Instruction::BinaryOps Opc,
2105 BranchProbability TProb,
2106 BranchProbability FProb,
2107 bool InvertCond) {
2108 // Skip over not part of the tree and remember to invert op and operands at
2109 // next level.
2110 Value *NotCond;
2111 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2112 InBlock(NotCond, CurBB->getBasicBlock())) {
2113 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2114 !InvertCond);
2115 return;
2116 }
2117
2118 const Instruction *BOp = dyn_cast<Instruction>(Cond);
2119 // Compute the effective opcode for Cond, taking into account whether it needs
2120 // to be inverted, e.g.
2121 // and (not (or A, B)), C
2122 // gets lowered as
2123 // and (and (not A, not B), C)
2124 unsigned BOpc = 0;
2125 if (BOp) {
2126 BOpc = BOp->getOpcode();
2127 if (InvertCond) {
2128 if (BOpc == Instruction::And)
2129 BOpc = Instruction::Or;
2130 else if (BOpc == Instruction::Or)
2131 BOpc = Instruction::And;
2132 }
2133 }
2134
2135 // If this node is not part of the or/and tree, emit it as a branch.
2136 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
2137 BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
2138 BOp->getParent() != CurBB->getBasicBlock() ||
2139 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
2140 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
2141 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2142 TProb, FProb, InvertCond);
2143 return;
2144 }
2145
2146 // Create TmpBB after CurBB.
2147 MachineFunction::iterator BBI(CurBB);
2148 MachineFunction &MF = DAG.getMachineFunction();
2149 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2150 CurBB->getParent()->insert(++BBI, TmpBB);
2151
2152 if (Opc == Instruction::Or) {
2153 // Codegen X | Y as:
2154 // BB1:
2155 // jmp_if_X TBB
2156 // jmp TmpBB
2157 // TmpBB:
2158 // jmp_if_Y TBB
2159 // jmp FBB
2160 //
2161
2162 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2163 // The requirement is that
2164 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2165 // = TrueProb for original BB.
2166 // Assuming the original probabilities are A and B, one choice is to set
2167 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2168 // A/(1+B) and 2B/(1+B). This choice assumes that
2169 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2170 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2171 // TmpBB, but the math is more complicated.
2172
2173 auto NewTrueProb = TProb / 2;
2174 auto NewFalseProb = TProb / 2 + FProb;
2175 // Emit the LHS condition.
2176 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
2177 NewTrueProb, NewFalseProb, InvertCond);
2178
2179 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2180 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2181 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2182 // Emit the RHS condition into TmpBB.
2183 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
2184 Probs[0], Probs[1], InvertCond);
2185 } else {
2186 assert(Opc == Instruction::And && "Unknown merge op!")((Opc == Instruction::And && "Unknown merge op!") ? static_cast
<void> (0) : __assert_fail ("Opc == Instruction::And && \"Unknown merge op!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2186, __PRETTY_FUNCTION__))
;
2187 // Codegen X & Y as:
2188 // BB1:
2189 // jmp_if_X TmpBB
2190 // jmp FBB
2191 // TmpBB:
2192 // jmp_if_Y TBB
2193 // jmp FBB
2194 //
2195 // This requires creation of TmpBB after CurBB.
2196
2197 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2198 // The requirement is that
2199 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2200 // = FalseProb for original BB.
2201 // Assuming the original probabilities are A and B, one choice is to set
2202 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2203 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2204 // TrueProb for BB1 * FalseProb for TmpBB.
2205
2206 auto NewTrueProb = TProb + FProb / 2;
2207 auto NewFalseProb = FProb / 2;
2208 // Emit the LHS condition.
2209 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
2210 NewTrueProb, NewFalseProb, InvertCond);
2211
2212 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2213 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2214 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2215 // Emit the RHS condition into TmpBB.
2216 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
2217 Probs[0], Probs[1], InvertCond);
2218 }
2219}
2220
2221/// If the set of cases should be emitted as a series of branches, return true.
2222/// If we should emit this as a bunch of and/or'd together conditions, return
2223/// false.
2224bool
2225SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2226 if (Cases.size() != 2) return true;
2227
2228 // If this is two comparisons of the same values or'd or and'd together, they
2229 // will get folded into a single comparison, so don't emit two blocks.
2230 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2231 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2232 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2233 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2234 return false;
2235 }
2236
2237 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2238 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2239 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2240 Cases[0].CC == Cases[1].CC &&
2241 isa<Constant>(Cases[0].CmpRHS) &&
2242 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2243 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2244 return false;
2245 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2246 return false;
2247 }
2248
2249 return true;
2250}
2251
2252void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2253 MachineBasicBlock *BrMBB = FuncInfo.MBB;
2254
2255 // Update machine-CFG edges.
2256 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2257
2258 if (I.isUnconditional()) {
2259 // Update machine-CFG edges.
2260 BrMBB->addSuccessor(Succ0MBB);
2261
2262 // If this is not a fall-through branch or optimizations are switched off,
2263 // emit the branch.
2264 if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
2265 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2266 MVT::Other, getControlRoot(),
2267 DAG.getBasicBlock(Succ0MBB)));
2268
2269 return;
2270 }
2271
2272 // If this condition is one of the special cases we handle, do special stuff
2273 // now.
2274 const Value *CondVal = I.getCondition();
2275 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2276
2277 // If this is a series of conditions that are or'd or and'd together, emit
2278 // this as a sequence of branches instead of setcc's with and/or operations.
2279 // As long as jumps are not expensive, this should improve performance.
2280 // For example, instead of something like:
2281 // cmp A, B
2282 // C = seteq
2283 // cmp D, E
2284 // F = setle
2285 // or C, F
2286 // jnz foo
2287 // Emit:
2288 // cmp A, B
2289 // je foo
2290 // cmp D, E
2291 // jle foo
2292 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
2293 Instruction::BinaryOps Opcode = BOp->getOpcode();
2294 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
2295 !I.hasMetadata(LLVMContext::MD_unpredictable) &&
2296 (Opcode == Instruction::And || Opcode == Instruction::Or)) {
2297 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
2298 Opcode,
2299 getEdgeProbability(BrMBB, Succ0MBB),
2300 getEdgeProbability(BrMBB, Succ1MBB),
2301 /*InvertCond=*/false);
2302 // If the compares in later blocks need to use values not currently
2303 // exported from this block, export them now. This block should always
2304 // be the first entry.
2305 assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!")((SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!"
) ? static_cast<void> (0) : __assert_fail ("SL->SwitchCases[0].ThisBB == BrMBB && \"Unexpected lowering!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2305, __PRETTY_FUNCTION__))
;
2306
2307 // Allow some cases to be rejected.
2308 if (ShouldEmitAsBranches(SL->SwitchCases)) {
2309 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2310 ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2311 ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2312 }
2313
2314 // Emit the branch for this block.
2315 visitSwitchCase(SL->SwitchCases[0], BrMBB);
2316 SL->SwitchCases.erase(SL->SwitchCases.begin());
2317 return;
2318 }
2319
2320 // Okay, we decided not to do this, remove any inserted MBB's and clear
2321 // SwitchCases.
2322 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2323 FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2324
2325 SL->SwitchCases.clear();
2326 }
2327 }
2328
2329 // Create a CaseBlock record representing this branch.
2330 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2331 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2332
2333 // Use visitSwitchCase to actually insert the fast branch sequence for this
2334 // cond branch.
2335 visitSwitchCase(CB, BrMBB);
2336}
2337
2338/// visitSwitchCase - Emits the necessary code to represent a single node in
2339/// the binary search tree resulting from lowering a switch instruction.
2340void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2341 MachineBasicBlock *SwitchBB) {
2342 SDValue Cond;
2343 SDValue CondLHS = getValue(CB.CmpLHS);
2344 SDLoc dl = CB.DL;
2345
2346 if (CB.CC == ISD::SETTRUE) {
2347 // Branch or fall through to TrueBB.
2348 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2349 SwitchBB->normalizeSuccProbs();
2350 if (CB.TrueBB != NextBlock(SwitchBB)) {
2351 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2352 DAG.getBasicBlock(CB.TrueBB)));
2353 }
2354 return;
2355 }
2356
2357 auto &TLI = DAG.getTargetLoweringInfo();
2358 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2359
2360 // Build the setcc now.
2361 if (!CB.CmpMHS) {
2362 // Fold "(X == true)" to X and "(X == false)" to !X to
2363 // handle common cases produced by branch lowering.
2364 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2365 CB.CC == ISD::SETEQ)
2366 Cond = CondLHS;
2367 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2368 CB.CC == ISD::SETEQ) {
2369 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2370 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2371 } else {
2372 SDValue CondRHS = getValue(CB.CmpRHS);
2373
2374 // If a pointer's DAG type is larger than its memory type then the DAG
2375 // values are zero-extended. This breaks signed comparisons so truncate
2376 // back to the underlying type before doing the compare.
2377 if (CondLHS.getValueType() != MemVT) {
2378 CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2379 CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2380 }
2381 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2382 }
2383 } else {
2384 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now")((CB.CC == ISD::SETLE && "Can handle only LE ranges now"
) ? static_cast<void> (0) : __assert_fail ("CB.CC == ISD::SETLE && \"Can handle only LE ranges now\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2384, __PRETTY_FUNCTION__))
;
2385
2386 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2387 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2388
2389 SDValue CmpOp = getValue(CB.CmpMHS);
2390 EVT VT = CmpOp.getValueType();
2391
2392 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2393 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2394 ISD::SETLE);
2395 } else {
2396 SDValue SUB = DAG.getNode(ISD::SUB, dl,
2397 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2398 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2399 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2400 }
2401 }
2402
2403 // Update successor info
2404 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2405 // TrueBB and FalseBB are always different unless the incoming IR is
2406 // degenerate. This only happens when running llc on weird IR.
2407 if (CB.TrueBB != CB.FalseBB)
2408 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2409 SwitchBB->normalizeSuccProbs();
2410
2411 // If the lhs block is the next block, invert the condition so that we can
2412 // fall through to the lhs instead of the rhs block.
2413 if (CB.TrueBB == NextBlock(SwitchBB)) {
2414 std::swap(CB.TrueBB, CB.FalseBB);
2415 SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2416 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2417 }
2418
2419 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2420 MVT::Other, getControlRoot(), Cond,
2421 DAG.getBasicBlock(CB.TrueBB));
2422
2423 // Insert the false branch. Do this even if it's a fall through branch,
2424 // this makes it easier to do DAG optimizations which require inverting
2425 // the branch condition.
2426 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2427 DAG.getBasicBlock(CB.FalseBB));
2428
2429 DAG.setRoot(BrCond);
2430}
2431
2432/// visitJumpTable - Emit JumpTable node in the current MBB
2433void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2434 // Emit the code for the jump table
2435 assert(JT.Reg != -1U && "Should lower JT Header first!")((JT.Reg != -1U && "Should lower JT Header first!") ?
static_cast<void> (0) : __assert_fail ("JT.Reg != -1U && \"Should lower JT Header first!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2435, __PRETTY_FUNCTION__))
;
2436 EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2437 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2438 JT.Reg, PTy);
2439 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2440 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2441 MVT::Other, Index.getValue(1),
2442 Table, Index);
2443 DAG.setRoot(BrJumpTable);
2444}
2445
2446/// visitJumpTableHeader - This function emits necessary code to produce index
2447/// in the JumpTable from switch case.
2448void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2449 JumpTableHeader &JTH,
2450 MachineBasicBlock *SwitchBB) {
2451 SDLoc dl = getCurSDLoc();
2452
2453 // Subtract the lowest switch case value from the value being switched on.
2454 SDValue SwitchOp = getValue(JTH.SValue);
2455 EVT VT = SwitchOp.getValueType();
2456 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2457 DAG.getConstant(JTH.First, dl, VT));
2458
2459 // The SDNode we just created, which holds the value being switched on minus
2460 // the smallest case value, needs to be copied to a virtual register so it
2461 // can be used as an index into the jump table in a subsequent basic block.
2462 // This value may be smaller or larger than the target's pointer type, and
2463 // therefore require extension or truncating.
2464 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2465 SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2466
2467 unsigned JumpTableReg =
2468 FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2469 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2470 JumpTableReg, SwitchOp);
2471 JT.Reg = JumpTableReg;
2472
2473 if (!JTH.OmitRangeCheck) {
2474 // Emit the range check for the jump table, and branch to the default block
2475 // for the switch statement if the value being switched on exceeds the
2476 // largest case in the switch.
2477 SDValue CMP = DAG.getSetCC(
2478 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2479 Sub.getValueType()),
2480 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2481
2482 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2483 MVT::Other, CopyTo, CMP,
2484 DAG.getBasicBlock(JT.Default));
2485
2486 // Avoid emitting unnecessary branches to the next block.
2487 if (JT.MBB != NextBlock(SwitchBB))
2488 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2489 DAG.getBasicBlock(JT.MBB));
2490
2491 DAG.setRoot(BrCond);
2492 } else {
2493 // Avoid emitting unnecessary branches to the next block.
2494 if (JT.MBB != NextBlock(SwitchBB))
2495 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2496 DAG.getBasicBlock(JT.MBB)));
2497 else
2498 DAG.setRoot(CopyTo);
2499 }
2500}
2501
2502/// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
2503/// variable if there exists one.
2504static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2505 SDValue &Chain) {
2506 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2507 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2508 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2509 MachineFunction &MF = DAG.getMachineFunction();
2510 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2511 MachineSDNode *Node =
2512 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2513 if (Global) {
2514 MachinePointerInfo MPInfo(Global);
2515 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2516 MachineMemOperand::MODereferenceable;
2517 MachineMemOperand *MemRef = MF.getMachineMemOperand(
2518 MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlignment(PtrTy));
2519 DAG.setNodeMemRefs(Node, {MemRef});
2520 }
2521 if (PtrTy != PtrMemTy)
2522 return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2523 return SDValue(Node, 0);
2524}
2525
2526/// Codegen a new tail for a stack protector check ParentMBB which has had its
2527/// tail spliced into a stack protector check success bb.
2528///
2529/// For a high level explanation of how this fits into the stack protector
2530/// generation see the comment on the declaration of class
2531/// StackProtectorDescriptor.
2532void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2533 MachineBasicBlock *ParentBB) {
2534
2535 // First create the loads to the guard/stack slot for the comparison.
2536 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2537 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2538 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2539
2540 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2541 int FI = MFI.getStackProtectorIndex();
2542
2543 SDValue Guard;
2544 SDLoc dl = getCurSDLoc();
2545 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2546 const Module &M = *ParentBB->getParent()->getFunction().getParent();
2547 unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2548
2549 // Generate code to load the content of the guard slot.
2550 SDValue GuardVal = DAG.getLoad(
2551 PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2552 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2553 MachineMemOperand::MOVolatile);
2554
2555 if (TLI.useStackGuardXorFP())
2556 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2557
2558 // Retrieve guard check function, nullptr if instrumentation is inlined.
2559 if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2560 // The target provides a guard check function to validate the guard value.
2561 // Generate a call to that function with the content of the guard slot as
2562 // argument.
2563 FunctionType *FnTy = GuardCheckFn->getFunctionType();
2564 assert(FnTy->getNumParams() == 1 && "Invalid function signature")((FnTy->getNumParams() == 1 && "Invalid function signature"
) ? static_cast<void> (0) : __assert_fail ("FnTy->getNumParams() == 1 && \"Invalid function signature\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2564, __PRETTY_FUNCTION__))
;
2565
2566 TargetLowering::ArgListTy Args;
2567 TargetLowering::ArgListEntry Entry;
2568 Entry.Node = GuardVal;
2569 Entry.Ty = FnTy->getParamType(0);
2570 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
2571 Entry.IsInReg = true;
2572 Args.push_back(Entry);
2573
2574 TargetLowering::CallLoweringInfo CLI(DAG);
2575 CLI.setDebugLoc(getCurSDLoc())
2576 .setChain(DAG.getEntryNode())
2577 .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2578 getValue(GuardCheckFn), std::move(Args));
2579
2580 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2581 DAG.setRoot(Result.second);
2582 return;
2583 }
2584
2585 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2586 // Otherwise, emit a volatile load to retrieve the stack guard value.
2587 SDValue Chain = DAG.getEntryNode();
2588 if (TLI.useLoadStackGuardNode()) {
2589 Guard = getLoadStackGuard(DAG, dl, Chain);
2590 } else {
2591 const Value *IRGuard = TLI.getSDagStackGuard(M);
2592 SDValue GuardPtr = getValue(IRGuard);
2593
2594 Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2595 MachinePointerInfo(IRGuard, 0), Align,
2596 MachineMemOperand::MOVolatile);
2597 }
2598
2599 // Perform the comparison via a getsetcc.
2600 SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2601 *DAG.getContext(),
2602 Guard.getValueType()),
2603 Guard, GuardVal, ISD::SETNE);
2604
2605 // If the guard/stackslot do not equal, branch to failure MBB.
2606 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2607 MVT::Other, GuardVal.getOperand(0),
2608 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2609 // Otherwise branch to success MBB.
2610 SDValue Br = DAG.getNode(ISD::BR, dl,
2611 MVT::Other, BrCond,
2612 DAG.getBasicBlock(SPD.getSuccessMBB()));
2613
2614 DAG.setRoot(Br);
2615}
2616
2617/// Codegen the failure basic block for a stack protector check.
2618///
2619/// A failure stack protector machine basic block consists simply of a call to
2620/// __stack_chk_fail().
2621///
2622/// For a high level explanation of how this fits into the stack protector
2623/// generation see the comment on the declaration of class
2624/// StackProtectorDescriptor.
2625void
2626SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2627 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2628 TargetLowering::MakeLibCallOptions CallOptions;
2629 CallOptions.setDiscardResult(true);
2630 SDValue Chain =
2631 TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2632 None, CallOptions, getCurSDLoc()).second;
2633 // On PS4, the "return address" must still be within the calling function,
2634 // even if it's at the very end, so emit an explicit TRAP here.
2635 // Passing 'true' for doesNotReturn above won't generate the trap for us.
2636 if (TM.getTargetTriple().isPS4CPU())
2637 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2638
2639 DAG.setRoot(Chain);
2640}
2641
2642/// visitBitTestHeader - This function emits necessary code to produce value
2643/// suitable for "bit tests"
2644void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2645 MachineBasicBlock *SwitchBB) {
2646 SDLoc dl = getCurSDLoc();
2647
2648 // Subtract the minimum value.
2649 SDValue SwitchOp = getValue(B.SValue);
2650 EVT VT = SwitchOp.getValueType();
2651 SDValue RangeSub =
2652 DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
2653
2654 // Determine the type of the test operands.
2655 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2656 bool UsePtrType = false;
2657 if (!TLI.isTypeLegal(VT)) {
2658 UsePtrType = true;
2659 } else {
2660 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2661 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2662 // Switch table case range are encoded into series of masks.
2663 // Just use pointer type, it's guaranteed to fit.
2664 UsePtrType = true;
2665 break;
2666 }
2667 }
2668 SDValue Sub = RangeSub;
2669 if (UsePtrType) {
2670 VT = TLI.getPointerTy(DAG.getDataLayout());
2671 Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2672 }
2673
2674 B.RegVT = VT.getSimpleVT();
2675 B.Reg = FuncInfo.CreateReg(B.RegVT);
2676 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2677
2678 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2679
2680 if (!B.OmitRangeCheck)
2681 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2682 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2683 SwitchBB->normalizeSuccProbs();
2684
2685 SDValue Root = CopyTo;
2686 if (!B.OmitRangeCheck) {
2687 // Conditional branch to the default block.
2688 SDValue RangeCmp = DAG.getSetCC(dl,
2689 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2690 RangeSub.getValueType()),
2691 RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
2692 ISD::SETUGT);
2693
2694 Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
2695 DAG.getBasicBlock(B.Default));
2696 }
2697
2698 // Avoid emitting unnecessary branches to the next block.
2699 if (MBB != NextBlock(SwitchBB))
2700 Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
2701
2702 DAG.setRoot(Root);
2703}
2704
2705/// visitBitTestCase - this function produces one "bit test"
2706void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2707 MachineBasicBlock* NextMBB,
2708 BranchProbability BranchProbToNext,
2709 unsigned Reg,
2710 BitTestCase &B,
2711 MachineBasicBlock *SwitchBB) {
2712 SDLoc dl = getCurSDLoc();
2713 MVT VT = BB.RegVT;
2714 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2715 SDValue Cmp;
2716 unsigned PopCount = countPopulation(B.Mask);
2717 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2718 if (PopCount == 1) {
2719 // Testing for a single bit; just compare the shift count with what it
2720 // would need to be to shift a 1 bit in that position.
2721 Cmp = DAG.getSetCC(
2722 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2723 ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2724 ISD::SETEQ);
2725 } else if (PopCount == BB.Range) {
2726 // There is only one zero bit in the range, test for it directly.
2727 Cmp = DAG.getSetCC(
2728 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2729 ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2730 ISD::SETNE);
2731 } else {
2732 // Make desired shift
2733 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2734 DAG.getConstant(1, dl, VT), ShiftOp);
2735
2736 // Emit bit tests and jumps
2737 SDValue AndOp = DAG.getNode(ISD::AND, dl,
2738 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2739 Cmp = DAG.getSetCC(
2740 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2741 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2742 }
2743
2744 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2745 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2746 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2747 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2748 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
2749 // one as they are relative probabilities (and thus work more like weights),
2750 // and hence we need to normalize them to let the sum of them become one.
2751 SwitchBB->normalizeSuccProbs();
2752
2753 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2754 MVT::Other, getControlRoot(),
2755 Cmp, DAG.getBasicBlock(B.TargetBB));
2756
2757 // Avoid emitting unnecessary branches to the next block.
2758 if (NextMBB != NextBlock(SwitchBB))
2759 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2760 DAG.getBasicBlock(NextMBB));
2761
2762 DAG.setRoot(BrAnd);
2763}
2764
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
  MachineBasicBlock *InvokeMBB = FuncInfo.MBB;

  // Retrieve successors. Look through artificial IR level blocks like
  // catchswitch for successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
                                        LLVMContext::OB_funclet,
                                        LLVMContext::OB_cfguardtarget}) &&
         "Cannot lower invokes with arbitrary operand bundles yet!");

  const Value *Callee(I.getCalledValue());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(&I);
  else if (Fn && Fn->isIntrinsic()) {
    // Only a fixed set of intrinsics may appear as invoke callees.
    switch (Fn->getIntrinsicID()) {
    default:
      llvm_unreachable("Cannot invoke this intrinsic");
    case Intrinsic::donothing:
      // Ignore invokes to @llvm.donothing: jump directly to the next BB.
      break;
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      visitPatchpoint(&I, EHPadBB);
      break;
    case Intrinsic::experimental_gc_statepoint:
      LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
      break;
    case Intrinsic::wasm_rethrow_in_catch: {
      // This is usually done in visitTargetIntrinsic, but this intrinsic is
      // special because it can be invoked, so we manually lower it to a DAG
      // node here.
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(getRoot()); // inchain
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      Ops.push_back(
          DAG.getTargetConstant(Intrinsic::wasm_rethrow_in_catch, getCurSDLoc(),
                                TLI.getPointerTy(DAG.getDataLayout())));
      SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
      DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
      break;
    }
    }
  } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
    // Currently we do not lower any intrinsic calls with deopt operand bundles.
    // Eventually we will support lowering the @llvm.experimental.deoptimize
    // intrinsic, and right now there are no plans to support other intrinsics
    // with deopt state.
    LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
  } else {
    LowerCallTo(&I, getValue(Callee), false, EHPadBB);
  }

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  // We already took care of the exported value for the statepoint instruction
  // during call to the LowerStatepoint.
  if (!isStatepoint(I)) {
    CopyToExportRegsIfNeeded(&I);
  }

  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  // Without branch-probability info, give the unwind edge zero probability.
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);

  // Update successor info.
  addSuccessorWithProb(InvokeMBB, Return);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}
2850
2851void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
2852 MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
2853
2854 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2855 // have to do anything here to lower funclet bundles.
2856 assert(!I.hasOperandBundlesOtherThan(((!I.hasOperandBundlesOtherThan( {LLVMContext::OB_deopt, LLVMContext
::OB_funclet}) && "Cannot lower callbrs with arbitrary operand bundles yet!"
) ? static_cast<void> (0) : __assert_fail ("!I.hasOperandBundlesOtherThan( {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) && \"Cannot lower callbrs with arbitrary operand bundles yet!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2858, __PRETTY_FUNCTION__))
2857 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&((!I.hasOperandBundlesOtherThan( {LLVMContext::OB_deopt, LLVMContext
::OB_funclet}) && "Cannot lower callbrs with arbitrary operand bundles yet!"
) ? static_cast<void> (0) : __assert_fail ("!I.hasOperandBundlesOtherThan( {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) && \"Cannot lower callbrs with arbitrary operand bundles yet!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2858, __PRETTY_FUNCTION__))
2858 "Cannot lower callbrs with arbitrary operand bundles yet!")((!I.hasOperandBundlesOtherThan( {LLVMContext::OB_deopt, LLVMContext
::OB_funclet}) && "Cannot lower callbrs with arbitrary operand bundles yet!"
) ? static_cast<void> (0) : __assert_fail ("!I.hasOperandBundlesOtherThan( {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) && \"Cannot lower callbrs with arbitrary operand bundles yet!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2858, __PRETTY_FUNCTION__))
;
2859
2860 assert(isa<InlineAsm>(I.getCalledValue()) &&((isa<InlineAsm>(I.getCalledValue()) && "Only know how to handle inlineasm callbr"
) ? static_cast<void> (0) : __assert_fail ("isa<InlineAsm>(I.getCalledValue()) && \"Only know how to handle inlineasm callbr\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2861, __PRETTY_FUNCTION__))
2861 "Only know how to handle inlineasm callbr")((isa<InlineAsm>(I.getCalledValue()) && "Only know how to handle inlineasm callbr"
) ? static_cast<void> (0) : __assert_fail ("isa<InlineAsm>(I.getCalledValue()) && \"Only know how to handle inlineasm callbr\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2861, __PRETTY_FUNCTION__))
;
2862 visitInlineAsm(&I);
2863 CopyToExportRegsIfNeeded(&I);
2864
2865 // Retrieve successors.
2866 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
2867 Return->setInlineAsmBrDefaultTarget();
2868
2869 // Update successor info.
2870 addSuccessorWithProb(CallBrMBB, Return);
2871 for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
2872 MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
2873 addSuccessorWithProb(CallBrMBB, Target);
2874 CallBrMBB->addInlineAsmBrIndirectTarget(Target);
2875 }
2876 CallBrMBB->normalizeSuccProbs();
2877
2878 // Drop into default successor.
2879 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2880 MVT::Other, getControlRoot(),
2881 DAG.getBasicBlock(Return)));
2882}
2883
2884void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2885 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!")::llvm::llvm_unreachable_internal("SelectionDAGBuilder shouldn't visit resume instructions!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2885)
;
2886}
2887
void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
  assert(FuncInfo.MBB->isEHPad() &&
         "Call to landingpad not in landing pad!");

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother to create these DAG nodes.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return;

  SmallVector<EVT, 2> ValueVTs;
  SDLoc dl = getCurSDLoc();
  ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
  assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");

  // Get the two live-in registers as SDValues. The physregs have already been
  // copied into virtual registers.
  SDValue Ops[2];
  // Ops[0]: exception pointer (or 0 when no virtual register was created).
  if (FuncInfo.ExceptionPointerVirtReg) {
    Ops[0] = DAG.getZExtOrTrunc(
        DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                           FuncInfo.ExceptionPointerVirtReg,
                           TLI.getPointerTy(DAG.getDataLayout())),
        dl, ValueVTs[0]);
  } else {
    Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
  }
  // Ops[1]: exception selector value.
  Ops[1] = DAG.getZExtOrTrunc(
      DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                         FuncInfo.ExceptionSelectorVirtReg,
                         TLI.getPointerTy(DAG.getDataLayout())),
      dl, ValueVTs[1]);

  // Merge into one.
  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
                            DAG.getVTList(ValueVTs), Ops);
  setValue(&LP, Res);
}
2935
2936void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2937 MachineBasicBlock *Last) {
2938 // Update JTCases.
2939 for (unsigned i = 0, e = SL->JTCases.size(); i != e; ++i)
2940 if (SL->JTCases[i].first.HeaderBB == First)
2941 SL->JTCases[i].first.HeaderBB = Last;
2942
2943 // Update BitTestCases.
2944 for (unsigned i = 0, e = SL->BitTestCases.size(); i != e; ++i)
2945 if (SL->BitTestCases[i].Parent == First)
2946 SL->BitTestCases[i].Parent = Last;
2947}
2948
2949void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2950 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2951
2952 // Update machine-CFG edges with unique successors.
2953 SmallSet<BasicBlock*, 32> Done;
2954 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2955 BasicBlock *BB = I.getSuccessor(i);
2956 bool Inserted = Done.insert(BB).second;
2957 if (!Inserted)
2958 continue;
2959
2960 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2961 addSuccessorWithProb(IndirectBrMBB, Succ);
2962 }
2963 IndirectBrMBB->normalizeSuccProbs();
2964
2965 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2966 MVT::Other, getControlRoot(),
2967 getValue(I.getAddress())));
2968}
2969
2970void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2971 if (!DAG.getTarget().Options.TrapUnreachable)
2972 return;
2973
2974 // We may be able to ignore unreachable behind a noreturn call.
2975 if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
2976 const BasicBlock &BB = *I.getParent();
2977 if (&I != &BB.front()) {
2978 BasicBlock::const_iterator PredI =
2979 std::prev(BasicBlock::const_iterator(&I));
2980 if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2981 if (Call->doesNotReturn())
2982 return;
2983 }
2984 }
2985 }
2986
2987 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2988}
2989
2990void SelectionDAGBuilder::visitFSub(const User &I) {
2991 // -0.0 - X --> fneg
2992 Type *Ty = I.getType();
2993 if (isa<Constant>(I.getOperand(0)) &&
2994 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2995 SDValue Op2 = getValue(I.getOperand(1));
2996 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2997 Op2.getValueType(), Op2));
2998 return;
2999 }
3000
3001 visitBinary(I, ISD::FSUB);
3002}
3003
/// Checks if the given instruction performs a vector reduction, in which case
/// we have the freedom to alter the elements in the result as long as the
/// reduction of them stays unchanged.
static bool isVectorReductionOp(const User *I) {
  const Instruction *Inst = dyn_cast<Instruction>(I);
  if (!Inst || !Inst->getType()->isVectorTy())
    return false;

  // Only commutative/associative operations can be reduction roots; FP ops
  // additionally require fast-math to permit reassociation.
  auto OpCode = Inst->getOpcode();
  switch (OpCode) {
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    break;
  case Instruction::FAdd:
  case Instruction::FMul:
    if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
      if (FPOp->getFastMathFlags().isFast())
        break;
    LLVM_FALLTHROUGH;
  default:
    return false;
  }

  unsigned ElemNum = Inst->getType()->getVectorNumElements();
  // Ensure the reduction size is a power of 2.
  if (!isPowerOf2_32(ElemNum))
    return false;

  // Number of elements still to be folded together; halved at each
  // shuffle+op step until a single element remains.
  unsigned ElemNumToReduce = ElemNum;

  // Do DFS search on the def-use chain from the given instruction. We only
  // allow four kinds of operations during the search until we reach the
  // instruction that extracts the first element from the vector:
  //
  // 1. The reduction operation of the same opcode as the given instruction.
  //
  // 2. PHI node.
  //
  // 3. ShuffleVector instruction together with a reduction operation that
  //    does a partial reduction.
  //
  // 4. ExtractElement that extracts the first element from the vector, and we
  //    stop searching the def-use chain here.
  //
  // 3 & 4 above perform a reduction on all elements of the vector. We push defs
  // from 1-3 to the stack to continue the DFS. The given instruction is not
  // a reduction operation if we meet any other instructions other than those
  // listed above.

  SmallVector<const User *, 16> UsersToVisit{Inst};
  SmallPtrSet<const User *, 16> Visited;
  bool ReduxExtracted = false;

  while (!UsersToVisit.empty()) {
    auto User = UsersToVisit.back();
    UsersToVisit.pop_back();
    if (!Visited.insert(User).second)
      continue;

    for (const auto *U : User->users()) {
      auto Inst = dyn_cast<Instruction>(U);
      if (!Inst)
        return false;

      if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
        // Same-opcode reduction step or a PHI; FP ops must stay fast-math.
        if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
          if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast())
            return false;
        UsersToVisit.push_back(U);
      } else if (const ShuffleVectorInst *ShufInst =
                     dyn_cast<ShuffleVectorInst>(U)) {
        // Detect the following pattern: A ShuffleVector instruction together
        // with a reduction that do partial reduction on the first and second
        // ElemNumToReduce / 2 elements, and store the result in
        // ElemNumToReduce / 2 elements in another vector.

        unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
        if (ResultElements < ElemNum)
          return false;

        if (ElemNumToReduce == 1)
          return false;
        if (!isa<UndefValue>(U->getOperand(1)))
          return false;
        // First half of the mask must select the upper half of the remaining
        // live elements...
        for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
          if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
            return false;
        // ...and the rest of the mask must be undef (-1).
        for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
          if (ShufInst->getMaskValue(i) != -1)
            return false;

        // There is only one user of this ShuffleVector instruction, which
        // must be a reduction operation.
        if (!U->hasOneUse())
          return false;

        auto U2 = dyn_cast<Instruction>(*U->user_begin());
        if (!U2 || U2->getOpcode() != OpCode)
          return false;

        // Check operands of the reduction operation.
        if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
            (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
          UsersToVisit.push_back(U2);
          ElemNumToReduce /= 2;
        } else
          return false;
      } else if (isa<ExtractElementInst>(U)) {
        // At this moment we should have reduced all elements in the vector.
        if (ElemNumToReduce != 1)
          return false;

        // Only extraction of lane 0 terminates a valid reduction chain.
        const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
        if (!Val || !Val->isZero())
          return false;

        ReduxExtracted = true;
      } else
        return false;
    }
  }
  return ReduxExtracted;
}
3130
3131void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3132 SDNodeFlags Flags;
3133
3134 SDValue Op = getValue(I.getOperand(0));
3135 SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3136 Op, Flags);
3137 setValue(&I, UnNodeValue);
3138}
3139
/// Lower a binary IR operator to the given ISD opcode, transferring any
/// wrap/exact/fast-math flags from the IR instruction onto the DAG node.
void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
  SDNodeFlags Flags;
  // Transfer nsw/nuw from overflowing operators (add/sub/mul/shl forms).
  if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
    Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
    Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
  }
  // Transfer the 'exact' flag from div/shr forms.
  if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
    Flags.setExact(ExactOp->isExact());
  }
  // Mark nodes that belong to a detected horizontal vector reduction so
  // later combines can recognize the pattern.
  if (isVectorReductionOp(&I)) {
    Flags.setVectorReduction(true);
    LLVM_DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");

    // If no flags are set we will propagate the incoming flags, if any flags
    // are set, we will intersect them with the incoming flag and so we need to
    // copy the FMF flags here.
    if (auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
      Flags.copyFMF(*FPOp);
    }
  }

  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
                                     Op1, Op2, Flags);
  setValue(&I, BinNodeValue);
}
3167
/// Lower an IR shift (shl/lshr/ashr) to the corresponding ISD node, coercing
/// the shift amount to the target's shift-amount type and propagating the
/// nuw/nsw/exact flags.
void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));

  EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
      Op1.getValueType(), DAG.getDataLayout());

  // Coerce the shift amount to the right type if we can.
  if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
    unsigned ShiftSize = ShiftTy.getSizeInBits();
    unsigned Op2Size = Op2.getValueSizeInBits();
    SDLoc DL = getCurSDLoc();

    // If the operand is smaller than the shift count type, promote it.
    if (ShiftSize > Op2Size)
      Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);

    // If the operand is larger than the shift count type but the shift
    // count type has enough bits to represent any shift value, truncate
    // it now. This is a common case and it exposes the truncate to
    // optimization early.
    // NOTE(review): the comment above talks about representing "any shift
    // value", which is bounded by the width of the *shifted* value (Op1),
    // yet the condition below tests Op2's own width — confirm whether
    // Log2_32_Ceil(Op1.getValueSizeInBits()) was intended.
    else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
      Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
    // Otherwise we'll need to temporarily settle for some other convenient
    // type. Type legalization will make adjustments once the shiftee is split.
    else
      Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
  }

  bool nuw = false;
  bool nsw = false;
  bool exact = false;

  // Flags are only meaningful for genuine shift instructions, not for
  // other opcodes routed through here.
  if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {

    if (const OverflowingBinaryOperator *OFBinOp =
            dyn_cast<const OverflowingBinaryOperator>(&I)) {
      nuw = OFBinOp->hasNoUnsignedWrap();
      nsw = OFBinOp->hasNoSignedWrap();
    }
    if (const PossiblyExactOperator *ExactOp =
            dyn_cast<const PossiblyExactOperator>(&I))
      exact = ExactOp->isExact();
  }
  SDNodeFlags Flags;
  Flags.setExact(exact);
  Flags.setNoSignedWrap(nsw);
  Flags.setNoUnsignedWrap(nuw);
  SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
                            Flags);
  setValue(&I, Res);
}
3220
3221void SelectionDAGBuilder::visitSDiv(const User &I) {
3222 SDValue Op1 = getValue(I.getOperand(0));
3223 SDValue Op2 = getValue(I.getOperand(1));
3224
3225 SDNodeFlags Flags;
3226 Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3227 cast<PossiblyExactOperator>(&I)->isExact());
3228 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3229 Op2, Flags));
3230}
3231
3232void SelectionDAGBuilder::visitICmp(const User &I) {
3233 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3234 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3235 predicate = IC->getPredicate();
3236 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3237 predicate = ICmpInst::Predicate(IC->getPredicate());
3238 SDValue Op1 = getValue(I.getOperand(0));
3239 SDValue Op2 = getValue(I.getOperand(1));
3240 ISD::CondCode Opcode = getICmpCondCode(predicate);
3241
3242 auto &TLI = DAG.getTargetLoweringInfo();
3243 EVT MemVT =
3244 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3245
3246 // If a pointer's DAG type is larger than its memory type then the DAG values
3247 // are zero-extended. This breaks signed comparisons so truncate back to the
3248 // underlying type before doing the compare.
3249 if (Op1.getValueType() != MemVT) {
3250 Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3251 Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3252 }
3253
3254 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3255 I.getType());
3256 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3257}
3258
3259void SelectionDAGBuilder::visitFCmp(const User &I) {
3260 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3261 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3262 predicate = FC->getPredicate();
3263 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3264 predicate = FCmpInst::Predicate(FC->getPredicate());
3265 SDValue Op1 = getValue(I.getOperand(0));
3266 SDValue Op2 = getValue(I.getOperand(1));
3267
3268 ISD::CondCode Condition = getFCmpCondCode(predicate);
3269 auto *FPMO = dyn_cast<FPMathOperator>(&I);
3270 if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
3271 Condition = getFCmpCodeWithoutNaN(Condition);
3272
3273 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3274 I.getType());
3275 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3276}
3277
3278// Check if the condition of the select has one use or two users that are both
3279// selects with the same condition.
3280static bool hasOnlySelectUsers(const Value *Cond) {
3281 return llvm::all_of(Cond->users(), [](const Value *V) {
3282 return isa<SelectInst>(V);
3283 });
3284}
3285
/// Lower a select instruction. Scalar selects become ISD::SELECT, vector
/// conditions become ISD::VSELECT, and recognized min/max/abs patterns are
/// lowered directly to the matching ISD node when legal for the target.
void SelectionDAGBuilder::visitSelect(const User &I) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
                  ValueVTs);
  unsigned NumValues = ValueVTs.size();
  // A select producing an empty aggregate lowers to nothing.
  if (NumValues == 0) return;

  SmallVector<SDValue, 4> Values(NumValues);
  SDValue Cond = getValue(I.getOperand(0));
  SDValue LHSVal = getValue(I.getOperand(1));
  SDValue RHSVal = getValue(I.getOperand(2));
  SmallVector<SDValue, 1> BaseOps(1, Cond);
  ISD::NodeType OpCode =
      Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;

  bool IsUnaryAbs = false;

  // Min/max matching is only viable if all output VTs are the same.
  if (is_splat(ValueVTs)) {
    EVT VT = ValueVTs[0];
    LLVMContext &Ctx = *DAG.getContext();
    auto &TLI = DAG.getTargetLoweringInfo();

    // We care about the legality of the operation after it has been type
    // legalized.
    while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
      VT = TLI.getTypeToTransformTo(Ctx, VT);

    // If the vselect is legal, assume we want to leave this as a vector setcc +
    // vselect. Otherwise, if this is going to be scalarized, we want to see if
    // min/max is legal on the scalar type.
    bool UseScalarMinMax = VT.isVector() &&
      !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);

    Value *LHS, *RHS;
    auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
    ISD::NodeType Opc = ISD::DELETED_NODE;
    switch (SPR.Flavor) {
    case SPF_UMAX: Opc = ISD::UMAX; break;
    case SPF_UMIN: Opc = ISD::UMIN; break;
    case SPF_SMAX: Opc = ISD::SMAX; break;
    case SPF_SMIN: Opc = ISD::SMIN; break;
    case SPF_FMINNUM:
      // Pick the FP min form based on what the pattern requires for NaN
      // inputs and what the target supports.
      switch (SPR.NaNBehavior) {
      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
      case SPNB_RETURNS_NAN: Opc = ISD::FMINIMUM; break;
      case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
      case SPNB_RETURNS_ANY: {
        if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
          Opc = ISD::FMINNUM;
        else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
          Opc = ISD::FMINIMUM;
        else if (UseScalarMinMax)
          Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
            ISD::FMINNUM : ISD::FMINIMUM;
        break;
      }
      }
      break;
    case SPF_FMAXNUM:
      switch (SPR.NaNBehavior) {
      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
      case SPNB_RETURNS_NAN: Opc = ISD::FMAXIMUM; break;
      case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
      case SPNB_RETURNS_ANY:

        if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
          Opc = ISD::FMAXNUM;
        else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
          Opc = ISD::FMAXIMUM;
        else if (UseScalarMinMax)
          Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
            ISD::FMAXNUM : ISD::FMAXIMUM;
        break;
      }
      break;
    case SPF_ABS:
      IsUnaryAbs = true;
      Opc = ISD::ABS;
      break;
    case SPF_NABS:
      // TODO: we need to produce sub(0, abs(X)).
    default: break;
    }

    if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
        (TLI.isOperationLegalOrCustom(Opc, VT) ||
         (UseScalarMinMax &&
          TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
        // If the underlying comparison instruction is used by any other
        // instruction, the consumed instructions won't be destroyed, so it is
        // not profitable to convert to a min/max.
        hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
      OpCode = Opc;
      LHSVal = getValue(LHS);
      RHSVal = getValue(RHS);
      BaseOps.clear();
    }

    if (IsUnaryAbs) {
      OpCode = Opc;
      LHSVal = getValue(LHS);
      BaseOps.clear();
    }
  }

  if (IsUnaryAbs) {
    // NOTE(review): the static analyzer reports a possible null dereference
    // of LHSVal.getNode() here (via SDValue::getValueType). That requires
    // getValue(LHS) above to return an empty SDValue — confirm whether that
    // can happen for a matched SPF_ABS pattern, or suppress the warning.
    for (unsigned i = 0; i != NumValues; ++i) {
      Values[i] =
          DAG.getNode(OpCode, getCurSDLoc(),
                      LHSVal.getNode()->getValueType(LHSVal.getResNo() + i),
                      SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
    }
  } else {
    // Emit one select/vselect/min/max per aggregate member.
    for (unsigned i = 0; i != NumValues; ++i) {
      SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
      Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
      Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
      Values[i] = DAG.getNode(
          OpCode, getCurSDLoc(),
          LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops);
    }
  }

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(ValueVTs), Values));
}
3413
3414void SelectionDAGBuilder::visitTrunc(const User &I) {
3415 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3416 SDValue N = getValue(I.getOperand(0));
3417 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3418 I.getType());
3419 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3420}
3421
3422void SelectionDAGBuilder::visitZExt(const User &I) {
3423 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3424 // ZExt also can't be a cast to bool for same reason. So, nothing much to do
3425 SDValue N = getValue(I.getOperand(0));
3426 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3427 I.getType());
3428 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3429}
3430
3431void SelectionDAGBuilder::visitSExt(const User &I) {
3432 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3433 // SExt also can't be a cast to bool for same reason. So, nothing much to do
3434 SDValue N = getValue(I.getOperand(0));
3435 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3436 I.getType());
3437 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3438}
3439
3440void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3441 // FPTrunc is never a no-op cast, no need to check
3442 SDValue N = getValue(I.getOperand(0));
3443 SDLoc dl = getCurSDLoc();
3444 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3445 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3446 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3447 DAG.getTargetConstant(
3448 0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3449}
3450
3451void SelectionDAGBuilder::visitFPExt(const User &I) {
3452 // FPExt is never a no-op cast, no need to check
3453 SDValue N = getValue(I.getOperand(0));
3454 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3455 I.getType());
3456 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3457}
3458
3459void SelectionDAGBuilder::visitFPToUI(const User &I) {
3460 // FPToUI is never a no-op cast, no need to check
3461 SDValue N = getValue(I.getOperand(0));
3462 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3463 I.getType());
3464 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3465}
3466
3467void SelectionDAGBuilder::visitFPToSI(const User &I) {
3468 // FPToSI is never a no-op cast, no need to check
3469 SDValue N = getValue(I.getOperand(0));
3470 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3471 I.getType());
3472 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3473}
3474
3475void SelectionDAGBuilder::visitUIToFP(const User &I) {
3476 // UIToFP is never a no-op cast, no need to check
3477 SDValue N = getValue(I.getOperand(0));
3478 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3479 I.getType());
3480 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3481}
3482
3483void SelectionDAGBuilder::visitSIToFP(const User &I) {
3484 // SIToFP is never a no-op cast, no need to check
3485 SDValue N = getValue(I.getOperand(0));
3486 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3487 I.getType());
3488 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3489}
3490
3491void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3492 // What to do depends on the size of the integer and the size of the pointer.
3493 // We can either truncate, zero extend, or no-op, accordingly.
3494 SDValue N = getValue(I.getOperand(0));
3495 auto &TLI = DAG.getTargetLoweringInfo();
3496 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3497 I.getType());
3498 EVT PtrMemVT =
3499 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3500 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3501 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3502 setValue(&I, N);
3503}
3504
3505void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3506 // What to do depends on the size of the integer and the size of the pointer.
3507 // We can either truncate, zero extend, or no-op, accordingly.
3508 SDValue N = getValue(I.getOperand(0));
3509 auto &TLI = DAG.getTargetLoweringInfo();
3510 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3511 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3512 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3513 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3514 setValue(&I, N);
3515}
3516
3517void SelectionDAGBuilder::visitBitCast(const User &I) {
3518 SDValue N = getValue(I.getOperand(0));
3519 SDLoc dl = getCurSDLoc();
3520 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3521 I.getType());
3522
3523 // BitCast assures us that source and destination are the same size so this is
3524 // either a BITCAST or a no-op.
3525 if (DestVT != N.getValueType())
3526 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3527 DestVT, N)); // convert types.
3528 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3529 // might fold any kind of constant expression to an integer constant and that
3530 // is not what we are looking for. Only recognize a bitcast of a genuine
3531 // constant integer as an opaque constant.
3532 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3533 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3534 /*isOpaque*/true));
3535 else
3536 setValue(&I, N); // noop cast.
3537}
3538
3539void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3540 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3541 const Value *SV = I.getOperand(0);
3542 SDValue N = getValue(SV);
3543 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3544
3545 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3546 unsigned DestAS = I.getType()->getPointerAddressSpace();
3547
3548 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3549 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3550
3551 setValue(&I, N);
3552}
3553
3554void SelectionDAGBuilder::visitInsertElement(const User &I) {
3555 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3556 SDValue InVec = getValue(I.getOperand(0));
3557 SDValue InVal = getValue(I.getOperand(1));
3558 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3559 TLI.getVectorIdxTy(DAG.getDataLayout()));
3560 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3561 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3562 InVec, InVal, InIdx));
3563}
3564
3565void SelectionDAGBuilder::visitExtractElement(const User &I) {
3566 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3567 SDValue InVec = getValue(I.getOperand(0));
3568 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3569 TLI.getVectorIdxTy(DAG.getDataLayout()));
3570 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3571 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3572 InVec, InIdx));
3573}
3574
/// Lower a shufflevector instruction. When mask and source lengths match this
/// is a direct VECTOR_SHUFFLE; otherwise the sources/mask are normalized via
/// CONCAT_VECTORS, EXTRACT_SUBVECTOR, or an element-wise BUILD_VECTOR.
void SelectionDAGBuilder::visitShuffleVector(const User &I) {
  SDValue Src1 = getValue(I.getOperand(0));
  SDValue Src2 = getValue(I.getOperand(1));
  Constant *MaskV = cast<Constant>(I.getOperand(2));
  SDLoc DL = getCurSDLoc();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  EVT SrcVT = Src1.getValueType();
  unsigned SrcNumElts = SrcVT.getVectorNumElements();

  if (MaskV->isNullValue() && VT.isScalableVector()) {
    // Canonical splat form of first element of first input vector.
    SDValue FirstElt =
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
                    DAG.getVectorIdxConstant(0, DL));
    setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
    return;
  }

  // For now, we only handle splats for scalable vectors.
  // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
  // for targets that support a SPLAT_VECTOR for non-scalable vector types.
  assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");

  SmallVector<int, 8> Mask;
  ShuffleVectorInst::getShuffleMask(MaskV, Mask);
  unsigned MaskNumElts = Mask.size();

  // Easy case: mask and sources have the same length, emit directly.
  if (SrcNumElts == MaskNumElts) {
    setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
    return;
  }

  // Normalize the shuffle vector since mask and vector length don't match.
  if (SrcNumElts < MaskNumElts) {
    // Mask is longer than the source vectors. We can use concatenate vector to
    // make the mask and vectors lengths match.

    if (MaskNumElts % SrcNumElts == 0) {
      // Mask length is a multiple of the source vector length.
      // Check if the shuffle is some kind of concatenation of the input
      // vectors.
      unsigned NumConcat = MaskNumElts / SrcNumElts;
      bool IsConcat = true;
      // ConcatSrcs[k] records which source (0 or 1) fills piece k; -1 means
      // the piece is entirely undef so far.
      SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
      for (unsigned i = 0; i != MaskNumElts; ++i) {
        int Idx = Mask[i];
        if (Idx < 0)
          continue;
        // Ensure the indices in each SrcVT sized piece are sequential and that
        // the same source is used for the whole piece.
        if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
            (ConcatSrcs[i / SrcNumElts] >= 0 &&
             ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
          IsConcat = false;
          break;
        }
        // Remember which source this index came from.
        ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
      }

      // The shuffle is concatenating multiple vectors together. Just emit
      // a CONCAT_VECTORS operation.
      if (IsConcat) {
        SmallVector<SDValue, 8> ConcatOps;
        for (auto Src : ConcatSrcs) {
          if (Src < 0)
            ConcatOps.push_back(DAG.getUNDEF(SrcVT));
          else if (Src == 0)
            ConcatOps.push_back(Src1);
          else
            ConcatOps.push_back(Src2);
        }
        setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
        return;
      }
    }

    // Not a plain concatenation: widen both sources to a multiple of their
    // length that covers the mask, then shuffle in the padded type.
    unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
    unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
    EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
                                    PaddedMaskNumElts);

    // Pad both vectors with undefs to make them the same length as the mask.
    SDValue UndefVal = DAG.getUNDEF(SrcVT);

    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
    MOps1[0] = Src1;
    MOps2[0] = Src2;

    Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
    Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);

    // Readjust mask for new input vector length.
    SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
    for (unsigned i = 0; i != MaskNumElts; ++i) {
      int Idx = Mask[i];
      if (Idx >= (int)SrcNumElts)
        Idx -= SrcNumElts - PaddedMaskNumElts;
      MappedOps[i] = Idx;
    }

    SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);

    // If the concatenated vector was padded, extract a subvector with the
    // correct number of elements.
    if (MaskNumElts != PaddedMaskNumElts)
      Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
                           DAG.getVectorIdxConstant(0, DL));

    setValue(&I, Result);
    return;
  }

  if (SrcNumElts > MaskNumElts) {
    // Analyze the access pattern of the vector to see if we can extract
    // two subvectors and do the shuffle.
    int StartIdx[2] = { -1, -1 }; // StartIdx to extract from
    bool CanExtract = true;
    for (int Idx : Mask) {
      unsigned Input = 0;
      if (Idx < 0)
        continue;

      if (Idx >= (int)SrcNumElts) {
        Input = 1;
        Idx -= SrcNumElts;
      }

      // If all the indices come from the same MaskNumElts sized portion of
      // the sources we can use extract. Also make sure the extract wouldn't
      // extract past the end of the source.
      int NewStartIdx = alignDown(Idx, MaskNumElts);
      if (NewStartIdx + MaskNumElts > SrcNumElts ||
          (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
        CanExtract = false;
      // Make sure we always update StartIdx as we use it to track if all
      // elements are undef.
      StartIdx[Input] = NewStartIdx;
    }

    if (StartIdx[0] < 0 && StartIdx[1] < 0) {
      setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
      return;
    }
    if (CanExtract) {
      // Extract appropriate subvector and generate a vector shuffle
      for (unsigned Input = 0; Input < 2; ++Input) {
        SDValue &Src = Input == 0 ? Src1 : Src2;
        if (StartIdx[Input] < 0)
          Src = DAG.getUNDEF(VT);
        else {
          Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
                            DAG.getVectorIdxConstant(StartIdx[Input], DL));
        }
      }

      // Calculate new mask.
      SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
      for (int &Idx : MappedOps) {
        if (Idx >= (int)SrcNumElts)
          Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
        else if (Idx >= 0)
          Idx -= StartIdx[0];
      }

      setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
      return;
    }
  }

  // We can't use either concat vectors or extract subvectors, so fall back
  // to replacing the shuffle with per-element extracts and a build vector.
  EVT EltVT = VT.getVectorElementType();
  SmallVector<SDValue,8> Ops;
  for (int Idx : Mask) {
    SDValue Res;

    if (Idx < 0) {
      Res = DAG.getUNDEF(EltVT);
    } else {
      SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
      if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;

      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
                        DAG.getVectorIdxConstant(Idx, DL));
    }

    Ops.push_back(Res);
  }

  setValue(&I, DAG.getBuildVector(VT, DL, Ops));
}
3770
/// Lower an insertvalue (instruction or constant expression): copy the
/// aggregate's register values, replacing the slice at the computed linear
/// index with the inserted value's registers.
void SelectionDAGBuilder::visitInsertValue(const User &I) {
  ArrayRef<unsigned> Indices;
  if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
    Indices = IV->getIndices();
  else
    Indices = cast<ConstantExpr>(&I)->getIndices();

  const Value *Op0 = I.getOperand(0);
  const Value *Op1 = I.getOperand(1);
  Type *AggTy = I.getType();
  Type *ValTy = Op1->getType();
  // Inserting into / from undef lets us emit UNDEF values instead of copies.
  bool IntoUndef = isa<UndefValue>(Op0);
  bool FromUndef = isa<UndefValue>(Op1);

  // Flatten the nested index list into a single offset into the aggregate's
  // linearized list of leaf values.
  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
  SmallVector<EVT, 4> ValValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);

  unsigned NumAggValues = AggValueVTs.size();
  unsigned NumValValues = ValValueVTs.size();
  SmallVector<SDValue, 4> Values(NumAggValues);

  // Ignore an insertvalue that produces an empty object
  if (!NumAggValues) {
    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
    return;
  }

  SDValue Agg = getValue(Op0);
  unsigned i = 0;
  // Copy the beginning value(s) from the original aggregate.
  for (; i != LinearIndex; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                SDValue(Agg.getNode(), Agg.getResNo() + i);
  // Copy values from the inserted value(s).
  if (NumValValues) {
    SDValue Val = getValue(Op1);
    for (; i != LinearIndex + NumValValues; ++i)
      Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                  SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
  }
  // Copy remaining value(s) from the original aggregate.
  for (; i != NumAggValues; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                SDValue(Agg.getNode(), Agg.getResNo() + i);

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(AggValueVTs), Values));
}
3824
/// Lower an extractvalue (instruction or constant expression): select the
/// slice of the aggregate's register values identified by the linearized
/// index list.
void SelectionDAGBuilder::visitExtractValue(const User &I) {
  ArrayRef<unsigned> Indices;
  if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
    Indices = EV->getIndices();
  else
    Indices = cast<ConstantExpr>(&I)->getIndices();

  const Value *Op0 = I.getOperand(0);
  Type *AggTy = Op0->getType();
  Type *ValTy = I.getType();
  // Extracting from undef lets us emit UNDEF values instead of copies.
  bool OutOfUndef = isa<UndefValue>(Op0);

  // Flatten the nested index list into a single offset into the aggregate's
  // linearized list of leaf values.
  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 4> ValValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);

  unsigned NumValValues = ValValueVTs.size();

  // Ignore a extractvalue that produces an empty object
  if (!NumValValues) {
    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
    return;
  }

  SmallVector<SDValue, 4> Values(NumValValues);

  SDValue Agg = getValue(Op0);
  // Copy out the selected value(s).
  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
    Values[i - LinearIndex] =
      OutOfUndef ?
        DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
        SDValue(Agg.getNode(), Agg.getResNo() + i);

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(ValValueVTs), Values));
}
3864
3865void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3866 Value *Op0 = I.getOperand(0);
3867 // Note that the pointer operand may be a vector of pointers. Take the scalar
3868 // element which holds a pointer.
3869 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3870 SDValue N = getValue(Op0);
3871 SDLoc dl = getCurSDLoc();
3872 auto &TLI = DAG.getTargetLoweringInfo();
3873 MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
3874 MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
3875
3876 // Normalize Vector GEP - all scalar operands should be converted to the
3877 // splat vector.
3878 bool IsVectorGEP = I.getType()->isVectorTy();
3879 ElementCount VectorElementCount = IsVectorGEP ?
3880 I.getType()->getVectorElementCount() : ElementCount(0, false);
3881
3882 if (IsVectorGEP && !N.getValueType().isVector()) {
3883 LLVMContext &Context = *DAG.getContext();
3884 EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
3885 if (VectorElementCount.Scalable)
3886 N = DAG.getSplatVector(VT, dl, N);
3887 else
3888 N = DAG.getSplatBuildVector(VT, dl, N);
3889 }
3890
3891 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3892 GTI != E; ++GTI) {
3893 const Value *Idx = GTI.getOperand();
3894 if (StructType *StTy = GTI.getStructTypeOrNull()) {
3895 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3896 if (Field) {
3897 // N = N + Offset
3898 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3899
3900 // In an inbounds GEP with an offset that is nonnegative even when
3901 // interpreted as signed, assume there is no unsigned overflow.
3902 SDNodeFlags Flags;
3903 if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3904 Flags.setNoUnsignedWrap(true);
3905
3906 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3907 DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3908 }
3909 } else {
3910 // IdxSize is the width of the arithmetic according to IR semantics.
3911 // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
3912 // (and fix up the result later).
3913 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3914 MVT IdxTy = MVT::getIntegerVT(IdxSize);
3915 TypeSize ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
3916 // We intentionally mask away the high bits here; ElementSize may not
3917 // fit in IdxTy.
3918 APInt ElementMul(IdxSize, ElementSize.getKnownMinSize());
3919 bool ElementScalable = ElementSize.isScalable();
3920
3921 // If this is a scalar constant or a splat vector of constants,
3922 // handle it quickly.
3923 const auto *C = dyn_cast<Constant>(Idx);
3924 if (C && isa<VectorType>(C->getType()))
3925 C = C->getSplatValue();
3926
3927 const auto *CI = dyn_cast_or_null<ConstantInt>(C);
3928 if (CI && CI->isZero())
3929 continue;
3930 if (CI && !ElementScalable) {
3931 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
3932 LLVMContext &Context = *DAG.getContext();
3933 SDValue OffsVal;
3934 if (IsVectorGEP)
3935 OffsVal = DAG.getConstant(
3936 Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
3937 else
3938 OffsVal = DAG.getConstant(Offs, dl, IdxTy);
3939
3940 // In an inbounds GEP with an offset that is nonnegative even when
3941 // interpreted as signed, assume there is no unsigned overflow.
3942 SDNodeFlags Flags;
3943 if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3944 Flags.setNoUnsignedWrap(true);
3945
3946 OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
3947
3948 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3949 continue;
3950 }
3951
3952 // N = N + Idx * ElementMul;
3953 SDValue IdxN = getValue(Idx);
3954
3955 if (!IdxN.getValueType().isVector() && IsVectorGEP) {
3956 EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
3957 VectorElementCount);
3958 if (VectorElementCount.Scalable)
3959 IdxN = DAG.getSplatVector(VT, dl, IdxN);
3960 else
3961 IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3962 }
3963
3964 // If the index is smaller or larger than intptr_t, truncate or extend
3965 // it.
3966 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3967
3968 if (ElementScalable) {
3969 EVT VScaleTy = N.getValueType().getScalarType();
3970 SDValue VScale = DAG.getNode(
3971 ISD::VSCALE, dl, VScaleTy,
3972 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
3973 if (IsVectorGEP)
3974 VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
3975 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
3976 } else {
3977 // If this is a multiply by a power of two, turn it into a shl
3978 // immediately. This is a very common case.
3979 if (ElementMul != 1) {
3980 if (ElementMul.isPowerOf2()) {
3981 unsigned Amt = ElementMul.logBase2();
3982 IdxN = DAG.getNode(ISD::SHL, dl,
3983 N.getValueType(), IdxN,
3984 DAG.getConstant(Amt, dl, IdxN.getValueType()));
3985 } else {
3986 SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
3987 IdxN.getValueType());
3988 IdxN = DAG.getNode(ISD::MUL, dl,
3989 N.getValueType(), IdxN, Scale);
3990 }
3991 }
3992 }
3993
3994 N = DAG.getNode(ISD::ADD, dl,
3995 N.getValueType(), N, IdxN);
3996 }
3997 }
3998
3999 if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4000 N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4001
4002 setValue(&I, N);
4003}
4004
4005void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4006 // If this is a fixed sized alloca in the entry block of the function,
4007 // allocate it statically on the stack.
4008 if (FuncInfo.StaticAllocaMap.count(&I))
4009 return; // getValue will auto-populate this.
4010
4011 SDLoc dl = getCurSDLoc();
4012 Type *Ty = I.getAllocatedType();
4013 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4014 auto &DL = DAG.getDataLayout();
4015 uint64_t TySize = DL.getTypeAllocSize(Ty);
4016 unsigned Align =
4017 std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
4018
4019 SDValue AllocSize = getValue(I.getArraySize());
4020
4021 EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
4022 if (AllocSize.getValueType() != IntPtr)
4023 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4024
4025 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
4026 AllocSize,
4027 DAG.getConstant(TySize, dl, IntPtr));
4028
4029 // Handle alignment. If the requested alignment is less than or equal to
4030 // the stack alignment, ignore it. If the size is greater than or equal to
4031 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
4032 unsigned StackAlign =
4033 DAG.getSubtarget().getFrameLowering()->getStackAlignment();
4034 if (Align <= StackAlign)
4035 Align = 0;
4036
4037 // Round the size of the allocation up to the stack alignment size
4038 // by add SA-1 to the size. This doesn't overflow because we're computing
4039 // an address inside an alloca.
4040 SDNodeFlags Flags;
4041 Flags.setNoUnsignedWrap(true);
4042 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4043 DAG.getConstant(StackAlign - 1, dl, IntPtr), Flags);
4044
4045 // Mask out the low bits for alignment purposes.
4046 AllocSize =
4047 DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4048 DAG.getConstant(~(uint64_t)(StackAlign - 1), dl, IntPtr));
4049
4050 SDValue Ops[] = {getRoot(), AllocSize, DAG.getConstant(Align, dl, IntPtr)};
4051 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4052 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4053 setValue(&I, DSA);
4054 DAG.setRoot(DSA.getValue(1));
4055
4056 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects())((FuncInfo.MF->getFrameInfo().hasVarSizedObjects()) ? static_cast
<void> (0) : __assert_fail ("FuncInfo.MF->getFrameInfo().hasVarSizedObjects()"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4056, __PRETTY_FUNCTION__))
;
4057}
4058
4059void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4060 if (I.isAtomic())
4061 return visitAtomicLoad(I);
4062
4063 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4064 const Value *SV = I.getOperand(0);
4065 if (TLI.supportSwiftError()) {
4066 // Swifterror values can come from either a function parameter with
4067 // swifterror attribute or an alloca with swifterror attribute.
4068 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4069 if (Arg->hasSwiftErrorAttr())
4070 return visitLoadFromSwiftError(I);
4071 }
4072
4073 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4074 if (Alloca->isSwiftError())
4075 return visitLoadFromSwiftError(I);
4076 }
4077 }
4078
4079 SDValue Ptr = getValue(SV);
4080
4081 Type *Ty = I.getType();
4082 unsigned Alignment = I.getAlignment();
4083
4084 AAMDNodes AAInfo;
4085 I.getAAMetadata(AAInfo);
4086 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4087
4088 SmallVector<EVT, 4> ValueVTs, MemVTs;
4089 SmallVector<uint64_t, 4> Offsets;
4090 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
4091 unsigned NumValues = ValueVTs.size();
4092 if (NumValues == 0)
4093 return;
4094
4095 bool isVolatile = I.isVolatile();
4096
4097 SDValue Root;
4098 bool ConstantMemory = false;
4099 if (isVolatile)
4100 // Serialize volatile loads with other side effects.
4101 Root = getRoot();
4102 else if (NumValues > MaxParallelChains)
4103 Root = getMemoryRoot();
4104 else if (AA &&
4105 AA->pointsToConstantMemory(MemoryLocation(
4106 SV,
4107 LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4108 AAInfo))) {
4109 // Do not serialize (non-volatile) loads of constant memory with anything.
4110 Root = DAG.getEntryNode();
4111 ConstantMemory = true;
4112 } else {
4113 // Do not serialize non-volatile loads against each other.
4114 Root = DAG.getRoot();
4115 }
4116
4117 SDLoc dl = getCurSDLoc();
4118
4119 if (isVolatile)
4120 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4121
4122 // An aggregate load cannot wrap around the address space, so offsets to its
4123 // parts don't wrap either.
4124 SDNodeFlags Flags;
4125 Flags.setNoUnsignedWrap(true);
4126
4127 SmallVector<SDValue, 4> Values(NumValues);
4128 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4129 EVT PtrVT = Ptr.getValueType();
4130
4131 MachineMemOperand::Flags MMOFlags
4132 = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4133
4134 unsigned ChainI = 0;
4135 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4136 // Serializing loads here may result in excessive register pressure, and
4137 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4138 // could recover a bit by hoisting nodes upward in the chain by recognizing
4139 // they are side-effect free or do not alias. The optimizer should really
4140 // avoid this case by converting large object/array copies to llvm.memcpy
4141 // (MaxParallelChains should always remain as failsafe).
4142 if (ChainI == MaxParallelChains) {
4143 assert(PendingLoads.empty() && "PendingLoads must be serialized first")((PendingLoads.empty() && "PendingLoads must be serialized first"
) ? static_cast<void> (0) : __assert_fail ("PendingLoads.empty() && \"PendingLoads must be serialized first\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4143, __PRETTY_FUNCTION__))
;
4144 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4145 makeArrayRef(Chains.data(), ChainI));
4146 Root = Chain;
4147 ChainI = 0;
4148 }
4149 SDValue A = DAG.getNode(ISD::ADD, dl,
4150 PtrVT, Ptr,
4151 DAG.getConstant(Offsets[i], dl, PtrVT),
4152 Flags);
4153
4154 SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A,
4155 MachinePointerInfo(SV, Offsets[i]), Alignment,
4156 MMOFlags, AAInfo, Ranges);
4157 Chains[ChainI] = L.getValue(1);
4158
4159 if (MemVTs[i] != ValueVTs[i])
4160 L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]);
4161
4162 Values[i] = L;
4163 }
4164
4165 if (!ConstantMemory) {
4166 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4167 makeArrayRef(Chains.data(), ChainI));
4168 if (isVolatile)
4169 DAG.setRoot(Chain);
4170 else
4171 PendingLoads.push_back(Chain);
4172 }
4173
4174 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4175 DAG.getVTList(ValueVTs), Values));
4176}
4177
4178void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4179 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&((DAG.getTargetLoweringInfo().supportSwiftError() && "call visitStoreToSwiftError when backend supports swifterror"
) ? static_cast<void> (0) : __assert_fail ("DAG.getTargetLoweringInfo().supportSwiftError() && \"call visitStoreToSwiftError when backend supports swifterror\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4180, __PRETTY_FUNCTION__))
4180 "call visitStoreToSwiftError when backend supports swifterror")((DAG.getTargetLoweringInfo().supportSwiftError() && "call visitStoreToSwiftError when backend supports swifterror"
) ? static_cast<void> (0) : __assert_fail ("DAG.getTargetLoweringInfo().supportSwiftError() && \"call visitStoreToSwiftError when backend supports swifterror\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4180, __PRETTY_FUNCTION__))
;
4181
4182 SmallVector<EVT, 4> ValueVTs;
4183 SmallVector<uint64_t, 4> Offsets;
4184 const Value *SrcV = I.getOperand(0);
4185 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4186 SrcV->getType(), ValueVTs, &Offsets);
4187 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&((ValueVTs.size() == 1 && Offsets[0] == 0 && "expect a single EVT for swifterror"
) ? static_cast<void> (0) : __assert_fail ("ValueVTs.size() == 1 && Offsets[0] == 0 && \"expect a single EVT for swifterror\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4188, __PRETTY_FUNCTION__))
4188 "expect a single EVT for swifterror")((ValueVTs.size() == 1 && Offsets[0] == 0 && "expect a single EVT for swifterror"
) ? static_cast<void> (0) : __assert_fail ("ValueVTs.size() == 1 && Offsets[0] == 0 && \"expect a single EVT for swifterror\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4188, __PRETTY_FUNCTION__))
;
4189
4190 SDValue Src = getValue(SrcV);
4191 // Create a virtual register, then update the virtual register.
4192 Register VReg =
4193 SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4194 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4195 // Chain can be getRoot or getControlRoot.
4196 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4197 SDValue(Src.getNode(), Src.getResNo()));
4198 DAG.setRoot(CopyNode);
4199}
4200
4201void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4202 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&((DAG.getTargetLoweringInfo().supportSwiftError() && "call visitLoadFromSwiftError when backend supports swifterror"
) ? static_cast<void> (0) : __assert_fail ("DAG.getTargetLoweringInfo().supportSwiftError() && \"call visitLoadFromSwiftError when backend supports swifterror\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4203, __PRETTY_FUNCTION__))
4203 "call visitLoadFromSwiftError when backend supports swifterror")((DAG.getTargetLoweringInfo().supportSwiftError() && "call visitLoadFromSwiftError when backend supports swifterror"
) ? static_cast<void> (0) : __assert_fail ("DAG.getTargetLoweringInfo().supportSwiftError() && \"call visitLoadFromSwiftError when backend supports swifterror\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4203, __PRETTY_FUNCTION__))
;
4204
4205 assert(!I.isVolatile() &&((!I.isVolatile() && !I.hasMetadata(LLVMContext::MD_nontemporal
) && !I.hasMetadata(LLVMContext::MD_invariant_load) &&
"Support volatile, non temporal, invariant for load_from_swift_error"
) ? static_cast<void> (0) : __assert_fail ("!I.isVolatile() && !I.hasMetadata(LLVMContext::MD_nontemporal) && !I.hasMetadata(LLVMContext::MD_invariant_load) && \"Support volatile, non temporal, invariant for load_from_swift_error\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4208, __PRETTY_FUNCTION__))
4206 !I.hasMetadata(LLVMContext::MD_nontemporal) &&((!I.isVolatile() && !I.hasMetadata(LLVMContext::MD_nontemporal
) && !I.hasMetadata(LLVMContext::MD_invariant_load) &&
"Support volatile, non temporal, invariant for load_from_swift_error"
) ? static_cast<void> (0) : __assert_fail ("!I.isVolatile() && !I.hasMetadata(LLVMContext::MD_nontemporal) && !I.hasMetadata(LLVMContext::MD_invariant_load) && \"Support volatile, non temporal, invariant for load_from_swift_error\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4208, __PRETTY_FUNCTION__))
4207 !I.hasMetadata(LLVMContext::MD_invariant_load) &&((!I.isVolatile() && !I.hasMetadata(LLVMContext::MD_nontemporal
) && !I.hasMetadata(LLVMContext::MD_invariant_load) &&
"Support volatile, non temporal, invariant for load_from_swift_error"
) ? static_cast<void> (0) : __assert_fail ("!I.isVolatile() && !I.hasMetadata(LLVMContext::MD_nontemporal) && !I.hasMetadata(LLVMContext::MD_invariant_load) && \"Support volatile, non temporal, invariant for load_from_swift_error\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4208, __PRETTY_FUNCTION__))
4208 "Support volatile, non temporal, invariant for load_from_swift_error")((!I.isVolatile() && !I.hasMetadata(LLVMContext::MD_nontemporal
) && !I.hasMetadata(LLVMContext::MD_invariant_load) &&
"Support volatile, non temporal, invariant for load_from_swift_error"
) ? static_cast<void> (0) : __assert_fail ("!I.isVolatile() && !I.hasMetadata(LLVMContext::MD_nontemporal) && !I.hasMetadata(LLVMContext::MD_invariant_load) && \"Support volatile, non temporal, invariant for load_from_swift_error\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4208, __PRETTY_FUNCTION__))
;
4209
4210 const Value *SV = I.getOperand(0);
4211 Type *Ty = I.getType();
4212 AAMDNodes AAInfo;
4213 I.getAAMetadata(AAInfo);
4214 assert((((!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize
::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))
) && "load_from_swift_error should not be constant memory"
) ? static_cast<void> (0) : __assert_fail ("(!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))) && \"load_from_swift_error should not be constant memory\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4219, __PRETTY_FUNCTION__))
4215 (!AA ||(((!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize
::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))
) && "load_from_swift_error should not be constant memory"
) ? static_cast<void> (0) : __assert_fail ("(!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))) && \"load_from_swift_error should not be constant memory\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4219, __PRETTY_FUNCTION__))
4216 !AA->pointsToConstantMemory(MemoryLocation((((!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize
::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))
) && "load_from_swift_error should not be constant memory"
) ? static_cast<void> (0) : __assert_fail ("(!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))) && \"load_from_swift_error should not be constant memory\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4219, __PRETTY_FUNCTION__))
4217 SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),(((!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize
::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))
) && "load_from_swift_error should not be constant memory"
) ? static_cast<void> (0) : __assert_fail ("(!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))) && \"load_from_swift_error should not be constant memory\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4219, __PRETTY_FUNCTION__))
4218 AAInfo))) &&(((!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize
::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))
) && "load_from_swift_error should not be constant memory"
) ? static_cast<void> (0) : __assert_fail ("(!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))) && \"load_from_swift_error should not be constant memory\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4219, __PRETTY_FUNCTION__))
4219 "load_from_swift_error should not be constant memory")(((!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize
::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))
) && "load_from_swift_error should not be constant memory"
) ? static_cast<void> (0) : __assert_fail ("(!AA || !AA->pointsToConstantMemory(MemoryLocation( SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), AAInfo))) && \"load_from_swift_error should not be constant memory\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4219, __PRETTY_FUNCTION__))
;
4220
4221 SmallVector<EVT, 4> ValueVTs;
4222 SmallVector<uint64_t, 4> Offsets;
4223 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4224 ValueVTs, &Offsets);
4225 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&((ValueVTs.size() == 1 && Offsets[0] == 0 && "expect a single EVT for swifterror"
) ? static_cast<void> (0) : __assert_fail ("ValueVTs.size() == 1 && Offsets[0] == 0 && \"expect a single EVT for swifterror\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4226, __PRETTY_FUNCTION__))
4226 "expect a single EVT for swifterror")((ValueVTs.size() == 1 && Offsets[0] == 0 && "expect a single EVT for swifterror"
) ? static_cast<void> (0) : __assert_fail ("ValueVTs.size() == 1 && Offsets[0] == 0 && \"expect a single EVT for swifterror\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4226, __PRETTY_FUNCTION__))
;
4227
4228 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4229 SDValue L = DAG.getCopyFromReg(
4230 getRoot(), getCurSDLoc(),
4231 SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4232
4233 setValue(&I, L);
4234}
4235
4236void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4237 if (I.isAtomic())
4238 return visitAtomicStore(I);
4239
4240 const Value *SrcV = I.getOperand(0);
4241 const Value *PtrV = I.getOperand(1);
4242
4243 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4244 if (TLI.supportSwiftError()) {
4245 // Swifterror values can come from either a function parameter with
4246 // swifterror attribute or an alloca with swifterror attribute.
4247 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4248 if (Arg->hasSwiftErrorAttr())
4249 return visitStoreToSwiftError(I);
4250 }
4251
4252 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4253 if (Alloca->isSwiftError())
4254 return visitStoreToSwiftError(I);
4255 }
4256 }
4257
4258 SmallVector<EVT, 4> ValueVTs, MemVTs;
4259 SmallVector<uint64_t, 4> Offsets;
4260 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4261 SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4262 unsigned NumValues = ValueVTs.size();
4263 if (NumValues == 0)
4264 return;
4265
4266 // Get the lowered operands. Note that we do this after
4267 // checking if NumResults is zero, because with zero results
4268 // the operands won't have values in the map.
4269 SDValue Src = getValue(SrcV);
4270 SDValue Ptr = getValue(PtrV);
4271
4272 SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4273 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4274 SDLoc dl = getCurSDLoc();
4275 unsigned Alignment = I.getAlignment();
4276 AAMDNodes AAInfo;
4277 I.getAAMetadata(AAInfo);
4278
4279 auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4280
4281 // An aggregate load cannot wrap around the address space, so offsets to its
4282 // parts don't wrap either.
4283 SDNodeFlags Flags;
4284 Flags.setNoUnsignedWrap(true);
4285
4286 unsigned ChainI = 0;
4287 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4288 // See visitLoad comments.
4289 if (ChainI == MaxParallelChains) {
4290 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4291 makeArrayRef(Chains.data(), ChainI));
4292 Root = Chain;
4293 ChainI = 0;
4294 }
4295 SDValue Add = DAG.getMemBasePlusOffset(Ptr, Offsets[i], dl, Flags);
4296 SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4297 if (MemVTs[i] != ValueVTs[i])
4298 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4299 SDValue St =
4300 DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]),
4301 Alignment, MMOFlags, AAInfo);
4302 Chains[ChainI] = St;
4303 }
4304
4305 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4306 makeArrayRef(Chains.data(), ChainI));
4307 DAG.setRoot(StoreNode);
4308}
4309
4310void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4311 bool IsCompressing) {
4312 SDLoc sdl = getCurSDLoc();
4313
4314 auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4315 unsigned& Alignment) {
4316 // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4317 Src0 = I.getArgOperand(0);
4318 Ptr = I.getArgOperand(1);
4319 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
4320 Mask = I.getArgOperand(3);
4321 };
4322 auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4323 unsigned& Alignment) {
4324 // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4325 Src0 = I.getArgOperand(0);
4326 Ptr = I.getArgOperand(1);
4327 Mask = I.getArgOperand(2);
4328 Alignment = 0;
4329 };
4330
4331 Value *PtrOperand, *MaskOperand, *Src0Operand;
4332 unsigned Alignment;
4333 if (IsCompressing)
4334 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4335 else
4336 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4337
4338 SDValue Ptr = getValue(PtrOperand);
4339 SDValue Src0 = getValue(Src0Operand);
4340 SDValue Mask = getValue(MaskOperand);
4341 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4342
4343 EVT VT = Src0.getValueType();
4344 if (!Alignment)
4345 Alignment = DAG.getEVTAlignment(VT);
4346
4347 AAMDNodes AAInfo;
4348 I.getAAMetadata(AAInfo);
4349
4350 MachineMemOperand *MMO =
4351 DAG.getMachineFunction().
4352 getMachineMemOperand(MachinePointerInfo(PtrOperand),
4353 MachineMemOperand::MOStore,
4354 // TODO: Make MachineMemOperands aware of scalable
4355 // vectors.
4356 VT.getStoreSize().getKnownMinSize(),
4357 Alignment, AAInfo);
4358 SDValue StoreNode =
4359 DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4360 ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4361 DAG.setRoot(StoreNode);
4362 setValue(&I, StoreNode);
4363}
4364
4365// Get a uniform base for the Gather/Scatter intrinsic.
4366// The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4367// We try to represent it as a base pointer + vector of indices.
4368// Usually, the vector of pointers comes from a 'getelementptr' instruction.
4369// The first operand of the GEP may be a single pointer or a vector of pointers
4370// Example:
4371// %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4372// or
4373// %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
4374// %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4375//
4376// When the first GEP operand is a single pointer - it is the uniform base we
4377// are looking for. If first operand of the GEP is a splat vector - we
4378// extract the splat value and use it as a uniform base.
4379// In all other cases the function returns 'false'.
4380static bool getUniformBase(const Value *&Ptr, SDValue &Base, SDValue &Index,
4381 ISD::MemIndexType &IndexType, SDValue &Scale,
4382 SelectionDAGBuilder *SDB) {
4383 SelectionDAG& DAG = SDB->DAG;
4384 LLVMContext &Context = *DAG.getContext();
4385
4386 assert(Ptr->getType()->isVectorTy() && "Uexpected pointer type")((Ptr->getType()->isVectorTy() && "Uexpected pointer type"
) ? static_cast<void> (0) : __assert_fail ("Ptr->getType()->isVectorTy() && \"Uexpected pointer type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4386, __PRETTY_FUNCTION__))
;
4387 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4388 if (!GEP)
4389 return false;
4390
4391 const Value *GEPPtr = GEP->getPointerOperand();
4392 if (!GEPPtr->getType()->isVectorTy())
4393 Ptr = GEPPtr;
4394 else if (!(Ptr = getSplatValue(GEPPtr)))
4395 return false;
4396
4397 unsigned FinalIndex = GEP->getNumOperands() - 1;
4398 Value *IndexVal = GEP->getOperand(FinalIndex);
4399 gep_type_iterator GTI = gep_type_begin(*GEP);
4400
4401 // Ensure all the other indices are 0.
4402 for (unsigned i = 1; i < FinalIndex; ++i, ++GTI) {
4403 auto *C = dyn_cast<Constant>(GEP->getOperand(i));
4404 if (!C)
4405 return false;
4406 if (isa<VectorType>(C->getType()))
4407 C = C->getSplatValue();
4408 auto *CI = dyn_cast_or_null<ConstantInt>(C);
4409 if (!CI || !CI->isZero())
4410 return false;
4411 }
4412
4413 // The operands of the GEP may be defined in another basic block.
4414 // In this case we'll not find nodes for the operands.
4415 if (!SDB->findValue(Ptr))
4416 return false;
4417 Constant *C = dyn_cast<Constant>(IndexVal);
4418 if (!C && !SDB->findValue(IndexVal))
4419 return false;
4420
4421 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4422 const DataLayout &DL = DAG.getDataLayout();
4423 StructType *STy = GTI.getStructTypeOrNull();
4424
4425 if (STy) {
4426 const StructLayout *SL = DL.getStructLayout(STy);
4427 if (isa<VectorType>(C->getType())) {
4428 C = C->getSplatValue();
4429 // FIXME: If getSplatValue may return nullptr for a structure?
4430 // If not, the following check can be removed.
4431 if (!C)
4432 return false;
4433 }
4434 auto *CI = cast<ConstantInt>(C);
4435 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4436 Index = DAG.getConstant(SL->getElementOffset(CI->getZExtValue()),
4437 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4438 } else {
4439 Scale = DAG.getTargetConstant(
4440 DL.getTypeAllocSize(GEP->getResultElementType()),
4441 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4442 Index = SDB->getValue(IndexVal);
4443 }
4444 Base = SDB->getValue(Ptr);
4445 IndexType = ISD::SIGNED_SCALED;
4446
4447 if (STy || !Index.getValueType().isVector()) {
4448 unsigned GEPWidth = GEP->getType()->getVectorNumElements();
4449 EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
4450 Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
4451 }
4452 return true;
4453}
4454
4455void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4456 SDLoc sdl = getCurSDLoc();
4457
4458 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4459 const Value *Ptr = I.getArgOperand(1);
4460 SDValue Src0 = getValue(I.getArgOperand(0));
4461 SDValue Mask = getValue(I.getArgOperand(3));
4462 EVT VT = Src0.getValueType();
4463 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
4464 if (!Alignment)
4465 Alignment = DAG.getEVTAlignment(VT);
4466 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4467
4468 AAMDNodes AAInfo;
4469 I.getAAMetadata(AAInfo);
4470
4471 SDValue Base;
4472 SDValue Index;
4473 ISD::MemIndexType IndexType;
4474 SDValue Scale;
4475 const Value *BasePtr = Ptr;
4476 bool UniformBase = getUniformBase(BasePtr, Base, Index, IndexType, Scale,
4477 this);
4478
4479 const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
4480 MachineMemOperand *MMO = DAG.getMachineFunction().
4481 getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
4482 MachineMemOperand::MOStore,
4483 // TODO: Make MachineMemOperands aware of scalable
4484 // vectors.
4485 VT.getStoreSize().getKnownMinSize(),
4486 Alignment, AAInfo);
4487 if (!UniformBase) {
4488 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4489 Index = getValue(Ptr);
4490 IndexType = ISD::SIGNED_SCALED;
4491 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4492 }
4493 SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4494 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4495 Ops, MMO, IndexType);
4496 DAG.setRoot(Scatter);
4497 setValue(&I, Scatter);
4498}
4499
// Lower @llvm.masked.load.* / @llvm.masked.expandload.* to a masked-load DAG
// node. The two intrinsics differ only in operand layout (expandload has no
// alignment operand), so a per-flavor lambda unpacks the call arguments.
4500void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4501 SDLoc sdl = getCurSDLoc();
4502
4503 auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4504 unsigned& Alignment) {
4505 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4506 Ptr = I.getArgOperand(0);
4507 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
4508 Mask = I.getArgOperand(2);
4509 Src0 = I.getArgOperand(3);
4510 };
4511 auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4512 unsigned& Alignment) {
4513 // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4514 Ptr = I.getArgOperand(0);
 // No alignment operand on expandload; 0 selects the EVT default below.
4515 Alignment = 0;
4516 Mask = I.getArgOperand(1);
4517 Src0 = I.getArgOperand(2);
4518 };
4519
4520 Value *PtrOperand, *MaskOperand, *Src0Operand;
4521 unsigned Alignment;
4522 if (IsExpanding)
4523 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4524 else
4525 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4526
4527 SDValue Ptr = getValue(PtrOperand);
4528 SDValue Src0 = getValue(Src0Operand);
4529 SDValue Mask = getValue(MaskOperand);
 // Unindexed load: the offset operand is undef by construction.
4530 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4531
4532 EVT VT = Src0.getValueType();
4533 if (!Alignment)
4534 Alignment = DAG.getEVTAlignment(VT);
4535
4536 AAMDNodes AAInfo;
4537 I.getAAMetadata(AAInfo);
4538 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4539
4540 // Do not serialize masked loads of constant memory with anything.
 // Scalable vectors get an unsized MemoryLocation because the store size
 // is not a compile-time constant.
4541 MemoryLocation ML;
4542 if (VT.isScalableVector())
4543 ML = MemoryLocation(PtrOperand);
4544 else
4545 ML = MemoryLocation(PtrOperand, LocationSize::precise(
4546 DAG.getDataLayout().getTypeStoreSize(I.getType())),
4547 AAInfo);
4548 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4549
 // Loads of constant memory hang off the entry node so they do not order
 // against other memory operations.
4550 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4551
4552 MachineMemOperand *MMO =
4553 DAG.getMachineFunction().
4554 getMachineMemOperand(MachinePointerInfo(PtrOperand),
4555 MachineMemOperand::MOLoad,
4556 // TODO: Make MachineMemOperands aware of scalable
4557 // vectors.
4558 VT.getStoreSize().getKnownMinSize(),
4559 Alignment, AAInfo, Ranges);
4560
4561 SDValue Load =
4562 DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4563 ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4564 if (AddToChain)
 // Result value 1 is the output chain; defer root update via PendingLoads.
4565 PendingLoads.push_back(Load.getValue(1));
4566 setValue(&I, Load);
4567}
4568
// Lower @llvm.masked.gather.* to a masked-gather DAG node. When the vector
// of pointers decomposes into a uniform base plus a vector index
// (getUniformBase), that split is used directly; otherwise the pointer
// vector itself becomes the index with a zero base and unit scale.
4569void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4570 SDLoc sdl = getCurSDLoc();
4571
4572 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4573 const Value *Ptr = I.getArgOperand(0);
4574 SDValue Src0 = getValue(I.getArgOperand(3));
4575 SDValue Mask = getValue(I.getArgOperand(2));
4576
4577 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4578 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4579 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
4580 if (!Alignment)
4581 Alignment = DAG.getEVTAlignment(VT);
4582
4583 AAMDNodes AAInfo;
4584 I.getAAMetadata(AAInfo);
4585 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4586
4587 SDValue Root = DAG.getRoot();
 // NOTE(review): Base/Index/Scale are only set by getUniformBase on
 // success; the !UniformBase branch below re-initializes them. The static
 // analyzer reports a possible null-SDNode dereference along this path —
 // confirm getUniformBase's contract covers all exits.
4588 SDValue Base;
4589 SDValue Index;
4590 ISD::MemIndexType IndexType;
4591 SDValue Scale;
4592 const Value *BasePtr = Ptr;
4593 bool UniformBase = getUniformBase(BasePtr, Base, Index, IndexType, Scale,
4594 this);
4595 bool ConstantMemory = false;
4596 if (UniformBase && AA &&
4597 AA->pointsToConstantMemory(
4598 MemoryLocation(BasePtr,
4599 LocationSize::precise(
4600 DAG.getDataLayout().getTypeStoreSize(I.getType())),
4601 AAInfo))) {
4602 // Do not serialize (non-volatile) loads of constant memory with anything.
4603 Root = DAG.getEntryNode();
4604 ConstantMemory = true;
4605 }
4606
 // Without a uniform base there is no single IR value to attribute the
 // access to, so the pointer info is left null.
4607 MachineMemOperand *MMO =
4608 DAG.getMachineFunction().
4609 getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
4610 MachineMemOperand::MOLoad,
4611 // TODO: Make MachineMemOperands aware of scalable
4612 // vectors.
4613 VT.getStoreSize().getKnownMinSize(),
4614 Alignment, AAInfo, Ranges);
4615
4616 if (!UniformBase) {
4617 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4618 Index = getValue(Ptr);
4619 IndexType = ISD::SIGNED_SCALED;
4620 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4621 }
4622 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4623 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4624 Ops, MMO, IndexType);
4625
4626 SDValue OutChain = Gather.getValue(1);
4627 if (!ConstantMemory)
4628 PendingLoads.push_back(OutChain);
4629 setValue(&I, Gather);
4630}
4631
// Lower an IR cmpxchg instruction to ATOMIC_CMP_SWAP_WITH_SUCCESS, which
// produces three results: the loaded value, an i1 success flag, and the
// output chain.
4632void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4633 SDLoc dl = getCurSDLoc();
4634 AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4635 AtomicOrdering FailureOrdering = I.getFailureOrdering();
4636 SyncScope::ID SSID = I.getSyncScopeID();
4637
4638 SDValue InChain = getRoot();
4639
 // The in-memory type is taken from the compare operand's lowered type.
4640 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4641 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4642
4643 auto Alignment = DAG.getEVTAlignment(MemVT);
4644 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4645 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4646
 // Both orderings are recorded on the memory operand so later legalization
 // can pick the correct fences.
4647 MachineFunction &MF = DAG.getMachineFunction();
4648 MachineMemOperand *MMO =
4649 MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
4650 Flags, MemVT.getStoreSize(), Alignment,
4651 AAMDNodes(), nullptr, SSID, SuccessOrdering,
4652 FailureOrdering);
4653
4654 SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4655 dl, MemVT, VTs, InChain,
4656 getValue(I.getPointerOperand()),
4657 getValue(I.getCompareOperand()),
4658 getValue(I.getNewValOperand()), MMO);
4659
 // Result 2 is the chain; atomics always serialize, so it becomes the root.
4660 SDValue OutChain = L.getValue(2);
4661
4662 setValue(&I, L);
4663 DAG.setRoot(OutChain);
4664}
4665
// Lower an IR atomicrmw instruction: map the RMW operation to the matching
// ISD atomic opcode, then emit a chained atomic node and make its output
// chain the new DAG root.
4666void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4667 SDLoc dl = getCurSDLoc();
4668 ISD::NodeType NT;
4669 switch (I.getOperation()) {
4670 default: llvm_unreachable("Unknown atomicrmw operation")::llvm::llvm_unreachable_internal("Unknown atomicrmw operation"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4670)
;
4671 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4672 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
4673 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
4674 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
4675 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4676 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
4677 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
4678 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
4679 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
4680 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4681 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4682 case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4683 case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4684 }
4685 AtomicOrdering Ordering = I.getOrdering();
4686 SyncScope::ID SSID = I.getSyncScopeID();
4687
4688 SDValue InChain = getRoot();
4689
 // Memory type comes from the lowered type of the value operand.
4690 auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4691 auto Alignment = DAG.getEVTAlignment(MemVT);
4692 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4693 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4694
4695 MachineFunction &MF = DAG.getMachineFunction();
4696 MachineMemOperand *MMO =
4697 MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
4698 MemVT.getStoreSize(), Alignment, AAMDNodes(),
4699 nullptr, SSID, Ordering);
4700
4701 SDValue L =
4702 DAG.getAtomic(NT, dl, MemVT, InChain,
4703 getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4704 MMO);
4705
 // Result 1 is the chain; atomics always serialize, so it becomes the root.
4706 SDValue OutChain = L.getValue(1);
4707
4708 setValue(&I, L);
4709 DAG.setRoot(OutChain);
4710}
4711
4712void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4713 SDLoc dl = getCurSDLoc();
4714 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4715 SDValue Ops[3];
4716 Ops[0] = getRoot();
4717 Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4718 TLI.getFenceOperandTy(DAG.getDataLayout()));
4719 Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4720 TLI.getFenceOperandTy(DAG.getDataLayout()));
4721 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4722}
4723
// Lower an IR atomic load. Depending on the target this either emits an
// ordinary load SDNode (lowerAtomicLoadAsLoadSDNode) or a dedicated
// ATOMIC_LOAD node; in both cases the chain handling honors the ordering.
4724void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4725 SDLoc dl = getCurSDLoc();
4726 AtomicOrdering Order = I.getOrdering();
4727 SyncScope::ID SSID = I.getSyncScopeID();
4728
4729 SDValue InChain = getRoot();
4730
4731 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4732 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4733 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4734
 // Misaligned atomic loads are only legal when the target explicitly
 // supports unaligned atomics.
4735 if (!TLI.supportsUnalignedAtomics() &&
4736 I.getAlignment() < MemVT.getSizeInBits() / 8)
4737 report_fatal_error("Cannot generate unaligned atomic load");
4738
4739 auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4740
4741 MachineMemOperand *MMO =
4742 DAG.getMachineFunction().
4743 getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
4744 Flags, MemVT.getStoreSize(),
4745 I.getAlignment() ? I.getAlignment() :
4746 DAG.getEVTAlignment(MemVT),
4747 AAMDNodes(), nullptr, SSID, Order);
4748
4749 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4750
4751 SDValue Ptr = getValue(I.getPointerOperand());
4752
4753 if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
4754 // TODO: Once this is better exercised by tests, it should be merged with
4755 // the normal path for loads to prevent future divergence.
4756 SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
4757 if (MemVT != VT)
4758 L = DAG.getPtrExtOrTrunc(L, dl, VT);
4759
4760 setValue(&I, L);
4761 SDValue OutChain = L.getValue(1);
 // Unordered atomic loads may float like ordinary loads (PendingLoads);
 // anything stronger must serialize by becoming the root.
4762 if (!I.isUnordered())
4763 DAG.setRoot(OutChain);
4764 else
4765 PendingLoads.push_back(OutChain);
4766 return;
4767 }
4768
4769 SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4770 Ptr, MMO);
4771
4772 SDValue OutChain = L.getValue(1);
4773 if (MemVT != VT)
4774 L = DAG.getPtrExtOrTrunc(L, dl, VT);
4775
4776 setValue(&I, L);
4777 DAG.setRoot(OutChain);
4778}
4779
4780void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4781 SDLoc dl = getCurSDLoc();
4782
4783 AtomicOrdering Ordering = I.getOrdering();
4784 SyncScope::ID SSID = I.getSyncScopeID();
4785
4786 SDValue InChain = getRoot();
4787
4788 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4789 EVT MemVT =
4790 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4791
4792 if (I.getAlignment() < MemVT.getSizeInBits() / 8)
4793 report_fatal_error("Cannot generate unaligned atomic store");
4794
4795 auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4796
4797 MachineFunction &MF = DAG.getMachineFunction();
4798 MachineMemOperand *MMO =
4799 MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
4800 MemVT.getStoreSize(), I.getAlignment(), AAMDNodes(),
4801 nullptr, SSID, Ordering);
4802
4803 SDValue Val = getValue(I.getValueOperand());
4804 if (Val.getValueType() != MemVT)
4805 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4806 SDValue Ptr = getValue(I.getPointerOperand());
4807
4808 if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
4809 // TODO: Once this is better exercised by tests, it should be merged with
4810 // the normal path for stores to prevent future divergence.
4811 SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
4812 DAG.setRoot(S);
4813 return;
4814 }
4815 SDValue OutChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain,
4816 Ptr, Val, MMO);
4817
4818
4819 DAG.setRoot(OutChain);
4820}
4821
4822/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4823/// node.
4824void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4825 unsigned Intrinsic) {
4826 // Ignore the callsite's attributes. A specific call site may be marked with
4827 // readnone, but the lowering code will expect the chain based on the
4828 // definition.
4829 const Function *F = I.getCalledFunction();
4830 bool HasChain = !F->doesNotAccessMemory();
4831 bool OnlyLoad = HasChain && F->onlyReadsMemory();
4832
4833 // Build the operand list.
4834 SmallVector<SDValue, 8> Ops;
4835 if (HasChain) { // If this intrinsic has side-effects, chainify it.
4836 if (OnlyLoad) {
4837 // We don't need to serialize loads against other loads.
4838 Ops.push_back(DAG.getRoot());
4839 } else {
4840 Ops.push_back(getRoot());
4841 }
4842 }
4843
4844 // Info is set by getTgtMemInstrinsic
4845 TargetLowering::IntrinsicInfo Info;
4846 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4847 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4848 DAG.getMachineFunction(),
4849 Intrinsic);
4850
4851 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4852 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4853 Info.opc == ISD::INTRINSIC_W_CHAIN)
4854 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4855 TLI.getPointerTy(DAG.getDataLayout())));
4856
4857 // Add all operands of the call to the operand list.
4858 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4859 const Value *Arg = I.getArgOperand(i);
4860 if (!I.paramHasAttr(i, Attribute::ImmArg)) {
4861 Ops.push_back(getValue(Arg));
4862 continue;
4863 }
4864
4865 // Use TargetConstant instead of a regular constant for immarg.
4866 EVT VT = TLI.getValueType(*DL, Arg->getType(), true);
4867 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
4868 assert(CI->getBitWidth() <= 64 &&((CI->getBitWidth() <= 64 && "large intrinsic immediates not handled"
) ? static_cast<void> (0) : __assert_fail ("CI->getBitWidth() <= 64 && \"large intrinsic immediates not handled\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4869, __PRETTY_FUNCTION__))
4869 "large intrinsic immediates not handled")((CI->getBitWidth() <= 64 && "large intrinsic immediates not handled"
) ? static_cast<void> (0) : __assert_fail ("CI->getBitWidth() <= 64 && \"large intrinsic immediates not handled\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 4869, __PRETTY_FUNCTION__))
;
4870 Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
4871 } else {
 // Non-integer immarg must be a ConstantFP at this point.
4872 Ops.push_back(
4873 DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
4874 }
4875 }
4876
4877 SmallVector<EVT, 4> ValueVTs;
4878 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4879
 // Chained intrinsics carry MVT::Other as their trailing result type.
4880 if (HasChain)
4881 ValueVTs.push_back(MVT::Other);
4882
4883 SDVTList VTs = DAG.getVTList(ValueVTs);
4884
4885 // Create the node.
4886 SDValue Result;
4887 if (IsTgtIntrinsic) {
4888 // This is target intrinsic that touches memory
4889 AAMDNodes AAInfo;
4890 I.getAAMetadata(AAInfo);
4891 Result = DAG.getMemIntrinsicNode(
4892 Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
4893 MachinePointerInfo(Info.ptrVal, Info.offset),
4894 Info.align ? Info.align->value() : 0, Info.flags, Info.size, AAInfo);
4895 } else if (!HasChain) {
4896 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4897 } else if (!I.getType()->isVoidTy()) {
4898 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4899 } else {
4900 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4901 }
4902
4903 if (HasChain) {
 // The chain is always the last result; route it through PendingLoads for
 // read-only intrinsics, otherwise make it the new root.
4904 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4905 if (OnlyLoad)
4906 PendingLoads.push_back(Chain);
4907 else
4908 DAG.setRoot(Chain);
4909 }
4910
4911 if (!I.getType()->isVoidTy()) {
 // Vector results may need a bitcast from the ABI type back to the IR type.
4912 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4913 EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4914 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4915 } else
4916 Result = lowerRangeToAssertZExt(DAG, I, Result);
4917
4918 setValue(&I, Result);
4919 }
4920}
4921
4922/// GetSignificand - Get the significand and build it into a floating-point
4923/// number with exponent of 1:
4924///
4925/// Op = (Op & 0x007fffff) | 0x3f800000;
4926///
4927/// where Op is the hexadecimal representation of floating point value.
4928static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4929 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4930 DAG.getConstant(0x007fffff, dl, MVT::i32));
4931 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4932 DAG.getConstant(0x3f800000, dl, MVT::i32));
4933 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4934}
4935
4936/// GetExponent - Get the exponent:
4937///
4938/// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4939///
4940/// where Op is the hexadecimal representation of floating point value.
4941static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4942 const TargetLowering &TLI, const SDLoc &dl) {
4943 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4944 DAG.getConstant(0x7f800000, dl, MVT::i32));
4945 SDValue t1 = DAG.getNode(
4946 ISD::SRL, dl, MVT::i32, t0,
4947 DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4948 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4949 DAG.getConstant(127, dl, MVT::i32));
4950 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4951}
4952
4953/// getF32Constant - Get 32-bit floating point constant.
4954static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4955 const SDLoc &dl) {
4956 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4957 MVT::f32);
4958}
4959
// Approximate 2^t0 in f32 under -limit-float-precision: split t0 into
// integer and fractional parts, approximate 2^frac with a minimax
// polynomial (evaluated in Horner form) chosen by the requested precision,
// then splice the integer part into the exponent field in the integer
// domain. The hex constants are the bit patterns of the polynomial
// coefficients (see getF32Constant).
4960static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4961 SelectionDAG &DAG) {
4962 // TODO: What fast-math-flags should be set on the floating-point nodes?
4963
4964 // IntegerPartOfX = ((int32_t)(t0);
4965 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4966
4967 // FractionalPartOfX = t0 - (float)IntegerPartOfX;
4968 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4969 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4970
4971 // IntegerPartOfX <<= 23;
 // Shift into the IEEE-754 exponent field for the final integer-domain add.
4972 IntegerPartOfX = DAG.getNode(
4973 ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4974 DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4975 DAG.getDataLayout())));
4976
4977 SDValue TwoToFractionalPartOfX;
4978 if (LimitFloatPrecision <= 6) {
4979 // For floating-point precision of 6:
4980 //
4981 // TwoToFractionalPartOfX =
4982 // 0.997535578f +
4983 // (0.735607626f + 0.252464424f * x) * x;
4984 //
4985 // error 0.0144103317, which is 6 bits
4986 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4987 getF32Constant(DAG, 0x3e814304, dl));
4988 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4989 getF32Constant(DAG, 0x3f3c50c8, dl));
4990 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4991 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4992 getF32Constant(DAG, 0x3f7f5e7e, dl));
4993 } else if (LimitFloatPrecision <= 12) {
4994 // For floating-point precision of 12:
4995 //
4996 // TwoToFractionalPartOfX =
4997 // 0.999892986f +
4998 // (0.696457318f +
4999 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
5000 //
5001 // error 0.000107046256, which is 13 to 14 bits
5002 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5003 getF32Constant(DAG, 0x3da235e3, dl));
5004 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5005 getF32Constant(DAG, 0x3e65b8f3, dl));
5006 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5007 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5008 getF32Constant(DAG, 0x3f324b07, dl));
5009 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5010 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5011 getF32Constant(DAG, 0x3f7ff8fd, dl));
5012 } else { // LimitFloatPrecision <= 18
5013 // For floating-point precision of 18:
5014 //
5015 // TwoToFractionalPartOfX =
5016 // 0.999999982f +
5017 // (0.693148872f +
5018 // (0.240227044f +
5019 // (0.554906021e-1f +
5020 // (0.961591928e-2f +
5021 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5022 // error 2.47208000*10^(-7), which is better than 18 bits
5023 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5024 getF32Constant(DAG, 0x3924b03e, dl));
5025 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5026 getF32Constant(DAG, 0x3ab24b87, dl));
5027 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5028 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5029 getF32Constant(DAG, 0x3c1d8c17, dl));
5030 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5031 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5032 getF32Constant(DAG, 0x3d634a1d, dl));
5033 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5034 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5035 getF32Constant(DAG, 0x3e75fe14, dl));
5036 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5037 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5038 getF32Constant(DAG, 0x3f317234, dl));
5039 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5040 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5041 getF32Constant(DAG, 0x3f800000, dl));
5042 }
5043
5044 // Add the exponent into the result in integer domain.
5045 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5046 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5047 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5048}
5049
5050/// expandExp - Lower an exp intrinsic. Handles the special sequences for
5051/// limited-precision mode.
// exp(x) = 2^(x * log2(e)); the limited-precision path reuses the shared
// exp2 approximation, everything else falls through to a generic FEXP node.
5052static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5053 const TargetLowering &TLI) {
 // The approximation only applies to f32 under -limit-float-precision.
5054 if (Op.getValueType() == MVT::f32 &&
5055 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5056
5057 // Put the exponent in the right bit position for later addition to the
5058 // final result:
5059 //
5060 // t0 = Op * log2(e)
5061
5062 // TODO: What fast-math-flags should be set here?
5063 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5064 DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5065 return getLimitedPrecisionExp2(t0, dl, DAG);
5066 }
5067
5068 // No special expansion.
5069 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
5070}
5071
5072/// expandLog - Lower a log intrinsic. Handles the special sequences for
5073/// limited-precision mode.
// ln(x) = exponent(x)*ln(2) + ln(significand(x)); the significand term is a
// precision-selected minimax polynomial in Horner form (hex constants are
// coefficient bit patterns — see getF32Constant). Non-f32 or unlimited
// precision falls through to a generic FLOG node.
5074static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5075 const TargetLowering &TLI) {
5076 // TODO: What fast-math-flags should be set on the floating-point nodes?
5077
5078 if (Op.getValueType() == MVT::f32 &&
5079 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
 // Work on the raw IEEE-754 bit pattern of the input.
5080 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5081
5082 // Scale the exponent by log(2).
5083 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5084 SDValue LogOfExponent =
5085 DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5086 DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5087
5088 // Get the significand and build it into a floating-point number with
5089 // exponent of 1.
5090 SDValue X = GetSignificand(DAG, Op1, dl);
5091
5092 SDValue LogOfMantissa;
5093 if (LimitFloatPrecision <= 6) {
5094 // For floating-point precision of 6:
5095 //
5096 // LogofMantissa =
5097 // -1.1609546f +
5098 // (1.4034025f - 0.23903021f * x) * x;
5099 //
5100 // error 0.0034276066, which is better than 8 bits
5101 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5102 getF32Constant(DAG, 0xbe74c456, dl));
5103 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5104 getF32Constant(DAG, 0x3fb3a2b1, dl));
5105 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5106 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5107 getF32Constant(DAG, 0x3f949a29, dl));
5108 } else if (LimitFloatPrecision <= 12) {
5109 // For floating-point precision of 12:
5110 //
5111 // LogOfMantissa =
5112 // -1.7417939f +
5113 // (2.8212026f +
5114 // (-1.4699568f +
5115 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5116 //
5117 // error 0.000061011436, which is 14 bits
5118 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5119 getF32Constant(DAG, 0xbd67b6d6, dl));
5120 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5121 getF32Constant(DAG, 0x3ee4f4b8, dl));
5122 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5123 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5124 getF32Constant(DAG, 0x3fbc278b, dl));
5125 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5126 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5127 getF32Constant(DAG, 0x40348e95, dl));
5128 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5129 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5130 getF32Constant(DAG, 0x3fdef31a, dl));
5131 } else { // LimitFloatPrecision <= 18
5132 // For floating-point precision of 18:
5133 //
5134 // LogOfMantissa =
5135 // -2.1072184f +
5136 // (4.2372794f +
5137 // (-3.7029485f +
5138 // (2.2781945f +
5139 // (-0.87823314f +
5140 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5141 //
5142 // error 0.0000023660568, which is better than 18 bits
5143 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5144 getF32Constant(DAG, 0xbc91e5ac, dl));
5145 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5146 getF32Constant(DAG, 0x3e4350aa, dl));
5147 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5148 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5149 getF32Constant(DAG, 0x3f60d3e3, dl));
5150 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5151 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5152 getF32Constant(DAG, 0x4011cdf0, dl));
5153 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5154 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5155 getF32Constant(DAG, 0x406cfd1c, dl));
5156 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5157 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5158 getF32Constant(DAG, 0x408797cb, dl));
5159 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5160 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5161 getF32Constant(DAG, 0x4006dcab, dl));
5162 }
5163
5164 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5165 }
5166
5167 // No special expansion.
5168 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
5169}
5170
5171/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5172/// limited-precision mode.
// log2(x) = exponent(x) + log2(significand(x)); structure mirrors expandLog
// except the exponent term needs no scaling. The significand term is a
// precision-selected minimax polynomial in Horner form (hex constants are
// coefficient bit patterns — see getF32Constant).
5173static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5174 const TargetLowering &TLI) {
5175 // TODO: What fast-math-flags should be set on the floating-point nodes?
5176
5177 if (Op.getValueType() == MVT::f32 &&
5178 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
 // Work on the raw IEEE-754 bit pattern of the input.
5179 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5180
5181 // Get the exponent.
5182 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5183
5184 // Get the significand and build it into a floating-point number with
5185 // exponent of 1.
5186 SDValue X = GetSignificand(DAG, Op1, dl);
5187
5188 // Different possible minimax approximations of significand in
5189 // floating-point for various degrees of accuracy over [1,2].
5190 SDValue Log2ofMantissa;
5191 if (LimitFloatPrecision <= 6) {
5192 // For floating-point precision of 6:
5193 //
5194 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5195 //
5196 // error 0.0049451742, which is more than 7 bits
5197 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5198 getF32Constant(DAG, 0xbeb08fe0, dl));
5199 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5200 getF32Constant(DAG, 0x40019463, dl));
5201 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5202 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5203 getF32Constant(DAG, 0x3fd6633d, dl));
5204 } else if (LimitFloatPrecision <= 12) {
5205 // For floating-point precision of 12:
5206 //
5207 // Log2ofMantissa =
5208 // -2.51285454f +
5209 // (4.07009056f +
5210 // (-2.12067489f +
5211 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5212 //
5213 // error 0.0000876136000, which is better than 13 bits
5214 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5215 getF32Constant(DAG, 0xbda7262e, dl));
5216 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5217 getF32Constant(DAG, 0x3f25280b, dl));
5218 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5219 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5220 getF32Constant(DAG, 0x4007b923, dl));
5221 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5222 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5223 getF32Constant(DAG, 0x40823e2f, dl));
5224 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5225 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5226 getF32Constant(DAG, 0x4020d29c, dl));
5227 } else { // LimitFloatPrecision <= 18
5228 // For floating-point precision of 18:
5229 //
5230 // Log2ofMantissa =
5231 // -3.0400495f +
5232 // (6.1129976f +
5233 // (-5.3420409f +
5234 // (3.2865683f +
5235 // (-1.2669343f +
5236 // (0.27515199f -
5237 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5238 //
5239 // error 0.0000018516, which is better than 18 bits
5240 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5241 getF32Constant(DAG, 0xbcd2769e, dl));
5242 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5243 getF32Constant(DAG, 0x3e8ce0b9, dl));
5244 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5245 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5246 getF32Constant(DAG, 0x3fa22ae7, dl));
5247 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5248 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5249 getF32Constant(DAG, 0x40525723, dl));
5250 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5251 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5252 getF32Constant(DAG, 0x40aaf200, dl));
5253 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5254 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5255 getF32Constant(DAG, 0x40c39dad, dl));
5256 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5257 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5258 getF32Constant(DAG, 0x4042902c, dl));
5259 }
5260
5261 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5262 }
5263
5264 // No special expansion.
5265 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
5266}
5267
/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                           const TargetLowering &TLI) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    // Reinterpret the f32 bits as i32 so the exponent and significand fields
    // can be peeled apart with integer arithmetic.
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Scale the exponent by log10(2) [0.30102999f].
    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
                                        getF32Constant(DAG, 0x3e9a209a, dl));

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    // Approximate log10 of the mantissa with a minimax polynomial whose
    // degree (and accuracy) is chosen by the requested precision bound.
    SDValue Log10ofMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   Log10ofMantissa =
      //     -0.50419619f +
      //       (0.60948995f - 0.10380950f * x) * x;
      //
      // error 0.0014886165, which is 6 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbdd49a13, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f1c0789, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                    getF32Constant(DAG, 0x3f011300, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log10ofMantissa =
      //     -0.64831180f +
      //       (0.91751397f +
      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
      //
      // error 0.00019228036, which is better than 12 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3d431f31, dl));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3ea21fb2, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f6ae232, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                                    getF32Constant(DAG, 0x3f25f7c3, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log10ofMantissa =
      //     -0.84299375f +
      //       (1.5327582f +
      //         (-1.0688956f +
      //           (0.49102474f +
      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
      //
      // error 0.0000037995730, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3c5d51ce, dl));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e00685a, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3efb6798, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f88d192, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3fc4316c, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
                                    getF32Constant(DAG, 0x3f57ce70, dl));
    }

    // log10(x) = log10(2^e * m) = e*log10(2) + log10(m).
    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
}
5357
5358/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5359/// limited-precision mode.
5360static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5361 const TargetLowering &TLI) {
5362 if (Op.getValueType() == MVT::f32 &&
5363 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5364 return getLimitedPrecisionExp2(Op, dl, DAG);
5365
5366 // No special expansion.
5367 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
5368}
5369
5370/// visitPow - Lower a pow intrinsic. Handles the special sequences for
5371/// limited-precision mode with x == 10.0f.
5372static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5373 SelectionDAG &DAG, const TargetLowering &TLI) {
5374 bool IsExp10 = false;
5375 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5376 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5377 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5378 APFloat Ten(10.0f);
5379 IsExp10 = LHSC->isExactlyValue(Ten);
5380 }
5381 }
5382
5383 // TODO: What fast-math-flags should be set on the FMUL node?
5384 if (IsExp10) {
5385 // Put the exponent in the right bit position for later addition to the
5386 // final result:
5387 //
5388 // #define LOG2OF10 3.3219281f
5389 // t0 = Op * LOG2OF10;
5390 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5391 getF32Constant(DAG, 0x40549a78, dl));
5392 return getLimitedPrecisionExp2(t0, dl, DAG);
5393 }
5394
5395 // No special expansion.
5396 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
5397}
5398
5399/// ExpandPowI - Expand a llvm.powi intrinsic.
5400static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5401 SelectionDAG &DAG) {
5402 // If RHS is a constant, we can expand this out to a multiplication tree,
5403 // otherwise we end up lowering to a call to __powidf2 (for example). When
5404 // optimizing for size, we only want to do this if the expansion would produce
5405 // a small number of multiplies, otherwise we do the full expansion.
5406 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5407 // Get the exponent as a positive value.
5408 unsigned Val = RHSC->getSExtValue();
5409 if ((int)Val < 0) Val = -Val;
5410
5411 // powi(x, 0) -> 1.0
5412 if (Val == 0)
5413 return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5414
5415 bool OptForSize = DAG.shouldOptForSize();
5416 if (!OptForSize ||
5417 // If optimizing for size, don't insert too many multiplies.
5418 // This inserts up to 5 multiplies.
5419 countPopulation(Val) + Log2_32(Val) < 7) {
5420 // We use the simple binary decomposition method to generate the multiply
5421 // sequence. There are more optimal ways to do this (for example,
5422 // powi(x,15) generates one more multiply than it should), but this has
5423 // the benefit of being both really simple and much better than a libcall.
5424 SDValue Res; // Logically starts equal to 1.0
5425 SDValue CurSquare = LHS;
5426 // TODO: Intrinsics should have fast-math-flags that propagate to these
5427 // nodes.
5428 while (Val) {
5429 if (Val & 1) {
5430 if (Res.getNode())
5431 Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
5432 else
5433 Res = CurSquare; // 1.0*CurSquare.
5434 }
5435
5436 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5437 CurSquare, CurSquare);
5438 Val >>= 1;
5439 }
5440
5441 // If the original was negative, invert the result, producing 1/(x*x*x).
5442 if (RHSC->getSExtValue() < 0)
5443 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5444 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5445 return Res;
5446 }
5447 }
5448
5449 // Otherwise, expand to a libcall.
5450 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5451}
5452
/// Expand a fixed-point division node ([SU]DIVFIX[SAT]), promoting the
/// operand type by one bit when the operation would otherwise survive to
/// operation legalization where it cannot be expanded.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
                            SDValue LHS, SDValue RHS, SDValue Scale,
                            SelectionDAG &DAG, const TargetLowering &TLI) {
  EVT VT = LHS.getValueType();
  // Decode signedness and saturation from the fixed-point division opcode.
  bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
  bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
  LLVMContext &Ctx = *DAG.getContext();

  // If the type is legal but the operation isn't, this node might survive all
  // the way to operation legalization. If we end up there and we do not have
  // the ability to widen the type (if VT*2 is not legal), we cannot expand the
  // node.

  // Coax the legalizer into expanding the node during type legalization instead
  // by bumping the size by one bit. This will force it to Promote, enabling the
  // early expansion and avoiding the need to expand later.

  // We don't have to do this if Scale is 0; that can always be expanded, unless
  // it's a saturating signed operation. Those can experience true integer
  // division overflow, a case which we must avoid.

  // FIXME: We wouldn't have to do this (or any of the early
  // expansion/promotion) if it was possible to expand a libcall of an
  // illegal type during operation legalization. But it's not, so things
  // get a bit hacky.
  unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
  if ((ScaleInt > 0 || (Saturating && Signed)) &&
      (TLI.isTypeLegal(VT) ||
       (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
    TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
        Opcode, VT, ScaleInt);
    if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
      // Widen by one bit (scalar, or per-element for vectors) so that type
      // legalization promotes the node and expands it early.
      EVT PromVT;
      if (VT.isScalarInteger())
        PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
      else if (VT.isVector()) {
        PromVT = VT.getVectorElementType();
        PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
        PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
      } else
        llvm_unreachable("Wrong VT for DIVFIX?");
      // Extend the operands with the matching signedness.
      if (Signed) {
        LHS = DAG.getSExtOrTrunc(LHS, DL, PromVT);
        RHS = DAG.getSExtOrTrunc(RHS, DL, PromVT);
      } else {
        LHS = DAG.getZExtOrTrunc(LHS, DL, PromVT);
        RHS = DAG.getZExtOrTrunc(RHS, DL, PromVT);
      }
      EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
      // For saturating operations, we need to shift up the LHS to get the
      // proper saturation width, and then shift down again afterwards.
      if (Saturating)
        LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
                          DAG.getConstant(1, DL, ShiftTy));
      SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
      if (Saturating)
        Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
                          DAG.getConstant(1, DL, ShiftTy));
      return DAG.getZExtOrTrunc(Res, DL, VT);
    }
  }

  // Legal/Custom (or no promotion needed): emit the node at the original type.
  return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
}
5517
5518// getUnderlyingArgRegs - Find underlying registers used for a truncated,
5519// bitcasted, or split argument. Returns a list of <Register, size in bits>
5520static void
5521getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
5522 const SDValue &N) {
5523 switch (N.getOpcode()) {
5524 case ISD::CopyFromReg: {
5525 SDValue Op = N.getOperand(1);
5526 Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5527 Op.getValueType().getSizeInBits());
5528 return;
5529 }
5530 case ISD::BITCAST:
5531 case ISD::AssertZext:
5532 case ISD::AssertSext:
5533 case ISD::TRUNCATE:
5534 getUnderlyingArgRegs(Regs, N.getOperand(0));
5535 return;
5536 case ISD::BUILD_PAIR:
5537 case ISD::BUILD_VECTOR:
5538 case ISD::CONCAT_VECTORS:
5539 for (SDValue Op : N->op_values())
5540 getUnderlyingArgRegs(Regs, Op);
5541 return;
5542 default:
5543 return;
5544 }
5545}
5546
/// If the DbgValueInst is a dbg_value of a function argument, create the
/// corresponding DBG_VALUE machine instruction for it now. At the end of
/// instruction selection, they will be inserted to the entry BB.
bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
    const Value *V, DILocalVariable *Variable, DIExpression *Expr,
    DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
  // Only IR-level function arguments qualify for this treatment.
  const Argument *Arg = dyn_cast<Argument>(V);
  if (!Arg)
    return false;

  if (!IsDbgDeclare) {
    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
    // the entry block.
    bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
    if (!IsInEntryBlock)
      return false;

    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
    // variable that also is a param.
    //
    // Although, if we are at the top of the entry block already, we can still
    // emit using ArgDbgValue. This might catch some situations when the
    // dbg.value refers to an argument that isn't used in the entry block, so
    // any CopyToReg node would be optimized out and the only way to express
    // this DBG_VALUE is by using the physical reg (or FI) as done in this
    // method. ArgDbgValues are hoisted to the beginning of the entry block. So
    // we should only emit as ArgDbgValue if the Variable is an argument to the
    // current function, and the dbg.value intrinsic is found in the entry
    // block.
    bool VariableIsFunctionInputArg = Variable->isParameter() &&
                                      !DL->getInlinedAt();
    bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
    if (!IsInPrologue && !VariableIsFunctionInputArg)
      return false;

    // Here we assume that a function argument on IR level only can be used to
    // describe one input parameter on source level. If we for example have
    // source code like this
    //
    //    struct A { long x, y; };
    //    void foo(struct A a, long b) {
    //      ...
    //      b = a.x;
    //      ...
    //    }
    //
    // and IR like this
    //
    //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
    //  entry:
    //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
    //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
    //    call void @llvm.dbg.value(metadata i32 %b, "b",
    //    ...
    //    call void @llvm.dbg.value(metadata i32 %a1, "b"
    //    ...
    //
    // then the last dbg.value is describing a parameter "b" using a value that
    // is an argument. But since we already has used %a1 to describe a parameter
    // we should not handle that last dbg.value here (that would result in an
    // incorrect hoisting of the DBG_VALUE to the function entry).
    // Notice that we allow one dbg.value per IR level argument, to accommodate
    // for the situation with fragments above.
    if (VariableIsFunctionInputArg) {
      unsigned ArgNo = Arg->getArgNo();
      if (ArgNo >= FuncInfo.DescribedArgs.size())
        FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
      else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
        return false;
      FuncInfo.DescribedArgs.set(ArgNo);
    }
  }

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();

  bool IsIndirect = false;
  Optional<MachineOperand> Op;
  // Some arguments' frame index is recorded during argument lowering.
  // getArgumentFrameIndex uses INT_MAX as its "no frame index" sentinel.
  int FI = FuncInfo.getArgumentFrameIndex(Arg);
  if (FI != std::numeric_limits<int>::max())
    Op = MachineOperand::CreateFI(FI);

  // Otherwise try to locate the argument in a single (preferably physical)
  // register.
  SmallVector<std::pair<unsigned, unsigned>, 8> ArgRegsAndSizes;
  if (!Op && N.getNode()) {
    getUnderlyingArgRegs(ArgRegsAndSizes, N);
    Register Reg;
    if (ArgRegsAndSizes.size() == 1)
      Reg = ArgRegsAndSizes.front().first;

    // Prefer the live-in physical register over a virtual copy of it.
    if (Reg && Reg.isVirtual()) {
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      Register PR = RegInfo.getLiveInPhysReg(Reg);
      if (PR)
        Reg = PR;
    }
    if (Reg) {
      Op = MachineOperand::CreateReg(Reg, false);
      IsIndirect = IsDbgDeclare;
    }
  }

  if (!Op && N.getNode()) {
    // Check if frame index is available.
    SDValue LCandidate = peekThroughBitcasts(N);
    if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
      if (FrameIndexSDNode *FINode =
          dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
        Op = MachineOperand::CreateFI(FINode->getIndex());
  }

  if (!Op) {
    // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
    auto splitMultiRegDbgValue
      = [&](ArrayRef<std::pair<unsigned, unsigned>> SplitRegs) {
      unsigned Offset = 0;
      for (auto RegAndSize : SplitRegs) {
        // If the expression is already a fragment, the current register
        // offset+size might extend beyond the fragment. In this case, only
        // the register bits that are inside the fragment are relevant.
        int RegFragmentSizeInBits = RegAndSize.second;
        if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
          uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
          // The register is entirely outside the expression fragment,
          // so is irrelevant for debug info.
          if (Offset >= ExprFragmentSizeInBits)
            break;
          // The register is partially outside the expression fragment, only
          // the low bits within the fragment are relevant for debug info.
          if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
            RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
          }
        }

        auto FragmentExpr = DIExpression::createFragmentExpression(
            Expr, Offset, RegFragmentSizeInBits);
        Offset += RegAndSize.second;
        // If a valid fragment expression cannot be created, the variable's
        // correct value cannot be determined and so it is set as Undef.
        if (!FragmentExpr) {
          SDDbgValue *SDV = DAG.getConstantDbgValue(
              Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
          DAG.AddDbgValue(SDV, nullptr, false);
          continue;
        }
        assert(!IsDbgDeclare && "DbgDeclare operand is not in memory?");
        FuncInfo.ArgDbgValues.push_back(
          BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
                  RegAndSize.first, Variable, *FragmentExpr));
      }
    };

    // Check if ValueMap has reg number.
    DenseMap<const Value *, unsigned>::const_iterator
      VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end()) {
      const auto &TLI = DAG.getTargetLoweringInfo();
      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
                       V->getType(), getABIRegCopyCC(V));
      if (RFV.occupiesMultipleRegs()) {
        splitMultiRegDbgValue(RFV.getRegsAndSizes());
        return true;
      }

      Op = MachineOperand::CreateReg(VMI->second, false);
      IsIndirect = IsDbgDeclare;
    } else if (ArgRegsAndSizes.size() > 1) {
      // This was split due to the calling convention, and no virtual register
      // mapping exists for the value.
      splitMultiRegDbgValue(ArgRegsAndSizes);
      return true;
    }
  }

  if (!Op)
    return false;

  assert(Variable->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  // A non-register location (frame index) is always an indirect location.
  IsIndirect = (Op->isReg()) ? IsIndirect : true;
  FuncInfo.ArgDbgValues.push_back(
      BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
              *Op, Variable, Expr));

  return true;
}
5735
5736/// Return the appropriate SDDbgValue based on N.
5737SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5738 DILocalVariable *Variable,
5739 DIExpression *Expr,
5740 const DebugLoc &dl,
5741 unsigned DbgSDNodeOrder) {
5742 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5743 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5744 // stack slot locations.
5745 //
5746 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5747 // debug values here after optimization:
5748 //
5749 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
5750 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5751 //
5752 // Both describe the direct values of their associated variables.
5753 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5754 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5755 }
5756 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5757 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5758}
5759
5760static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5761 switch (Intrinsic) {
5762 case Intrinsic::smul_fix:
5763 return ISD::SMULFIX;
5764 case Intrinsic::umul_fix:
5765 return ISD::UMULFIX;
5766 case Intrinsic::smul_fix_sat:
5767 return ISD::SMULFIXSAT;
5768 case Intrinsic::umul_fix_sat:
5769 return ISD::UMULFIXSAT;
5770 case Intrinsic::sdiv_fix:
5771 return ISD::SDIVFIX;
5772 case Intrinsic::udiv_fix:
5773 return ISD::UDIVFIX;
5774 case Intrinsic::sdiv_fix_sat:
5775 return ISD::SDIVFIXSAT;
5776 case Intrinsic::udiv_fix_sat:
5777 return ISD::UDIVFIXSAT;
5778 default:
5779 llvm_unreachable("Unhandled fixed point intrinsic")::llvm::llvm_unreachable_internal("Unhandled fixed point intrinsic"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 5779)
;
5780 }
5781}
5782
5783void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5784 const char *FunctionName) {
5785 assert(FunctionName && "FunctionName must not be nullptr")((FunctionName && "FunctionName must not be nullptr")
? static_cast<void> (0) : __assert_fail ("FunctionName && \"FunctionName must not be nullptr\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 5785, __PRETTY_FUNCTION__))
;
5786 SDValue Callee = DAG.getExternalSymbol(
5787 FunctionName,
5788 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
5789 LowerCallTo(&I, Callee, I.isTailCall());
5790}
5791
5792/// Lower the call to the specified intrinsic function.
5793void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
5794 unsigned Intrinsic) {
5795 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5796 SDLoc sdl = getCurSDLoc();
5797 DebugLoc dl = getCurDebugLoc();
5798 SDValue Res;
5799
5800 switch (Intrinsic) {
5801 default:
5802 // By default, turn this into a target intrinsic node.
5803 visitTargetIntrinsic(I, Intrinsic);
5804 return;
5805 case Intrinsic::vscale: {
5806 match(&I, m_VScale(DAG.getDataLayout()));
5807 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5808 setValue(&I,
5809 DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1)));
5810 return;
5811 }
5812 case Intrinsic::vastart: visitVAStart(I); return;
5813 case Intrinsic::vaend: visitVAEnd(I); return;
5814 case Intrinsic::vacopy: visitVACopy(I); return;
5815 case Intrinsic::returnaddress:
5816 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5817 TLI.getPointerTy(DAG.getDataLayout()),
5818 getValue(I.getArgOperand(0))));
5819 return;
5820 case Intrinsic::addressofreturnaddress:
5821 setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5822 TLI.getPointerTy(DAG.getDataLayout())));
5823 return;
5824 case Intrinsic::sponentry:
5825 setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl,
5826 TLI.getFrameIndexTy(DAG.getDataLayout())));
5827 return;
5828 case Intrinsic::frameaddress:
5829 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5830 TLI.getFrameIndexTy(DAG.getDataLayout()),
5831 getValue(I.getArgOperand(0))));
5832 return;
5833 case Intrinsic::read_register: {
5834 Value *Reg = I.getArgOperand(0);
5835 SDValue Chain = getRoot();
5836 SDValue RegName =
5837 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5838 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5839 Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5840 DAG.getVTList(VT, MVT::Other), Chain, RegName);
5841 setValue(&I, Res);
5842 DAG.setRoot(Res.getValue(1));
5843 return;
5844 }
5845 case Intrinsic::write_register: {
5846 Value *Reg = I.getArgOperand(0);
5847 Value *RegValue = I.getArgOperand(1);
5848 SDValue Chain = getRoot();
5849 SDValue RegName =
5850 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5851 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5852 RegName, getValue(RegValue)));
5853 return;
5854 }
5855 case Intrinsic::memcpy: {
5856 const auto &MCI = cast<MemCpyInst>(I);
5857 SDValue Op1 = getValue(I.getArgOperand(0));
5858 SDValue Op2 = getValue(I.getArgOperand(1));
5859 SDValue Op3 = getValue(I.getArgOperand(2));
5860 // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5861 Align DstAlign = MCI.getDestAlign().valueOrOne();
5862 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5863 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5864 bool isVol = MCI.isVolatile();
5865 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5866 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5867 // node.
5868 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5869 SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5870 /* AlwaysInline */ false, isTC,
5871 MachinePointerInfo(I.getArgOperand(0)),
5872 MachinePointerInfo(I.getArgOperand(1)));
5873 updateDAGForMaybeTailCall(MC);
5874 return;
5875 }
5876 case Intrinsic::memcpy_inline: {
5877 const auto &MCI = cast<MemCpyInlineInst>(I);
5878 SDValue Dst = getValue(I.getArgOperand(0));
5879 SDValue Src = getValue(I.getArgOperand(1));
5880 SDValue Size = getValue(I.getArgOperand(2));
5881 assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size")((isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size"
) ? static_cast<void> (0) : __assert_fail ("isa<ConstantSDNode>(Size) && \"memcpy_inline needs constant size\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 5881, __PRETTY_FUNCTION__))
;
5882 // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
5883 Align DstAlign = MCI.getDestAlign().valueOrOne();
5884 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5885 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5886 bool isVol = MCI.isVolatile();
5887 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5888 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5889 // node.
5890 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
5891 /* AlwaysInline */ true