Bug Summary

File: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
Warning: line 6175, column 28
Called C++ object pointer is null
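This class of report comes from the analyzer's core checkers: it fires when a C++ member function is called through a pointer that is null on at least one feasible path. A minimal illustrative sketch of the flagged pattern (not the actual code at line 6175; the names are invented for the example):

    struct Widget {
      int size() const { return 0; }
    };

    int use(const Widget *W, bool HasWidget) {
      const Widget *P = HasWidget ? W : nullptr;
      // On the HasWidget == false path P is null, so the analyzer reports
      // "Called C++ object pointer is null" at this call site.
      return P->size();
    }

The usual fix is an explicit null check, or an assertion documenting the invariant, before the call.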

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SelectionDAGBuilder.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen/SelectionDAG -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen/SelectionDAG -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
1//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements routines for translating from LLVM IR into SelectionDAG IR.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SelectionDAGBuilder.h"
14#include "SDNodeDbgValue.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/BitVector.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Triple.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/Analysis/BlockFrequencyInfo.h"
28#include "llvm/Analysis/BranchProbabilityInfo.h"
29#include "llvm/Analysis/ConstantFolding.h"
30#include "llvm/Analysis/EHPersonalities.h"
31#include "llvm/Analysis/Loads.h"
32#include "llvm/Analysis/MemoryLocation.h"
33#include "llvm/Analysis/ProfileSummaryInfo.h"
34#include "llvm/Analysis/TargetLibraryInfo.h"
35#include "llvm/Analysis/ValueTracking.h"
36#include "llvm/Analysis/VectorUtils.h"
37#include "llvm/CodeGen/Analysis.h"
38#include "llvm/CodeGen/FunctionLoweringInfo.h"
39#include "llvm/CodeGen/GCMetadata.h"
40#include "llvm/CodeGen/MachineBasicBlock.h"
41#include "llvm/CodeGen/MachineFrameInfo.h"
42#include "llvm/CodeGen/MachineFunction.h"
43#include "llvm/CodeGen/MachineInstr.h"
44#include "llvm/CodeGen/MachineInstrBuilder.h"
45#include "llvm/CodeGen/MachineJumpTableInfo.h"
46#include "llvm/CodeGen/MachineMemOperand.h"
47#include "llvm/CodeGen/MachineModuleInfo.h"
48#include "llvm/CodeGen/MachineOperand.h"
49#include "llvm/CodeGen/MachineRegisterInfo.h"
50#include "llvm/CodeGen/RuntimeLibcalls.h"
51#include "llvm/CodeGen/SelectionDAG.h"
52#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
53#include "llvm/CodeGen/StackMaps.h"
54#include "llvm/CodeGen/SwiftErrorValueTracking.h"
55#include "llvm/CodeGen/TargetFrameLowering.h"
56#include "llvm/CodeGen/TargetInstrInfo.h"
57#include "llvm/CodeGen/TargetOpcodes.h"
58#include "llvm/CodeGen/TargetRegisterInfo.h"
59#include "llvm/CodeGen/TargetSubtargetInfo.h"
60#include "llvm/CodeGen/WinEHFuncInfo.h"
61#include "llvm/IR/Argument.h"
62#include "llvm/IR/Attributes.h"
63#include "llvm/IR/BasicBlock.h"
64#include "llvm/IR/CFG.h"
65#include "llvm/IR/CallingConv.h"
66#include "llvm/IR/Constant.h"
67#include "llvm/IR/ConstantRange.h"
68#include "llvm/IR/Constants.h"
69#include "llvm/IR/DataLayout.h"
70#include "llvm/IR/DebugInfoMetadata.h"
71#include "llvm/IR/DerivedTypes.h"
72#include "llvm/IR/DiagnosticInfo.h"
73#include "llvm/IR/Function.h"
74#include "llvm/IR/GetElementPtrTypeIterator.h"
75#include "llvm/IR/InlineAsm.h"
76#include "llvm/IR/InstrTypes.h"
77#include "llvm/IR/Instructions.h"
78#include "llvm/IR/IntrinsicInst.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/IntrinsicsAArch64.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
82#include "llvm/IR/LLVMContext.h"
83#include "llvm/IR/Metadata.h"
84#include "llvm/IR/Module.h"
85#include "llvm/IR/Operator.h"
86#include "llvm/IR/PatternMatch.h"
87#include "llvm/IR/Statepoint.h"
88#include "llvm/IR/Type.h"
89#include "llvm/IR/User.h"
90#include "llvm/IR/Value.h"
91#include "llvm/MC/MCContext.h"
92#include "llvm/MC/MCSymbol.h"
93#include "llvm/Support/AtomicOrdering.h"
94#include "llvm/Support/Casting.h"
95#include "llvm/Support/CommandLine.h"
96#include "llvm/Support/Compiler.h"
97#include "llvm/Support/Debug.h"
98#include "llvm/Support/MathExtras.h"
99#include "llvm/Support/raw_ostream.h"
100#include "llvm/Target/TargetIntrinsicInfo.h"
101#include "llvm/Target/TargetMachine.h"
102#include "llvm/Target/TargetOptions.h"
103#include "llvm/Transforms/Utils/Local.h"
104#include <cstddef>
105#include <cstring>
106#include <iterator>
107#include <limits>
108#include <numeric>
109#include <tuple>
110
111using namespace llvm;
112using namespace PatternMatch;
113using namespace SwitchCG;
114
115#define DEBUG_TYPE "isel"
116
117/// LimitFloatPrecision - Generate low-precision inline sequences for
118/// some float libcalls (6, 8 or 12 bits).
119static unsigned LimitFloatPrecision;
120
121static cl::opt<bool>
122 InsertAssertAlign("insert-assert-align", cl::init(true),
123 cl::desc("Insert the experimental `assertalign` node."),
124 cl::ReallyHidden);
125
126static cl::opt<unsigned, true>
127 LimitFPPrecision("limit-float-precision",
128 cl::desc("Generate low-precision inline sequences "
129 "for some float libcalls"),
130 cl::location(LimitFloatPrecision), cl::Hidden,
131 cl::init(0));
132
133static cl::opt<unsigned> SwitchPeelThreshold(
134 "switch-peel-threshold", cl::Hidden, cl::init(66),
135 cl::desc("Set the case probability threshold for peeling the case from a "
136 "switch statement. A value greater than 100 will void this "
137 "optimization"));
138
139// Limit the width of DAG chains. This is important in general to prevent
140// DAG-based analysis from blowing up. For example, alias analysis and
141// load clustering may not complete in reasonable time. It is difficult to
142// recognize and avoid this situation within each individual analysis, and
143// future analyses are likely to have the same behavior. Limiting DAG width is
144// the safe approach and will be especially important with global DAGs.
145//
146// MaxParallelChains default is arbitrarily high to avoid affecting
147// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
148// sequence over this should have been converted to llvm.memcpy by the
149// frontend. It is easy to induce this behavior with .ll code such as:
150// %buffer = alloca [4096 x i8]
151// %data = load [4096 x i8]* %argPtr
152// store [4096 x i8] %data, [4096 x i8]* %buffer
153static const unsigned MaxParallelChains = 64;
154
155static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
156 const SDValue *Parts, unsigned NumParts,
157 MVT PartVT, EVT ValueVT, const Value *V,
158 Optional<CallingConv::ID> CC);
159
160/// getCopyFromParts - Create a value that contains the specified legal parts
161/// combined into the value they represent. If the parts combine to a type
162/// larger than ValueVT then AssertOp can be used to specify whether the extra
163/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
164/// (ISD::AssertSext).
165static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
166 const SDValue *Parts, unsigned NumParts,
167 MVT PartVT, EVT ValueVT, const Value *V,
168 Optional<CallingConv::ID> CC = None,
169 Optional<ISD::NodeType> AssertOp = None) {
170 // Let the target assemble the parts if it wants to
171 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
172 if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
173 PartVT, ValueVT, CC))
174 return Val;
175
176 if (ValueVT.isVector())
177 return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
178 CC);
179
180 assert(NumParts > 0 && "No parts to assemble!");
181 SDValue Val = Parts[0];
182
183 if (NumParts > 1) {
184 // Assemble the value from multiple parts.
185 if (ValueVT.isInteger()) {
186 unsigned PartBits = PartVT.getSizeInBits();
187 unsigned ValueBits = ValueVT.getSizeInBits();
188
189 // Assemble the power of 2 part.
190 unsigned RoundParts =
191 (NumParts & (NumParts - 1)) ? 1 << Log2_32(NumParts) : NumParts;
192 unsigned RoundBits = PartBits * RoundParts;
193 EVT RoundVT = RoundBits == ValueBits ?
194 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
195 SDValue Lo, Hi;
196
197 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
198
199 if (RoundParts > 2) {
200 Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
201 PartVT, HalfVT, V);
202 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
203 RoundParts / 2, PartVT, HalfVT, V);
204 } else {
205 Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
206 Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
207 }
208
209 if (DAG.getDataLayout().isBigEndian())
210 std::swap(Lo, Hi);
211
212 Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
213
214 if (RoundParts < NumParts) {
215 // Assemble the trailing non-power-of-2 part.
216 unsigned OddParts = NumParts - RoundParts;
217 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
218 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
219 OddVT, V, CC);
220
221 // Combine the round and odd parts.
222 Lo = Val;
223 if (DAG.getDataLayout().isBigEndian())
224 std::swap(Lo, Hi);
225 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
226 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
227 Hi =
228 DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
229 DAG.getConstant(Lo.getValueSizeInBits(), DL,
230 TLI.getPointerTy(DAG.getDataLayout())));
231 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
232 Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
233 }
234 } else if (PartVT.isFloatingPoint()) {
235 // FP split into multiple FP parts (for ppcf128)
236 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
237 "Unexpected split");
238 SDValue Lo, Hi;
239 Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
240 Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
241 if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
242 std::swap(Lo, Hi);
243 Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
244 } else {
245 // FP split into integer parts (soft fp)
246 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
247 !PartVT.isVector() && "Unexpected split");
248 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
249 Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
250 }
251 }
252
253 // There is now one part, held in Val. Correct it to match ValueVT.
254 // PartEVT is the type of the register class that holds the value.
255 // ValueVT is the type of the inline asm operation.
256 EVT PartEVT = Val.getValueType();
257
258 if (PartEVT == ValueVT)
259 return Val;
260
261 if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
262 ValueVT.bitsLT(PartEVT)) {
263 // For an FP value in an integer part, we need to truncate to the right
264 // width first.
265 PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
266 Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
267 }
268
269 // Handle types that have the same size.
270 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
271 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
272
273 // Handle types with different sizes.
274 if (PartEVT.isInteger() && ValueVT.isInteger()) {
275 if (ValueVT.bitsLT(PartEVT)) {
276 // For a truncate, see if we have any information to
277 // indicate whether the truncated bits will always be
278 // zero or sign-extension.
279 if (AssertOp.hasValue())
280 Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
281 DAG.getValueType(ValueVT));
282 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
283 }
284 return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
285 }
286
287 if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
288 // FP_ROUND's are always exact here.
289 if (ValueVT.bitsLT(Val.getValueType()))
290 return DAG.getNode(
291 ISD::FP_ROUND, DL, ValueVT, Val,
292 DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
293
294 return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
295 }
296
297 // Handle MMX to a narrower integer type by bitcasting MMX to integer and
298 // then truncating.
299 if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
300 ValueVT.bitsLT(PartEVT)) {
301 Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
302 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
303 }
304
305 report_fatal_error("Unknown mismatch in getCopyFromParts!");
306}
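For the non-power-of-2 case above, the split into a power-of-2 "round" half and an "odd" tail can be checked in isolation. A minimal standalone sketch of the same arithmetic (LLVM's Log2_32 is replaced by a plain loop stand-in; the concrete counts are assumptions for illustration):

    #include <cstdio>

    // Mirrors the RoundParts computation in getCopyFromParts: round the
    // part count down to a power of 2 and treat the rest as the odd tail.
    static unsigned log2Floor(unsigned N) {
      unsigned L = 0;
      while (N >>= 1)
        ++L;
      return L;
    }

    int main() {
      unsigned NumParts = 3, PartBits = 32; // e.g. an i96 held in 3 x i32
      unsigned RoundParts =
          (NumParts & (NumParts - 1)) ? 1u << log2Floor(NumParts) : NumParts;
      unsigned OddParts = NumParts - RoundParts;
      // Prints RoundParts=2 (assembled as an i64 pair) and OddParts=1 (an
      // i32 tail that is any-extended, shifted left by 64, and OR'd in).
      std::printf("RoundParts=%u OddParts=%u RoundBits=%u\n", RoundParts,
                  OddParts, RoundParts * PartBits);
      return 0;
    }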
307
308static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
309 const Twine &ErrMsg) {
310 const Instruction *I = dyn_cast_or_null<Instruction>(V);
311 if (!V)
312 return Ctx.emitError(ErrMsg);
313
314 const char *AsmError = ", possible invalid constraint for vector type";
315 if (const CallInst *CI = dyn_cast<CallInst>(I))
316 if (CI->isInlineAsm())
317 return Ctx.emitError(I, ErrMsg + AsmError);
318
319 return Ctx.emitError(I, ErrMsg);
320}
321
322/// getCopyFromPartsVector - Create a value that contains the specified legal
323/// parts combined into the value they represent. If the parts combine to a
324/// type larger than ValueVT then AssertOp can be used to specify whether the
325/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
326/// ValueVT (ISD::AssertSext).
327static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
328 const SDValue *Parts, unsigned NumParts,
329 MVT PartVT, EVT ValueVT, const Value *V,
330 Optional<CallingConv::ID> CallConv) {
331 assert(ValueVT.isVector() && "Not a vector value");
332 assert(NumParts > 0 && "No parts to assemble!");
333 const bool IsABIRegCopy = CallConv.hasValue();
334
335 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
336 SDValue Val = Parts[0];
337
338 // Handle a multi-element vector.
339 if (NumParts > 1) {
340 EVT IntermediateVT;
341 MVT RegisterVT;
342 unsigned NumIntermediates;
343 unsigned NumRegs;
344
345 if (IsABIRegCopy) {
346 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
347 *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
348 NumIntermediates, RegisterVT);
349 } else {
350 NumRegs =
351 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
352 NumIntermediates, RegisterVT);
353 }
354
355 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
356 NumParts = NumRegs; // Silence a compiler warning.
357 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
358 assert(RegisterVT.getSizeInBits() ==
359 Parts[0].getSimpleValueType().getSizeInBits() &&
360 "Part type sizes don't match!");
361
362 // Assemble the parts into intermediate operands.
363 SmallVector<SDValue, 8> Ops(NumIntermediates);
364 if (NumIntermediates == NumParts) {
365 // If the register was not expanded, truncate or copy the value,
366 // as appropriate.
367 for (unsigned i = 0; i != NumParts; ++i)
368 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
369 PartVT, IntermediateVT, V, CallConv);
370 } else if (NumParts > 0) {
371 // If the intermediate type was expanded, build the intermediate
372 // operands from the parts.
373 assert(NumParts % NumIntermediates == 0 &&
374 "Must expand into a divisible number of parts!");
375 unsigned Factor = NumParts / NumIntermediates;
376 for (unsigned i = 0; i != NumIntermediates; ++i)
377 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
378 PartVT, IntermediateVT, V, CallConv);
379 }
380
381 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
382 // intermediate operands.
383 EVT BuiltVectorTy =
384 IntermediateVT.isVector()
385 ? EVT::getVectorVT(
386 *DAG.getContext(), IntermediateVT.getScalarType(),
387 IntermediateVT.getVectorElementCount() * NumParts)
388 : EVT::getVectorVT(*DAG.getContext(),
389 IntermediateVT.getScalarType(),
390 NumIntermediates);
391 Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
392 : ISD::BUILD_VECTOR,
393 DL, BuiltVectorTy, Ops);
394 }
395
396 // There is now one part, held in Val. Correct it to match ValueVT.
397 EVT PartEVT = Val.getValueType();
398
399 if (PartEVT == ValueVT)
400 return Val;
401
402 if (PartEVT.isVector()) {
403 // Vector/Vector bitcast.
404 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
405 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
406
407 // If the element types of the source/dest vectors are the same, but the
408 // parts vector has more elements than the value vector, then we have a
409 // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
410 // elements we want.
411 if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
412 assert((PartEVT.getVectorElementCount().getKnownMinValue() >
413 ValueVT.getVectorElementCount().getKnownMinValue()) &&
414 (PartEVT.getVectorElementCount().isScalable() ==
415 ValueVT.getVectorElementCount().isScalable()) &&
416 "Cannot narrow, it would be a lossy transformation");
417 PartEVT =
418 EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
419 ValueVT.getVectorElementCount());
420 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
421 DAG.getVectorIdxConstant(0, DL));
422 if (PartEVT == ValueVT)
423 return Val;
424 }
425
426 // Promoted vector extract
427 return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
428 }
429
430 // Trivial bitcast if the types are the same size and the destination
431 // vector type is legal.
432 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
433 TLI.isTypeLegal(ValueVT))
434 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
435
436 if (ValueVT.getVectorNumElements() != 1) {
437 // Certain ABIs require that vectors are passed as integers. If the
438 // vectors are the same size, this is an obvious bitcast.
439 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
440 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
441 } else if (ValueVT.bitsLT(PartEVT)) {
442 const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
443 EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
444 // Drop the extra bits.
445 Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
446 return DAG.getBitcast(ValueVT, Val);
447 }
448
449 diagnosePossiblyInvalidConstraint(
450 *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
451 return DAG.getUNDEF(ValueVT);
452 }
453
454 // Handle cases such as i8 -> <1 x i1>
455 EVT ValueSVT = ValueVT.getVectorElementType();
456 if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
457 if (ValueSVT.getSizeInBits() == PartEVT.getSizeInBits())
458 Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
459 else
460 Val = ValueVT.isFloatingPoint()
461 ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
462 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
463 }
464
465 return DAG.getBuildVector(ValueVT, DL, Val);
466}
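The BuiltVectorTy choice above decides how the intermediates are recombined: vector intermediates are concatenated with CONCAT_VECTORS, scalar intermediates each become one element of a BUILD_VECTOR. A standalone sketch of that element-count bookkeeping (fixed-length case; the concrete counts are assumptions for illustration):

    #include <cstdio>

    int main() {
      // Assume a <8 x i32> value arriving as 4 parts with intermediate
      // type <2 x i32>; NumIntermediates equals NumParts here.
      unsigned NumParts = 4, NumIntermediates = 4;
      unsigned IntermediateElts = 2; // element count of <2 x i32>
      bool IntermediateIsVector = true;
      unsigned BuiltElts = IntermediateIsVector
                               ? IntermediateElts * NumParts // CONCAT_VECTORS
                               : NumIntermediates;           // BUILD_VECTOR
      std::printf("reassembled vector has %u elements\n", BuiltElts); // 8
      return 0;
    }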
467
468static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
469 SDValue Val, SDValue *Parts, unsigned NumParts,
470 MVT PartVT, const Value *V,
471 Optional<CallingConv::ID> CallConv);
472
473/// getCopyToParts - Create a series of nodes that contain the specified value
474/// split into legal parts. If the parts contain more bits than Val, then, for
475/// integers, ExtendKind can be used to specify how to generate the extra bits.
476static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
477 SDValue *Parts, unsigned NumParts, MVT PartVT,
478 const Value *V,
479 Optional<CallingConv::ID> CallConv = None,
480 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
481 // Let the target split the parts if it wants to
482 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
483 if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
484 CallConv))
485 return;
486 EVT ValueVT = Val.getValueType();
487
488 // Handle the vector case separately.
489 if (ValueVT.isVector())
490 return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
491 CallConv);
492
493 unsigned PartBits = PartVT.getSizeInBits();
494 unsigned OrigNumParts = NumParts;
495 assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
496 "Copying to an illegal type!");
497
498 if (NumParts == 0)
499 return;
500
501 assert(!ValueVT.isVector() && "Vector case handled elsewhere");
502 EVT PartEVT = PartVT;
503 if (PartEVT == ValueVT) {
504 assert(NumParts == 1 && "No-op copy with multiple parts!");
505 Parts[0] = Val;
506 return;
507 }
508
509 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
510 // If the parts cover more bits than the value has, promote the value.
511 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
512 assert(NumParts == 1 && "Do not know what to promote to!");
513 Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
514 } else {
515 if (ValueVT.isFloatingPoint()) {
516 // FP values need to be bitcast, then extended if they are being put
517 // into a larger container.
518 ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
519 Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
520 }
521 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
522 ValueVT.isInteger() &&
523 "Unknown mismatch!");
524 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
525 Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
526 if (PartVT == MVT::x86mmx)
527 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
528 }
529 } else if (PartBits == ValueVT.getSizeInBits()) {
530 // Different types of the same size.
531 assert(NumParts == 1 && PartEVT != ValueVT);
532 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
533 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
534 // If the parts cover fewer bits than the value has, truncate the value.
535 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
536 ValueVT.isInteger() &&
537 "Unknown mismatch!");
538 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
539 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
540 if (PartVT == MVT::x86mmx)
541 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
542 }
543
544 // The value may have changed - recompute ValueVT.
545 ValueVT = Val.getValueType();
546 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
547 "Failed to tile the value with PartVT!");
548
549 if (NumParts == 1) {
550 if (PartEVT != ValueVT) {
551 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
552 "scalar-to-vector conversion failed");
553 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
554 }
555
556 Parts[0] = Val;
557 return;
558 }
559
560 // Expand the value into multiple parts.
561 if (NumParts & (NumParts - 1)) {
562 // The number of parts is not a power of 2. Split off and copy the tail.
563 assert(PartVT.isInteger() && ValueVT.isInteger() &&
564 "Do not know what to expand to!");
565 unsigned RoundParts = 1 << Log2_32(NumParts);
566 unsigned RoundBits = RoundParts * PartBits;
567 unsigned OddParts = NumParts - RoundParts;
568 SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
569 DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));
570
571 getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
572 CallConv);
573
574 if (DAG.getDataLayout().isBigEndian())
575 // The odd parts were reversed by getCopyToParts - unreverse them.
576 std::reverse(Parts + RoundParts, Parts + NumParts);
577
578 NumParts = RoundParts;
579 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
580 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
581 }
582
583 // The number of parts is a power of 2. Repeatedly bisect the value using
584 // EXTRACT_ELEMENT.
585 Parts[0] = DAG.getNode(ISD::BITCAST, DL,
586 EVT::getIntegerVT(*DAG.getContext(),
587 ValueVT.getSizeInBits()),
588 Val);
589
590 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
591 for (unsigned i = 0; i < NumParts; i += StepSize) {
592 unsigned ThisBits = StepSize * PartBits / 2;
593 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
594 SDValue &Part0 = Parts[i];
595 SDValue &Part1 = Parts[i+StepSize/2];
596
597 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
598 ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
599 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
600 ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
601
602 if (ThisBits == PartBits && ThisVT != PartVT) {
603 Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
604 Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
605 }
606 }
607 }
608
609 if (DAG.getDataLayout().isBigEndian())
610 std::reverse(Parts, Parts + OrigNumParts);
611}
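The power-of-2 expansion above repeatedly halves the value with EXTRACT_ELEMENT (index 0 is the low half, index 1 the high half). The same indexing can be modelled on a plain integer, which makes the part layout easy to see; a standalone sketch (the value and part width are assumptions, and parts come out in little-endian order before the final std::reverse):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Model splitting an i64 into 4 x i16 the way the StepSize loop does:
      // each round halves the chunk width and splits Parts[i] into lo/hi.
      uint64_t Parts[4] = {0x1122334455667788ULL, 0, 0, 0};
      unsigned NumParts = 4, PartBits = 16;
      for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
        for (unsigned i = 0; i < NumParts; i += StepSize) {
          unsigned ThisBits = StepSize * PartBits / 2;
          uint64_t Mask = ThisBits >= 64 ? ~0ULL : ((1ULL << ThisBits) - 1);
          Parts[i + StepSize / 2] = (Parts[i] >> ThisBits) & Mask; // hi half
          Parts[i] &= Mask;                                        // lo half
        }
      }
      // Prints 0x7788, 0x5566, 0x3344, 0x1122: lowest chunk first.
      for (unsigned i = 0; i != NumParts; ++i)
        std::printf("Parts[%u] = 0x%llx\n", i, (unsigned long long)Parts[i]);
      return 0;
    }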
612
613static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
614 const SDLoc &DL, EVT PartVT) {
615 if (!PartVT.isVector())
616 return SDValue();
617
618 EVT ValueVT = Val.getValueType();
619 ElementCount PartNumElts = PartVT.getVectorElementCount();
620 ElementCount ValueNumElts = ValueVT.getVectorElementCount();
621
622 // We only support widening vectors with equivalent element types and
623 // fixed/scalable properties. If a target needs to widen a fixed-length type
624 // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
625 if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
626 PartNumElts.isScalable() != ValueNumElts.isScalable() ||
627 PartVT.getVectorElementType() != ValueVT.getVectorElementType())
628 return SDValue();
629
630 // Widening a scalable vector to another scalable vector is done by inserting
631 // the vector into a larger undef one.
632 if (PartNumElts.isScalable())
633 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
634 Val, DAG.getVectorIdxConstant(0, DL));
635
636 EVT ElementVT = PartVT.getVectorElementType();
637 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
638 // undef elements.
639 SmallVector<SDValue, 16> Ops;
640 DAG.ExtractVectorElements(Val, Ops);
641 SDValue EltUndef = DAG.getUNDEF(ElementVT);
642 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
643
644 // FIXME: Use CONCAT for 2x -> 4x.
645 return DAG.getBuildVector(PartVT, DL, Ops);
646}
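For fixed-length vectors the widening path above extracts all elements and pads with undef up to the part's element count. A standalone sketch of the same shape change, using NaN as a stand-in for undef (<2 x float> widened to <4 x float>; the values are assumptions):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<float> Val = {1.0f, 2.0f}; // the <2 x float> value
      const unsigned PartNumElts = 4;        // widen to <4 x float>
      // Stand-in for DAG.getUNDEF(ElementVT): any value may appear here.
      const float Undef = std::nanf("");
      std::vector<float> Ops = Val;          // ExtractVectorElements
      Ops.insert(Ops.end(), PartNumElts - Ops.size(), Undef);
      for (float F : Ops)
        std::printf("%g ", F);               // 1 2 nan nan
      std::printf("\n");
      return 0;
    }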
647
648/// getCopyToPartsVector - Create a series of nodes that contain the specified
649/// value split into legal parts.
650static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
651 SDValue Val, SDValue *Parts, unsigned NumParts,
652 MVT PartVT, const Value *V,
653 Optional<CallingConv::ID> CallConv) {
654 EVT ValueVT = Val.getValueType();
655 assert(ValueVT.isVector() && "Not a vector");
656 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
657 const bool IsABIRegCopy = CallConv.hasValue();
658
659 if (NumParts == 1) {
660 EVT PartEVT = PartVT;
661 if (PartEVT == ValueVT) {
662 // Nothing to do.
663 } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
664 // Bitconvert vector->vector case.
665 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
666 } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
667 Val = Widened;
668 } else if (PartVT.isVector() &&
669 PartEVT.getVectorElementType().bitsGE(
670 ValueVT.getVectorElementType()) &&
671 PartEVT.getVectorElementCount() ==
672 ValueVT.getVectorElementCount()) {
673
674 // Promoted vector extract
675 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
676 } else {
677 if (ValueVT.getVectorElementCount().isScalar()) {
678 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
679 DAG.getVectorIdxConstant(0, DL));
680 } else {
681 uint64_t ValueSize = ValueVT.getFixedSizeInBits();
682 assert(PartVT.getFixedSizeInBits() > ValueSize &&
683 "lossy conversion of vector to scalar type");
684 EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
685 Val = DAG.getBitcast(IntermediateType, Val);
686 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
687 }
688 }
689
690 assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
691 Parts[0] = Val;
692 return;
693 }
694
695 // Handle a multi-element vector.
696 EVT IntermediateVT;
697 MVT RegisterVT;
698 unsigned NumIntermediates;
699 unsigned NumRegs;
700 if (IsABIRegCopy) {
701 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
702 *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
703 NumIntermediates, RegisterVT);
704 } else {
705 NumRegs =
706 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
707 NumIntermediates, RegisterVT);
708 }
709
710 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
711 NumParts = NumRegs; // Silence a compiler warning.
712 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
713
714 assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
715 "Mixing scalable and fixed vectors when copying in parts");
716
717 Optional<ElementCount> DestEltCnt;
718
719 if (IntermediateVT.isVector())
720 DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
721 else
722 DestEltCnt = ElementCount::getFixed(NumIntermediates);
723
724 EVT BuiltVectorTy = EVT::getVectorVT(
725 *DAG.getContext(), IntermediateVT.getScalarType(), DestEltCnt.getValue());
726
727 if (ValueVT == BuiltVectorTy) {
728 // Nothing to do.
729 } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
730 // Bitconvert vector->vector case.
731 Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
732 } else {
733 if (BuiltVectorTy.getVectorElementType().bitsGT(
734 ValueVT.getVectorElementType())) {
735 // Integer promotion.
736 ValueVT = EVT::getVectorVT(*DAG.getContext(),
737 BuiltVectorTy.getVectorElementType(),
738 ValueVT.getVectorElementCount());
739 Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
740 }
741
742 if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
743 Val = Widened;
744 }
745 }
746
747 assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");
748
749 // Split the vector into intermediate operands.
750 SmallVector<SDValue, 8> Ops(NumIntermediates);
751 for (unsigned i = 0; i != NumIntermediates; ++i) {
752 if (IntermediateVT.isVector()) {
753 // This does something sensible for scalable vectors - see the
754 // definition of EXTRACT_SUBVECTOR for further details.
755 unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
756 Ops[i] =
757 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
758 DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
759 } else {
760 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
761 DAG.getVectorIdxConstant(i, DL));
762 }
763 }
764
765 // Split the intermediate operands into legal parts.
766 if (NumParts == NumIntermediates) {
767 // If the register was not expanded, promote or copy the value,
768 // as appropriate.
769 for (unsigned i = 0; i != NumParts; ++i)
770 getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
771 } else if (NumParts > 0) {
772 // If the intermediate type was expanded, split the value into
773 // legal parts.
774 assert(NumIntermediates != 0 && "division by zero");
775 assert(NumParts % NumIntermediates == 0 &&
776 "Must expand into a divisible number of parts!");
777 unsigned Factor = NumParts / NumIntermediates;
778 for (unsigned i = 0; i != NumIntermediates; ++i)
779 getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
780 CallConv);
781 }
782}
783
784RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
785 EVT valuevt, Optional<CallingConv::ID> CC)
786 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
787 RegCount(1, regs.size()), CallConv(CC) {}
788
789RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
790 const DataLayout &DL, unsigned Reg, Type *Ty,
791 Optional<CallingConv::ID> CC) {
792 ComputeValueVTs(TLI, DL, Ty, ValueVTs);
793
794 CallConv = CC;
795
796 for (EVT ValueVT : ValueVTs) {
797 unsigned NumRegs =
798 isABIMangled()
799 ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
800 : TLI.getNumRegisters(Context, ValueVT);
801 MVT RegisterVT =
802 isABIMangled()
803 ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
804 : TLI.getRegisterType(Context, ValueVT);
805 for (unsigned i = 0; i != NumRegs; ++i)
806 Regs.push_back(Reg + i);
807 RegVTs.push_back(RegisterVT);
808 RegCount.push_back(NumRegs);
809 Reg += NumRegs;
810 }
811}
812
813SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
814 FunctionLoweringInfo &FuncInfo,
815 const SDLoc &dl, SDValue &Chain,
816 SDValue *Flag, const Value *V) const {
817 // A Value with type {} or [0 x %t] needs no registers.
818 if (ValueVTs.empty())
819 return SDValue();
820
821 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
822
823 // Assemble the legal parts into the final values.
824 SmallVector<SDValue, 4> Values(ValueVTs.size());
825 SmallVector<SDValue, 8> Parts;
826 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
827 // Copy the legal parts from the registers.
828 EVT ValueVT = ValueVTs[Value];
829 unsigned NumRegs = RegCount[Value];
830 MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
831 *DAG.getContext(),
832 CallConv.getValue(), RegVTs[Value])
833 : RegVTs[Value];
834
835 Parts.resize(NumRegs);
836 for (unsigned i = 0; i != NumRegs; ++i) {
837 SDValue P;
838 if (!Flag) {
839 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
840 } else {
841 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
842 *Flag = P.getValue(2);
843 }
844
845 Chain = P.getValue(1);
846 Parts[i] = P;
847
848 // If the source register was virtual and if we know something about it,
849 // add an assert node.
850 if (!Register::isVirtualRegister(Regs[Part + i]) ||
851 !RegisterVT.isInteger())
852 continue;
853
854 const FunctionLoweringInfo::LiveOutInfo *LOI =
855 FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
856 if (!LOI)
857 continue;
858
859 unsigned RegSize = RegisterVT.getScalarSizeInBits();
860 unsigned NumSignBits = LOI->NumSignBits;
861 unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
862
863 if (NumZeroBits == RegSize) {
864 // The current value is a zero.
865 // Explicitly express that as it would be easier for
866 // optimizations to kick in.
867 Parts[i] = DAG.getConstant(0, dl, RegisterVT);
868 continue;
869 }
870
871 // FIXME: We capture more information than the dag can represent. For
872 // now, just use the tightest assertzext/assertsext possible.
873 bool isSExt;
874 EVT FromVT(MVT::Other);
875 if (NumZeroBits) {
876 FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
877 isSExt = false;
878 } else if (NumSignBits > 1) {
879 FromVT =
880 EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
881 isSExt = true;
882 } else {
883 continue;
884 }
885 // Add an assertion node.
886 assert(FromVT != MVT::Other);
887 Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
888 RegisterVT, P, DAG.getValueType(FromVT));
889 }
890
891 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
892 RegisterVT, ValueVT, V, CallConv);
893 Part += NumRegs;
894 Parts.clear();
895 }
896
897 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
898}
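The assert-node selection above narrows to the tightest type the known bits allow: leading zeros give an AssertZext from RegSize - NumZeroBits bits, and NumSignBits > 1 gives an AssertSext from RegSize - NumSignBits + 1 bits. A small standalone check of that arithmetic (the bit counts are assumptions for illustration):

    #include <cstdio>

    int main() {
      unsigned RegSize = 32;
      // Suppose live-out analysis proved the top 24 bits are zero.
      unsigned NumZeroBits = 24, NumSignBits = 1;
      if (NumZeroBits)
        std::printf("AssertZext from i%u\n", RegSize - NumZeroBits); // i8
      else if (NumSignBits > 1)
        std::printf("AssertSext from i%u\n", RegSize - NumSignBits + 1);
      return 0;
    }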
899
900void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
901 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
902 const Value *V,
903 ISD::NodeType PreferredExtendType) const {
904 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
905 ISD::NodeType ExtendKind = PreferredExtendType;
906
907 // Get the list of the value's legal parts.
908 unsigned NumRegs = Regs.size();
909 SmallVector<SDValue, 8> Parts(NumRegs);
910 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
911 unsigned NumParts = RegCount[Value];
912
913 MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
914 *DAG.getContext(),
915 CallConv.getValue(), RegVTs[Value])
916 : RegVTs[Value];
917
918 if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
919 ExtendKind = ISD::ZERO_EXTEND;
920
921 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
922 NumParts, RegisterVT, V, CallConv, ExtendKind);
923 Part += NumParts;
924 }
925
926 // Copy the parts into the registers.
927 SmallVector<SDValue, 8> Chains(NumRegs);
928 for (unsigned i = 0; i != NumRegs; ++i) {
929 SDValue Part;
930 if (!Flag) {
931 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
932 } else {
933 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
934 *Flag = Part.getValue(1);
935 }
936
937 Chains[i] = Part.getValue(0);
938 }
939
940 if (NumRegs == 1 || Flag)
941 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
942 // flagged to it. That is the CopyToReg nodes and the user are considered
943 // a single scheduling unit. If we create a TokenFactor and return it as
944 // chain, then the TokenFactor is both a predecessor (operand) of the
945 // user as well as a successor (the TF operands are flagged to the user).
946 // c1, f1 = CopyToReg
947 // c2, f2 = CopyToReg
948 // c3 = TokenFactor c1, c2
949 // ...
950 // = op c3, ..., f2
951 Chain = Chains[NumRegs-1];
952 else
953 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
954}
955
956void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
957 unsigned MatchingIdx, const SDLoc &dl,
958 SelectionDAG &DAG,
959 std::vector<SDValue> &Ops) const {
960 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
961
962 unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
963 if (HasMatching)
964 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
965 else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
966 // Put the register class of the virtual registers in the flag word. That
967 // way, later passes can recompute register class constraints for inline
968 // assembly as well as normal instructions.
969 // Don't do this for tied operands that can use the regclass information
970 // from the def.
971 const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
972 const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
973 Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
974 }
975
976 SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
977 Ops.push_back(Res);
978
979 if (Code == InlineAsm::Kind_Clobber) {
980 // Clobbers should always have a 1:1 mapping with registers, and may
981 // reference registers that have illegal (e.g. vector) types. Hence, we
982 // shouldn't try to apply any sort of splitting logic to them.
983 assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
984 "No 1:1 mapping from clobbers to regs?");
985 Register SP = TLI.getStackPointerRegisterToSaveRestore();
986 (void)SP;
987 for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
988 Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
989 assert(
990 (Regs[I] != SP ||
991 DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
992 "If we clobbered the stack pointer, MFI should know about it.");
993 }
994 return;
995 }
996
997 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
998 MVT RegisterVT = RegVTs[Value];
999 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
1000 RegisterVT);
1001 for (unsigned i = 0; i != NumRegs; ++i) {
1002 assert(Reg < Regs.size() && "Mismatch in # registers expected");
1003 unsigned TheReg = Regs[Reg++];
1004 Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
1005 }
1006 }
1007}
1008
1009SmallVector<std::pair<unsigned, TypeSize>, 4>
1010RegsForValue::getRegsAndSizes() const {
1011 SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
1012 unsigned I = 0;
1013 for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
1014 unsigned RegCount = std::get<0>(CountAndVT);
1015 MVT RegisterVT = std::get<1>(CountAndVT);
1016 TypeSize RegisterSize = RegisterVT.getSizeInBits();
1017 for (unsigned E = I + RegCount; I != E; ++I)
1018 OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
1019 }
1020 return OutVec;
1021}
1022
1023void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
1024 const TargetLibraryInfo *li) {
1025 AA = aa;
1026 GFI = gfi;
1027 LibInfo = li;
1028 DL = &DAG.getDataLayout();
1029 Context = DAG.getContext();
1030 LPadToCallSiteMap.clear();
1031 SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
1032}
1033
1034void SelectionDAGBuilder::clear() {
1035 NodeMap.clear();
1036 UnusedArgNodeMap.clear();
1037 PendingLoads.clear();
1038 PendingExports.clear();
1039 PendingConstrainedFP.clear();
1040 PendingConstrainedFPStrict.clear();
1041 CurInst = nullptr;
1042 HasTailCall = false;
1043 SDNodeOrder = LowestSDNodeOrder;
1044 StatepointLowering.clear();
1045}
1046
1047void SelectionDAGBuilder::clearDanglingDebugInfo() {
1048 DanglingDebugInfoMap.clear();
1049}
1050
1051// Update DAG root to include dependencies on Pending chains.
1052SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
1053 SDValue Root = DAG.getRoot();
1054
1055 if (Pending.empty())
1056 return Root;
1057
1058 // Add current root to PendingChains, unless we already indirectly
1059 // depend on it.
1060 if (Root.getOpcode() != ISD::EntryToken) {
1061 unsigned i = 0, e = Pending.size();
1062 for (; i != e; ++i) {
1063 assert(Pending[i].getNode()->getNumOperands() > 1);
1064 if (Pending[i].getNode()->getOperand(0) == Root)
1065 break; // Don't add the root if we already indirectly depend on it.
1066 }
1067
1068 if (i == e)
1069 Pending.push_back(Root);
1070 }
1071
1072 if (Pending.size() == 1)
1073 Root = Pending[0];
1074 else
1075 Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
1076
1077 DAG.setRoot(Root);
1078 Pending.clear();
1079 return Root;
1080}
1081
1082SDValue SelectionDAGBuilder::getMemoryRoot() {
1083 return updateRoot(PendingLoads);
1084}
1085
1086SDValue SelectionDAGBuilder::getRoot() {
1087 // Chain up all pending constrained intrinsics together with all
1088 // pending loads, by simply appending them to PendingLoads and
1089 // then calling getMemoryRoot().
1090 PendingLoads.reserve(PendingLoads.size() +
1091 PendingConstrainedFP.size() +
1092 PendingConstrainedFPStrict.size());
1093 PendingLoads.append(PendingConstrainedFP.begin(),
1094 PendingConstrainedFP.end());
1095 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1096 PendingConstrainedFPStrict.end());
1097 PendingConstrainedFP.clear();
1098 PendingConstrainedFPStrict.clear();
1099 return getMemoryRoot();
1100}
1101
1102SDValue SelectionDAGBuilder::getControlRoot() {
1103 // We need to emit pending fpexcept.strict constrained intrinsics,
1104 // so append them to the PendingExports list.
1105 PendingExports.append(PendingConstrainedFPStrict.begin(),
1106 PendingConstrainedFPStrict.end());
1107 PendingConstrainedFPStrict.clear();
1108 return updateRoot(PendingExports);
1109}
1110
1111void SelectionDAGBuilder::visit(const Instruction &I) {
1112 // Set up outgoing PHI node register values before emitting the terminator.
1113 if (I.isTerminator()) {
1114 HandlePHINodesInSuccessorBlocks(I.getParent());
1115 }
1116
1117 // Increase the SDNodeOrder if dealing with a non-debug instruction.
1118 if (!isa<DbgInfoIntrinsic>(I))
1119 ++SDNodeOrder;
1120
1121 CurInst = &I;
1122
1123 visit(I.getOpcode(), I);
1124
1125 if (!I.isTerminator() && !HasTailCall &&
1126 !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1127 CopyToExportRegsIfNeeded(&I);
1128
1129 CurInst = nullptr;
1130}
1131
1132void SelectionDAGBuilder::visitPHI(const PHINode &) {
1133 llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1134}
1135
1136void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1137 // Note: this doesn't use InstVisitor, because it has to work with
1138 // ConstantExpr's in addition to instructions.
1139 switch (Opcode) {
1140 default: llvm_unreachable("Unknown instruction type encountered!");
1141 // Build the switch statement using the Instruction.def file.
1142#define HANDLE_INST(NUM, OPCODE, CLASS) \
1143 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1144#include "llvm/IR/Instruction.def"
1145 }
1146}
1147
1148void SelectionDAGBuilder::addDanglingDebugInfo(const DbgValueInst *DI,
1149 DebugLoc DL, unsigned Order) {
1150 // We treat variadic dbg_values differently at this stage.
1151 if (DI->hasArgList()) {
1152 // For variadic dbg_values we will now insert an undef.
1153 // FIXME: We can potentially recover these!
1154 SmallVector<SDDbgOperand, 2> Locs;
1155 for (const Value *V : DI->getValues()) {
1156 auto Undef = UndefValue::get(V->getType());
1157 Locs.push_back(SDDbgOperand::fromConst(Undef));
1158 }
1159 SDDbgValue *SDV = DAG.getDbgValueList(
1160 DI->getVariable(), DI->getExpression(), Locs, {},
1161 /*IsIndirect=*/false, DL, Order, /*IsVariadic=*/true);
1162 DAG.AddDbgValue(SDV, /*isParameter=*/false);
1163 } else {
1164 // TODO: Dangling debug info will eventually either be resolved or produce
1165 // an Undef DBG_VALUE. However in the resolution case, a gap may appear
1166 // between the original dbg.value location and its resolved DBG_VALUE,
1167 // which we should ideally fill with an extra Undef DBG_VALUE.
1168 assert(DI->getNumVariableLocationOps() == 1 &&
1169 "DbgValueInst without an ArgList should have a single location "
1170 "operand.");
1171 DanglingDebugInfoMap[DI->getValue(0)].emplace_back(DI, DL, Order);
1172 }
1173}
1174
1175void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1176 const DIExpression *Expr) {
1177 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1178 const DbgValueInst *DI = DDI.getDI();
1179 DIVariable *DanglingVariable = DI->getVariable();
1180 DIExpression *DanglingExpr = DI->getExpression();
1181 if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1182 LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
1183 return true;
1184 }
1185 return false;
1186 };
1187
1188 for (auto &DDIMI : DanglingDebugInfoMap) {
1189 DanglingDebugInfoVector &DDIV = DDIMI.second;
1190
1191 // If debug info is to be dropped, run it through final checks to see
1192 // whether it can be salvaged.
1193 for (auto &DDI : DDIV)
1194 if (isMatchingDbgValue(DDI))
1195 salvageUnresolvedDbgValue(DDI);
1196
1197 erase_if(DDIV, isMatchingDbgValue);
1198 }
1199}
1200
1201// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1202// generate the debug data structures now that we've seen its definition.
1203void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1204 SDValue Val) {
1205 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1206 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1207 return;
1208
1209 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1210 for (auto &DDI : DDIV) {
1211 const DbgValueInst *DI = DDI.getDI();
1212 assert(!DI->hasArgList() && "Not implemented for variadic dbg_values");
1213 assert(DI && "Ill-formed DanglingDebugInfo");
1214 DebugLoc dl = DDI.getdl();
1215 unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1216 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1217 DILocalVariable *Variable = DI->getVariable();
1218 DIExpression *Expr = DI->getExpression();
1219 assert(Variable->isValidLocationForIntrinsic(dl) &&
1220 "Expected inlined-at fields to agree");
1221 SDDbgValue *SDV;
1222 if (Val.getNode()) {
1223 // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1224 // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1225 // we couldn't resolve it directly when examining the DbgValue intrinsic
1226 // in the first place we should not be more successful here). Unless we
1227 // have some test case that prove this to be correct we should avoid
1228 // calling EmitFuncArgumentDbgValue here.
1229 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
1230 LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
1231 << DbgSDNodeOrder << "] for:\n " << *DI << "\n");
1232 LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
1233 // Increase the SDNodeOrder for the DbgValue here to make sure it is
1234 // inserted after the definition of Val when emitting the instructions
1235 // after ISel. An alternative could be to teach
1236 // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1237 LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1238 << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1239 << ValSDNodeOrder << "\n");
1240 SDV = getDbgValue(Val, Variable, Expr, dl,
1241 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1242 DAG.AddDbgValue(SDV, false);
1243 } else
1244 LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
1245 << "in EmitFuncArgumentDbgValue\n");
1246 } else {
1247 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1248 auto Undef = UndefValue::get(DDI.getDI()->getValue(0)->getType());
1249 auto SDV =
1250 DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
1251 DAG.AddDbgValue(SDV, false);
1252 }
1253 }
1254 DDIV.clear();
1255}
1256
1257void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
1258 // TODO: For the variadic implementation, instead of only checking the fail
1259 // state of `handleDebugValue`, we need to know specifically which values were
1260 // invalid, so that we attempt to salvage only those values when processing
1261 // a DIArgList.
1262 assert(!DDI.getDI()->hasArgList() &&
1263 "Not implemented for variadic dbg_values");
1264 Value *V = DDI.getDI()->getValue(0);
1265 DILocalVariable *Var = DDI.getDI()->getVariable();
1266 DIExpression *Expr = DDI.getDI()->getExpression();
1267 DebugLoc DL = DDI.getdl();
1268 DebugLoc InstDL = DDI.getDI()->getDebugLoc();
1269 unsigned SDOrder = DDI.getSDNodeOrder();
1270 // Currently we consider only dbg.value intrinsics -- we tell the salvager
1271 // that DW_OP_stack_value is desired.
1272 assert(isa<DbgValueInst>(DDI.getDI()));
1273 bool StackValue = true;
1274
1275 // Can this Value be encoded without any further work?
1276 if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder, /*IsVariadic=*/false))
1277 return;
1278
1279 // Attempt to salvage back through as many instructions as possible. Bail if
1280 // a non-instruction is seen, such as a constant expression or global
1281 // variable. FIXME: Further work could recover those too.
1282 while (isa<Instruction>(V)) {
1283 Instruction &VAsInst = *cast<Instruction>(V);
1284 // Temporary "0", awaiting real implementation.
1285 SmallVector<uint64_t, 16> Ops;
1286 SmallVector<Value *, 4> AdditionalValues;
1287 V = salvageDebugInfoImpl(VAsInst, Expr->getNumLocationOperands(), Ops,
1288 AdditionalValues);
1289 // If we cannot salvage any further, and haven't yet found a suitable debug
1290 // expression, bail out.
1291 if (!V)
1292 break;
1293
1294 // TODO: If AdditionalValues isn't empty, then the salvage can only be
1295 // represented with a DBG_VALUE_LIST, so we give up. When we have support
1296 // here for variadic dbg_values, remove that condition.
1297 if (!AdditionalValues.empty())
1298 break;
1299
1300 // New value and expr now represent this debuginfo.
1301 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1302
1303 // Some kind of simplification occurred: check whether the operand of the
1304 // salvaged debug expression can be encoded in this DAG.
1305 if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder,
1306 /*IsVariadic=*/false)) {
1307 LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n "
1308 << DDI.getDI() << "\nBy stripping back to:\n " << V);
1309 return;
1310 }
1311 }
1312
1313 // This was the final opportunity to salvage this debug information, and it
1314 // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1315 // any earlier variable location.
1316 auto Undef = UndefValue::get(DDI.getDI()->getValue(0)->getType());
1317 auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1318 DAG.AddDbgValue(SDV, false);
1319
1320 LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n " << DDI.getDI()
1321 << "\n");
1322 LLVM_DEBUG(dbgs() << " Last seen at:\n " << *DDI.getDI()->getOperand(0)
1323 << "\n");
1324}
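A minimal sketch of a single salvage step, assuming a hypothetical dying instruction "%add = add i64 %x, 42" (the DIExpression API and DWARF operators are real; the values and the exact Ops encoding are illustrative):

    // Fold the dropped add into the expression, then re-point the value at %x.
    SmallVector<uint64_t, 4> Ops = {dwarf::DW_OP_constu, 42, dwarf::DW_OP_plus};
    DIExpression *NewExpr = DIExpression::appendOpsToArg(Expr, Ops, /*ArgNo=*/0,
                                                         /*StackValue=*/true);
    // The variable is now described as "%x + 42, DW_OP_stack_value", so the
    // location survives even though %add itself has been deleted.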
1325
1326bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
1327 DILocalVariable *Var,
1328 DIExpression *Expr, DebugLoc dl,
1329 DebugLoc InstDL, unsigned Order,
1330 bool IsVariadic) {
1331 if (Values.empty())
1332 return true;
1333 SmallVector<SDDbgOperand> LocationOps;
1334 SmallVector<SDNode *> Dependencies;
1335 for (const Value *V : Values) {
1336 // Constant value.
1337 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1338 isa<ConstantPointerNull>(V)) {
1339 LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1340 continue;
1341 }
1342
1343 // If the Value is a frame index, we can create a FrameIndex debug value
1344 // without relying on the DAG at all.
1345 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1346 auto SI = FuncInfo.StaticAllocaMap.find(AI);
1347 if (SI != FuncInfo.StaticAllocaMap.end()) {
1348 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1349 continue;
1350 }
1351 }
1352
1353 // Do not use getValue() in here; we don't want to generate code at
1354 // this point if it hasn't been done yet.
1355 SDValue N = NodeMap[V];
1356 if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1357 N = UnusedArgNodeMap[V];
1358 if (N.getNode()) {
1359 // Only emit func arg dbg value for non-variadic dbg.values for now.
1360 if (!IsVariadic && EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
1361 return true;
1362 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1363 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1364 // describe stack slot locations.
1365 //
1366 // Consider "int x = 0; int *px = &x;". There are two kinds of
1367 // interesting debug values here after optimization:
1368 //
1369 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
1370 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1371 //
1372 // Both describe the direct values of their associated variables.
1373 Dependencies.push_back(N.getNode());
1374 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1375 continue;
1376 }
1377 LocationOps.emplace_back(
1378 SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1379 continue;
1380 }
1381
1382 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1383 // Special rules apply for the first dbg.values of parameter variables in a
1384    // function. Identify them by the fact that they reference Argument
1385    // values, that their variable is a parameter, and that the parameter
1386    // belongs to the current function. We need to let them dangle until they
1387    // get an SDNode.
1387 bool IsParamOfFunc =
1388 isa<Argument>(V) && Var->isParameter() && !InstDL.getInlinedAt();
1389 if (IsParamOfFunc)
1390 return false;
1391
1392 // The value is not used in this block yet (or it would have an SDNode).
1393 // We still want the value to appear for the user if possible -- if it has
1394 // an associated VReg, we can refer to that instead.
1395 auto VMI = FuncInfo.ValueMap.find(V);
1396 if (VMI != FuncInfo.ValueMap.end()) {
1397 unsigned Reg = VMI->second;
1398 // If this is a PHI node, it may be split up into several MI PHI nodes
1399 // (in FunctionLoweringInfo::set).
1400 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1401 V->getType(), None);
1402 if (RFV.occupiesMultipleRegs()) {
1403 // FIXME: We could potentially support variadic dbg_values here.
1404 if (IsVariadic)
1405 return false;
1406 unsigned Offset = 0;
1407 unsigned BitsToDescribe = 0;
1408 if (auto VarSize = Var->getSizeInBits())
1409 BitsToDescribe = *VarSize;
1410 if (auto Fragment = Expr->getFragmentInfo())
1411 BitsToDescribe = Fragment->SizeInBits;
1412 for (auto RegAndSize : RFV.getRegsAndSizes()) {
1413 // Bail out if all bits are described already.
1414 if (Offset >= BitsToDescribe)
1415 break;
1416 // TODO: handle scalable vectors.
1417 unsigned RegisterSize = RegAndSize.second;
1418 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1419 ? BitsToDescribe - Offset
1420 : RegisterSize;
1421 auto FragmentExpr = DIExpression::createFragmentExpression(
1422 Expr, Offset, FragmentSize);
1423 if (!FragmentExpr)
1424 continue;
1425 SDDbgValue *SDV = DAG.getVRegDbgValue(
1426 Var, *FragmentExpr, RegAndSize.first, false, dl, SDNodeOrder);
1427 DAG.AddDbgValue(SDV, false);
1428 Offset += RegisterSize;
1429 }
1430 return true;
1431 }
1432 // We can use simple vreg locations for variadic dbg_values as well.
1433 LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1434 continue;
1435 }
1436    // We failed to create an SDDbgOperand for V.
1437 return false;
1438 }
1439
1440  // We have created an SDDbgOperand for each Value in Values.
1441 // Should use Order instead of SDNodeOrder?
1442  assert(!LocationOps.empty());
1443 SDDbgValue *SDV =
1444 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1445 /*IsIndirect=*/false, dl, SDNodeOrder, IsVariadic);
1446 DAG.AddDbgValue(SDV, /*isParameter=*/false);
1447 return true;
1448}
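A worked instance of the fragment arithmetic in the multi-register branch above, with made-up sizes (the loop shape mirrors the code; the numbers are illustrative):

    // An 80-bit variable spread across three 32-bit registers.
    unsigned Offset = 0, BitsToDescribe = 80;
    for (unsigned RegisterSize : {32u, 32u, 32u}) {
      if (Offset >= BitsToDescribe)
        break;
      unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
                                  ? BitsToDescribe - Offset
                                  : RegisterSize;
      // Yields DW_OP_LLVM_fragment pieces (0,32), (32,32), (64,16); the last
      // register's top 16 bits hold no variable data and are not described.
      Offset += RegisterSize;
    }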
1449
1450void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1451 // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1452 for (auto &Pair : DanglingDebugInfoMap)
1453 for (auto &DDI : Pair.second)
1454 salvageUnresolvedDbgValue(DDI);
1455 clearDanglingDebugInfo();
1456}
1457
1458/// getCopyFromRegs - If there was a virtual register allocated for the value V,
1459/// emit a CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
1460SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1461 DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1462 SDValue Result;
1463
1464 if (It != FuncInfo.ValueMap.end()) {
1465 Register InReg = It->second;
1466
1467 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1468 DAG.getDataLayout(), InReg, Ty,
1469 None); // This is not an ABI copy.
1470 SDValue Chain = DAG.getEntryNode();
1471 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1472 V);
1473 resolveDanglingDebugInfo(V, Result);
1474 }
1475
1476 return Result;
1477}
1478
1479/// getValue - Return an SDValue for the given Value.
1480SDValue SelectionDAGBuilder::getValue(const Value *V) {
1481 // If we already have an SDValue for this value, use it. It's important
1482 // to do this first, so that we don't create a CopyFromReg if we already
1483 // have a regular SDValue.
1484 SDValue &N = NodeMap[V];
1485 if (N.getNode()) return N;
1486
1487 // If there's a virtual register allocated and initialized for this
1488 // value, use it.
1489 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1490 return copyFromReg;
1491
1492 // Otherwise create a new SDValue and remember it.
1493 SDValue Val = getValueImpl(V);
1494 NodeMap[V] = Val;
1495 resolveDanglingDebugInfo(V, Val);
1496 return Val;
1497}
1498
1499/// getNonRegisterValue - Return an SDValue for the given Value, but
1500/// don't look in FuncInfo.ValueMap for a virtual register.
1501SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1502 // If we already have an SDValue for this value, use it.
1503 SDValue &N = NodeMap[V];
1504 if (N.getNode()) {
1505 if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1506 // Remove the debug location from the node as the node is about to be used
1507 // in a location which may differ from the original debug location. This
1508 // is relevant to Constant and ConstantFP nodes because they can appear
1509 // as constant expressions inside PHI nodes.
1510 N->setDebugLoc(DebugLoc());
1511 }
1512 return N;
1513 }
1514
1515 // Otherwise create a new SDValue and remember it.
1516 SDValue Val = getValueImpl(V);
1517 NodeMap[V] = Val;
1518 resolveDanglingDebugInfo(V, Val);
1519 return Val;
1520}
1521
1522/// getValueImpl - Helper function for getValue and getNonRegisterValue.
1523/// Create an SDValue for the given value.
1524SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1525 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1526
1527 if (const Constant *C = dyn_cast<Constant>(V)) {
1528 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1529
1530 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1531 return DAG.getConstant(*CI, getCurSDLoc(), VT);
1532
1533 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1534 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1535
1536 if (isa<ConstantPointerNull>(C)) {
1537 unsigned AS = V->getType()->getPointerAddressSpace();
1538 return DAG.getConstant(0, getCurSDLoc(),
1539 TLI.getPointerTy(DAG.getDataLayout(), AS));
1540 }
1541
1542 if (match(C, m_VScale(DAG.getDataLayout())))
1543 return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1544
1545 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1546 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1547
1548 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1549 return DAG.getUNDEF(VT);
1550
1551 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1552 visit(CE->getOpcode(), *CE);
1553 SDValue N1 = NodeMap[V];
1554      assert(N1.getNode() && "visit didn't populate the NodeMap!");
1555 return N1;
1556 }
1557
1558 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1559 SmallVector<SDValue, 4> Constants;
1560 for (const Use &U : C->operands()) {
1561 SDNode *Val = getValue(U).getNode();
1562 // If the operand is an empty aggregate, there are no values.
1563 if (!Val) continue;
1564 // Add each leaf value from the operand to the Constants list
1565 // to form a flattened list of all the values.
1566 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1567 Constants.push_back(SDValue(Val, i));
1568 }
1569
1570 return DAG.getMergeValues(Constants, getCurSDLoc());
1571 }
1572
1573 if (const ConstantDataSequential *CDS =
1574 dyn_cast<ConstantDataSequential>(C)) {
1575 SmallVector<SDValue, 4> Ops;
1576 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1577 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1578 // Add each leaf value from the operand to the Constants list
1579 // to form a flattened list of all the values.
1580 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1581 Ops.push_back(SDValue(Val, i));
1582 }
1583
1584 if (isa<ArrayType>(CDS->getType()))
1585 return DAG.getMergeValues(Ops, getCurSDLoc());
1586 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1587 }
1588
1589 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1590      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1591             "Unknown struct or array constant!");
1592
1593 SmallVector<EVT, 4> ValueVTs;
1594 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1595 unsigned NumElts = ValueVTs.size();
1596 if (NumElts == 0)
1597 return SDValue(); // empty struct
1598 SmallVector<SDValue, 4> Constants(NumElts);
1599 for (unsigned i = 0; i != NumElts; ++i) {
1600 EVT EltVT = ValueVTs[i];
1601 if (isa<UndefValue>(C))
1602 Constants[i] = DAG.getUNDEF(EltVT);
1603 else if (EltVT.isFloatingPoint())
1604 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1605 else
1606 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1607 }
1608
1609 return DAG.getMergeValues(Constants, getCurSDLoc());
1610 }
1611
1612 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1613 return DAG.getBlockAddress(BA, VT);
1614
1615 if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1616 return getValue(Equiv->getGlobalValue());
1617
1618 VectorType *VecTy = cast<VectorType>(V->getType());
1619
1620 // Now that we know the number and type of the elements, get that number of
1621 // elements into the Ops array based on what kind of constant it is.
1622 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1623 SmallVector<SDValue, 16> Ops;
1624 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1625 for (unsigned i = 0; i != NumElements; ++i)
1626 Ops.push_back(getValue(CV->getOperand(i)));
1627
1628 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1629 } else if (isa<ConstantAggregateZero>(C)) {
1630 EVT EltVT =
1631 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1632
1633 SDValue Op;
1634 if (EltVT.isFloatingPoint())
1635 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1636 else
1637 Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1638
1639 if (isa<ScalableVectorType>(VecTy))
1640 return NodeMap[V] = DAG.getSplatVector(VT, getCurSDLoc(), Op);
1641 else {
1642 SmallVector<SDValue, 16> Ops;
1643 Ops.assign(cast<FixedVectorType>(VecTy)->getNumElements(), Op);
1644 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1645 }
1646 }
1647    llvm_unreachable("Unknown vector constant");
1648 }
1649
1650 // If this is a static alloca, generate it as the frameindex instead of
1651 // computation.
1652 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1653 DenseMap<const AllocaInst*, int>::iterator SI =
1654 FuncInfo.StaticAllocaMap.find(AI);
1655 if (SI != FuncInfo.StaticAllocaMap.end())
1656 return DAG.getFrameIndex(SI->second,
1657 TLI.getFrameIndexTy(DAG.getDataLayout()));
1658 }
1659
1660 // If this is an instruction which fast-isel has deferred, select it now.
1661 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1662 unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1663
1664 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1665 Inst->getType(), None);
1666 SDValue Chain = DAG.getEntryNode();
1667 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1668 }
1669
1670 if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V)) {
1671 return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1672 }
1673  llvm_unreachable("Can't get register for value!");
1674}
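For instance (an illustrative sketch, not code from this file), the constant struct "{ i32 1, float 2.0 }" takes the aggregate path above and is flattened into two leaf values glued together by a merge node:

    SmallVector<SDValue, 2> Constants;
    Constants.push_back(DAG.getConstant(1, getCurSDLoc(), MVT::i32));
    Constants.push_back(DAG.getConstantFP(2.0, getCurSDLoc(), MVT::f32));
    SDValue Merged = DAG.getMergeValues(Constants, getCurSDLoc());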
1675
1676void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1677 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1678 bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1679 bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1680 bool IsSEH = isAsynchronousEHPersonality(Pers);
1681 MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1682 if (!IsSEH)
1683 CatchPadMBB->setIsEHScopeEntry();
1684 // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1685 if (IsMSVCCXX || IsCoreCLR)
1686 CatchPadMBB->setIsEHFuncletEntry();
1687}
1688
1689void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1690 // Update machine-CFG edge.
1691 MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1692 FuncInfo.MBB->addSuccessor(TargetMBB);
1693 TargetMBB->setIsEHCatchretTarget(true);
1694 DAG.getMachineFunction().setHasEHCatchret(true);
1695
1696 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1697 bool IsSEH = isAsynchronousEHPersonality(Pers);
1698 if (IsSEH) {
1699 // If this is not a fall-through branch or optimizations are switched off,
1700 // emit the branch.
1701 if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1702 TM.getOptLevel() == CodeGenOpt::None)
1703 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1704 getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1705 return;
1706 }
1707
1708 // Figure out the funclet membership for the catchret's successor.
1709 // This will be used by the FuncletLayout pass to determine how to order the
1710 // BB's.
1711 // A 'catchret' returns to the outer scope's color.
1712 Value *ParentPad = I.getCatchSwitchParentPad();
1713 const BasicBlock *SuccessorColor;
1714 if (isa<ConstantTokenNone>(ParentPad))
1715 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1716 else
1717 SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1718  assert(SuccessorColor && "No parent funclet for catchret!");
1719 MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1720  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1721
1722 // Create the terminator node.
1723 SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1724 getControlRoot(), DAG.getBasicBlock(TargetMBB),
1725 DAG.getBasicBlock(SuccessorColorMBB));
1726 DAG.setRoot(Ret);
1727}
1728
1729void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1730 // Don't emit any special code for the cleanuppad instruction. It just marks
1731 // the start of an EH scope/funclet.
1732 FuncInfo.MBB->setIsEHScopeEntry();
1733 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1734 if (Pers != EHPersonality::Wasm_CXX) {
1735 FuncInfo.MBB->setIsEHFuncletEntry();
1736 FuncInfo.MBB->setIsCleanupFuncletEntry();
1737 }
1738}
1739
1740// In wasm EH, even though a catchpad may not catch an exception if a tag does
1741// not match, it is OK to add only the first unwind destination catchpad to the
1742// successors, because there will be at least one invoke instruction within the
1743// catch scope that points to the next unwind destination, if one exists, so
1744// CFGSort cannot mess up the BB sorting order.
1745// (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
1746// call within them, and catchpads only consisting of 'catch (...)' have a
1747// '__cxa_end_catch' call within them, both of which generate invokes in case
1748// the next unwind destination exists, i.e., the next unwind destination is not
1749// the caller.)
1750//
1751// Having at most one EH pad successor is also simpler and helps later
1752// transformations.
1753//
1754// For example,
1755// current:
1756// invoke void @foo to ... unwind label %catch.dispatch
1757// catch.dispatch:
1758// %0 = catchswitch within ... [label %catch.start] unwind label %next
1759// catch.start:
1760// ...
1761// ... in this BB or some other child BB dominated by this BB there will be an
1762// invoke that points to 'next' BB as an unwind destination
1763//
1764// next: ; We don't need to add this to 'current' BB's successor
1765// ...
1766static void findWasmUnwindDestinations(
1767 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1768 BranchProbability Prob,
1769 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1770 &UnwindDests) {
1771 while (EHPadBB) {
1772 const Instruction *Pad = EHPadBB->getFirstNonPHI();
1773 if (isa<CleanupPadInst>(Pad)) {
1774 // Stop on cleanup pads.
1775 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1776 UnwindDests.back().first->setIsEHScopeEntry();
1777 break;
1778 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1779 // Add the catchpad handlers to the possible destinations. We don't
1780 // continue to the unwind destination of the catchswitch for wasm.
1781 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1782 UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1783 UnwindDests.back().first->setIsEHScopeEntry();
1784 }
1785 break;
1786 } else {
1787 continue;
1788 }
1789 }
1790}
1791
1792/// When an invoke or a cleanupret unwinds to the next EH pad, there are
1793/// many places it could ultimately go. In the IR, we have a single unwind
1794/// destination, but in the machine CFG, we enumerate all the possible blocks.
1795/// This function skips over imaginary basic blocks that hold catchswitch
1796/// instructions, and finds all the "real" machine
1797/// basic block destinations. As those destinations may not be successors of
1798/// EHPadBB, here we also calculate the edge probability to those destinations.
1799/// The passed-in Prob is the edge probability to EHPadBB.
1800static void findUnwindDestinations(
1801 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1802 BranchProbability Prob,
1803 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1804 &UnwindDests) {
1805 EHPersonality Personality =
1806 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1807 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1808 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1809 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1810 bool IsSEH = isAsynchronousEHPersonality(Personality);
1811
1812 if (IsWasmCXX) {
1813 findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
1814    assert(UnwindDests.size() <= 1 &&
1815           "There should be at most one unwind destination for wasm");
1816 return;
1817 }
1818
1819 while (EHPadBB) {
1820 const Instruction *Pad = EHPadBB->getFirstNonPHI();
1821 BasicBlock *NewEHPadBB = nullptr;
1822 if (isa<LandingPadInst>(Pad)) {
1823 // Stop on landingpads. They are not funclets.
1824 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1825 break;
1826 } else if (isa<CleanupPadInst>(Pad)) {
1827 // Stop on cleanup pads. Cleanups are always funclet entries for all known
1828 // personalities.
1829 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1830 UnwindDests.back().first->setIsEHScopeEntry();
1831 UnwindDests.back().first->setIsEHFuncletEntry();
1832 break;
1833 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1834 // Add the catchpad handlers to the possible destinations.
1835 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1836 UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1837 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1838 if (IsMSVCCXX || IsCoreCLR)
1839 UnwindDests.back().first->setIsEHFuncletEntry();
1840 if (!IsSEH)
1841 UnwindDests.back().first->setIsEHScopeEntry();
1842 }
1843 NewEHPadBB = CatchSwitch->getUnwindDest();
1844 } else {
1845 continue;
1846 }
1847
1848 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1849 if (BPI && NewEHPadBB)
1850 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1851 EHPadBB = NewEHPadBB;
1852 }
1853}
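As a worked example of the probability update at the bottom of the loop: if the edge into EHPadBB carries probability 1/2 and BPI reports 1/4 for the catchswitch's own unwind edge, every pad reached through that edge is recorded with Prob = 1/2 × 1/4 = 1/8, keeping the machine-CFG edge weights consistent with the IR ones.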
1854
1855void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1856 // Update successor info.
1857 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1858 auto UnwindDest = I.getUnwindDest();
1859 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1860 BranchProbability UnwindDestProb =
1861 (BPI && UnwindDest)
1862 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1863 : BranchProbability::getZero();
1864 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1865 for (auto &UnwindDest : UnwindDests) {
1866 UnwindDest.first->setIsEHPad();
1867 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1868 }
1869 FuncInfo.MBB->normalizeSuccProbs();
1870
1871 // Create the terminator node.
1872 SDValue Ret =
1873 DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1874 DAG.setRoot(Ret);
1875}
1876
1877void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1878 report_fatal_error("visitCatchSwitch not yet implemented!");
1879}
1880
1881void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1882 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1883 auto &DL = DAG.getDataLayout();
1884 SDValue Chain = getControlRoot();
1885 SmallVector<ISD::OutputArg, 8> Outs;
1886 SmallVector<SDValue, 8> OutVals;
1887
1888 // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1889 // lower
1890 //
1891 // %val = call <ty> @llvm.experimental.deoptimize()
1892 // ret <ty> %val
1893 //
1894 // differently.
1895 if (I.getParent()->getTerminatingDeoptimizeCall()) {
1896 LowerDeoptimizingReturn();
1897 return;
1898 }
1899
1900 if (!FuncInfo.CanLowerReturn) {
1901 unsigned DemoteReg = FuncInfo.DemoteRegister;
1902 const Function *F = I.getParent()->getParent();
1903
1904 // Emit a store of the return value through the virtual register.
1905 // Leave Outs empty so that LowerReturn won't try to load return
1906 // registers the usual way.
1907 SmallVector<EVT, 1> PtrValueVTs;
1908 ComputeValueVTs(TLI, DL,
1909 F->getReturnType()->getPointerTo(
1910 DAG.getDataLayout().getAllocaAddrSpace()),
1911 PtrValueVTs);
1912
1913 SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1914 DemoteReg, PtrValueVTs[0]);
1915 SDValue RetOp = getValue(I.getOperand(0));
1916
1917 SmallVector<EVT, 4> ValueVTs, MemVTs;
1918 SmallVector<uint64_t, 4> Offsets;
1919 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
1920 &Offsets);
1921 unsigned NumValues = ValueVTs.size();
1922
1923 SmallVector<SDValue, 4> Chains(NumValues);
1924 Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
1925 for (unsigned i = 0; i != NumValues; ++i) {
1926 // An aggregate return value cannot wrap around the address space, so
1927 // offsets to its parts don't wrap either.
1928 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
1929 TypeSize::Fixed(Offsets[i]));
1930
1931 SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
1932 if (MemVTs[i] != ValueVTs[i])
1933 Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
1934 Chains[i] = DAG.getStore(
1935 Chain, getCurSDLoc(), Val,
1936 // FIXME: better loc info would be nice.
1937 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
1938 commonAlignment(BaseAlign, Offsets[i]));
1939 }
1940
1941 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1942 MVT::Other, Chains);
1943 } else if (I.getNumOperands() != 0) {
1944 SmallVector<EVT, 4> ValueVTs;
1945 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1946 unsigned NumValues = ValueVTs.size();
1947 if (NumValues) {
1948 SDValue RetOp = getValue(I.getOperand(0));
1949
1950 const Function *F = I.getParent()->getParent();
1951
1952 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
1953 I.getOperand(0)->getType(), F->getCallingConv(),
1954 /*IsVarArg*/ false, DL);
1955
1956 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1957 if (F->getAttributes().hasRetAttr(Attribute::SExt))
1958 ExtendKind = ISD::SIGN_EXTEND;
1959 else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
1960 ExtendKind = ISD::ZERO_EXTEND;
1961
1962 LLVMContext &Context = F->getContext();
1963 bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
1964
1965 for (unsigned j = 0; j != NumValues; ++j) {
1966 EVT VT = ValueVTs[j];
1967
1968 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1969 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1970
1971 CallingConv::ID CC = F->getCallingConv();
1972
1973 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1974 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1975 SmallVector<SDValue, 4> Parts(NumParts);
1976 getCopyToParts(DAG, getCurSDLoc(),
1977 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1978 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1979
1980 // 'inreg' on function refers to return value
1981 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1982 if (RetInReg)
1983 Flags.setInReg();
1984
1985 if (I.getOperand(0)->getType()->isPointerTy()) {
1986 Flags.setPointer();
1987 Flags.setPointerAddrSpace(
1988 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
1989 }
1990
1991 if (NeedsRegBlock) {
1992 Flags.setInConsecutiveRegs();
1993 if (j == NumValues - 1)
1994 Flags.setInConsecutiveRegsLast();
1995 }
1996
1997 // Propagate extension type if any
1998 if (ExtendKind == ISD::SIGN_EXTEND)
1999 Flags.setSExt();
2000 else if (ExtendKind == ISD::ZERO_EXTEND)
2001 Flags.setZExt();
2002
2003 for (unsigned i = 0; i < NumParts; ++i) {
2004 Outs.push_back(ISD::OutputArg(Flags,
2005 Parts[i].getValueType().getSimpleVT(),
2006 VT, /*isfixed=*/true, 0, 0));
2007 OutVals.push_back(Parts[i]);
2008 }
2009 }
2010 }
2011 }
2012
2013  // Push the swifterror virtual register as the last element of Outs. This
2014  // makes sure the swifterror virtual register will be returned in the
2015  // swifterror physical register.
2016 const Function *F = I.getParent()->getParent();
2017 if (TLI.supportSwiftError() &&
2018 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2019    assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2020 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2021 Flags.setSwiftError();
2022 Outs.push_back(ISD::OutputArg(
2023 Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2024 /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2025 // Create SDNode for the swifterror virtual register.
2026 OutVals.push_back(
2027 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2028 &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2029 EVT(TLI.getPointerTy(DL))));
2030 }
2031
2032 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2033 CallingConv::ID CallConv =
2034 DAG.getMachineFunction().getFunction().getCallingConv();
2035 Chain = DAG.getTargetLoweringInfo().LowerReturn(
2036 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2037
2038 // Verify that the target's LowerReturn behaved as expected.
2039  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2040         "LowerReturn didn't return a valid chain!");
2041
2042 // Update the DAG with the new chain value resulting from return lowering.
2043 DAG.setRoot(Chain);
2044}
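To make the demotion path concrete (the target parameters are hypothetical): a function returning { i64, i64, i64, i64 } on a target whose calling convention cannot return four scalars sets CanLowerReturn to false; the caller passes a hidden pointer, DemoteRegister holds it, and the loop above emits four stores at offsets 0, 8, 16 and 24, joined by a single TokenFactor.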
2045
2046/// CopyToExportRegsIfNeeded - If the given value has virtual registers
2047/// created for it, emit nodes to copy the value into the virtual
2048/// registers.
2049void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2050 // Skip empty types
2051 if (V->getType()->isEmptyTy())
2052 return;
2053
2054 DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2055 if (VMI != FuncInfo.ValueMap.end()) {
2056    assert(!V->use_empty() && "Unused value assigned virtual registers!");
2057 CopyValueToVirtualRegister(V, VMI->second);
2058 }
2059}
2060
2061/// ExportFromCurrentBlock - If this condition isn't known to be exported from
2062/// the current basic block, add it to ValueMap now so that we'll get a
2063/// CopyTo/FromReg.
2064void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2065 // No need to export constants.
2066 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2067
2068 // Already exported?
2069 if (FuncInfo.isExportedInst(V)) return;
2070
2071 unsigned Reg = FuncInfo.InitializeRegForValue(V);
2072 CopyValueToVirtualRegister(V, Reg);
2073}
2074
2075bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2076 const BasicBlock *FromBB) {
2077 // The operands of the setcc have to be in this block. We don't know
2078 // how to export them from some other block.
2079 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2080 // Can export from current BB.
2081 if (VI->getParent() == FromBB)
2082 return true;
2083
2084 // Is already exported, noop.
2085 return FuncInfo.isExportedInst(V);
2086 }
2087
2088 // If this is an argument, we can export it if the BB is the entry block or
2089 // if it is already exported.
2090 if (isa<Argument>(V)) {
2091 if (FromBB->isEntryBlock())
2092 return true;
2093
2094 // Otherwise, can only export this if it is already exported.
2095 return FuncInfo.isExportedInst(V);
2096 }
2097
2098 // Otherwise, constants can always be exported.
2099 return true;
2100}
2101
2102/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2103BranchProbability
2104SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2105 const MachineBasicBlock *Dst) const {
2106 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2107 const BasicBlock *SrcBB = Src->getBasicBlock();
2108 const BasicBlock *DstBB = Dst->getBasicBlock();
2109 if (!BPI) {
2110 // If BPI is not available, set the default probability as 1 / N, where N is
2111 // the number of successors.
2112 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2113 return BranchProbability(1, SuccSize);
2114 }
2115 return BPI->getEdgeProbability(SrcBB, DstBB);
2116}
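For example, with BPI unavailable, a conditional branch gets BranchProbability(1, 2) on each outgoing edge and a switch with five successors gets BranchProbability(1, 5) on each.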
2117
2118void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2119 MachineBasicBlock *Dst,
2120 BranchProbability Prob) {
2121 if (!FuncInfo.BPI)
2122 Src->addSuccessorWithoutProb(Dst);
2123 else {
2124 if (Prob.isUnknown())
2125 Prob = getEdgeProbability(Src, Dst);
2126 Src->addSuccessor(Dst, Prob);
2127 }
2128}
2129
2130static bool InBlock(const Value *V, const BasicBlock *BB) {
2131 if (const Instruction *I = dyn_cast<Instruction>(V))
2132 return I->getParent() == BB;
2133 return true;
2134}
2135
2136/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2137/// This function emits a branch and is used at the leaves of an OR or an
2138/// AND operator tree.
2139void
2140SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2141 MachineBasicBlock *TBB,
2142 MachineBasicBlock *FBB,
2143 MachineBasicBlock *CurBB,
2144 MachineBasicBlock *SwitchBB,
2145 BranchProbability TProb,
2146 BranchProbability FProb,
2147 bool InvertCond) {
2148 const BasicBlock *BB = CurBB->getBasicBlock();
2149
2150 // If the leaf of the tree is a comparison, merge the condition into
2151 // the caseblock.
2152 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2153 // The operands of the cmp have to be in this block. We don't know
2154 // how to export them from some other block. If this is the first block
2155 // of the sequence, no exporting is needed.
2156 if (CurBB == SwitchBB ||
2157 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2158 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2159 ISD::CondCode Condition;
2160 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2161 ICmpInst::Predicate Pred =
2162 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2163 Condition = getICmpCondCode(Pred);
2164 } else {
2165 const FCmpInst *FC = cast<FCmpInst>(Cond);
2166 FCmpInst::Predicate Pred =
2167 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2168 Condition = getFCmpCondCode(Pred);
2169 if (TM.Options.NoNaNsFPMath)
2170 Condition = getFCmpCodeWithoutNaN(Condition);
2171 }
2172
2173 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2174 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2175 SL->SwitchCases.push_back(CB);
2176 return;
2177 }
2178 }
2179
2180 // Create a CaseBlock record representing this branch.
2181 ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2182 CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2183 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2184 SL->SwitchCases.push_back(CB);
2185}
2186
2187void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2188 MachineBasicBlock *TBB,
2189 MachineBasicBlock *FBB,
2190 MachineBasicBlock *CurBB,
2191 MachineBasicBlock *SwitchBB,
2192 Instruction::BinaryOps Opc,
2193 BranchProbability TProb,
2194 BranchProbability FProb,
2195 bool InvertCond) {
2196  // Skip over nodes that are not part of the tree, and remember to invert the
2197  // op and operands at the next level.
2198 Value *NotCond;
2199 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2200 InBlock(NotCond, CurBB->getBasicBlock())) {
2201 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2202 !InvertCond);
2203 return;
2204 }
2205
2206 const Instruction *BOp = dyn_cast<Instruction>(Cond);
2207 const Value *BOpOp0, *BOpOp1;
2208 // Compute the effective opcode for Cond, taking into account whether it needs
2209 // to be inverted, e.g.
2210 // and (not (or A, B)), C
2211 // gets lowered as
2212 // and (and (not A, not B), C)
2213 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2214 if (BOp) {
2215 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2216 ? Instruction::And
2217 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2218 ? Instruction::Or
2219 : (Instruction::BinaryOps)0);
2220 if (InvertCond) {
2221 if (BOpc == Instruction::And)
2222 BOpc = Instruction::Or;
2223 else if (BOpc == Instruction::Or)
2224 BOpc = Instruction::And;
2225 }
2226 }
2227
2228 // If this node is not part of the or/and tree, emit it as a branch.
2229  // Note that all nodes in the tree should have the same opcode.
2230 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2231 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2232 !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2233 !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2234 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2235 TProb, FProb, InvertCond);
2236 return;
2237 }
2238
2239 // Create TmpBB after CurBB.
2240 MachineFunction::iterator BBI(CurBB);
2241 MachineFunction &MF = DAG.getMachineFunction();
2242 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2243 CurBB->getParent()->insert(++BBI, TmpBB);
2244
2245 if (Opc == Instruction::Or) {
2246 // Codegen X | Y as:
2247 // BB1:
2248 // jmp_if_X TBB
2249 // jmp TmpBB
2250 // TmpBB:
2251 // jmp_if_Y TBB
2252 // jmp FBB
2253 //
2254
2255 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2256 // The requirement is that
2257 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2258 // = TrueProb for original BB.
2259 // Assuming the original probabilities are A and B, one choice is to set
2260 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2261 // A/(1+B) and 2B/(1+B). This choice assumes that
2262 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2263 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2264 // TmpBB, but the math is more complicated.
2265
2266 auto NewTrueProb = TProb / 2;
2267 auto NewFalseProb = TProb / 2 + FProb;
2268 // Emit the LHS condition.
2269 FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2270 NewFalseProb, InvertCond);
2271
2272 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2273 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2274 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2275 // Emit the RHS condition into TmpBB.
2276 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2277 Probs[1], InvertCond);
2278 } else {
2279    assert(Opc == Instruction::And && "Unknown merge op!");
2280 // Codegen X & Y as:
2281 // BB1:
2282 // jmp_if_X TmpBB
2283 // jmp FBB
2284 // TmpBB:
2285 // jmp_if_Y TBB
2286 // jmp FBB
2287 //
2288 // This requires creation of TmpBB after CurBB.
2289
2290 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2291 // The requirement is that
2292 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2293 // = FalseProb for original BB.
2294 // Assuming the original probabilities are A and B, one choice is to set
2295 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2296 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2297 // TrueProb for BB1 * FalseProb for TmpBB.
2298
2299 auto NewTrueProb = TProb + FProb / 2;
2300 auto NewFalseProb = FProb / 2;
2301 // Emit the LHS condition.
2302 FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2303 NewFalseProb, InvertCond);
2304
2305 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2306 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2307 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2308 // Emit the RHS condition into TmpBB.
2309 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2310 Probs[1], InvertCond);
2311 }
2312}
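Plugging numbers into the Or case above: with TProb = 6/10 and FProb = 4/10, BB1 receives (3/10, 7/10) and TmpBB's pair {3/10, 4/10} normalizes to (3/7, 4/7). The invariant holds, since 3/10 + 7/10 × 3/7 = 6/10 = TProb.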
2313
2314/// If the set of cases should be emitted as a series of branches, return true.
2315/// If we should emit this as a bunch of and/or'd together conditions, return
2316/// false.
2317bool
2318SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2319 if (Cases.size() != 2) return true;
2320
2321 // If this is two comparisons of the same values or'd or and'd together, they
2322 // will get folded into a single comparison, so don't emit two blocks.
2323 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2324 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2325 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2326 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2327 return false;
2328 }
2329
2330 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2331 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2332 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2333 Cases[0].CC == Cases[1].CC &&
2334 isa<Constant>(Cases[0].CmpRHS) &&
2335 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2336 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2337 return false;
2338 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2339 return false;
2340 }
2341
2342 return true;
2343}
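Concretely, for "if (p == NULL && q == NULL)" both cases compare against the same null constant with SETEQ and case 0's true successor is case 1's block, so this returns false and later lowering can fold the pair into a single (p | q) == 0 test.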
2344
2345void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2346 MachineBasicBlock *BrMBB = FuncInfo.MBB;
2347
2348 // Update machine-CFG edges.
2349 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2350
2351 if (I.isUnconditional()) {
2352 // Update machine-CFG edges.
2353 BrMBB->addSuccessor(Succ0MBB);
2354
2355 // If this is not a fall-through branch or optimizations are switched off,
2356 // emit the branch.
2357 if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
2358 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2359 MVT::Other, getControlRoot(),
2360 DAG.getBasicBlock(Succ0MBB)));
2361
2362 return;
2363 }
2364
2365 // If this condition is one of the special cases we handle, do special stuff
2366 // now.
2367 const Value *CondVal = I.getCondition();
2368 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2369
2370 // If this is a series of conditions that are or'd or and'd together, emit
2371 // this as a sequence of branches instead of setcc's with and/or operations.
2372 // As long as jumps are not expensive (exceptions for multi-use logic ops,
2373 // unpredictable branches, and vector extracts because those jumps are likely
2374 // expensive for any target), this should improve performance.
2375 // For example, instead of something like:
2376 // cmp A, B
2377 // C = seteq
2378 // cmp D, E
2379 // F = setle
2380 // or C, F
2381 // jnz foo
2382 // Emit:
2383 // cmp A, B
2384 // je foo
2385 // cmp D, E
2386 // jle foo
2387 const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2388 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2389 BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
2390 Value *Vec;
2391 const Value *BOp0, *BOp1;
2392 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2393 if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2394 Opcode = Instruction::And;
2395 else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2396 Opcode = Instruction::Or;
2397
2398 if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2399 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
2400 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2401 getEdgeProbability(BrMBB, Succ0MBB),
2402 getEdgeProbability(BrMBB, Succ1MBB),
2403 /*InvertCond=*/false);
2404 // If the compares in later blocks need to use values not currently
2405 // exported from this block, export them now. This block should always
2406 // be the first entry.
2407      assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2408
2409 // Allow some cases to be rejected.
2410 if (ShouldEmitAsBranches(SL->SwitchCases)) {
2411 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2412 ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2413 ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2414 }
2415
2416 // Emit the branch for this block.
2417 visitSwitchCase(SL->SwitchCases[0], BrMBB);
2418 SL->SwitchCases.erase(SL->SwitchCases.begin());
2419 return;
2420 }
2421
2422 // Okay, we decided not to do this, remove any inserted MBB's and clear
2423 // SwitchCases.
2424 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2425 FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2426
2427 SL->SwitchCases.clear();
2428 }
2429 }
2430
2431 // Create a CaseBlock record representing this branch.
2432 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2433 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2434
2435 // Use visitSwitchCase to actually insert the fast branch sequence for this
2436 // cond branch.
2437 visitSwitchCase(CB, BrMBB);
2438}
2439
2440/// visitSwitchCase - Emits the necessary code to represent a single node in
2441/// the binary search tree resulting from lowering a switch instruction.
2442void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2443 MachineBasicBlock *SwitchBB) {
2444 SDValue Cond;
2445 SDValue CondLHS = getValue(CB.CmpLHS);
2446 SDLoc dl = CB.DL;
2447
2448 if (CB.CC == ISD::SETTRUE) {
2449 // Branch or fall through to TrueBB.
2450 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2451 SwitchBB->normalizeSuccProbs();
2452 if (CB.TrueBB != NextBlock(SwitchBB)) {
2453 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2454 DAG.getBasicBlock(CB.TrueBB)));
2455 }
2456 return;
2457 }
2458
2459 auto &TLI = DAG.getTargetLoweringInfo();
2460 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2461
2462 // Build the setcc now.
2463 if (!CB.CmpMHS) {
2464 // Fold "(X == true)" to X and "(X == false)" to !X to
2465 // handle common cases produced by branch lowering.
2466 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2467 CB.CC == ISD::SETEQ)
2468 Cond = CondLHS;
2469 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2470 CB.CC == ISD::SETEQ) {
2471 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2472 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2473 } else {
2474 SDValue CondRHS = getValue(CB.CmpRHS);
2475
2476 // If a pointer's DAG type is larger than its memory type then the DAG
2477 // values are zero-extended. This breaks signed comparisons so truncate
2478 // back to the underlying type before doing the compare.
2479 if (CondLHS.getValueType() != MemVT) {
2480 CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2481 CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2482 }
2483 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2484 }
2485 } else {
2486    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2487
2488 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2489 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2490
2491 SDValue CmpOp = getValue(CB.CmpMHS);
2492 EVT VT = CmpOp.getValueType();
2493
2494 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2495 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2496 ISD::SETLE);
2497 } else {
2498 SDValue SUB = DAG.getNode(ISD::SUB, dl,
2499 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2500 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2501 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2502 }
2503 }
2504
2505 // Update successor info
2506 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2507 // TrueBB and FalseBB are always different unless the incoming IR is
2508 // degenerate. This only happens when running llc on weird IR.
2509 if (CB.TrueBB != CB.FalseBB)
2510 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2511 SwitchBB->normalizeSuccProbs();
2512
2513 // If the lhs block is the next block, invert the condition so that we can
2514 // fall through to the lhs instead of the rhs block.
2515 if (CB.TrueBB == NextBlock(SwitchBB)) {
2516 std::swap(CB.TrueBB, CB.FalseBB);
2517 SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2518 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2519 }
2520
2521 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2522 MVT::Other, getControlRoot(), Cond,
2523 DAG.getBasicBlock(CB.TrueBB));
2524
2525 // Insert the false branch. Do this even if it's a fall through branch,
2526 // this makes it easier to do DAG optimizations which require inverting
2527 // the branch condition.
2528 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2529 DAG.getBasicBlock(CB.FalseBB));
2530
2531 DAG.setRoot(BrCond);
2532}
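The SETLE range case relies on the classic unsigned-subtraction trick: Low <= x && x <= High becomes (x - Low) u<= (High - Low). For Low = 3, High = 7: x = 5 gives 2 u<= 4, so the branch is taken; x = 2 wraps around to the unsigned maximum and fails the compare.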
2533
2534/// visitJumpTable - Emit the JumpTable node in the current MBB.
2535void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2536 // Emit the code for the jump table
2537  assert(JT.Reg != -1U && "Should lower JT Header first!");
2538 EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2539 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2540 JT.Reg, PTy);
2541 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2542 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2543 MVT::Other, Index.getValue(1),
2544 Table, Index);
2545 DAG.setRoot(BrJumpTable);
2546}
2547
2548/// visitJumpTableHeader - This function emits the necessary code to produce an
2549/// index into the JumpTable from the switch case value.
2550void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2551 JumpTableHeader &JTH,
2552 MachineBasicBlock *SwitchBB) {
2553 SDLoc dl = getCurSDLoc();
2554
2555 // Subtract the lowest switch case value from the value being switched on.
2556 SDValue SwitchOp = getValue(JTH.SValue);
2557 EVT VT = SwitchOp.getValueType();
2558 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2559 DAG.getConstant(JTH.First, dl, VT));
2560
2561 // The SDNode we just created, which holds the value being switched on minus
2562 // the smallest case value, needs to be copied to a virtual register so it
2563 // can be used as an index into the jump table in a subsequent basic block.
2564 // This value may be smaller or larger than the target's pointer type, and
2565  // therefore require extension or truncation.
2566 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2567 SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2568
2569 unsigned JumpTableReg =
2570 FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2571 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2572 JumpTableReg, SwitchOp);
2573 JT.Reg = JumpTableReg;
2574
2575 if (!JTH.OmitRangeCheck) {
2576 // Emit the range check for the jump table, and branch to the default block
2577 // for the switch statement if the value being switched on exceeds the
2578 // largest case in the switch.
2579 SDValue CMP = DAG.getSetCC(
2580 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2581 Sub.getValueType()),
2582 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2583
2584 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2585 MVT::Other, CopyTo, CMP,
2586 DAG.getBasicBlock(JT.Default));
2587
2588 // Avoid emitting unnecessary branches to the next block.
2589 if (JT.MBB != NextBlock(SwitchBB))
2590 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2591 DAG.getBasicBlock(JT.MBB));
2592
2593 DAG.setRoot(BrCond);
2594 } else {
2595 // Avoid emitting unnecessary branches to the next block.
2596 if (JT.MBB != NextBlock(SwitchBB))
2597 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2598 DAG.getBasicBlock(JT.MBB)));
2599 else
2600 DAG.setRoot(CopyTo);
2601 }
2602}
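For a switch over cases 10..14, JTH.First = 10 and JTH.Last = 14: switching on 12 produces table index 12 - 10 = 2, while 20 fails the (x - 10) u> 4 range check and branches to JT.Default.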
2603
2604/// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
2605/// variable if one exists.
2606static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2607 SDValue &Chain) {
2608 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2609 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2610 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2611 MachineFunction &MF = DAG.getMachineFunction();
2612 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2613 MachineSDNode *Node =
2614 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2615 if (Global) {
2616 MachinePointerInfo MPInfo(Global);
2617 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2618 MachineMemOperand::MODereferenceable;
2619 MachineMemOperand *MemRef = MF.getMachineMemOperand(
2620 MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2621 DAG.setNodeMemRefs(Node, {MemRef});
2622 }
2623 if (PtrTy != PtrMemTy)
2624 return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2625 return SDValue(Node, 0);
2626}
2627
2628/// Codegen a new tail for a stack protector check ParentMBB which has had its
2629/// tail spliced into a stack protector check success bb.
2630///
2631/// For a high level explanation of how this fits into the stack protector
2632/// generation see the comment on the declaration of class
2633/// StackProtectorDescriptor.
2634void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2635 MachineBasicBlock *ParentBB) {
2636
2637 // First create the loads to the guard/stack slot for the comparison.
2638 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2639 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2640 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2641
2642 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2643 int FI = MFI.getStackProtectorIndex();
2644
2645 SDValue Guard;
2646 SDLoc dl = getCurSDLoc();
2647 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2648 const Module &M = *ParentBB->getParent()->getFunction().getParent();
2649 Align Align = DL->getPrefTypeAlign(Type::getInt8PtrTy(M.getContext()));
2650
2651 // Generate code to load the content of the guard slot.
2652 SDValue GuardVal = DAG.getLoad(
2653 PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2654 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2655 MachineMemOperand::MOVolatile);
2656
2657 if (TLI.useStackGuardXorFP())
2658 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2659
2660 // Retrieve guard check function, nullptr if instrumentation is inlined.
2661 if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2662 // The target provides a guard check function to validate the guard value.
2663 // Generate a call to that function with the content of the guard slot as
2664 // argument.
2665 FunctionType *FnTy = GuardCheckFn->getFunctionType();
2666    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2667
2668 TargetLowering::ArgListTy Args;
2669 TargetLowering::ArgListEntry Entry;
2670 Entry.Node = GuardVal;
2671 Entry.Ty = FnTy->getParamType(0);
2672 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
2673 Entry.IsInReg = true;
2674 Args.push_back(Entry);
2675
2676 TargetLowering::CallLoweringInfo CLI(DAG);
2677 CLI.setDebugLoc(getCurSDLoc())
2678 .setChain(DAG.getEntryNode())
2679 .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2680 getValue(GuardCheckFn), std::move(Args));
2681
2682 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2683 DAG.setRoot(Result.second);
2684 return;
2685 }
2686
2687 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2688 // Otherwise, emit a volatile load to retrieve the stack guard value.
2689 SDValue Chain = DAG.getEntryNode();
2690 if (TLI.useLoadStackGuardNode()) {
2691 Guard = getLoadStackGuard(DAG, dl, Chain);
2692 } else {
2693 const Value *IRGuard = TLI.getSDagStackGuard(M);
2694 SDValue GuardPtr = getValue(IRGuard);
2695
2696 Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2697 MachinePointerInfo(IRGuard, 0), Align,
2698 MachineMemOperand::MOVolatile);
2699 }
2700
2701 // Perform the comparison via a getsetcc.
2702 SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2703 *DAG.getContext(),
2704 Guard.getValueType()),
2705 Guard, GuardVal, ISD::SETNE);
2706
2707  // If the guard and stack slot are not equal, branch to the failure MBB.
2708 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2709 MVT::Other, GuardVal.getOperand(0),
2710 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2711 // Otherwise branch to success MBB.
2712 SDValue Br = DAG.getNode(ISD::BR, dl,
2713 MVT::Other, BrCond,
2714 DAG.getBasicBlock(SPD.getSuccessMBB()));
2715
2716 DAG.setRoot(Br);
2717}
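Schematically, the inlined form built above reduces to: load the guard slot, optionally XOR it with the frame pointer, load the reference guard value (via LOAD_STACK_GUARD or a volatile load), setcc SETNE, brcond to the failure MBB, br to the success MBB. The guard-check-function path instead replaces everything after the slot load with a single call.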
2718
2719/// Codegen the failure basic block for a stack protector check.
2720///
2721/// A failure stack protector machine basic block consists simply of a call to
2722/// __stack_chk_fail().
2723///
2724/// For a high level explanation of how this fits into the stack protector
2725/// generation see the comment on the declaration of class
2726/// StackProtectorDescriptor.
2727void
2728SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2729 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2730 TargetLowering::MakeLibCallOptions CallOptions;
2731 CallOptions.setDiscardResult(true);
2732 SDValue Chain =
2733 TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2734 None, CallOptions, getCurSDLoc()).second;
2735 // On PS4, the "return address" must still be within the calling function,
2736 // even if it's at the very end, so emit an explicit TRAP here.
2737 // Passing 'true' for doesNotReturn above won't generate the trap for us.
2738 if (TM.getTargetTriple().isPS4CPU())
2739 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2740 // WebAssembly needs an unreachable instruction after a non-returning call,
2741 // because the function return type can be different from __stack_chk_fail's
2742 // return type (void).
2743 if (TM.getTargetTriple().isWasm())
2744 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2745
2746 DAG.setRoot(Chain);
2747}
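// Illustrative sketch, not from the original source: the failure block thus
// reduces to little more than
//
//   call void @__stack_chk_fail()
//   trap   ; emitted only for PS4 and WebAssembly, per the checks above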
2748
2749 /// visitBitTestHeader - This function emits the necessary code to produce a
2750 /// value suitable for "bit tests".
2751void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2752 MachineBasicBlock *SwitchBB) {
2753 SDLoc dl = getCurSDLoc();
2754
2755 // Subtract the minimum value.
2756 SDValue SwitchOp = getValue(B.SValue);
2757 EVT VT = SwitchOp.getValueType();
2758 SDValue RangeSub =
2759 DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
2760
2761 // Determine the type of the test operands.
2762 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2763 bool UsePtrType = false;
2764 if (!TLI.isTypeLegal(VT)) {
2765 UsePtrType = true;
2766 } else {
2767 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2768 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2769 // Switch table case ranges are encoded into a series of masks.
2770 // Just use the pointer type; it's guaranteed to fit.
2771 UsePtrType = true;
2772 break;
2773 }
2774 }
2775 SDValue Sub = RangeSub;
2776 if (UsePtrType) {
2777 VT = TLI.getPointerTy(DAG.getDataLayout());
2778 Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2779 }
2780
2781 B.RegVT = VT.getSimpleVT();
2782 B.Reg = FuncInfo.CreateReg(B.RegVT);
2783 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2784
2785 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2786
2787 if (!B.OmitRangeCheck)
2788 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2789 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2790 SwitchBB->normalizeSuccProbs();
2791
2792 SDValue Root = CopyTo;
2793 if (!B.OmitRangeCheck) {
2794 // Conditional branch to the default block.
2795 SDValue RangeCmp = DAG.getSetCC(dl,
2796 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2797 RangeSub.getValueType()),
2798 RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
2799 ISD::SETUGT);
2800
2801 Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
2802 DAG.getBasicBlock(B.Default));
2803 }
2804
2805 // Avoid emitting unnecessary branches to the next block.
2806 if (MBB != NextBlock(SwitchBB))
2807 Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
2808
2809 DAG.setRoot(Root);
2810}
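// Illustrative example, not part of the original source: for a switch over
// i32 %x whose cases are {1, 4, 7}, the header built above computes
//
//   %Sub = sub i32 %x, 1              ; subtract B.First
//   brcond (setugt %Sub, 6), %Default ; range check, unless omitted
//
// and hands %Sub to the bit-test blocks in B.Reg.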
2811
2812 /// visitBitTestCase - This function produces one "bit test".
2813void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2814 MachineBasicBlock* NextMBB,
2815 BranchProbability BranchProbToNext,
2816 unsigned Reg,
2817 BitTestCase &B,
2818 MachineBasicBlock *SwitchBB) {
2819 SDLoc dl = getCurSDLoc();
2820 MVT VT = BB.RegVT;
2821 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2822 SDValue Cmp;
2823 unsigned PopCount = countPopulation(B.Mask);
2824 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2825 if (PopCount == 1) {
2826 // Testing for a single bit; just compare the shift count with what it
2827 // would need to be to shift a 1 bit in that position.
2828 Cmp = DAG.getSetCC(
2829 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2830 ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2831 ISD::SETEQ);
2832 } else if (PopCount == BB.Range) {
2833 // There is only one zero bit in the range, test for it directly.
2834 Cmp = DAG.getSetCC(
2835 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2836 ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2837 ISD::SETNE);
2838 } else {
2839 // Make desired shift
2840 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2841 DAG.getConstant(1, dl, VT), ShiftOp);
2842
2843 // Emit bit tests and jumps
2844 SDValue AndOp = DAG.getNode(ISD::AND, dl,
2845 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2846 Cmp = DAG.getSetCC(
2847 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2848 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2849 }
2850
2851 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2852 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2853 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2854 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2855 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
2856 // one, as they are relative probabilities (and thus work more like weights),
2857 // so we normalize them here to make them sum to one.
2858 SwitchBB->normalizeSuccProbs();
2859
2860 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2861 MVT::Other, getControlRoot(),
2862 Cmp, DAG.getBasicBlock(B.TargetBB));
2863
2864 // Avoid emitting unnecessary branches to the next block.
2865 if (NextMBB != NextBlock(SwitchBB))
2866 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2867 DAG.getBasicBlock(NextMBB));
2868
2869 DAG.setRoot(BrAnd);
2870}
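// Illustrative example, not from the original source: continuing the {1, 4, 7}
// cluster, the rebased case mask is 0b1001001 (73), whose popcount is 3, so
// the general form applies:
//
//   %SwitchVal = shl i32 1, %ShiftOp
//   %AndOp     = and i32 %SwitchVal, 73
//   %Cmp       = setcc ne %AndOp, 0
//
// A single-bit mask instead compares %ShiftOp directly against the bit
// position, skipping the shift and mask entirely.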
2871
2872void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2873 MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2874
2875 // Retrieve successors. Look through artificial IR level blocks like
2876 // catchswitch for successors.
2877 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2878 const BasicBlock *EHPadBB = I.getSuccessor(1);
2879
2880 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2881 // have to do anything here to lower funclet bundles.
2882 assert(!I.hasOperandBundlesOtherThan(
2883 {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
2884 LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
2885 LLVMContext::OB_cfguardtarget,
2886 LLVMContext::OB_clang_arc_attachedcall}) &&
2887 "Cannot lower invokes with arbitrary operand bundles yet!");
2888
2889 const Value *Callee(I.getCalledOperand());
2890 const Function *Fn = dyn_cast<Function>(Callee);
2891 if (isa<InlineAsm>(Callee))
2892 visitInlineAsm(I, EHPadBB);
2893 else if (Fn && Fn->isIntrinsic()) {
2894 switch (Fn->getIntrinsicID()) {
2895 default:
2896 llvm_unreachable("Cannot invoke this intrinsic")__builtin_unreachable();
2897 case Intrinsic::donothing:
2898 // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2899 case Intrinsic::seh_try_begin:
2900 case Intrinsic::seh_scope_begin:
2901 case Intrinsic::seh_try_end:
2902 case Intrinsic::seh_scope_end:
2903 break;
2904 case Intrinsic::experimental_patchpoint_void:
2905 case Intrinsic::experimental_patchpoint_i64:
2906 visitPatchpoint(I, EHPadBB);
2907 break;
2908 case Intrinsic::experimental_gc_statepoint:
2909 LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
2910 break;
2911 case Intrinsic::wasm_rethrow: {
2912 // This is usually done in visitTargetIntrinsic, but this intrinsic is
2913 // special because it can be invoked, so we manually lower it to a DAG
2914 // node here.
2915 SmallVector<SDValue, 8> Ops;
2916 Ops.push_back(getRoot()); // inchain
2917 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2918 Ops.push_back(
2919 DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
2920 TLI.getPointerTy(DAG.getDataLayout())));
2921 SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
2922 DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
2923 break;
2924 }
2925 }
2926 } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2927 // Currently we do not lower any intrinsic calls with deopt operand bundles.
2928 // Eventually we will support lowering the @llvm.experimental.deoptimize
2929 // intrinsic, and right now there are no plans to support other intrinsics
2930 // with deopt state.
2931 LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2932 } else {
2933 LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
2934 }
2935
2936 // If the value of the invoke is used outside of its defining block, make it
2937 // available as a virtual register.
2938 // We already took care of the exported value for the statepoint instruction
2939 // during the call to LowerStatepoint.
2940 if (!isa<GCStatepointInst>(I)) {
2941 CopyToExportRegsIfNeeded(&I);
2942 }
2943
2944 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2945 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2946 BranchProbability EHPadBBProb =
2947 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2948 : BranchProbability::getZero();
2949 findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2950
2951 // Update successor info.
2952 addSuccessorWithProb(InvokeMBB, Return);
2953 for (auto &UnwindDest : UnwindDests) {
2954 UnwindDest.first->setIsEHPad();
2955 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2956 }
2957 InvokeMBB->normalizeSuccProbs();
2958
2959 // Drop into normal successor.
2960 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
2961 DAG.getBasicBlock(Return)));
2962}
2963
2964void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
2965 MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
2966
2967 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2968 // have to do anything here to lower funclet bundles.
2969 assert(!I.hasOperandBundlesOtherThan(
2970 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2971 "Cannot lower callbrs with arbitrary operand bundles yet!");
2972
2973 assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
2974 visitInlineAsm(I);
2975 CopyToExportRegsIfNeeded(&I);
2976
2977 // Retrieve successors.
2978 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
2979
2980 // Update successor info.
2981 addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
2982 for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
2983 MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
2984 addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
2985 Target->setIsInlineAsmBrIndirectTarget();
2986 }
2987 CallBrMBB->normalizeSuccProbs();
2988
2989 // Drop into default successor.
2990 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2991 MVT::Other, getControlRoot(),
2992 DAG.getBasicBlock(Return)));
2993}
2994
2995void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2996 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!")__builtin_unreachable();
2997}
2998
2999void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3000 assert(FuncInfo.MBB->isEHPad() &&
3001 "Call to landingpad not in landing pad!");
3002
3003 // If there aren't registers to copy the values into (e.g., during SjLj
3004 // exceptions), then don't bother to create these DAG nodes.
3005 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3006 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3007 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3008 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3009 return;
3010
3011 // If the landingpad's return type is a token type, we don't create DAG
3012 // nodes for its exception pointer and selector value. Extracting the
3013 // exception pointer or selector from token-type landingpads is not
3014 // currently supported.
3015 if (LP.getType()->isTokenTy())
3016 return;
3017
3018 SmallVector<EVT, 2> ValueVTs;
3019 SDLoc dl = getCurSDLoc();
3020 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3021 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3022
3023 // Get the two live-in registers as SDValues. The physregs have already been
3024 // copied into virtual registers.
3025 SDValue Ops[2];
3026 if (FuncInfo.ExceptionPointerVirtReg) {
3027 Ops[0] = DAG.getZExtOrTrunc(
3028 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3029 FuncInfo.ExceptionPointerVirtReg,
3030 TLI.getPointerTy(DAG.getDataLayout())),
3031 dl, ValueVTs[0]);
3032 } else {
3033 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3034 }
3035 Ops[1] = DAG.getZExtOrTrunc(
3036 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3037 FuncInfo.ExceptionSelectorVirtReg,
3038 TLI.getPointerTy(DAG.getDataLayout())),
3039 dl, ValueVTs[1]);
3040
3041 // Merge into one.
3042 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3043 DAG.getVTList(ValueVTs), Ops);
3044 setValue(&LP, Res);
3045}
3046
3047void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3048 MachineBasicBlock *Last) {
3049 // Update JTCases.
3050 for (unsigned i = 0, e = SL->JTCases.size(); i != e; ++i)
3051 if (SL->JTCases[i].first.HeaderBB == First)
3052 SL->JTCases[i].first.HeaderBB = Last;
3053
3054 // Update BitTestCases.
3055 for (unsigned i = 0, e = SL->BitTestCases.size(); i != e; ++i)
3056 if (SL->BitTestCases[i].Parent == First)
3057 SL->BitTestCases[i].Parent = Last;
3058}
3059
3060void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3061 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3062
3063 // Update machine-CFG edges with unique successors.
3064 SmallSet<BasicBlock*, 32> Done;
3065 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3066 BasicBlock *BB = I.getSuccessor(i);
3067 bool Inserted = Done.insert(BB).second;
3068 if (!Inserted)
3069 continue;
3070
3071 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
3072 addSuccessorWithProb(IndirectBrMBB, Succ);
3073 }
3074 IndirectBrMBB->normalizeSuccProbs();
3075
3076 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
3077 MVT::Other, getControlRoot(),
3078 getValue(I.getAddress())));
3079}
3080
3081void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3082 if (!DAG.getTarget().Options.TrapUnreachable)
3083 return;
3084
3085 // We may be able to ignore an unreachable that follows a noreturn call.
3086 if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
3087 const BasicBlock &BB = *I.getParent();
3088 if (&I != &BB.front()) {
3089 BasicBlock::const_iterator PredI =
3090 std::prev(BasicBlock::const_iterator(&I));
3091 if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
3092 if (Call->doesNotReturn())
3093 return;
3094 }
3095 }
3096 }
3097
3098 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3099}
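// Illustrative example, not part of the original source: with TrapUnreachable
// and NoTrapAfterNoreturn both set, the sequence
//
//   call void @abort() noreturn
//   unreachable
//
// emits no extra TRAP node, since the preceding call already cannot return.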
3100
3101void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3102 SDNodeFlags Flags;
3103
3104 SDValue Op = getValue(I.getOperand(0));
3105 SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3106 Op, Flags);
3107 setValue(&I, UnNodeValue);
3108}
3109
3110void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3111 SDNodeFlags Flags;
3112 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3113 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3114 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3115 }
3116 if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3117 Flags.setExact(ExactOp->isExact());
3118 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3119 Flags.copyFMF(*FPOp);
3120
3121 SDValue Op1 = getValue(I.getOperand(0));
3122 SDValue Op2 = getValue(I.getOperand(1));
3123 SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3124 Op1, Op2, Flags);
3125 setValue(&I, BinNodeValue);
3126}
3127
3128void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3129 SDValue Op1 = getValue(I.getOperand(0));
3130 SDValue Op2 = getValue(I.getOperand(1));
3131
3132 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3133 Op1.getValueType(), DAG.getDataLayout());
3134
3135 // Coerce the shift amount to the right type if we can.
3136 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3137 unsigned ShiftSize = ShiftTy.getSizeInBits();
3138 unsigned Op2Size = Op2.getValueSizeInBits();
3139 SDLoc DL = getCurSDLoc();
3140
3141 // If the operand is smaller than the shift count type, promote it.
3142 if (ShiftSize > Op2Size)
3143 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
3144
3145 // If the operand is larger than the shift count type but the shift
3146 // count type has enough bits to represent any shift value, truncate
3147 // it now. This is a common case and it exposes the truncate to
3148 // optimization early.
3149 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
3150 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
3151 // Otherwise we'll need to temporarily settle for some other convenient
3152 // type. Type legalization will make adjustments once the shiftee is split.
3153 else
3154 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
3155 }
3156
3157 bool nuw = false;
3158 bool nsw = false;
3159 bool exact = false;
3160
3161 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3162
3163 if (const OverflowingBinaryOperator *OFBinOp =
3164 dyn_cast<const OverflowingBinaryOperator>(&I)) {
3165 nuw = OFBinOp->hasNoUnsignedWrap();
3166 nsw = OFBinOp->hasNoSignedWrap();
3167 }
3168 if (const PossiblyExactOperator *ExactOp =
3169 dyn_cast<const PossiblyExactOperator>(&I))
3170 exact = ExactOp->isExact();
3171 }
3172 SDNodeFlags Flags;
3173 Flags.setExact(exact);
3174 Flags.setNoSignedWrap(nsw);
3175 Flags.setNoUnsignedWrap(nuw);
3176 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3177 Flags);
3178 setValue(&I, Res);
3179}
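// Illustrative example, not from the original source: assuming a target whose
// shift-amount type is i8, lowering
//
//   %r = shl i64 %v, %n        ; %n : i64
//
// truncates %n to i8 here, because 8 bits >= Log2_32_Ceil(64) can represent
// any in-range shift amount; doing the TRUNCATE now exposes it to the
// optimizer rather than deferring it to type legalization.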
3180
3181void SelectionDAGBuilder::visitSDiv(const User &I) {
3182 SDValue Op1 = getValue(I.getOperand(0));
3183 SDValue Op2 = getValue(I.getOperand(1));
3184
3185 SDNodeFlags Flags;
3186 Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3187 cast<PossiblyExactOperator>(&I)->isExact());
3188 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3189 Op2, Flags));
3190}
3191
3192void SelectionDAGBuilder::visitICmp(const User &I) {
3193 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3194 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3195 predicate = IC->getPredicate();
3196 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3197 predicate = ICmpInst::Predicate(IC->getPredicate());
3198 SDValue Op1 = getValue(I.getOperand(0));
3199 SDValue Op2 = getValue(I.getOperand(1));
3200 ISD::CondCode Opcode = getICmpCondCode(predicate);
3201
3202 auto &TLI = DAG.getTargetLoweringInfo();
3203 EVT MemVT =
3204 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3205
3206 // If a pointer's DAG type is larger than its memory type then the DAG values
3207 // are zero-extended. This breaks signed comparisons so truncate back to the
3208 // underlying type before doing the compare.
3209 if (Op1.getValueType() != MemVT) {
3210 Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3211 Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3212 }
3213
3214 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3215 I.getType());
3216 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3217}
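// Illustrative example, not part of the original source: on a target where a
// pointer's DAG type is wider than its memory type (and thus zero-extended),
// a signed compare such as
//
//   %c = icmp slt i8* %p, %q
//
// would otherwise see zero-extended operands; the getPtrExtOrTrunc calls
// above truncate back to the memory width so the predicate keeps its IR
// semantics.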
3218
3219void SelectionDAGBuilder::visitFCmp(const User &I) {
3220 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3221 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3222 predicate = FC->getPredicate();
3223 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3224 predicate = FCmpInst::Predicate(FC->getPredicate());
3225 SDValue Op1 = getValue(I.getOperand(0));
3226 SDValue Op2 = getValue(I.getOperand(1));
3227
3228 ISD::CondCode Condition = getFCmpCondCode(predicate);
3229 auto *FPMO = cast<FPMathOperator>(&I);
3230 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3231 Condition = getFCmpCodeWithoutNaN(Condition);
3232
3233 SDNodeFlags Flags;
3234 Flags.copyFMF(*FPMO);
3235 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3236
3237 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3238 I.getType());
3239 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3240}
3241
3242 // Check that every user of the select's condition is itself a select (e.g. a
3243 // single use, or two selects sharing the same condition).
3244static bool hasOnlySelectUsers(const Value *Cond) {
3245 return llvm::all_of(Cond->users(), [](const Value *V) {
3246 return isa<SelectInst>(V);
3247 });
3248}
3249
3250void SelectionDAGBuilder::visitSelect(const User &I) {
3251 SmallVector<EVT, 4> ValueVTs;
3252 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3253 ValueVTs);
3254 unsigned NumValues = ValueVTs.size();
3255 if (NumValues == 0) return;
3256
3257 SmallVector<SDValue, 4> Values(NumValues);
3258 SDValue Cond = getValue(I.getOperand(0));
3259 SDValue LHSVal = getValue(I.getOperand(1));
3260 SDValue RHSVal = getValue(I.getOperand(2));
3261 SmallVector<SDValue, 1> BaseOps(1, Cond);
3262 ISD::NodeType OpCode =
3263 Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3264
3265 bool IsUnaryAbs = false;
3266 bool Negate = false;
3267
3268 SDNodeFlags Flags;
3269 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3270 Flags.copyFMF(*FPOp);
3271
3272 // Min/max matching is only viable if all output VTs are the same.
3273 if (is_splat(ValueVTs)) {
3274 EVT VT = ValueVTs[0];
3275 LLVMContext &Ctx = *DAG.getContext();
3276 auto &TLI = DAG.getTargetLoweringInfo();
3277
3278 // We care about the legality of the operation after it has been type
3279 // legalized.
3280 while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3281 VT = TLI.getTypeToTransformTo(Ctx, VT);
3282
3283 // If the vselect is legal, assume we want to leave this as a vector setcc +
3284 // vselect. Otherwise, if this is going to be scalarized, we want to see if
3285 // min/max is legal on the scalar type.
3286 bool UseScalarMinMax = VT.isVector() &&
3287 !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3288
3289 Value *LHS, *RHS;
3290 auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3291 ISD::NodeType Opc = ISD::DELETED_NODE;
3292 switch (SPR.Flavor) {
3293 case SPF_UMAX: Opc = ISD::UMAX; break;
3294 case SPF_UMIN: Opc = ISD::UMIN; break;
3295 case SPF_SMAX: Opc = ISD::SMAX; break;
3296 case SPF_SMIN: Opc = ISD::SMIN; break;
3297 case SPF_FMINNUM:
3298 switch (SPR.NaNBehavior) {
3299 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3300 case SPNB_RETURNS_NAN: Opc = ISD::FMINIMUM; break;
3301 case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3302 case SPNB_RETURNS_ANY: {
3303 if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
3304 Opc = ISD::FMINNUM;
3305 else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
3306 Opc = ISD::FMINIMUM;
3307 else if (UseScalarMinMax)
3308 Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
3309 ISD::FMINNUM : ISD::FMINIMUM;
3310 break;
3311 }
3312 }
3313 break;
3314 case SPF_FMAXNUM:
3315 switch (SPR.NaNBehavior) {
3316 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3317 case SPNB_RETURNS_NAN: Opc = ISD::FMAXIMUM; break;
3318 case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3319 case SPNB_RETURNS_ANY:
3320
3321 if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
3322 Opc = ISD::FMAXNUM;
3323 else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
3324 Opc = ISD::FMAXIMUM;
3325 else if (UseScalarMinMax)
3326 Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
3327 ISD::FMAXNUM : ISD::FMAXIMUM;
3328 break;
3329 }
3330 break;
3331 case SPF_NABS:
3332 Negate = true;
3333 LLVM_FALLTHROUGH;
3334 case SPF_ABS:
3335 IsUnaryAbs = true;
3336 Opc = ISD::ABS;
3337 break;
3338 default: break;
3339 }
3340
3341 if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3342 (TLI.isOperationLegalOrCustom(Opc, VT) ||
3343 (UseScalarMinMax &&
3344 TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3345 // If the underlying comparison instruction is used by any other
3346 // instruction, the consumed instructions won't be destroyed, so it is
3347 // not profitable to convert to a min/max.
3348 hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3349 OpCode = Opc;
3350 LHSVal = getValue(LHS);
3351 RHSVal = getValue(RHS);
3352 BaseOps.clear();
3353 }
3354
3355 if (IsUnaryAbs) {
3356 OpCode = Opc;
3357 LHSVal = getValue(LHS);
3358 BaseOps.clear();
3359 }
3360 }
3361
3362 if (IsUnaryAbs) {
3363 for (unsigned i = 0; i != NumValues; ++i) {
3364 SDLoc dl = getCurSDLoc();
3365 EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3366 Values[i] =
3367 DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3368 if (Negate)
3369 Values[i] = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT),
3370 Values[i]);
3371 }
3372 } else {
3373 for (unsigned i = 0; i != NumValues; ++i) {
3374 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3375 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3376 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3377 Values[i] = DAG.getNode(
3378 OpCode, getCurSDLoc(),
3379 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3380 }
3381 }
3382
3383 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3384 DAG.getVTList(ValueVTs), Values));
3385}
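// Illustrative example, not from the original source: the min/max matching
// above turns the canonical IR pattern
//
//   %c = icmp slt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b
//
// into a single ISD::SMIN node, provided SMIN is legal or custom for the
// (type-legalized) type and %c is used only by selects.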
3386
3387void SelectionDAGBuilder::visitTrunc(const User &I) {
3388 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3389 SDValue N = getValue(I.getOperand(0));
3390 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3391 I.getType());
3392 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3393}
3394
3395void SelectionDAGBuilder::visitZExt(const User &I) {
3396 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3397 // ZExt also can't be a cast to bool for the same reason, so there is not much to do.
3398 SDValue N = getValue(I.getOperand(0));
3399 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3400 I.getType());
3401 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3402}
3403
3404void SelectionDAGBuilder::visitSExt(const User &I) {
3405 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3406 // SExt also can't be a cast to bool for the same reason, so there is not much to do.
3407 SDValue N = getValue(I.getOperand(0));
3408 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3409 I.getType());
3410 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3411}
3412
3413void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3414 // FPTrunc is never a no-op cast, no need to check
3415 SDValue N = getValue(I.getOperand(0));
3416 SDLoc dl = getCurSDLoc();
3417 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3418 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3419 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3420 DAG.getTargetConstant(
3421 0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3422}
3423
3424void SelectionDAGBuilder::visitFPExt(const User &I) {
3425 // FPExt is never a no-op cast, no need to check
3426 SDValue N = getValue(I.getOperand(0));
3427 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3428 I.getType());
3429 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3430}
3431
3432void SelectionDAGBuilder::visitFPToUI(const User &I) {
3433 // FPToUI is never a no-op cast, no need to check
3434 SDValue N = getValue(I.getOperand(0));
3435 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3436 I.getType());
3437 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3438}
3439
3440void SelectionDAGBuilder::visitFPToSI(const User &I) {
3441 // FPToSI is never a no-op cast, no need to check
3442 SDValue N = getValue(I.getOperand(0));
3443 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3444 I.getType());
3445 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3446}
3447
3448void SelectionDAGBuilder::visitUIToFP(const User &I) {
3449 // UIToFP is never a no-op cast, no need to check
3450 SDValue N = getValue(I.getOperand(0));
3451 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3452 I.getType());
3453 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3454}
3455
3456void SelectionDAGBuilder::visitSIToFP(const User &I) {
3457 // SIToFP is never a no-op cast, no need to check
3458 SDValue N = getValue(I.getOperand(0));
3459 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3460 I.getType());
3461 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3462}
3463
3464void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3465 // What to do depends on the size of the integer and the size of the pointer.
3466 // We can either truncate, zero extend, or no-op, accordingly.
3467 SDValue N = getValue(I.getOperand(0));
3468 auto &TLI = DAG.getTargetLoweringInfo();
3469 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3470 I.getType());
3471 EVT PtrMemVT =
3472 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3473 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3474 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3475 setValue(&I, N);
3476}
3477
3478void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3479 // What to do depends on the size of the integer and the size of the pointer.
3480 // We can either truncate, zero extend, or no-op, accordingly.
3481 SDValue N = getValue(I.getOperand(0));
3482 auto &TLI = DAG.getTargetLoweringInfo();
3483 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3484 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3485 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3486 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3487 setValue(&I, N);
3488}
3489
3490void SelectionDAGBuilder::visitBitCast(const User &I) {
3491 SDValue N = getValue(I.getOperand(0));
3492 SDLoc dl = getCurSDLoc();
3493 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3494 I.getType());
3495
3496 // BitCast assures us that source and destination are the same size, so this is
3497 // either a BITCAST or a no-op.
3498 if (DestVT != N.getValueType())
3499 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3500 DestVT, N)); // convert types.
3501 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3502 // might fold any kind of constant expression to an integer constant and that
3503 // is not what we are looking for. Only recognize a bitcast of a genuine
3504 // constant integer as an opaque constant.
3505 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3506 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3507 /*isOpaque*/true));
3508 else
3509 setValue(&I, N); // noop cast.
3510}
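// Illustrative example, not part of the original source:
// `bitcast float %f to i32` becomes an ISD::BITCAST node, while a bitcast
// whose operand is a genuine ConstantInt and whose DAG type is unchanged is
// re-emitted as an opaque constant so later folds won't look through it.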
3511
3512void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3513 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3514 const Value *SV = I.getOperand(0);
3515 SDValue N = getValue(SV);
3516 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3517
3518 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3519 unsigned DestAS = I.getType()->getPointerAddressSpace();
3520
3521 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3522 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3523
3524 setValue(&I, N);
3525}
3526
3527void SelectionDAGBuilder::visitInsertElement(const User &I) {
3528 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3529 SDValue InVec = getValue(I.getOperand(0));
3530 SDValue InVal = getValue(I.getOperand(1));
3531 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3532 TLI.getVectorIdxTy(DAG.getDataLayout()));
3533 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3534 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3535 InVec, InVal, InIdx));
3536}
3537
3538void SelectionDAGBuilder::visitExtractElement(const User &I) {
3539 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3540 SDValue InVec = getValue(I.getOperand(0));
3541 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3542 TLI.getVectorIdxTy(DAG.getDataLayout()));
3543 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3544 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3545 InVec, InIdx));
3546}
3547
3548void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3549 SDValue Src1 = getValue(I.getOperand(0));
3550 SDValue Src2 = getValue(I.getOperand(1));
3551 ArrayRef<int> Mask;
3552 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
3553 Mask = SVI->getShuffleMask();
3554 else
3555 Mask = cast<ConstantExpr>(I).getShuffleMask();
3556 SDLoc DL = getCurSDLoc();
3557 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3558 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3559 EVT SrcVT = Src1.getValueType();
3560
3561 if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
3562 VT.isScalableVector()) {
3563 // Canonical splat form of first element of first input vector.
3564 SDValue FirstElt =
3565 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
3566 DAG.getVectorIdxConstant(0, DL));
3567 setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3568 return;
3569 }
3570
3571 // For now, we only handle splats for scalable vectors.
3572 // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3573 // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3574 assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3575
3576 unsigned SrcNumElts = SrcVT.getVectorNumElements();
3577 unsigned MaskNumElts = Mask.size();
3578
3579 if (SrcNumElts == MaskNumElts) {
3580 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3581 return;
3582 }
3583
3584 // Normalize the shuffle vector since mask and vector length don't match.
3585 if (SrcNumElts < MaskNumElts) {
3586 // Mask is longer than the source vectors. We can concatenate vectors to
3587 // make the mask and vector lengths match.
3588
3589 if (MaskNumElts % SrcNumElts == 0) {
3590 // Mask length is a multiple of the source vector length.
3591 // Check if the shuffle is some kind of concatenation of the input
3592 // vectors.
3593 unsigned NumConcat = MaskNumElts / SrcNumElts;
3594 bool IsConcat = true;
3595 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3596 for (unsigned i = 0; i != MaskNumElts; ++i) {
3597 int Idx = Mask[i];
3598 if (Idx < 0)
3599 continue;
3600 // Ensure the indices in each SrcVT sized piece are sequential and that
3601 // the same source is used for the whole piece.
3602 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3603 (ConcatSrcs[i / SrcNumElts] >= 0 &&
3604 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3605 IsConcat = false;
3606 break;
3607 }
3608 // Remember which source this index came from.
3609 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3610 }
3611
3612 // The shuffle is concatenating multiple vectors together. Just emit
3613 // a CONCAT_VECTORS operation.
3614 if (IsConcat) {
3615 SmallVector<SDValue, 8> ConcatOps;
3616 for (auto Src : ConcatSrcs) {
3617 if (Src < 0)
3618 ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3619 else if (Src == 0)
3620 ConcatOps.push_back(Src1);
3621 else
3622 ConcatOps.push_back(Src2);
3623 }
3624 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3625 return;
3626 }
3627 }
3628
3629 unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3630 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3631 EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3632 PaddedMaskNumElts);
3633
3634 // Pad both vectors with undefs to make them the same length as the mask.
3635 SDValue UndefVal = DAG.getUNDEF(SrcVT);
3636
3637 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3638 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3639 MOps1[0] = Src1;
3640 MOps2[0] = Src2;
3641
3642 Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3643 Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3644
3645 // Readjust mask for new input vector length.
3646 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3647 for (unsigned i = 0; i != MaskNumElts; ++i) {
3648 int Idx = Mask[i];
3649 if (Idx >= (int)SrcNumElts)
3650 Idx -= SrcNumElts - PaddedMaskNumElts;
3651 MappedOps[i] = Idx;
3652 }
3653
3654 SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3655
3656 // If the concatenated vector was padded, extract a subvector with the
3657 // correct number of elements.
3658 if (MaskNumElts != PaddedMaskNumElts)
3659 Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3660 DAG.getVectorIdxConstant(0, DL));
3661
3662 setValue(&I, Result);
3663 return;
3664 }
3665
3666 if (SrcNumElts > MaskNumElts) {
3667 // Analyze the access pattern of the vector to see if we can extract
3668 // two subvectors and do the shuffle.
3669 int StartIdx[2] = { -1, -1 }; // StartIdx to extract from
3670 bool CanExtract = true;
3671 for (int Idx : Mask) {
3672 unsigned Input = 0;
3673 if (Idx < 0)
3674 continue;
3675
3676 if (Idx >= (int)SrcNumElts) {
3677 Input = 1;
3678 Idx -= SrcNumElts;
3679 }
3680
3681 // If all the indices come from the same MaskNumElts sized portion of
3682 // the sources we can use extract. Also make sure the extract wouldn't
3683 // extract past the end of the source.
3684 int NewStartIdx = alignDown(Idx, MaskNumElts);
3685 if (NewStartIdx + MaskNumElts > SrcNumElts ||
3686 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3687 CanExtract = false;
3688 // Make sure we always update StartIdx as we use it to track if all
3689 // elements are undef.
3690 StartIdx[Input] = NewStartIdx;
3691 }
3692
3693 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3694 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3695 return;
3696 }
3697 if (CanExtract) {
3698 // Extract appropriate subvector and generate a vector shuffle
3699 for (unsigned Input = 0; Input < 2; ++Input) {
3700 SDValue &Src = Input == 0 ? Src1 : Src2;
3701 if (StartIdx[Input] < 0)
3702 Src = DAG.getUNDEF(VT);
3703 else {
3704 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3705 DAG.getVectorIdxConstant(StartIdx[Input], DL));
3706 }
3707 }
3708
3709 // Calculate new mask.
3710 SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3711 for (int &Idx : MappedOps) {
3712 if (Idx >= (int)SrcNumElts)
3713 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3714 else if (Idx >= 0)
3715 Idx -= StartIdx[0];
3716 }
3717
3718 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3719 return;
3720 }
3721 }
3722
3723 // We can't use either concat vectors or extract subvectors, so fall back
3724 // to replacing the shuffle with extract and build vector.
3726 EVT EltVT = VT.getVectorElementType();
3727 SmallVector<SDValue,8> Ops;
3728 for (int Idx : Mask) {
3729 SDValue Res;
3730
3731 if (Idx < 0) {
3732 Res = DAG.getUNDEF(EltVT);
3733 } else {
3734 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3735 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3736
3737 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
3738 DAG.getVectorIdxConstant(Idx, DL));
3739 }
3740
3741 Ops.push_back(Res);
3742 }
3743
3744 setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3745}
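// Illustrative examples, not from the original source, of the normalization
// paths above:
//
//   shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//     --> CONCAT_VECTORS(%a, %b)       ; longer mask, pure concatenation
//
//   shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
//     --> EXTRACT_SUBVECTOR(%a, 4)     ; shorter mask, contiguous chunk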
3746
3747void SelectionDAGBuilder::visitInsertValue(const User &I) {
3748 ArrayRef<unsigned> Indices;
3749 if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
3750 Indices = IV->getIndices();
3751 else
3752 Indices = cast<ConstantExpr>(&I)->getIndices();
3753
3754 const Value *Op0 = I.getOperand(0);
3755 const Value *Op1 = I.getOperand(1);
3756 Type *AggTy = I.getType();
3757 Type *ValTy = Op1->getType();
3758 bool IntoUndef = isa<UndefValue>(Op0);
3759 bool FromUndef = isa<UndefValue>(Op1);
3760
3761 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3762
3763 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3764 SmallVector<EVT, 4> AggValueVTs;
3765 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3766 SmallVector<EVT, 4> ValValueVTs;
3767 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3768
3769 unsigned NumAggValues = AggValueVTs.size();
3770 unsigned NumValValues = ValValueVTs.size();
3771 SmallVector<SDValue, 4> Values(NumAggValues);
3772
3773 // Ignore an insertvalue that produces an empty object
3774 if (!NumAggValues) {
3775 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3776 return;
3777 }
3778
3779 SDValue Agg = getValue(Op0);
3780 unsigned i = 0;
3781 // Copy the beginning value(s) from the original aggregate.
3782 for (; i != LinearIndex; ++i)
3783 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3784 SDValue(Agg.getNode(), Agg.getResNo() + i);
3785 // Copy values from the inserted value(s).
3786 if (NumValValues) {
3787 SDValue Val = getValue(Op1);
3788 for (; i != LinearIndex + NumValValues; ++i)
3789 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3790 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3791 }
3792 // Copy remaining value(s) from the original aggregate.
3793 for (; i != NumAggValues; ++i)
3794 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3795 SDValue(Agg.getNode(), Agg.getResNo() + i);
3796
3797 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3798 DAG.getVTList(AggValueVTs), Values));
3799}
3800
3801void SelectionDAGBuilder::visitExtractValue(const User &I) {
3802 ArrayRef<unsigned> Indices;
3803 if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
3804 Indices = EV->getIndices();
3805 else
3806 Indices = cast<ConstantExpr>(&I)->getIndices();
3807
3808 const Value *Op0 = I.getOperand(0);
3809 Type *AggTy = Op0->getType();
3810 Type *ValTy = I.getType();
3811 bool OutOfUndef = isa<UndefValue>(Op0);
3812
3813 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3814
3815 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3816 SmallVector<EVT, 4> ValValueVTs;
3817 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3818
3819 unsigned NumValValues = ValValueVTs.size();
3820
3821 // Ignore an extractvalue that produces an empty object.
3822 if (!NumValValues) {
3823 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3824 return;
3825 }
3826
3827 SmallVector<SDValue, 4> Values(NumValValues);
3828
3829 SDValue Agg = getValue(Op0);
3830 // Copy out the selected value(s).
3831 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3832 Values[i - LinearIndex] =
3833 OutOfUndef ?
3834 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3835 SDValue(Agg.getNode(), Agg.getResNo() + i);
3836
3837 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3838 DAG.getVTList(ValValueVTs), Values));
3839}
3840
3841void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3842 Value *Op0 = I.getOperand(0);
3843 // Note that the pointer operand may be a vector of pointers. Take the scalar
3844 // element which holds a pointer.
3845 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3846 SDValue N = getValue(Op0);
3847 SDLoc dl = getCurSDLoc();
3848 auto &TLI = DAG.getTargetLoweringInfo();
3849
3850 // Normalize Vector GEP - all scalar operands should be converted to the
3851 // splat vector.
3852 bool IsVectorGEP = I.getType()->isVectorTy();
3853 ElementCount VectorElementCount =
3854 IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
3855 : ElementCount::getFixed(0);
3856
3857 if (IsVectorGEP && !N.getValueType().isVector()) {
3858 LLVMContext &Context = *DAG.getContext();
3859 EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
3860 if (VectorElementCount.isScalable())
3861 N = DAG.getSplatVector(VT, dl, N);
3862 else
3863 N = DAG.getSplatBuildVector(VT, dl, N);
3864 }
3865
3866 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3867 GTI != E; ++GTI) {
3868 const Value *Idx = GTI.getOperand();
3869 if (StructType *StTy = GTI.getStructTypeOrNull()) {
3870 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3871 if (Field) {
3872 // N = N + Offset
3873 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3874
3875 // In an inbounds GEP with an offset that is nonnegative even when
3876 // interpreted as signed, assume there is no unsigned overflow.
3877 SDNodeFlags Flags;
3878 if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3879 Flags.setNoUnsignedWrap(true);
3880
3881 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3882 DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3883 }
3884 } else {
3885 // IdxSize is the width of the arithmetic according to IR semantics.
3886 // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
3887 // (and fix up the result later).
3888 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3889 MVT IdxTy = MVT::getIntegerVT(IdxSize);
3890 TypeSize ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
3891 // We intentionally mask away the high bits here; ElementSize may not
3892 // fit in IdxTy.
3893 APInt ElementMul(IdxSize, ElementSize.getKnownMinSize());
3894 bool ElementScalable = ElementSize.isScalable();
3895
3896 // If this is a scalar constant or a splat vector of constants,
3897 // handle it quickly.
3898 const auto *C = dyn_cast<Constant>(Idx);
3899 if (C && isa<VectorType>(C->getType()))
3900 C = C->getSplatValue();
3901
3902 const auto *CI = dyn_cast_or_null<ConstantInt>(C);
3903 if (CI && CI->isZero())
3904 continue;
3905 if (CI && !ElementScalable) {
3906 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
3907 LLVMContext &Context = *DAG.getContext();
3908 SDValue OffsVal;
3909 if (IsVectorGEP)
3910 OffsVal = DAG.getConstant(
3911 Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
3912 else
3913 OffsVal = DAG.getConstant(Offs, dl, IdxTy);
3914
3915 // In an inbounds GEP with an offset that is nonnegative even when
3916 // interpreted as signed, assume there is no unsigned overflow.
3917 SDNodeFlags Flags;
3918 if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3919 Flags.setNoUnsignedWrap(true);
3920
3921 OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
3922
3923 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3924 continue;
3925 }
3926
3927 // N = N + Idx * ElementMul;
3928 SDValue IdxN = getValue(Idx);
3929
3930 if (!IdxN.getValueType().isVector() && IsVectorGEP) {
3931 EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
3932 VectorElementCount);
3933 if (VectorElementCount.isScalable())
3934 IdxN = DAG.getSplatVector(VT, dl, IdxN);
3935 else
3936 IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3937 }
3938
3939 // If the index is smaller or larger than intptr_t, truncate or extend
3940 // it.
3941 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3942
3943 if (ElementScalable) {
3944 EVT VScaleTy = N.getValueType().getScalarType();
3945 SDValue VScale = DAG.getNode(
3946 ISD::VSCALE, dl, VScaleTy,
3947 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
3948 if (IsVectorGEP)
3949 VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
3950 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
3951 } else {
3952 // If this is a multiply by a power of two, turn it into a shl
3953 // immediately. This is a very common case.
3954 if (ElementMul != 1) {
3955 if (ElementMul.isPowerOf2()) {
3956 unsigned Amt = ElementMul.logBase2();
3957 IdxN = DAG.getNode(ISD::SHL, dl,
3958 N.getValueType(), IdxN,
3959 DAG.getConstant(Amt, dl, IdxN.getValueType()));
3960 } else {
3961 SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
3962 IdxN.getValueType());
3963 IdxN = DAG.getNode(ISD::MUL, dl,
3964 N.getValueType(), IdxN, Scale);
3965 }
3966 }
3967 }
3968
3969 N = DAG.getNode(ISD::ADD, dl,
3970 N.getValueType(), N, IdxN);
3971 }
3972 }
3973
3974 MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
3975 MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
3976 if (IsVectorGEP) {
3977 PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
3978 PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
3979 }
3980
3981 if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
3982 N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
3983
3984 setValue(&I, N);
3985}
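// Illustrative example, not part of the original source: for
//
//   %p = getelementptr inbounds {i32, [4 x i64]}, {i32, [4 x i64]}* %b,
//                               i64 0, i32 1, i64 %i
//
// the loop above folds the field offset into a constant add and turns the
// power-of-two element stride into a shift, roughly
//
//   N = %b + 8 + (%i << 3)
//
// with the constant-offset add flagged nuw because the GEP is inbounds and
// the offset is non-negative.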
3986
3987void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3988 // If this is a fixed sized alloca in the entry block of the function,
3989 // allocate it statically on the stack.
3990 if (FuncInfo.StaticAllocaMap.count(&I))
3991 return; // getValue will auto-populate this.
3992
3993 SDLoc dl = getCurSDLoc();
3994 Type *Ty = I.getAllocatedType();
3995 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3996 auto &DL = DAG.getDataLayout();
3997 uint64_t TySize = DL.getTypeAllocSize(Ty);
3998 MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
3999
4000 SDValue AllocSize = getValue(I.getArraySize());
4001
4002 EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
4003 if (AllocSize.getValueType() != IntPtr)
4004 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4005
4006 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
4007 AllocSize,
4008 DAG.getConstant(TySize, dl, IntPtr));
4009
4010 // Handle alignment. If the requested alignment is less than or equal to
4011 // the stack alignment, ignore it. If the requested alignment is greater
4012 // than the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
4013 Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4014 if (*Alignment <= StackAlign)
4015 Alignment = None;
4016
4017 const uint64_t StackAlignMask = StackAlign.value() - 1U;
4018 // Round the size of the allocation up to the stack alignment size
4019 // by adding SA-1 to the size. This doesn't overflow because we're computing
4020 // an address inside an alloca.
4021 SDNodeFlags Flags;
4022 Flags.setNoUnsignedWrap(true);
4023 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4024 DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);
4025
4026 // Mask out the low bits for alignment purposes.
4027 AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4028 DAG.getConstant(~StackAlignMask, dl, IntPtr));
4029
4030 SDValue Ops[] = {
4031 getRoot(), AllocSize,
4032 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4033 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4034 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4035 setValue(&I, DSA);
4036 DAG.setRoot(DSA.getValue(1));
4037
4038 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4039}
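// Illustrative arithmetic, not from the original source: for
// `%m = alloca i64, i32 %n` with a 16-byte stack alignment, the size fed to
// DYNAMIC_STACKALLOC above is
//
//   AllocSize = ((zext(%n) * 8) + 15) & ~15
//
// i.e. the element count scaled by the type size, rounded up to the stack
// alignment.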
4040
4041void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4042 if (I.isAtomic())
4043 return visitAtomicLoad(I);
4044
4045 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4046 const Value *SV = I.getOperand(0);
4047 if (TLI.supportSwiftError()) {
4048 // Swifterror values can come from either a function parameter with
4049 // swifterror attribute or an alloca with swifterror attribute.
4050 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4051 if (Arg->hasSwiftErrorAttr())
4052 return visitLoadFromSwiftError(I);
4053 }
4054
4055 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4056 if (Alloca->isSwiftError())
4057 return visitLoadFromSwiftError(I);
4058 }
4059 }
4060
4061 SDValue Ptr = getValue(SV);
4062
4063 Type *Ty = I.getType();
4064 Align Alignment = I.getAlign();
4065
4066 AAMDNodes AAInfo;
4067 I.getAAMetadata(AAInfo);
4068 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4069
4070 SmallVector<EVT, 4> ValueVTs, MemVTs;
4071 SmallVector<uint64_t, 4> Offsets;
4072 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
4073 unsigned NumValues = ValueVTs.size();
4074 if (NumValues == 0)
4075 return;
4076
4077 bool isVolatile = I.isVolatile();
4078
4079 SDValue Root;
4080 bool ConstantMemory = false;
4081 if (isVolatile)
4082 // Serialize volatile loads with other side effects.
4083 Root = getRoot();
4084 else if (NumValues > MaxParallelChains)
4085 Root = getMemoryRoot();
4086 else if (AA &&
4087 AA->pointsToConstantMemory(MemoryLocation(
4088 SV,
4089 LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4090 AAInfo))) {
4091 // Do not serialize (non-volatile) loads of constant memory with anything.
4092 Root = DAG.getEntryNode();
4093 ConstantMemory = true;
4094 } else {
4095 // Do not serialize non-volatile loads against each other.
4096 Root = DAG.getRoot();
4097 }
4098
4099 SDLoc dl = getCurSDLoc();
4100
4101 if (isVolatile)
4102 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4103
4104 // An aggregate load cannot wrap around the address space, so offsets to its
4105 // parts don't wrap either.
4106 SDNodeFlags Flags;
4107 Flags.setNoUnsignedWrap(true);
4108
4109 SmallVector<SDValue, 4> Values(NumValues);
4110 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4111 EVT PtrVT = Ptr.getValueType();
4112
4113 MachineMemOperand::Flags MMOFlags
4114 = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4115
4116 unsigned ChainI = 0;
4117 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4118 // Serializing loads here may result in excessive register pressure, and
4119 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4120 // could recover a bit by hoisting nodes upward in the chain by recognizing
4121 // they are side-effect free or do not alias. The optimizer should really
4122 // avoid this case by converting large object/array copies to llvm.memcpy
4123 // (MaxParallelChains should always remain as a failsafe).
4124 if (ChainI == MaxParallelChains) {
4125 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4126 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4127 makeArrayRef(Chains.data(), ChainI));
4128 Root = Chain;
4129 ChainI = 0;
4130 }
4131 SDValue A = DAG.getNode(ISD::ADD, dl,
4132 PtrVT, Ptr,
4133 DAG.getConstant(Offsets[i], dl, PtrVT),
4134 Flags);
4135
4136 SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A,
4137 MachinePointerInfo(SV, Offsets[i]), Alignment,
4138 MMOFlags, AAInfo, Ranges);
4139 Chains[ChainI] = L.getValue(1);
4140
4141 if (MemVTs[i] != ValueVTs[i])
4142 L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]);
4143
4144 Values[i] = L;
4145 }
4146
4147 if (!ConstantMemory) {
4148 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4149 makeArrayRef(Chains.data(), ChainI));
4150 if (isVolatile)
4151 DAG.setRoot(Chain);
4152 else
4153 PendingLoads.push_back(Chain);
4154 }
4155
4156 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4157 DAG.getVTList(ValueVTs), Values));
4158}
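// Illustrative example, not part of the original source: an aggregate load
// such as
//
//   %v = load {i32, i64}, {i32, i64}* %p
//
// expands above into one scalar load per member, at offsets 0 and 8, whose
// chains join in a TokenFactor and whose values are recombined with
// ISD::MERGE_VALUES.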
4159
4160void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4161 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4162 "call visitStoreToSwiftError when backend supports swifterror");
4163
4164 SmallVector<EVT, 4> ValueVTs;
4165 SmallVector<uint64_t, 4> Offsets;
4166 const Value *SrcV = I.getOperand(0);
4167 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4168 SrcV->getType(), ValueVTs, &Offsets);
4169 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4170 "expect a single EVT for swifterror");
4171
4172 SDValue Src = getValue(SrcV);
4173 // Create a virtual register, then update the virtual register.
4174 Register VReg =
4175 SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4176 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4177 // Chain can be getRoot or getControlRoot.
4178 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4179 SDValue(Src.getNode(), Src.getResNo()));
4180 DAG.setRoot(CopyNode);
4181}
4182
4183void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4184 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4185 "call visitLoadFromSwiftError when backend supports swifterror");
4186
4187 assert(!I.isVolatile() &&
4188 !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4189 !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4190 "Support volatile, non temporal, invariant for load_from_swift_error");
4191
4192 const Value *SV = I.getOperand(0);
4193 Type *Ty = I.getType();
4194 AAMDNodes AAInfo;
4195 I.getAAMetadata(AAInfo);
4196 assert(
4197 (!AA ||
4198 !AA->pointsToConstantMemory(MemoryLocation(
4199 SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4200 AAInfo))) &&
4201 "load_from_swift_error should not be constant memory");
4202
4203 SmallVector<EVT, 4> ValueVTs;
4204 SmallVector<uint64_t, 4> Offsets;
4205 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4206 ValueVTs, &Offsets);
4207 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4208 "expect a single EVT for swifterror");
4209
4210 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4211 SDValue L = DAG.getCopyFromReg(
4212 getRoot(), getCurSDLoc(),
4213 SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4214
4215 setValue(&I, L);
4216}
4217
4218void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4219 if (I.isAtomic())
4220 return visitAtomicStore(I);
4221
4222 const Value *SrcV = I.getOperand(0);
4223 const Value *PtrV = I.getOperand(1);
4224
4225 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4226 if (TLI.supportSwiftError()) {
4227 // Swifterror values can come from either a function parameter with
4228 // swifterror attribute or an alloca with swifterror attribute.
4229 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4230 if (Arg->hasSwiftErrorAttr())
4231 return visitStoreToSwiftError(I);
4232 }
4233
4234 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4235 if (Alloca->isSwiftError())
4236 return visitStoreToSwiftError(I);
4237 }
4238 }
4239
4240 SmallVector<EVT, 4> ValueVTs, MemVTs;
4241 SmallVector<uint64_t, 4> Offsets;
4242 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4243 SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4244 unsigned NumValues = ValueVTs.size();
4245 if (NumValues == 0)
4246 return;
4247
4248 // Get the lowered operands. Note that we do this after
4249 // checking if NumValues is zero, because with zero results
4250 // the operands won't have values in the map.
4251 SDValue Src = getValue(SrcV);
4252 SDValue Ptr = getValue(PtrV);
4253
4254 SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4255 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4256 SDLoc dl = getCurSDLoc();
4257 Align Alignment = I.getAlign();
4258 AAMDNodes AAInfo;
4259 I.getAAMetadata(AAInfo);
4260
4261 auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4262
4263 // An aggregate store cannot wrap around the address space, so offsets to its
4264 // parts don't wrap either.
4265 SDNodeFlags Flags;
4266 Flags.setNoUnsignedWrap(true);
4267
4268 unsigned ChainI = 0;
4269 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4270 // See visitLoad comments.
4271 if (ChainI == MaxParallelChains) {
4272 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4273 makeArrayRef(Chains.data(), ChainI));
4274 Root = Chain;
4275 ChainI = 0;
4276 }
4277 SDValue Add =
4278 DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(Offsets[i]), dl, Flags);
4279 SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4280 if (MemVTs[i] != ValueVTs[i])
4281 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4282 SDValue St =
4283 DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]),
4284 Alignment, MMOFlags, AAInfo);
4285 Chains[ChainI] = St;
4286 }
4287
4288 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4289 makeArrayRef(Chains.data(), ChainI));
4290 DAG.setRoot(StoreNode);
4291}
4292
4293void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4294 bool IsCompressing) {
4295 SDLoc sdl = getCurSDLoc();
4296
4297 auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4298 MaybeAlign &Alignment) {
4299 // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4300 Src0 = I.getArgOperand(0);
4301 Ptr = I.getArgOperand(1);
4302 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
4303 Mask = I.getArgOperand(3);
4304 };
4305 auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4306 MaybeAlign &Alignment) {
4307 // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4308 Src0 = I.getArgOperand(0);
4309 Ptr = I.getArgOperand(1);
4310 Mask = I.getArgOperand(2);
4311 Alignment = None;
4312 };
4313
4314 Value *PtrOperand, *MaskOperand, *Src0Operand;
4315 MaybeAlign Alignment;
4316 if (IsCompressing)
4317 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4318 else
4319 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4320
4321 SDValue Ptr = getValue(PtrOperand);
4322 SDValue Src0 = getValue(Src0Operand);
4323 SDValue Mask = getValue(MaskOperand);
4324 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4325
4326 EVT VT = Src0.getValueType();
4327 if (!Alignment)
4328 Alignment = DAG.getEVTAlign(VT);
4329
4330 AAMDNodes AAInfo;
4331 I.getAAMetadata(AAInfo);
4332
4333 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4334 MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4335 // TODO: Make MachineMemOperands aware of scalable
4336 // vectors.
4337 VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
4338 SDValue StoreNode =
4339 DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4340 ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4341 DAG.setRoot(StoreNode);
4342 setValue(&I, StoreNode);
4343}
4344
4345// Get a uniform base for the Gather/Scatter intrinsic.
4346// The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4347// We try to represent it as a base pointer + vector of indices.
4348// Usually, the vector of pointers comes from a 'getelementptr' instruction.
4349 // The first operand of the GEP may be a single pointer or a vector of pointers.
4350// Example:
4351// %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4352// or
4353// %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
4354// %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4355//
4356 // When the first GEP operand is a single pointer, it is the uniform base we
4357 // are looking for. If the first operand of the GEP is a splat vector, we
4358 // extract the splat value and use it as a uniform base.
4359// In all other cases the function returns 'false'.
4360static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4361 ISD::MemIndexType &IndexType, SDValue &Scale,
4362 SelectionDAGBuilder *SDB, const BasicBlock *CurBB) {
4363 SelectionDAG& DAG = SDB->DAG;
4364 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4365 const DataLayout &DL = DAG.getDataLayout();
4366
4367 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4368
4369 // Handle splat constant pointer.
4370 if (auto *C = dyn_cast<Constant>(Ptr)) {
4371 C = C->getSplatValue();
4372 if (!C)
4373 return false;
4374
4375 Base = SDB->getValue(C);
4376
4377 ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4378 EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4379 Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4380 IndexType = ISD::SIGNED_SCALED;
4381 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4382 return true;
4383 }
4384
4385 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4386 if (!GEP || GEP->getParent() != CurBB)
4387 return false;
4388
4389 if (GEP->getNumOperands() != 2)
4390 return false;
4391
4392 const Value *BasePtr = GEP->getPointerOperand();
4393 const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4394
4395 // Make sure the base is scalar and the index is a vector.
4396 if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4397 return false;
4398
4399 Base = SDB->getValue(BasePtr);
4400 Index = SDB->getValue(IndexVal);
4401 IndexType = ISD::SIGNED_SCALED;
4402 Scale = DAG.getTargetConstant(
4403 DL.getTypeAllocSize(GEP->getResultElementType()),
4404 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4405 return true;
4406}
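// Worked example (editorial sketch, not part of the analyzed source): for
//   %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
// defined in the current block, this returns Base = %ptr, Index = %ind,
// IndexType = ISD::SIGNED_SCALED and Scale = 4 (the alloc size of i32), so
// each lane addresses %ptr + 4 * %ind[i].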
4407
4408void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4409 SDLoc sdl = getCurSDLoc();
4410
4411 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4412 const Value *Ptr = I.getArgOperand(1);
4413 SDValue Src0 = getValue(I.getArgOperand(0));
4414 SDValue Mask = getValue(I.getArgOperand(3));
4415 EVT VT = Src0.getValueType();
4416 Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4417 ->getMaybeAlignValue()
4418 .getValueOr(DAG.getEVTAlign(VT.getScalarType()));
4419 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4420
4421 AAMDNodes AAInfo;
4422 I.getAAMetadata(AAInfo);
4423
4424 SDValue Base;
4425 SDValue Index;
4426 ISD::MemIndexType IndexType;
4427 SDValue Scale;
4428 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4429 I.getParent());
4430
4431 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4432 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4433 MachinePointerInfo(AS), MachineMemOperand::MOStore,
4434 // TODO: Make MachineMemOperands aware of scalable
4435 // vectors.
4436 MemoryLocation::UnknownSize, Alignment, AAInfo);
4437 if (!UniformBase) {
4438 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4439 Index = getValue(Ptr);
4440 IndexType = ISD::SIGNED_UNSCALED;
4441 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4442 }
4443
4444 EVT IdxVT = Index.getValueType();
4445 EVT EltTy = IdxVT.getVectorElementType();
4446 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4447 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4448 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4449 }
4450
4451 SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4452 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4453 Ops, MMO, IndexType, false);
4454 DAG.setRoot(Scatter);
4455 setValue(&I, Scatter);
4456}
4457
4458void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4459 SDLoc sdl = getCurSDLoc();
4460
4461 auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4462 MaybeAlign &Alignment) {
4463 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4464 Ptr = I.getArgOperand(0);
4465 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4466 Mask = I.getArgOperand(2);
4467 Src0 = I.getArgOperand(3);
4468 };
4469 auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4470 MaybeAlign &Alignment) {
4471 // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4472 Ptr = I.getArgOperand(0);
4473 Alignment = None;
4474 Mask = I.getArgOperand(1);
4475 Src0 = I.getArgOperand(2);
4476 };
4477
4478 Value *PtrOperand, *MaskOperand, *Src0Operand;
4479 MaybeAlign Alignment;
4480 if (IsExpanding)
4481 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4482 else
4483 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4484
4485 SDValue Ptr = getValue(PtrOperand);
4486 SDValue Src0 = getValue(Src0Operand);
4487 SDValue Mask = getValue(MaskOperand);
4488 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4489
4490 EVT VT = Src0.getValueType();
4491 if (!Alignment)
4492 Alignment = DAG.getEVTAlign(VT);
4493
4494 AAMDNodes AAInfo;
4495 I.getAAMetadata(AAInfo);
4496 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4497
4498 // Do not serialize masked loads of constant memory with anything.
4499 MemoryLocation ML;
4500 if (VT.isScalableVector())
4501 ML = MemoryLocation::getAfter(PtrOperand);
4502 else
4503 ML = MemoryLocation(PtrOperand, LocationSize::precise(
4504 DAG.getDataLayout().getTypeStoreSize(I.getType())),
4505 AAInfo);
4506 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4507
4508 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4509
4510 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4511 MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4512 // TODO: Make MachineMemOperands aware of scalable
4513 // vectors.
4514 VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo, Ranges);
4515
4516 SDValue Load =
4517 DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4518 ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4519 if (AddToChain)
4520 PendingLoads.push_back(Load.getValue(1));
4521 setValue(&I, Load);
4522}
4523
4524void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4525 SDLoc sdl = getCurSDLoc();
4526
4527 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4528 const Value *Ptr = I.getArgOperand(0);
4529 SDValue Src0 = getValue(I.getArgOperand(3));
4530 SDValue Mask = getValue(I.getArgOperand(2));
4531
4532 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4533 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4534 Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
4535 ->getMaybeAlignValue()
4536 .getValueOr(DAG.getEVTAlign(VT.getScalarType()));
4537
4538 AAMDNodes AAInfo;
4539 I.getAAMetadata(AAInfo);
4540 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4541
4542 SDValue Root = DAG.getRoot();
4543 SDValue Base;
4544 SDValue Index;
4545 ISD::MemIndexType IndexType;
4546 SDValue Scale;
4547 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4548 I.getParent());
4549 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4550 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4551 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4552 // TODO: Make MachineMemOperands aware of scalable
4553 // vectors.
4554 MemoryLocation::UnknownSize, Alignment, AAInfo, Ranges);
4555
4556 if (!UniformBase) {
4557 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4558 Index = getValue(Ptr);
4559 IndexType = ISD::SIGNED_UNSCALED;
4560 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4561 }
4562
4563 EVT IdxVT = Index.getValueType();
4564 EVT EltTy = IdxVT.getVectorElementType();
4565 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4566 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4567 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4568 }
4569
4570 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4571 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4572 Ops, MMO, IndexType, ISD::NON_EXTLOAD);
4573
4574 PendingLoads.push_back(Gather.getValue(1));
4575 setValue(&I, Gather);
4576}
4577
4578void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4579 SDLoc dl = getCurSDLoc();
4580 AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4581 AtomicOrdering FailureOrdering = I.getFailureOrdering();
4582 SyncScope::ID SSID = I.getSyncScopeID();
4583
4584 SDValue InChain = getRoot();
4585
4586 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4587 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4588
4589 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4590 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4591
4592 MachineFunction &MF = DAG.getMachineFunction();
4593 MachineMemOperand *MMO = MF.getMachineMemOperand(
4594 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4595 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4596 FailureOrdering);
4597
4598 SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4599 dl, MemVT, VTs, InChain,
4600 getValue(I.getPointerOperand()),
4601 getValue(I.getCompareOperand()),
4602 getValue(I.getNewValOperand()), MMO);
4603
4604 SDValue OutChain = L.getValue(2);
4605
4606 setValue(&I, L);
4607 DAG.setRoot(OutChain);
4608}
4609
4610void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4611 SDLoc dl = getCurSDLoc();
4612 ISD::NodeType NT;
4613 switch (I.getOperation()) {
4614 default: llvm_unreachable("Unknown atomicrmw operation");
4615 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4616 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
4617 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
4618 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
4619 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4620 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
4621 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
4622 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
4623 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
4624 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4625 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4626 case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4627 case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4628 }
4629 AtomicOrdering Ordering = I.getOrdering();
4630 SyncScope::ID SSID = I.getSyncScopeID();
4631
4632 SDValue InChain = getRoot();
4633
4634 auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4635 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4636 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4637
4638 MachineFunction &MF = DAG.getMachineFunction();
4639 MachineMemOperand *MMO = MF.getMachineMemOperand(
4640 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4641 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4642
4643 SDValue L =
4644 DAG.getAtomic(NT, dl, MemVT, InChain,
4645 getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4646 MMO);
4647
4648 SDValue OutChain = L.getValue(1);
4649
4650 setValue(&I, L);
4651 DAG.setRoot(OutChain);
4652}
4653
4654void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4655 SDLoc dl = getCurSDLoc();
4656 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4657 SDValue Ops[3];
4658 Ops[0] = getRoot();
4659 Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4660 TLI.getFenceOperandTy(DAG.getDataLayout()));
4661 Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4662 TLI.getFenceOperandTy(DAG.getDataLayout()));
4663 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4664}
4665
4666void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4667 SDLoc dl = getCurSDLoc();
4668 AtomicOrdering Order = I.getOrdering();
4669 SyncScope::ID SSID = I.getSyncScopeID();
4670
4671 SDValue InChain = getRoot();
4672
4673 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4674 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4675 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4676
4677 if (!TLI.supportsUnalignedAtomics() &&
4678 I.getAlignment() < MemVT.getSizeInBits() / 8)
4679 report_fatal_error("Cannot generate unaligned atomic load");
4680
4681 auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4682
4683 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4684 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4685 I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4686
4687 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4688
4689 SDValue Ptr = getValue(I.getPointerOperand());
4690
4691 if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
4692 // TODO: Once this is better exercised by tests, it should be merged with
4693 // the normal path for loads to prevent future divergence.
4694 SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
4695 if (MemVT != VT)
4696 L = DAG.getPtrExtOrTrunc(L, dl, VT);
4697
4698 setValue(&I, L);
4699 SDValue OutChain = L.getValue(1);
4700 if (!I.isUnordered())
4701 DAG.setRoot(OutChain);
4702 else
4703 PendingLoads.push_back(OutChain);
4704 return;
4705 }
4706
4707 SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4708 Ptr, MMO);
4709
4710 SDValue OutChain = L.getValue(1);
4711 if (MemVT != VT)
4712 L = DAG.getPtrExtOrTrunc(L, dl, VT);
4713
4714 setValue(&I, L);
4715 DAG.setRoot(OutChain);
4716}
4717
4718void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4719 SDLoc dl = getCurSDLoc();
4720
4721 AtomicOrdering Ordering = I.getOrdering();
4722 SyncScope::ID SSID = I.getSyncScopeID();
4723
4724 SDValue InChain = getRoot();
4725
4726 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4727 EVT MemVT =
4728 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4729
4730 if (I.getAlignment() < MemVT.getSizeInBits() / 8)
4731 report_fatal_error("Cannot generate unaligned atomic store");
4732
4733 auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4734
4735 MachineFunction &MF = DAG.getMachineFunction();
4736 MachineMemOperand *MMO = MF.getMachineMemOperand(
4737 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4738 I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4739
4740 SDValue Val = getValue(I.getValueOperand());
4741 if (Val.getValueType() != MemVT)
4742 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4743 SDValue Ptr = getValue(I.getPointerOperand());
4744
4745 if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
4746 // TODO: Once this is better exercised by tests, it should be merged with
4747 // the normal path for stores to prevent future divergence.
4748 SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
4749 DAG.setRoot(S);
4750 return;
4751 }
4752 SDValue OutChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain,
4753 Ptr, Val, MMO);
4754
4755
4756 DAG.setRoot(OutChain);
4757}
4758
4759/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4760/// node.
4761void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4762 unsigned Intrinsic) {
4763 // Ignore the callsite's attributes. A specific call site may be marked with
4764 // readnone, but the lowering code will expect the chain based on the
4765 // definition.
4766 const Function *F = I.getCalledFunction();
4767 bool HasChain = !F->doesNotAccessMemory();
4768 bool OnlyLoad = HasChain && F->onlyReadsMemory();
4769
4770 // Build the operand list.
4771 SmallVector<SDValue, 8> Ops;
4772 if (HasChain) { // If this intrinsic has side-effects, chainify it.
4773 if (OnlyLoad) {
4774 // We don't need to serialize loads against other loads.
4775 Ops.push_back(DAG.getRoot());
4776 } else {
4777 Ops.push_back(getRoot());
4778 }
4779 }
4780
4781 // Info is set by getTgtMemIntrinsic.
4782 TargetLowering::IntrinsicInfo Info;
4783 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4784 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4785 DAG.getMachineFunction(),
4786 Intrinsic);
4787
4788 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4789 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4790 Info.opc == ISD::INTRINSIC_W_CHAIN)
4791 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4792 TLI.getPointerTy(DAG.getDataLayout())));
4793
4794 // Add all operands of the call to the operand list.
4795 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4796 const Value *Arg = I.getArgOperand(i);
4797 if (!I.paramHasAttr(i, Attribute::ImmArg)) {
4798 Ops.push_back(getValue(Arg));
4799 continue;
4800 }
4801
4802 // Use TargetConstant instead of a regular constant for immarg.
4803 EVT VT = TLI.getValueType(*DL, Arg->getType(), true);
4804 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
4805 assert(CI->getBitWidth() <= 64 &&
4806 "large intrinsic immediates not handled");
4807 Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
4808 } else {
4809 Ops.push_back(
4810 DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
4811 }
4812 }
4813
4814 SmallVector<EVT, 4> ValueVTs;
4815 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4816
4817 if (HasChain)
4818 ValueVTs.push_back(MVT::Other);
4819
4820 SDVTList VTs = DAG.getVTList(ValueVTs);
4821
4822 // Propagate fast-math-flags from IR to node(s).
4823 SDNodeFlags Flags;
4824 if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
4825 Flags.copyFMF(*FPMO);
4826 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
4827
4828 // Create the node.
4829 SDValue Result;
4830 if (IsTgtIntrinsic) {
4831 // This is a target intrinsic that touches memory.
4832 AAMDNodes AAInfo;
4833 I.getAAMetadata(AAInfo);
4834 Result =
4835 DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
4836 MachinePointerInfo(Info.ptrVal, Info.offset),
4837 Info.align, Info.flags, Info.size, AAInfo);
4838 } else if (!HasChain) {
4839 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4840 } else if (!I.getType()->isVoidTy()) {
4841 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4842 } else {
4843 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4844 }
4845
4846 if (HasChain) {
4847 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4848 if (OnlyLoad)
4849 PendingLoads.push_back(Chain);
4850 else
4851 DAG.setRoot(Chain);
4852 }
4853
4854 if (!I.getType()->isVoidTy()) {
4855 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4856 EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4857 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4858 } else
4859 Result = lowerRangeToAssertZExt(DAG, I, Result);
4860
4861 MaybeAlign Alignment = I.getRetAlign();
4862 if (!Alignment)
4863 Alignment = F->getAttributes().getRetAlignment();
4864 // Insert `assertalign` node if there's an alignment.
4865 if (InsertAssertAlign && Alignment) {
4866 Result =
4867 DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
4868 }
4869
4870 setValue(&I, Result);
4871 }
4872}
4873
4874/// GetSignificand - Get the significand and build it into a floating-point
4875/// number with exponent of 1:
4876///
4877/// Op = (Op & 0x007fffff) | 0x3f800000;
4878///
4879/// where Op is the i32 bit representation of the floating-point value.
4880static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4881 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4882 DAG.getConstant(0x007fffff, dl, MVT::i32));
4883 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4884 DAG.getConstant(0x3f800000, dl, MVT::i32));
4885 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4886}
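// Worked example for GetSignificand (editorial, not in the analyzed source):
// for Op = 0x40490fdb (3.14159274f), the AND keeps the 23 significand bits
// (0x00490fdb) and the OR installs a biased exponent of 127, giving
// 0x3fc90fdb == 1.57079637f -- pi's significand scaled into [1,2).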
4887
4888/// GetExponent - Get the exponent:
4889///
4890/// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4891///
4892/// where Op is the i32 bit representation of the floating-point value.
4893static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4894 const TargetLowering &TLI, const SDLoc &dl) {
4895 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4896 DAG.getConstant(0x7f800000, dl, MVT::i32));
4897 SDValue t1 = DAG.getNode(
4898 ISD::SRL, dl, MVT::i32, t0,
4899 DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4900 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4901 DAG.getConstant(127, dl, MVT::i32));
4902 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4903}
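// Worked example for GetExponent (editorial): for Op = 0x40490fdb
// (3.14159274f), (Op & 0x7f800000) >> 23 == 128; subtracting the bias 127
// leaves 1, and SINT_TO_FP yields 1.0f. Together with GetSignificand above,
// pi == 1.57079637f * 2^1.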
4904
4905/// getF32Constant - Get a 32-bit floating-point constant.
4906static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4907 const SDLoc &dl) {
4908 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4909 MVT::f32);
4910}
4911
4912static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4913 SelectionDAG &DAG) {
4914 // TODO: What fast-math-flags should be set on the floating-point nodes?
4915
4916 // IntegerPartOfX = (int32_t)t0;
4917 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4918
4919 // FractionalPartOfX = t0 - (float)IntegerPartOfX;
4920 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4921 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4922
4923 // IntegerPartOfX <<= 23;
4924 IntegerPartOfX = DAG.getNode(
4925 ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4926 DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4927 DAG.getDataLayout())));
4928
4929 SDValue TwoToFractionalPartOfX;
4930 if (LimitFloatPrecision <= 6) {
4931 // For floating-point precision of 6:
4932 //
4933 // TwoToFractionalPartOfX =
4934 // 0.997535578f +
4935 // (0.735607626f + 0.252464424f * x) * x;
4936 //
4937 // error 0.0144103317, which is 6 bits
4938 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4939 getF32Constant(DAG, 0x3e814304, dl));
4940 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4941 getF32Constant(DAG, 0x3f3c50c8, dl));
4942 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4943 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4944 getF32Constant(DAG, 0x3f7f5e7e, dl));
4945 } else if (LimitFloatPrecision <= 12) {
4946 // For floating-point precision of 12:
4947 //
4948 // TwoToFractionalPartOfX =
4949 // 0.999892986f +
4950 // (0.696457318f +
4951 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
4952 //
4953 // error 0.000107046256, which is 13 to 14 bits
4954 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4955 getF32Constant(DAG, 0x3da235e3, dl));
4956 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4957 getF32Constant(DAG, 0x3e65b8f3, dl));
4958 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4959 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4960 getF32Constant(DAG, 0x3f324b07, dl));
4961 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4962 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4963 getF32Constant(DAG, 0x3f7ff8fd, dl));
4964 } else { // LimitFloatPrecision <= 18
4965 // For floating-point precision of 18:
4966 //
4967 // TwoToFractionalPartOfX =
4968 // 0.999999982f +
4969 // (0.693148872f +
4970 // (0.240227044f +
4971 // (0.554906021e-1f +
4972 // (0.961591928e-2f +
4973 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4974 // error 2.47208000*10^(-7), which is better than 18 bits
4975 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4976 getF32Constant(DAG, 0x3924b03e, dl));
4977 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4978 getF32Constant(DAG, 0x3ab24b87, dl));
4979 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4980 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4981 getF32Constant(DAG, 0x3c1d8c17, dl));
4982 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4983 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4984 getF32Constant(DAG, 0x3d634a1d, dl));
4985 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4986 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4987 getF32Constant(DAG, 0x3e75fe14, dl));
4988 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4989 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4990 getF32Constant(DAG, 0x3f317234, dl));
4991 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4992 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4993 getF32Constant(DAG, 0x3f800000, dl));
4994 }
4995
4996 // Add the exponent into the result in integer domain.
4997 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4998 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4999 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5000}
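// Editorial note on the integer-domain ADD above: adding
// IntegerPartOfX << 23 to the bits of TwoToFractionalPartOfX increments the
// biased exponent field by IntegerPartOfX, i.e. multiplies by
// 2^IntegerPartOfX while the value stays normal. E.g. for t0 = 3.5f:
// X = 0.5, the polynomial gives ~1.41421f == 0x3fb504f3, and
// 0x3fb504f3 + (3 << 23) == 0x413504f3 == 11.3137f ~= 2^3.5.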
5001
5002/// expandExp - Lower an exp intrinsic. Handles the special sequences for
5003/// limited-precision mode.
5004static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5005 const TargetLowering &TLI, SDNodeFlags Flags) {
5006 if (Op.getValueType() == MVT::f32 &&
5007 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5008
5009 // Put the exponent in the right bit position for later addition to the
5010 // final result:
5011 //
5012 // t0 = Op * log2(e)
5013
5014 // TODO: What fast-math-flags should be set here?
5015 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5016 DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5017 return getLimitedPrecisionExp2(t0, dl, DAG);
5018 }
5019
5020 // No special expansion.
5021 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5022}
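// Editorial sanity check of the identity used above: exp(x) == 2^(x*log2(e)).
// For Op = 1.0f, t0 = 1.4426950f and 2^1.4426950 ~= 2.7182817f == e, up to
// the error bound of the selected limited-precision polynomial.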
5023
5024/// expandLog - Lower a log intrinsic. Handles the special sequences for
5025/// limited-precision mode.
5026static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5027 const TargetLowering &TLI, SDNodeFlags Flags) {
5028 // TODO: What fast-math-flags should be set on the floating-point nodes?
5029
5030 if (Op.getValueType() == MVT::f32 &&
5031 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5032 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5033
5034 // Scale the exponent by log(2).
5035 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5036 SDValue LogOfExponent =
5037 DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5038 DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5039
5040 // Get the significand and build it into a floating-point number with
5041 // exponent of 1.
5042 SDValue X = GetSignificand(DAG, Op1, dl);
5043
5044 SDValue LogOfMantissa;
5045 if (LimitFloatPrecision <= 6) {
5046 // For floating-point precision of 6:
5047 //
5048 // LogOfMantissa =
5049 // -1.1609546f +
5050 // (1.4034025f - 0.23903021f * x) * x;
5051 //
5052 // error 0.0034276066, which is better than 8 bits
5053 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5054 getF32Constant(DAG, 0xbe74c456, dl));
5055 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5056 getF32Constant(DAG, 0x3fb3a2b1, dl));
5057 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5058 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5059 getF32Constant(DAG, 0x3f949a29, dl));
5060 } else if (LimitFloatPrecision <= 12) {
5061 // For floating-point precision of 12:
5062 //
5063 // LogOfMantissa =
5064 // -1.7417939f +
5065 // (2.8212026f +
5066 // (-1.4699568f +
5067 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5068 //
5069 // error 0.000061011436, which is 14 bits
5070 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5071 getF32Constant(DAG, 0xbd67b6d6, dl));
5072 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5073 getF32Constant(DAG, 0x3ee4f4b8, dl));
5074 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5075 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5076 getF32Constant(DAG, 0x3fbc278b, dl));
5077 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5078 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5079 getF32Constant(DAG, 0x40348e95, dl));
5080 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5081 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5082 getF32Constant(DAG, 0x3fdef31a, dl));
5083 } else { // LimitFloatPrecision <= 18
5084 // For floating-point precision of 18:
5085 //
5086 // LogOfMantissa =
5087 // -2.1072184f +
5088 // (4.2372794f +
5089 // (-3.7029485f +
5090 // (2.2781945f +
5091 // (-0.87823314f +
5092 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5093 //
5094 // error 0.0000023660568, which is better than 18 bits
5095 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5096 getF32Constant(DAG, 0xbc91e5ac, dl));
5097 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5098 getF32Constant(DAG, 0x3e4350aa, dl));
5099 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5100 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5101 getF32Constant(DAG, 0x3f60d3e3, dl));
5102 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5103 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5104 getF32Constant(DAG, 0x4011cdf0, dl));
5105 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5106 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5107 getF32Constant(DAG, 0x406cfd1c, dl));
5108 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5109 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5110 getF32Constant(DAG, 0x408797cb, dl));
5111 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5112 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5113 getF32Constant(DAG, 0x4006dcab, dl));
5114 }
5115
5116 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5117 }
5118
5119 // No special expansion.
5120 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5121}
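// Editorial note on the decomposition above: writing x = m * 2^e with
// m in [1,2) gives ln(x) = e*ln(2) + ln(m). E.g. ln(8.0f): GetExponent
// yields 3.0f, scaled by ln(2) to 2.0794415f, and the significand 1.0f
// contributes ln(1) = 0. expandLog2 and expandLog10 below follow the same
// pattern, scaling the exponent by 1 and log10(2) respectively.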
5122
5123/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5124/// limited-precision mode.
5125static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5126 const TargetLowering &TLI, SDNodeFlags Flags) {
5127 // TODO: What fast-math-flags should be set on the floating-point nodes?
5128
5129 if (Op.getValueType() == MVT::f32 &&
5130 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5131 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5132
5133 // Get the exponent.
5134 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5135
5136 // Get the significand and build it into a floating-point number with
5137 // exponent of 1.
5138 SDValue X = GetSignificand(DAG, Op1, dl);
5139
5140 // Different possible minimax approximations of the significand in
5141 // floating-point for various degrees of accuracy over [1,2].
5142 SDValue Log2ofMantissa;
5143 if (LimitFloatPrecision <= 6) {
5144 // For floating-point precision of 6:
5145 //
5146 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5147 //
5148 // error 0.0049451742, which is more than 7 bits
5149 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5150 getF32Constant(DAG, 0xbeb08fe0, dl));
5151 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5152 getF32Constant(DAG, 0x40019463, dl));
5153 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5154 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5155 getF32Constant(DAG, 0x3fd6633d, dl));
5156 } else if (LimitFloatPrecision <= 12) {
5157 // For floating-point precision of 12:
5158 //
5159 // Log2ofMantissa =
5160 // -2.51285454f +
5161 // (4.07009056f +
5162 // (-2.12067489f +
5163 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5164 //
5165 // error 0.0000876136000, which is better than 13 bits
5166 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5167 getF32Constant(DAG, 0xbda7262e, dl));
5168 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5169 getF32Constant(DAG, 0x3f25280b, dl));
5170 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5171 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5172 getF32Constant(DAG, 0x4007b923, dl));
5173 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5174 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5175 getF32Constant(DAG, 0x40823e2f, dl));
5176 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5177 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5178 getF32Constant(DAG, 0x4020d29c, dl));
5179 } else { // LimitFloatPrecision <= 18
5180 // For floating-point precision of 18:
5181 //
5182 // Log2ofMantissa =
5183 // -3.0400495f +
5184 // (6.1129976f +
5185 // (-5.3420409f +
5186 // (3.2865683f +
5187 // (-1.2669343f +
5188 // (0.27515199f -
5189 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5190 //
5191 // error 0.0000018516, which is better than 18 bits
5192 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5193 getF32Constant(DAG, 0xbcd2769e, dl));
5194 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5195 getF32Constant(DAG, 0x3e8ce0b9, dl));
5196 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5197 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5198 getF32Constant(DAG, 0x3fa22ae7, dl));
5199 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5200 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5201 getF32Constant(DAG, 0x40525723, dl));
5202 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5203 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5204 getF32Constant(DAG, 0x40aaf200, dl));
5205 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5206 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5207 getF32Constant(DAG, 0x40c39dad, dl));
5208 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5209 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5210 getF32Constant(DAG, 0x4042902c, dl));
5211 }
5212
5213 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5214 }
5215
5216 // No special expansion.
5217 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5218}
5219
5220/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5221/// limited-precision mode.
5222static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5223 const TargetLowering &TLI, SDNodeFlags Flags) {
5224 // TODO: What fast-math-flags should be set on the floating-point nodes?
5225
5226 if (Op.getValueType() == MVT::f32 &&
5227 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5228 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5229
5230 // Scale the exponent by log10(2) [0.30102999f].
5231 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5232 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5233 getF32Constant(DAG, 0x3e9a209a, dl));
5234
5235 // Get the significand and build it into a floating-point number with
5236 // exponent of 1.
5237 SDValue X = GetSignificand(DAG, Op1, dl);
5238
5239 SDValue Log10ofMantissa;
5240 if (LimitFloatPrecision <= 6) {
5241 // For floating-point precision of 6:
5242 //
5243 // Log10ofMantissa =
5244 // -0.50419619f +
5245 // (0.60948995f - 0.10380950f * x) * x;
5246 //
5247 // error 0.0014886165, which is 6 bits
5248 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5249 getF32Constant(DAG, 0xbdd49a13, dl));
5250 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5251 getF32Constant(DAG, 0x3f1c0789, dl));
5252 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5253 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5254 getF32Constant(DAG, 0x3f011300, dl));
5255 } else if (LimitFloatPrecision <= 12) {
5256 // For floating-point precision of 12:
5257 //
5258 // Log10ofMantissa =
5259 // -0.64831180f +
5260 // (0.91751397f +
5261 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5262 //
5263 // error 0.00019228036, which is better than 12 bits
5264 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5265 getF32Constant(DAG, 0x3d431f31, dl));
5266 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5267 getF32Constant(DAG, 0x3ea21fb2, dl));
5268 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5269 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5270 getF32Constant(DAG, 0x3f6ae232, dl));
5271 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5272 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5273 getF32Constant(DAG, 0x3f25f7c3, dl));
5274 } else { // LimitFloatPrecision <= 18
5275 // For floating-point precision of 18:
5276 //
5277 // Log10ofMantissa =
5278 // -0.84299375f +
5279 // (1.5327582f +
5280 // (-1.0688956f +
5281 // (0.49102474f +
5282 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5283 //
5284 // error 0.0000037995730, which is better than 18 bits
5285 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5286 getF32Constant(DAG, 0x3c5d51ce, dl));
5287 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5288 getF32Constant(DAG, 0x3e00685a, dl));
5289 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5290 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5291 getF32Constant(DAG, 0x3efb6798, dl));
5292 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5293 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5294 getF32Constant(DAG, 0x3f88d192, dl));
5295 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5296 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5297 getF32Constant(DAG, 0x3fc4316c, dl));
5298 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5299 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5300 getF32Constant(DAG, 0x3f57ce70, dl));
5301 }
5302
5303 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5304 }
5305
5306 // No special expansion.
5307 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5308}
5309
5310/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5311/// limited-precision mode.
5312static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5313 const TargetLowering &TLI, SDNodeFlags Flags) {
5314 if (Op.getValueType() == MVT::f32 &&
5315 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5316 return getLimitedPrecisionExp2(Op, dl, DAG);
5317
5318 // No special expansion.
5319 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5320}
5321
5322/// expandPow - Lower a pow intrinsic. Handles the special sequences for
5323/// limited-precision mode with x == 10.0f.
5324static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5325 SelectionDAG &DAG, const TargetLowering &TLI,
5326 SDNodeFlags Flags) {
5327 bool IsExp10 = false;
5328 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5329 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5330 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5331 APFloat Ten(10.0f);
5332 IsExp10 = LHSC->isExactlyValue(Ten);
5333 }
5334 }
5335
5336 // TODO: What fast-math-flags should be set on the FMUL node?
5337 if (IsExp10) {
5338 // Put the exponent in the right bit position for later addition to the
5339 // final result:
5340 //
5341 // #define LOG2OF10 3.3219281f
5342 // t0 = Op * LOG2OF10;
5343 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5344 getF32Constant(DAG, 0x40549a78, dl));
5345 return getLimitedPrecisionExp2(t0, dl, DAG);
5346 }
5347
5348 // No special expansion.
5349 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5350}
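// Editorial note: 0x40549a78 is the f32 encoding of 3.3219280f ~= log2(10),
// so the 10.0f-base case reduces pow(10.0f, x) to exp2(x * log2(10)) and
// reuses getLimitedPrecisionExp2 above.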
5351
5352/// ExpandPowI - Expand an llvm.powi intrinsic.
5353static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5354 SelectionDAG &DAG) {
5355 // If RHS is a constant, we can expand this out to a multiplication tree;
5356 // otherwise we end up lowering to a call to __powidf2 (for example). When
5357 // optimizing for size, we only do the expansion if it would produce a
5358 // small number of multiplies; otherwise we fall back to the libcall.
5359 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5360 // Get the exponent as a positive value.
5361 unsigned Val = RHSC->getSExtValue();
5362 if ((int)Val < 0) Val = -Val;
5363
5364 // powi(x, 0) -> 1.0
5365 if (Val == 0)
5366 return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5367
5368 bool OptForSize = DAG.shouldOptForSize();
5369 if (!OptForSize ||
5370 // If optimizing for size, don't insert too many multiplies.
5371 // This inserts up to 5 multiplies.
5372 countPopulation(Val) + Log2_32(Val) < 7) {
5373 // We use the simple binary decomposition method to generate the multiply
5374 // sequence. There are more optimal ways to do this (for example,
5375 // powi(x,15) generates one more multiply than it should), but this has
5376 // the benefit of being both really simple and much better than a libcall.
5377 SDValue Res; // Logically starts equal to 1.0
5378 SDValue CurSquare = LHS;
5379 // TODO: Intrinsics should have fast-math-flags that propagate to these
5380 // nodes.
5381 while (Val) {
5382 if (Val & 1) {
5383 if (Res.getNode())
5384 Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5385 else
5386 Res = CurSquare; // 1.0*CurSquare.
5387 }
5388
5389 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5390 CurSquare, CurSquare);
5391 Val >>= 1;
5392 }
5393
5394 // If the original was negative, invert the result, producing 1/(x*x*x).
5395 if (RHSC->getSExtValue() < 0)
5396 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5397 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5398 return Res;
5399 }
5400 }
5401
5402 // Otherwise, expand to a libcall.
5403 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5404}
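// Worked example of the binary decomposition above (editorial): for
// powi(x, 13), Val = 13 = 0b1101 passes the size check
// (countPopulation(13) + Log2_32(13) == 3 + 3 < 7) and the loop produces
// Res = x, then Res = x * x^4, then Res = x^5 * x^8 == x^13, using five
// live FMULs (the final squaring to x^16 is dead and later eliminated).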
5405
5406static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5407 SDValue LHS, SDValue RHS, SDValue Scale,
5408 SelectionDAG &DAG, const TargetLowering &TLI) {
5409 EVT VT = LHS.getValueType();
5410 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5411 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5412 LLVMContext &Ctx = *DAG.getContext();
5413
5414 // If the type is legal but the operation isn't, this node might survive all
5415 // the way to operation legalization. If we end up there and we do not have
5416 // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5417 // node.
5418
5419 // Coax the legalizer into expanding the node during type legalization instead
5420 // by bumping the size by one bit. This will force it to Promote, enabling the
5421 // early expansion and avoiding the need to expand later.
5422
5423 // We don't have to do this if Scale is 0; that can always be expanded, unless
5424 // it's a saturating signed operation. Those can experience true integer
5425 // division overflow, a case which we must avoid.
5426
5427 // FIXME: We wouldn't have to do this (or any of the early
5428 // expansion/promotion) if it was possible to expand a libcall of an
5429 // illegal type during operation legalization. But it's not, so things
5430 // get a bit hacky.
5431 unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
5432 if ((ScaleInt > 0 || (Saturating && Signed)) &&
5433 (TLI.isTypeLegal(VT) ||
5434 (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5435 TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5436 Opcode, VT, ScaleInt);
5437 if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5438 EVT PromVT;
5439 if (VT.isScalarInteger())
5440 PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5441 else if (VT.isVector()) {
5442 PromVT = VT.getVectorElementType();
5443 PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5444 PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5445 } else
5446 llvm_unreachable("Wrong VT for DIVFIX?");
5447 if (Signed) {
5448 LHS = DAG.getSExtOrTrunc(LHS, DL, PromVT);
5449 RHS = DAG.getSExtOrTrunc(RHS, DL, PromVT);
5450 } else {
5451 LHS = DAG.getZExtOrTrunc(LHS, DL, PromVT);
5452 RHS = DAG.getZExtOrTrunc(RHS, DL, PromVT);
5453 }
5454 EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5455 // For saturating operations, we need to shift up the LHS to get the
5456 // proper saturation width, and then shift down again afterwards.
5457 if (Saturating)
5458 LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5459 DAG.getConstant(1, DL, ShiftTy));
5460 SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5461 if (Saturating)
5462 Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5463 DAG.getConstant(1, DL, ShiftTy));
5464 return DAG.getZExtOrTrunc(Res, DL, VT);
5465 }
5466 }
5467
5468 return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5469}
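// Editorial sketch of the promotion trick above: an i32 sdiv.fix.sat with a
// nonzero scale whose fixed-point operation is neither Legal nor Custom is
// rebuilt here on i33 operands, a type the type legalizer must Promote,
// which triggers the early expansion. The extra SHL before and SRA/SRL
// after the saturating operation keep the saturation bounds aligned with
// the original 32-bit width.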
5470
5471// getUnderlyingArgRegs - Find underlying registers used for a truncated,
5472// bitcasted, or split argument. Returns a list of <Register, size in bits> pairs.
5473static void
5474getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
5475 const SDValue &N) {
5476 switch (N.getOpcode()) {
5477 case ISD::CopyFromReg: {
5478 SDValue Op = N.getOperand(1);
5479 Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5480 Op.getValueType().getSizeInBits());
5481 return;
5482 }
5483 case ISD::BITCAST:
5484 case ISD::AssertZext:
5485 case ISD::AssertSext:
5486 case ISD::TRUNCATE:
5487 getUnderlyingArgRegs(Regs, N.getOperand(0));
5488 return;
5489 case ISD::BUILD_PAIR:
5490 case ISD::BUILD_VECTOR:
5491 case ISD::CONCAT_VECTORS:
5492 for (SDValue Op : N->op_values())
5493 getUnderlyingArgRegs(Regs, Op);
5494 return;
5495 default:
5496 return;
5497 }
5498}
5499
5500/// If the DbgValueInst is a dbg_value of a function argument, create the
5501/// corresponding DBG_VALUE machine instruction for it now. At the end of
5502/// instruction selection, they will be inserted into the entry BB.
5503/// We don't currently support this for variadic dbg_values, as they shouldn't
5504/// appear for function arguments or in the prologue.
5505bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5506 const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5507 DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
5508 const Argument *Arg = dyn_cast<Argument>(V);
5509 if (!Arg)
5510 return false;
5511
5512 MachineFunction &MF = DAG.getMachineFunction();
5513 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5514
5515 // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
5516 // we've been asked to pursue.
5517 auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
5518 bool Indirect) {
5519 if (Reg.isVirtual() && MF.useDebugInstrRef()) {
5520 // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
5521 // pointing at the VReg, which will be patched up later.
5522 auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
5523 auto MIB = BuildMI(MF, DL, Inst);
5524 MIB.addReg(Reg, RegState::Debug);
5525 MIB.addImm(0);
5526 MIB.addMetadata(Variable);
5527 auto *NewDIExpr = FragExpr;
5528 // We don't have an "Indirect" field in DBG_INSTR_REF, so fold that into
5529 // the DIExpression.
5530 if (Indirect)
5531 NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
5532 MIB.addMetadata(NewDIExpr);
5533 return MIB;
5534 } else {
5535 // Create a completely standard DBG_VALUE.
5536 auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
5537 return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
5538 }
5539 };
5540
5541 if (!IsDbgDeclare) {
5542 // ArgDbgValues are hoisted to the beginning of the entry block. So we
5543 // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5544 // the entry block.
5545 bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5546 if (!IsInEntryBlock)
5547 return false;
5548
5549 // For the same reason, we should only emit as ArgDbgValue if the dbg.value
5550 // intrinsic describes a variable that is also a parameter of the current
5551 // function.
5552 //
5553 // Although, if we are at the top of the entry block already, we can still
5554 // emit using ArgDbgValue. This might catch some situations when the
5555 // dbg.value refers to an argument that isn't used in the entry block, so
5556 // any CopyToReg node would be optimized out and the only way to express
5557 // this DBG_VALUE is by using the physical reg (or FI) as done in this
5558 // method. Since ArgDbgValues are hoisted to the beginning of the entry
5559 // block, we should only emit one if the Variable is an argument to the
5560 // current function and the dbg.value intrinsic is found in the entry
5561 // block.
5562 bool VariableIsFunctionInputArg = Variable->isParameter() &&
5563 !DL->getInlinedAt();
5564 bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5565 if (!IsInPrologue && !VariableIsFunctionInputArg)
5566 return false;
5567
5568 // Here we assume that a function argument at the IR level can only be used
5569 // to describe one input parameter at the source level. If we, for example,
5570 // have source code like this
5571 //
5572 // struct A { long x, y; };
5573 // void foo(struct A a, long b) {
5574 // ...
5575 // b = a.x;
5576 // ...
5577 // }
5578 //
5579 // and IR like this
5580 //
5581 // define void @foo(i32 %a1, i32 %a2, i32 %b) {
5582 // entry:
5583 // call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5584 // call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5585 // call void @llvm.dbg.value(metadata i32 %b, "b",
5586 // ...
5587 // call void @llvm.dbg.value(metadata i32 %a1, "b"
5588 // ...
5589 //
5590 // then the last dbg.value is describing a parameter "b" using a value that
5591 // is an argument. But since we have already used %a1 to describe a
5592 // parameter, we should not handle that last dbg.value here (that would
5593 // result in an incorrect hoisting of the DBG_VALUE to the function entry).
5594 // Notice that we allow one dbg.value per IR-level argument, to accommodate
5595 // the situation with fragments above.
5596 if (VariableIsFunctionInputArg) {
5597 unsigned ArgNo = Arg->getArgNo();
5598 if (ArgNo >= FuncInfo.DescribedArgs.size())
5599 FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5600 else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5601 return false;
5602 FuncInfo.DescribedArgs.set(ArgNo);
5603 }
5604 }
5605
5606 bool IsIndirect = false;
5607 Optional<MachineOperand> Op;
5608 // Some arguments' frame index is recorded during argument lowering.
5609 int FI = FuncInfo.getArgumentFrameIndex(Arg);
5610 if (FI != std::numeric_limits<int>::max())
5611 Op = MachineOperand::CreateFI(FI);
5612
5613 SmallVector<std::pair<unsigned, TypeSize>, 8> ArgRegsAndSizes;
5614 if (!Op && N.getNode()) {
5615 getUnderlyingArgRegs(ArgRegsAndSizes, N);
5616 Register Reg;
5617 if (ArgRegsAndSizes.size() == 1)
5618 Reg = ArgRegsAndSizes.front().first;
5619
5620 if (Reg && Reg.isVirtual()) {
5621 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5622 Register PR = RegInfo.getLiveInPhysReg(Reg);
5623 if (PR)
5624 Reg = PR;
5625 }
5626 if (Reg) {
5627 Op = MachineOperand::CreateReg(Reg, false);
5628 IsIndirect = IsDbgDeclare;
5629 }
5630 }
5631
5632 if (!Op && N.getNode()) {
5633 // Check if frame index is available.
5634 SDValue LCandidate = peekThroughBitcasts(N);
5635 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5636 if (FrameIndexSDNode *FINode =
5637 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5638 Op = MachineOperand::CreateFI(FINode->getIndex());
5639 }
5640
5641 if (!Op) {
5642 // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
5643 auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, TypeSize>>
5644 SplitRegs) {
5645 unsigned Offset = 0;
5646 for (auto RegAndSize : SplitRegs) {
5647 // If the expression is already a fragment, the current register
5648 // offset+size might extend beyond the fragment. In this case, only
5649 // the register bits that are inside the fragment are relevant.
5650 int RegFragmentSizeInBits = RegAndSize.second;
5651 if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5652 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5653 // The register is entirely outside the expression fragment,
5654 // so is irrelevant for debug info.
5655 if (Offset >= ExprFragmentSizeInBits)
5656 break;
5657 // The register is partially outside the expression fragment, only
5658 // the low bits within the fragment are relevant for debug info.
5659 if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5660 RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5661 }
5662 }
5663
5664 auto FragmentExpr = DIExpression::createFragmentExpression(
5665 Expr, Offset, RegFragmentSizeInBits);
5666 Offset += RegAndSize.second;
5667 // If a valid fragment expression cannot be created, the variable's
5668 // correct value cannot be determined and so it is set as Undef.
5669 if (!FragmentExpr) {
5670 SDDbgValue *SDV = DAG.getConstantDbgValue(
5671 Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5672 DAG.AddDbgValue(SDV, false);
5673 continue;
5674 }
5675 MachineInstr *NewMI =
5676 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr, IsDbgDeclare);
5677 FuncInfo.ArgDbgValues.push_back(NewMI);
5678 }
5679 };
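The clamping logic in the loop above reduces to one rule: trim the register
slice [Offset, Offset + RegBits) to the expression fragment [0, FragBits).
A standalone restatement, illustrative only ('clampRegToFragment' is a
hypothetical name, not part of this file):

    #include <algorithm>
    #include <cstdint>

    // Returns how many bits of a register slice starting at 'Offset' fall
    // inside an expression fragment of 'FragBits' bits; 0 means the register
    // lies entirely outside the fragment and is irrelevant for debug info.
    static uint64_t clampRegToFragment(uint64_t Offset, uint64_t RegBits,
                                       uint64_t FragBits) {
      if (Offset >= FragBits)
        return 0;
      return std::min<uint64_t>(RegBits, FragBits - Offset);
    }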
5680
5681 // Check if ValueMap has reg number.
5682 DenseMap<const Value *, Register>::const_iterator
5683 VMI = FuncInfo.ValueMap.find(V);
5684 if (VMI != FuncInfo.ValueMap.end()) {
5685 const auto &TLI = DAG.getTargetLoweringInfo();
5686 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5687 V->getType(), None);
5688 if (RFV.occupiesMultipleRegs()) {
5689 splitMultiRegDbgValue(RFV.getRegsAndSizes());
5690 return true;
5691 }
5692
5693 Op = MachineOperand::CreateReg(VMI->second, false);
5694 IsIndirect = IsDbgDeclare;
5695 } else if (ArgRegsAndSizes.size() > 1) {
5696 // This was split due to the calling convention, and no virtual register
5697 // mapping exists for the value.
5698 splitMultiRegDbgValue(ArgRegsAndSizes);
5699 return true;
5700 }
5701 }
5702
5703 if (!Op)
5704 return false;
5705
5706 assert(Variable->isValidLocationForIntrinsic(DL) &&
5707        "Expected inlined-at fields to agree");
5708 MachineInstr *NewMI = nullptr;
5709
5710 if (Op->isReg())
5711 NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
5712 else
5713 NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
5714 Variable, Expr);
5715
5716 FuncInfo.ArgDbgValues.push_back(NewMI);
5717 return true;
5718}
5719
5720/// Return the appropriate SDDbgValue based on N.
5721SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5722 DILocalVariable *Variable,
5723 DIExpression *Expr,
5724 const DebugLoc &dl,
5725 unsigned DbgSDNodeOrder) {
5726 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5727 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5728 // stack slot locations.
5729 //
5730 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5731 // debug values here after optimization:
5732 //
5733 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
5734 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5735 //
5736 // Both describe the direct values of their associated variables.
5737 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5738 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5739 }
5740 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5741 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5742}
5743
5744static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5745 switch (Intrinsic) {
5746 case Intrinsic::smul_fix:
5747 return ISD::SMULFIX;
5748 case Intrinsic::umul_fix:
5749 return ISD::UMULFIX;
5750 case Intrinsic::smul_fix_sat:
5751 return ISD::SMULFIXSAT;
5752 case Intrinsic::umul_fix_sat:
5753 return ISD::UMULFIXSAT;
5754 case Intrinsic::sdiv_fix:
5755 return ISD::SDIVFIX;
5756 case Intrinsic::udiv_fix:
5757 return ISD::UDIVFIX;
5758 case Intrinsic::sdiv_fix_sat:
5759 return ISD::SDIVFIXSAT;
5760 case Intrinsic::udiv_fix_sat:
5761 return ISD::UDIVFIXSAT;
5762 default:
5763 llvm_unreachable("Unhandled fixed point intrinsic")__builtin_unreachable();
5764 }
5765}
5766
5767void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5768 const char *FunctionName) {
5769 assert(FunctionName && "FunctionName must not be nullptr");
5770 SDValue Callee = DAG.getExternalSymbol(
5771 FunctionName,
5772 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
5773 LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
5774}
5775
5776/// Given a @llvm.call.preallocated.setup, return the corresponding
5777/// preallocated call.
5778static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
5779 assert(cast<CallBase>(PreallocatedSetup)
5780            ->getCalledFunction()
5781            ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
5782        "expected call_preallocated_setup Value");
5783 for (auto *U : PreallocatedSetup->users()) {
5784 auto *UseCall = cast<CallBase>(U);
5785 const Function *Fn = UseCall->getCalledFunction();
5786 if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
5787 return UseCall;
5788 }
5789 }
5790 llvm_unreachable("expected corresponding call to preallocated setup/arg")__builtin_unreachable();
5791}
5792
5793/// Lower the call to the specified intrinsic function.
5794void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
5795 unsigned Intrinsic) {
5796 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5797 SDLoc sdl = getCurSDLoc();
5798 DebugLoc dl = getCurDebugLoc();
5799 SDValue Res;
5800
5801 SDNodeFlags Flags;
5802 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
Step 1: Assuming the object is not a 'FPMathOperator'
Step 1.1: 'FPOp' is null
Step 2: Taking false branch
5803 Flags.copyFMF(*FPOp);
5804
5805 switch (Intrinsic) {
Step 3: Control jumps to 'case eh_sjlj_callsite:' at line 6169
5806 default:
5807 // By default, turn this into a target intrinsic node.
5808 visitTargetIntrinsic(I, Intrinsic);
5809 return;
5810 case Intrinsic::vscale: {
5811 match(&I, m_VScale(DAG.getDataLayout()));
5812 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5813 setValue(&I,
5814 DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1)));
5815 return;
5816 }
5817 case Intrinsic::vastart: visitVAStart(I); return;
5818 case Intrinsic::vaend: visitVAEnd(I); return;
5819 case Intrinsic::vacopy: visitVACopy(I); return;
5820 case Intrinsic::returnaddress:
5821 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5822 TLI.getPointerTy(DAG.getDataLayout()),
5823 getValue(I.getArgOperand(0))));
5824 return;
5825 case Intrinsic::addressofreturnaddress:
5826 setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5827 TLI.getPointerTy(DAG.getDataLayout())));
5828 return;
5829 case Intrinsic::sponentry:
5830 setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl,
5831 TLI.getFrameIndexTy(DAG.getDataLayout())));
5832 return;
5833 case Intrinsic::frameaddress:
5834 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5835 TLI.getFrameIndexTy(DAG.getDataLayout()),
5836 getValue(I.getArgOperand(0))));
5837 return;
5838 case Intrinsic::read_volatile_register:
5839 case Intrinsic::read_register: {
5840 Value *Reg = I.getArgOperand(0);
5841 SDValue Chain = getRoot();
5842 SDValue RegName =
5843 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5844 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5845 Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5846 DAG.getVTList(VT, MVT::Other), Chain, RegName);
5847 setValue(&I, Res);
5848 DAG.setRoot(Res.getValue(1));
5849 return;
5850 }
5851 case Intrinsic::write_register: {
5852 Value *Reg = I.getArgOperand(0);
5853 Value *RegValue = I.getArgOperand(1);
5854 SDValue Chain = getRoot();
5855 SDValue RegName =
5856 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5857 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5858 RegName, getValue(RegValue)));
5859 return;
5860 }
5861 case Intrinsic::memcpy: {
5862 const auto &MCI = cast<MemCpyInst>(I);
5863 SDValue Op1 = getValue(I.getArgOperand(0));
5864 SDValue Op2 = getValue(I.getArgOperand(1));
5865 SDValue Op3 = getValue(I.getArgOperand(2));
5866 // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5867 Align DstAlign = MCI.getDestAlign().valueOrOne();
5868 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5869 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5870 bool isVol = MCI.isVolatile();
5871 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5872 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5873 // node.
5874 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5875 AAMDNodes AAInfo;
5876 I.getAAMetadata(AAInfo);
5877 SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5878 /* AlwaysInline */ false, isTC,
5879 MachinePointerInfo(I.getArgOperand(0)),
5880 MachinePointerInfo(I.getArgOperand(1)), AAInfo);
5881 updateDAGForMaybeTailCall(MC);
5882 return;
5883 }
5884 case Intrinsic::memcpy_inline: {
5885 const auto &MCI = cast<MemCpyInlineInst>(I);
5886 SDValue Dst = getValue(I.getArgOperand(0));
5887 SDValue Src = getValue(I.getArgOperand(1));
5888 SDValue Size = getValue(I.getArgOperand(2));
5889 assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
5890 // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
5891 Align DstAlign = MCI.getDestAlign().valueOrOne();
5892 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5893 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5894 bool isVol = MCI.isVolatile();
5895 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5896 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5897 // node.
5898 AAMDNodes AAInfo;
5899 I.getAAMetadata(AAInfo);
5900 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
5901 /* AlwaysInline */ true, isTC,
5902 MachinePointerInfo(I.getArgOperand(0)),
5903 MachinePointerInfo(I.getArgOperand(1)), AAInfo);
5904 updateDAGForMaybeTailCall(MC);
5905 return;
5906 }
5907 case Intrinsic::memset: {
5908 const auto &MSI = cast<MemSetInst>(I);
5909 SDValue Op1 = getValue(I.getArgOperand(0));
5910 SDValue Op2 = getValue(I.getArgOperand(1));
5911 SDValue Op3 = getValue(I.getArgOperand(2));
5912 // @llvm.memset defines 0 and 1 to both mean no alignment.
5913 Align Alignment = MSI.getDestAlign().valueOrOne();
5914 bool isVol = MSI.isVolatile();
5915 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5916 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5917 AAMDNodes AAInfo;
5918 I.getAAMetadata(AAInfo);
5919 SDValue MS = DAG.getMemset(Root, sdl, Op1, Op2, Op3, Alignment, isVol, isTC,
5920 MachinePointerInfo(I.getArgOperand(0)), AAInfo);
5921 updateDAGForMaybeTailCall(MS);
5922 return;
5923 }
5924 case Intrinsic::memmove: {
5925 const auto &MMI = cast<MemMoveInst>(I);
5926 SDValue Op1 = getValue(I.getArgOperand(0));
5927 SDValue Op2 = getValue(I.getArgOperand(1));
5928 SDValue Op3 = getValue(I.getArgOperand(2));
5929 // @llvm.memmove defines 0 and 1 to both mean no alignment.
5930 Align DstAlign = MMI.getDestAlign().valueOrOne();
5931 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
5932 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5933 bool isVol = MMI.isVolatile();
5934 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5935 // FIXME: Support passing different dest/src alignments to the memmove DAG
5936 // node.
5937 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5938 AAMDNodes AAInfo;
5939 I.getAAMetadata(AAInfo);
5940 SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5941 isTC, MachinePointerInfo(I.getArgOperand(0)),
5942 MachinePointerInfo(I.getArgOperand(1)), AAInfo);
5943 updateDAGForMaybeTailCall(MM);
5944 return;
5945 }
5946 case Intrinsic::memcpy_element_unordered_atomic: {
5947 const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5948 SDValue Dst = getValue(MI.getRawDest());
5949 SDValue Src = getValue(MI.getRawSource());
5950 SDValue Length = getValue(MI.getLength());
5951
5952 unsigned DstAlign = MI.getDestAlignment();
5953 unsigned SrcAlign = MI.getSourceAlignment();
5954 Type *LengthTy = MI.getLength()->getType();
5955 unsigned ElemSz = MI.getElementSizeInBytes();
5956 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5957 SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5958 SrcAlign, Length, LengthTy, ElemSz, isTC,
5959 MachinePointerInfo(MI.getRawDest()),
5960 MachinePointerInfo(MI.getRawSource()));
5961 updateDAGForMaybeTailCall(MC);
5962 return;
5963 }
5964 case Intrinsic::memmove_element_unordered_atomic: {
5965 auto &MI = cast<AtomicMemMoveInst>(I);
5966 SDValue Dst = getValue(MI.getRawDest());
5967 SDValue Src = getValue(MI.getRawSource());
5968 SDValue Length = getValue(MI.getLength());
5969
5970 unsigned DstAlign = MI.getDestAlignment();
5971 unsigned SrcAlign = MI.getSourceAlignment();
5972 Type *LengthTy = MI.getLength()->getType();
5973 unsigned ElemSz = MI.getElementSizeInBytes();
5974 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5975 SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5976 SrcAlign, Length, LengthTy, ElemSz, isTC,
5977 MachinePointerInfo(MI.getRawDest()),
5978 MachinePointerInfo(MI.getRawSource()));
5979 updateDAGForMaybeTailCall(MC);
5980 return;
5981 }
5982 case Intrinsic::memset_element_unordered_atomic: {
5983 auto &MI = cast<AtomicMemSetInst>(I);
5984 SDValue Dst = getValue(MI.getRawDest());
5985 SDValue Val = getValue(MI.getValue());
5986 SDValue Length = getValue(MI.getLength());
5987
5988 unsigned DstAlign = MI.getDestAlignment();
5989 Type *LengthTy = MI.getLength()->getType();
5990 unsigned ElemSz = MI.getElementSizeInBytes();
5991 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5992 SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5993 LengthTy, ElemSz, isTC,
5994 MachinePointerInfo(MI.getRawDest()));
5995 updateDAGForMaybeTailCall(MC);
5996 return;
5997 }
5998 case Intrinsic::call_preallocated_setup: {
5999 const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6000 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6001 SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6002 getRoot(), SrcValue);
6003 setValue(&I, Res);
6004 DAG.setRoot(Res);
6005 return;
6006 }
6007 case Intrinsic::call_preallocated_arg: {
6008 const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6009 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6010 SDValue Ops[3];
6011 Ops[0] = getRoot();
6012 Ops[1] = SrcValue;
6013 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6014 MVT::i32); // arg index
6015 SDValue Res = DAG.getNode(
6016 ISD::PREALLOCATED_ARG, sdl,
6017 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6018 setValue(&I, Res);
6019 DAG.setRoot(Res.getValue(1));
6020 return;
6021 }
6022 case Intrinsic::dbg_addr:
6023 case Intrinsic::dbg_declare: {
6024 // Assume dbg.addr and dbg.declare cannot currently use DIArgList, i.e.
6025 // they are non-variadic.
6026 const auto &DI = cast<DbgVariableIntrinsic>(I);
6027 assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
6028 DILocalVariable *Variable = DI.getVariable();
6029 DIExpression *Expression = DI.getExpression();
6030 dropDanglingDebugInfo(Variable, Expression);
6031 assert(Variable && "Missing variable");
6032 LLVM_DEBUG(dbgs() << "SelectionDAG visiting debug intrinsic: " << DI
6033                   << "\n");
6034 // Check if address has undef value.
6035 const Value *Address = DI.getVariableLocationOp(0);
6036 if (!Address || isa<UndefValue>(Address) ||
6037 (Address->use_empty() && !isa<Argument>(Address))) {
6038 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DIdo { } while (false)
6039 << " (bad/undef/unused-arg address)\n")do { } while (false);
6040 return;
6041 }
6042
6043 bool isParameter = Variable->isParameter() || isa<Argument>(Address);
6044
6045 // Check if this variable can be described by a frame index, typically
6046 // either as a static alloca or a byval parameter.
6047 int FI = std::numeric_limits<int>::max();
6048 if (const auto *AI =
6049 dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
6050 if (AI->isStaticAlloca()) {
6051 auto I = FuncInfo.StaticAllocaMap.find(AI);
6052 if (I != FuncInfo.StaticAllocaMap.end())
6053 FI = I->second;
6054 }
6055 } else if (const auto *Arg = dyn_cast<Argument>(
6056 Address->stripInBoundsConstantOffsets())) {
6057 FI = FuncInfo.getArgumentFrameIndex(Arg);
6058 }
6059
6060 // llvm.dbg.addr is control dependent and always generates indirect
6061 // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
6062 // the MachineFunction variable table.
6063 if (FI != std::numeric_limits<int>::max()) {
6064 if (Intrinsic == Intrinsic::dbg_addr) {
6065 SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
6066 Variable, Expression, FI, getRoot().getNode(), /*IsIndirect*/ true,
6067 dl, SDNodeOrder);
6068 DAG.AddDbgValue(SDV, isParameter);
6069 } else {
6070 LLVM_DEBUG(dbgs() << "Skipping " << DIdo { } while (false)
6071 << " (variable info stashed in MF side table)\n")do { } while (false);
6072 }
6073 return;
6074 }
6075
6076 SDValue &N = NodeMap[Address];
6077 if (!N.getNode() && isa<Argument>(Address))
6078 // Check unused arguments map.
6079 N = UnusedArgNodeMap[Address];
6080 SDDbgValue *SDV;
6081 if (N.getNode()) {
6082 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
6083 Address = BCI->getOperand(0);
6084 // Parameters are handled specially.
6085 auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
6086 if (isParameter && FINode) {
6087 // Byval parameter. We have a frame index at this point.
6088 SDV =
6089 DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
6090 /*IsIndirect*/ true, dl, SDNodeOrder);
6091 } else if (isa<Argument>(Address)) {
6092 // Address is an argument, so try to emit its dbg value using
6093 // virtual register info from the FuncInfo.ValueMap.
6094 EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
6095 return;
6096 } else {
6097 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
6098 true, dl, SDNodeOrder);
6099 }
6100 DAG.AddDbgValue(SDV, isParameter);
6101 } else {
6102 // If Address is an argument then try to emit its dbg value using
6103 // virtual register info from the FuncInfo.ValueMap.
6104 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
6105 N)) {
6106 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DIdo { } while (false)
6107 << " (could not emit func-arg dbg_value)\n")do { } while (false);
6108 }
6109 }
6110 return;
6111 }
6112 case Intrinsic::dbg_label: {
6113 const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6114 DILabel *Label = DI.getLabel();
6115 assert(Label && "Missing label");
6116
6117 SDDbgLabel *SDV;
6118 SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
6119 DAG.AddDbgLabel(SDV);
6120 return;
6121 }
6122 case Intrinsic::dbg_value: {
6123 const DbgValueInst &DI = cast<DbgValueInst>(I);
6124 assert(DI.getVariable() && "Missing variable");
6125
6126 DILocalVariable *Variable = DI.getVariable();
6127 DIExpression *Expression = DI.getExpression();
6128 dropDanglingDebugInfo(Variable, Expression);
6129 SmallVector<Value *, 4> Values(DI.getValues());
6130 if (Values.empty())
6131 return;
6132
6133 if (std::count(Values.begin(), Values.end(), nullptr))
6134 return;
6135
6136 bool IsVariadic = DI.hasArgList();
6137 if (!handleDebugValue(Values, Variable, Expression, dl, DI.getDebugLoc(),
6138 SDNodeOrder, IsVariadic))
6139 addDanglingDebugInfo(&DI, dl, SDNodeOrder);
6140 return;
6141 }
6142
6143 case Intrinsic::eh_typeid_for: {
6144 // Find the type id for the given typeinfo.
6145 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6146 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6147 Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6148 setValue(&I, Res);
6149 return;
6150 }
6151
6152 case Intrinsic::eh_return_i32:
6153 case Intrinsic::eh_return_i64:
6154 DAG.getMachineFunction().setCallsEHReturn(true);
6155 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6156 MVT::Other,
6157 getControlRoot(),
6158 getValue(I.getArgOperand(0)),
6159 getValue(I.getArgOperand(1))));
6160 return;
6161 case Intrinsic::eh_unwind_init:
6162 DAG.getMachineFunction().setCallsUnwindInit(true);
6163 return;
6164 case Intrinsic::eh_dwarf_cfa:
6165 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6166 TLI.getPointerTy(DAG.getDataLayout()),
6167 getValue(I.getArgOperand(0))));
6168 return;
6169 case Intrinsic::eh_sjlj_callsite: {
6170 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6171 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
Step 4: Assuming the object is not a 'ConstantInt'
Step 5: 'CI' initialized to a null pointer value
6172 assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
6173 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
6174
6175 MMI.setCurrentCallSite(CI->getZExtValue());
Step 6: Called C++ object pointer is null
6176 return;
6177 }
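The path above is the reported defect: dyn_cast returns null for a
non-constant operand, and the assert guarding the dereference is compiled
out in release (NDEBUG) builds, so nothing stops the null 'CI' from being
dereferenced. A minimal defensive sketch, not the code LLVM ships (the
error message text is invented for illustration; report_fatal_error is the
same helper used elsewhere in this function):

    case Intrinsic::eh_sjlj_callsite: {
      MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
      // Hypothetical hardening: keep rejecting non-constant operands even
      // when asserts are compiled out.
      const auto *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
      if (!CI)
        report_fatal_error("eh.sjlj.callsite operand must be a constant");
      MMI.setCurrentCallSite(CI->getZExtValue());
      return;
    }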
6178 case Intrinsic::eh_sjlj_functioncontext: {
6179 // Get and store the index of the function context.
6180 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6181 AllocaInst *FnCtx =
6182 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6183 int FI = FuncInfo.StaticAllocaMap[FnCtx];
6184 MFI.setFunctionContextIndex(FI);
6185 return;
6186 }
6187 case Intrinsic::eh_sjlj_setjmp: {
6188 SDValue Ops[2];
6189 Ops[0] = getRoot();
6190 Ops[1] = getValue(I.getArgOperand(0));
6191 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6192 DAG.getVTList(MVT::i32, MVT::Other), Ops);
6193 setValue(&I, Op.getValue(0));
6194 DAG.setRoot(Op.getValue(1));
6195 return;
6196 }
6197 case Intrinsic::eh_sjlj_longjmp:
6198 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6199 getRoot(), getValue(I.getArgOperand(0))));
6200 return;
6201 case Intrinsic::eh_sjlj_setup_dispatch:
6202 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6203 getRoot()));
6204 return;
6205 case Intrinsic::masked_gather:
6206 visitMaskedGather(I);
6207 return;
6208 case Intrinsic::masked_load:
6209 visitMaskedLoad(I);
6210 return;
6211 case Intrinsic::masked_scatter:
6212 visitMaskedScatter(I);
6213 return;
6214 case Intrinsic::masked_store:
6215 visitMaskedStore(I);
6216 return;
6217 case Intrinsic::masked_expandload:
6218 visitMaskedLoad(I, true /* IsExpanding */);
6219 return;
6220 case Intrinsic::masked_compressstore:
6221 visitMaskedStore(I, true /* IsCompressing */);
6222 return;
6223 case Intrinsic::powi:
6224 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6225 getValue(I.getArgOperand(1)), DAG));
6226 return;
6227 case Intrinsic::log:
6228 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6229 return;
6230 case Intrinsic::log2:
6231 setValue(&I,
6232 expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6233 return;
6234 case Intrinsic::log10:
6235 setValue(&I,
6236 expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6237 return;
6238 case Intrinsic::exp:
6239 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6240 return;
6241 case Intrinsic::exp2:
6242 setValue(&I,
6243 expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6244 return;
6245 case Intrinsic::pow:
6246 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6247 getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6248 return;
6249 case Intrinsic::sqrt:
6250 case Intrinsic::fabs:
6251 case Intrinsic::sin:
6252 case Intrinsic::cos:
6253 case Intrinsic::floor:
6254 case Intrinsic::ceil:
6255 case Intrinsic::trunc:
6256 case Intrinsic::rint:
6257 case Intrinsic::nearbyint:
6258 case Intrinsic::round:
6259 case Intrinsic::roundeven:
6260 case Intrinsic::canonicalize: {
6261 unsigned Opcode;
6262 switch (Intrinsic) {
6263 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6264 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
6265 case Intrinsic::fabs: Opcode = ISD::FABS; break;
6266 case Intrinsic::sin: Opcode = ISD::FSIN; break;
6267 case Intrinsic::cos: Opcode = ISD::FCOS; break;
6268 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
6269 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
6270 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
6271 case Intrinsic::rint: Opcode = ISD::FRINT; break;
6272 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6273 case Intrinsic::round: Opcode = ISD::FROUND; break;
6274 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6275 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6276 }
6277
6278 setValue(&I, DAG.getNode(Opcode, sdl,
6279 getValue(I.getArgOperand(0)).getValueType(),
6280 getValue(I.getArgOperand(0)), Flags));
6281 return;
6282 }
6283 case Intrinsic::lround:
6284 case Intrinsic::llround:
6285 case Intrinsic::lrint:
6286 case Intrinsic::llrint: {
6287 unsigned Opcode;
6288 switch (Intrinsic) {
6289 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6290 case Intrinsic::lround: Opcode = ISD::LROUND; break;
6291 case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6292 case Intrinsic::lrint: Opcode = ISD::LRINT; break;
6293 case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
6294 }
6295
6296 EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6297 setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6298 getValue(I.getArgOperand(0))));
6299 return;
6300 }
6301 case Intrinsic::minnum:
6302 setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6303 getValue(I.getArgOperand(0)).getValueType(),
6304 getValue(I.getArgOperand(0)),
6305 getValue(I.getArgOperand(1)), Flags));
6306 return;
6307 case Intrinsic::maxnum:
6308 setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6309 getValue(I.getArgOperand(0)).getValueType(),
6310 getValue(I.getArgOperand(0)),
6311 getValue(I.getArgOperand(1)), Flags));
6312 return;
6313 case Intrinsic::minimum:
6314 setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6315 getValue(I.getArgOperand(0)).getValueType(),
6316 getValue(I.getArgOperand(0)),
6317 getValue(I.getArgOperand(1)), Flags));
6318 return;
6319 case Intrinsic::maximum:
6320 setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6321 getValue(I.getArgOperand(0)).getValueType(),
6322 getValue(I.getArgOperand(0)),
6323 getValue(I.getArgOperand(1)), Flags));
6324 return;
6325 case Intrinsic::copysign:
6326 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6327 getValue(I.getArgOperand(0)).getValueType(),
6328 getValue(I.getArgOperand(0)),
6329 getValue(I.getArgOperand(1)), Flags));
6330 return;
6331 case Intrinsic::arithmetic_fence: {
6332 setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
6333 getValue(I.getArgOperand(0)).getValueType(),
6334 getValue(I.getArgOperand(0)), Flags));
6335 return;
6336 }
6337 case Intrinsic::fma:
6338 setValue(&I, DAG.getNode(
6339 ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
6340 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
6341 getValue(I.getArgOperand(2)), Flags));
6342 return;
6343#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6344 case Intrinsic::INTRINSIC:
6345#include "llvm/IR/ConstrainedOps.def"
6346 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6347 return;
6348#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6349#include "llvm/IR/VPIntrinsics.def"
6350 visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
6351 return;
6352 case Intrinsic::fmuladd: {
6353 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6354 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6355 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6356 setValue(&I, DAG.getNode(ISD::FMA, sdl,
6357 getValue(I.getArgOperand(0)).getValueType(),
6358 getValue(I.getArgOperand(0)),
6359 getValue(I.getArgOperand(1)),
6360 getValue(I.getArgOperand(2)), Flags));
6361 } else {
6362 // TODO: Intrinsic calls should have fast-math-flags.
6363 SDValue Mul = DAG.getNode(
6364 ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
6365 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
6366 SDValue Add = DAG.getNode(ISD::FADD, sdl,
6367 getValue(I.getArgOperand(0)).getValueType(),
6368 Mul, getValue(I.getArgOperand(2)), Flags);
6369 setValue(&I, Add);
6370 }
6371 return;
6372 }
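The branch above chooses between a fused FMA node and an explicit
multiply-plus-add. The observable difference is rounding, which is why
fusion is gated on AllowFPOpFusion and target profitability. A minimal
sketch of the distinction in plain C++ (illustrative only, not part of
this file):

    #include <cmath>

    // fused() rounds once; unfused() rounds after the multiply and again
    // after the add, so the two results can differ in the last bit.
    double fused(double a, double b, double c)   { return std::fma(a, b, c); }
    double unfused(double a, double b, double c) { return a * b + c; }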
6373 case Intrinsic::convert_to_fp16:
6374 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6375 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6376 getValue(I.getArgOperand(0)),
6377 DAG.getTargetConstant(0, sdl,
6378 MVT::i32))));
6379 return;
6380 case Intrinsic::convert_from_fp16:
6381 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6382 TLI.getValueType(DAG.getDataLayout(), I.getType()),
6383 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6384 getValue(I.getArgOperand(0)))));
6385 return;
6386 case Intrinsic::fptosi_sat: {
6387 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6388 setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
6389 getValue(I.getArgOperand(0)),
6390 DAG.getValueType(VT.getScalarType())));
6391 return;
6392 }
6393 case Intrinsic::fptoui_sat: {
6394 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6395 setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
6396 getValue(I.getArgOperand(0)),
6397 DAG.getValueType(VT.getScalarType())));
6398 return;
6399 }
6400 case Intrinsic::set_rounding:
6401 Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
6402 {getRoot(), getValue(I.getArgOperand(0))});
6403 setValue(&I, Res);
6404 DAG.setRoot(Res.getValue(0));
6405 return;
6406 case Intrinsic::pcmarker: {
6407 SDValue Tmp = getValue(I.getArgOperand(0));
6408 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6409 return;
6410 }
6411 case Intrinsic::readcyclecounter: {
6412 SDValue Op = getRoot();
6413 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6414 DAG.getVTList(MVT::i64, MVT::Other), Op);
6415 setValue(&I, Res);
6416 DAG.setRoot(Res.getValue(1));
6417 return;
6418 }
6419 case Intrinsic::bitreverse:
6420 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6421 getValue(I.getArgOperand(0)).getValueType(),
6422 getValue(I.getArgOperand(0))));
6423 return;
6424 case Intrinsic::bswap:
6425 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6426 getValue(I.getArgOperand(0)).getValueType(),
6427 getValue(I.getArgOperand(0))));
6428 return;
6429 case Intrinsic::cttz: {
6430 SDValue Arg = getValue(I.getArgOperand(0));
6431 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6432 EVT Ty = Arg.getValueType();
6433 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6434 sdl, Ty, Arg));
6435 return;
6436 }
6437 case Intrinsic::ctlz: {
6438 SDValue Arg = getValue(I.getArgOperand(0));
6439 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6440 EVT Ty = Arg.getValueType();
6441 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6442 sdl, Ty, Arg));
6443 return;
6444 }
6445 case Intrinsic::ctpop: {
6446 SDValue Arg = getValue(I.getArgOperand(0));
6447 EVT Ty = Arg.getValueType();
6448 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6449 return;
6450 }
6451 case Intrinsic::fshl:
6452 case Intrinsic::fshr: {
6453 bool IsFSHL = Intrinsic == Intrinsic::fshl;
6454 SDValue X = getValue(I.getArgOperand(0));
6455 SDValue Y = getValue(I.getArgOperand(1));
6456 SDValue Z = getValue(I.getArgOperand(2));
6457 EVT VT = X.getValueType();
6458
6459 if (X == Y) {
6460 auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6461 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6462 } else {
6463 auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6464 setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6465 }
6466 return;
6467 }
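The X == Y special case above relies on the identity fshl(x, x, z) ==
rotl(x, z): funnel-shifting a value with itself is exactly a rotate. A
worked check on i8, assuming the usual reading of fshl as the high half of
a shifted double-width concatenation:

    // fshl(x, y, z) on i8: top 8 bits of ((x:y) << (z % 8)), where x:y is
    // the 16-bit concatenation. With x == y == 0b10010110 and z == 3:
    //   x:y          = 0b1001011010010110
    //   (x:y) << 3   = 0b1011010010110000   (kept to 16 bits)
    //   top 8 bits   = 0b10110100           == rotl(0b10010110, 3)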
6468 case Intrinsic::sadd_sat: {
6469 SDValue Op1 = getValue(I.getArgOperand(0));
6470 SDValue Op2 = getValue(I.getArgOperand(1));
6471 setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6472 return;
6473 }
6474 case Intrinsic::uadd_sat: {
6475 SDValue Op1 = getValue(I.getArgOperand(0));
6476 SDValue Op2 = getValue(I.getArgOperand(1));
6477 setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6478 return;
6479 }
6480 case Intrinsic::ssub_sat: {
6481 SDValue Op1 = getValue(I.getArgOperand(0));
6482 SDValue Op2 = getValue(I.getArgOperand(1));
6483 setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6484 return;
6485 }
6486 case Intrinsic::usub_sat: {
6487 SDValue Op1 = getValue(I.getArgOperand(0));
6488 SDValue Op2 = getValue(I.getArgOperand(1));
6489 setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6490 return;
6491 }
6492 case Intrinsic::sshl_sat: {
6493 SDValue Op1 = getValue(I.getArgOperand(0));
6494 SDValue Op2 = getValue(I.getArgOperand(1));
6495 setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6496 return;
6497 }
6498 case Intrinsic::ushl_sat: {
6499 SDValue Op1 = getValue(I.getArgOperand(0));
6500 SDValue Op2 = getValue(I.getArgOperand(1));
6501 setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6502 return;
6503 }
6504 case Intrinsic::smul_fix:
6505 case Intrinsic::umul_fix:
6506 case Intrinsic::smul_fix_sat:
6507 case Intrinsic::umul_fix_sat: {
6508 SDValue Op1 = getValue(I.getArgOperand(0));
6509 SDValue Op2 = getValue(I.getArgOperand(1));
6510 SDValue Op3 = getValue(I.getArgOperand(2));
6511 setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6512 Op1.getValueType(), Op1, Op2, Op3));
6513 return;
6514 }
6515 case Intrinsic::sdiv_fix:
6516 case Intrinsic::udiv_fix:
6517 case Intrinsic::sdiv_fix_sat:
6518 case Intrinsic::udiv_fix_sat: {
6519 SDValue Op1 = getValue(I.getArgOperand(0));
6520 SDValue Op2 = getValue(I.getArgOperand(1));
6521 SDValue Op3 = getValue(I.getArgOperand(2));
6522 setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6523 Op1, Op2, Op3, DAG, TLI));
6524 return;
6525 }
6526 case Intrinsic::smax: {
6527 SDValue Op1 = getValue(I.getArgOperand(0));
6528 SDValue Op2 = getValue(I.getArgOperand(1));
6529 setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
6530 return;
6531 }
6532 case Intrinsic::smin: {
6533 SDValue Op1 = getValue(I.getArgOperand(0));
6534 SDValue Op2 = getValue(I.getArgOperand(1));
6535 setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
6536 return;
6537 }
6538 case Intrinsic::umax: {
6539 SDValue Op1 = getValue(I.getArgOperand(0));
6540 SDValue Op2 = getValue(I.getArgOperand(1));
6541 setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
6542 return;
6543 }
6544 case Intrinsic::umin: {
6545 SDValue Op1 = getValue(I.getArgOperand(0));
6546 SDValue Op2 = getValue(I.getArgOperand(1));
6547 setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
6548 return;
6549 }
6550 case Intrinsic::abs: {
6551 // TODO: Preserve "int min is poison" arg in SDAG?
6552 SDValue Op1 = getValue(I.getArgOperand(0));
6553 setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
6554 return;
6555 }
6556 case Intrinsic::stacksave: {
6557 SDValue Op = getRoot();
6558 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6559 Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6560 setValue(&I, Res);
6561 DAG.setRoot(Res.getValue(1));
6562 return;
6563 }
6564 case Intrinsic::stackrestore:
6565 Res = getValue(I.getArgOperand(0));
6566 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6567 return;
6568 case Intrinsic::get_dynamic_area_offset: {
6569 SDValue Op = getRoot();
6570 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6571 EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6572 // The result type for @llvm.get.dynamic.area.offset should match the
6573 // target's PtrTy.
6574 if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
6575 report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6576 " intrinsic!");
6577 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6578 Op);
6579 DAG.setRoot(Op);
6580 setValue(&I, Res);
6581 return;
6582 }
6583 case Intrinsic::stackguard: {
6584 MachineFunction &MF = DAG.getMachineFunction();
6585 const Module &M = *MF.getFunction().getParent();
6586 SDValue Chain = getRoot();
6587 if (TLI.useLoadStackGuardNode()) {
6588 Res = getLoadStackGuard(DAG, sdl, Chain);
6589 } else {
6590 EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6591 const Value *Global = TLI.getSDagStackGuard(M);
6592 Align Align = DL->getPrefTypeAlign(Global->getType());
6593 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6594 MachinePointerInfo(Global, 0), Align,
6595 MachineMemOperand::MOVolatile);
6596 }
6597 if (TLI.useStackGuardXorFP())
6598 Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6599 DAG.setRoot(Chain);
6600 setValue(&I, Res);
6601 return;
6602 }
6603 case Intrinsic::stackprotector: {
6604 // Emit code into the DAG to store the stack guard onto the stack.
6605 MachineFunction &MF = DAG.getMachineFunction();
6606 MachineFrameInfo &MFI = MF.getFrameInfo();
6607 SDValue Src, Chain = getRoot();
6608
6609 if (TLI.useLoadStackGuardNode())
6610 Src = getLoadStackGuard(DAG, sdl, Chain);
6611 else
6612 Src = getValue(I.getArgOperand(0)); // The guard's value.
6613
6614 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6615
6616 int FI = FuncInfo.StaticAllocaMap[Slot];
6617 MFI.setStackProtectorIndex(FI);
6618 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6619
6620 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6621
6622 // Store the stack protector onto the stack.
6623 Res = DAG.getStore(
6624 Chain, sdl, Src, FIN,
6625 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
6626 MaybeAlign(), MachineMemOperand::MOVolatile);
6627 setValue(&I, Res);
6628 DAG.setRoot(Res);
6629 return;
6630 }
6631 case Intrinsic::objectsize:
6632 llvm_unreachable("llvm.objectsize.* should have been lowered already")__builtin_unreachable();
6633
6634 case Intrinsic::is_constant:
6635 llvm_unreachable("llvm.is.constant.* should have been lowered already")__builtin_unreachable();
6636
6637 case Intrinsic::annotation:
6638 case Intrinsic::ptr_annotation:
6639 case Intrinsic::launder_invariant_group:
6640 case Intrinsic::strip_invariant_group:
6641 // Drop the intrinsic, but forward the value
6642 setValue(&I, getValue(I.getOperand(0)));
6643 return;
6644
6645 case Intrinsic::assume:
6646 case Intrinsic::experimental_noalias_scope_decl:
6647 case Intrinsic::var_annotation:
6648 case Intrinsic::sideeffect:
6649 // Discard annotate attributes, noalias scope declarations, assumptions, and
6650 // artificial side-effects.
6651 return;
6652
6653 case Intrinsic::codeview_annotation: {
6654 // Emit a label associated with this metadata.
6655 MachineFunction &MF = DAG.getMachineFunction();
6656 MCSymbol *Label =
6657 MF.getMMI().getContext().createTempSymbol("annotation", true);
6658 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
6659 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
6660 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
6661 DAG.setRoot(Res);
6662 return;
6663 }
6664
6665 case Intrinsic::init_trampoline: {
6666 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
6667
6668 SDValue Ops[6];
6669 Ops[0] = getRoot();
6670 Ops[1] = getValue(I.getArgOperand(0));
6671 Ops[2] = getValue(I.getArgOperand(1));
6672 Ops[3] = getValue(I.getArgOperand(2));
6673 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
6674 Ops[5] = DAG.getSrcValue(F);
6675
6676 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
6677
6678 DAG.setRoot(Res);
6679 return;
6680 }
6681 case Intrinsic::adjust_trampoline:
6682 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
6683 TLI.getPointerTy(DAG.getDataLayout()),
6684 getValue(I.getArgOperand(0))));
6685 return;
6686 case Intrinsic::gcroot: {
6687 assert(DAG.getMachineFunction().getFunction().hasGC() &&
6688        "only valid in functions with gc specified, enforced by Verifier");
6689 assert(GFI && "implied by previous");
6690 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
6691 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
6692
6693 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
6694 GFI->addStackRoot(FI->getIndex(), TypeMap);
6695 return;
6696 }
6697 case Intrinsic::gcread:
6698 case Intrinsic::gcwrite:
6699 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!")__builtin_unreachable();
6700 case Intrinsic::flt_rounds:
6701 Res = DAG.getNode(ISD::FLT_ROUNDS_, sdl, {MVT::i32, MVT::Other}, getRoot());
6702 setValue(&I, Res);
6703 DAG.setRoot(Res.getValue(1));
6704 return;
6705
6706 case Intrinsic::expect:
6707 // Just replace __builtin_expect(exp, c) with EXP.
6708 setValue(&I, getValue(I.getArgOperand(0)));
6709 return;
6710
6711 case Intrinsic::ubsantrap:
6712 case Intrinsic::debugtrap:
6713 case Intrinsic::trap: {
6714 StringRef TrapFuncName =
6715 I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
6716 if (TrapFuncName.empty()) {
6717 switch (Intrinsic) {
6718 case Intrinsic::trap:
6719 DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
6720 break;
6721 case Intrinsic::debugtrap:
6722 DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
6723 break;
6724 case Intrinsic::ubsantrap:
6725 DAG.setRoot(DAG.getNode(
6726 ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
6727 DAG.getTargetConstant(
6728 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
6729 MVT::i32)));
6730 break;
6731 default: llvm_unreachable("unknown trap intrinsic")__builtin_unreachable();
6732 }
6733 return;
6734 }
6735 TargetLowering::ArgListTy Args;
6736 if (Intrinsic == Intrinsic::ubsantrap) {
6737 Args.push_back(TargetLoweringBase::ArgListEntry());
6738 Args[0].Val = I.getArgOperand(0);
6739 Args[0].Node = getValue(Args[0].Val);
6740 Args[0].Ty = Args[0].Val->getType();
6741 }
6742
6743 TargetLowering::CallLoweringInfo CLI(DAG);
6744 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
6745 CallingConv::C, I.getType(),
6746 DAG.getExternalSymbol(TrapFuncName.data(),
6747 TLI.getPointerTy(DAG.getDataLayout())),
6748 std::move(Args));
6749
6750 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6751 DAG.setRoot(Result.second);
6752 return;
6753 }
6754
6755 case Intrinsic::uadd_with_overflow:
6756 case Intrinsic::sadd_with_overflow:
6757 case Intrinsic::usub_with_overflow:
6758 case Intrinsic::ssub_with_overflow:
6759 case Intrinsic::umul_with_overflow:
6760 case Intrinsic::smul_with_overflow: {
6761 ISD::NodeType Op;
6762 switch (Intrinsic) {
6763 default: llvm_unreachable("Impossible intrinsic")__builtin_unreachable(); // Can't reach here.
6764 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
6765 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
6766 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
6767 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
6768 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
6769 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
6770 }
6771 SDValue Op1 = getValue(I.getArgOperand(0));
6772 SDValue Op2 = getValue(I.getArgOperand(1));
6773
6774 EVT ResultVT = Op1.getValueType();
6775 EVT OverflowVT = MVT::i1;
6776 if (ResultVT.isVector())
6777 OverflowVT = EVT::getVectorVT(
6778 *Context, OverflowVT, ResultVT.getVectorElementCount());
6779
6780 SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
6781 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
6782 return;
6783 }
6784 case Intrinsic::prefetch: {
6785 SDValue Ops[5];
6786 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
6787 auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
6788 Ops[0] = DAG.getRoot();
6789 Ops[1] = getValue(I.getArgOperand(0));
6790 Ops[2] = getValue(I.getArgOperand(1));
6791 Ops[3] = getValue(I.getArgOperand(2));
6792 Ops[4] = getValue(I.getArgOperand(3));
6793 SDValue Result = DAG.getMemIntrinsicNode(
6794 ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
6795 EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
6796 /* align */ None, Flags);
6797
6798 // Chain the prefetch in parallel with any pending loads, to stay out of
6799 // the way of later optimizations.
6800 PendingLoads.push_back(Result);
6801 Result = getRoot();
6802 DAG.setRoot(Result);
6803 return;
6804 }
6805 case Intrinsic::lifetime_start:
6806 case Intrinsic::lifetime_end: {
6807 bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
6808 // Stack coloring is not enabled in O0, discard region information.
6809 if (TM.getOptLevel() == CodeGenOpt::None)
6810 return;
6811
6812 const int64_t ObjectSize =
6813 cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
6814 Value *const ObjectPtr = I.getArgOperand(1);
6815 SmallVector<const Value *, 4> Allocas;
6816 getUnderlyingObjects(ObjectPtr, Allocas);
6817
6818 for (const Value *Alloca : Allocas) {
6819 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
6820
6821 // Could not find an Alloca.
6822 if (!LifetimeObject)
6823 continue;
6824
6825 // First check that the Alloca is static, otherwise it won't have a
6826 // valid frame index.
6827 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
6828 if (SI == FuncInfo.StaticAllocaMap.end())
6829 return;
6830
6831 const int FrameIndex = SI->second;
6832 int64_t Offset;
6833 if (GetPointerBaseWithConstantOffset(
6834 ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
6835 Offset = -1; // Cannot determine offset from alloca to lifetime object.
6836 Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
6837 Offset);
6838 DAG.setRoot(Res);
6839 }
6840 return;
6841 }
6842 case Intrinsic::pseudoprobe: {
6843 auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
6844 auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
6845 auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
6846 Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
6847 DAG.setRoot(Res);
6848 return;
6849 }
6850 case Intrinsic::invariant_start:
6851 // Discard region information.
6852 setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
6853 return;
6854 case Intrinsic::invariant_end:
6855 // Discard region information.
6856 return;
6857 case Intrinsic::clear_cache:
6858 // FunctionName may be null.
6859 if (const char *FunctionName = TLI.getClearCacheBuiltinName())
6860 lowerCallToExternalSymbol(I, FunctionName);
6861 return;
6862 case Intrinsic::donothing:
6863 case Intrinsic::seh_try_begin:
6864 case Intrinsic::seh_scope_begin:
6865 case Intrinsic::seh_try_end:
6866 case Intrinsic::seh_scope_end:
6867 // ignore
6868 return;
6869 case Intrinsic::experimental_stackmap:
6870 visitStackmap(I);
6871 return;
6872 case Intrinsic::experimental_patchpoint_void:
6873 case Intrinsic::experimental_patchpoint_i64:
6874 visitPatchpoint(I);
6875 return;
6876 case Intrinsic::experimental_gc_statepoint:
6877 LowerStatepoint(cast<GCStatepointInst>(I));
6878 return;
6879 case Intrinsic::experimental_gc_result:
6880 visitGCResult(cast<GCResultInst>(I));
6881 return;
6882 case Intrinsic::experimental_gc_relocate:
6883 visitGCRelocate(cast<GCRelocateInst>(I));
6884 return;
6885 case Intrinsic::instrprof_increment:
6886 llvm_unreachable("instrprof failed to lower an increment")__builtin_unreachable();
6887 case Intrinsic::instrprof_value_profile:
6888 llvm_unreachable("instrprof failed to lower a value profiling call")__builtin_unreachable();
6889 case Intrinsic::localescape: {
6890 MachineFunction &MF = DAG.getMachineFunction();
6891 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6892
6893 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
6894 // is the same on all targets.
6895 for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
6896 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
6897 if (isa<ConstantPointerNull>(Arg))
6898 continue; // Skip null pointers. They represent a hole in index space.
6899 AllocaInst *Slot = cast<AllocaInst>(Arg);
6900 assert(FuncInfo.StaticAllocaMap.count(Slot) &&
6901        "can only escape static allocas");
6902 int FI = FuncInfo.StaticAllocaMap[Slot];
6903 MCSymbol *FrameAllocSym =
6904 MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6905 GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
6906 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
6907 TII->get(TargetOpcode::LOCAL_ESCAPE))
6908 .addSym(FrameAllocSym)
6909 .addFrameIndex(FI);
6910 }
6911
6912 return;
6913 }
6914
6915 case Intrinsic::localrecover: {
6916 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
6917 MachineFunction &MF = DAG.getMachineFunction();
6918
6919 // Get the symbol that defines the frame offset.
6920 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
6921 auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
6922 unsigned IdxVal =
6923 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
6924 MCSymbol *FrameAllocSym =
6925 MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6926 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
6927
6928 Value *FP = I.getArgOperand(1);
6929 SDValue FPVal = getValue(FP);
6930 EVT PtrVT = FPVal.getValueType();
6931
6932 // Create a MCSymbol for the label to avoid any target lowering
6933 // that would make this PC relative.
6934 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
6935 SDValue OffsetVal =
6936 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
6937
6938 // Add the offset to the FP.
6939 SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
6940 setValue(&I, Add);
6941
6942 return;
6943 }
6944
6945 case Intrinsic::eh_exceptionpointer:
6946 case Intrinsic::eh_exceptioncode: {
6947 // Get the exception pointer vreg, copy from it, and resize it to fit.
6948 const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
6949 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
6950 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
6951 unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
6952 SDValue N =
6953 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
6954 if (Intrinsic == Intrinsic::eh_exceptioncode)
6955 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
6956 setValue(&I, N);
6957 return;
6958 }
6959 case Intrinsic::xray_customevent: {
6960 // Here we want to make sure that the intrinsic behaves as if it has a
6961 // specific calling convention, and only for x86_64.
6962 // FIXME: Support other platforms later.
6963 const auto &Triple = DAG.getTarget().getTargetTriple();
6964 if (Triple.getArch() != Triple::x86_64)
6965 return;
6966
6967 SDLoc DL = getCurSDLoc();
6968 SmallVector<SDValue, 8> Ops;
6969
6970 // We want to say that we always want the arguments in registers.
6971 SDValue LogEntryVal = getValue(I.getArgOperand(0));
6972 SDValue StrSizeVal = getValue(I.getArgOperand(1));
6973 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6974 SDValue Chain = getRoot();
6975 Ops.push_back(LogEntryVal);
6976 Ops.push_back(StrSizeVal);
6977 Ops.push_back(Chain);
6978
6979 // We need to enforce the calling convention for the callsite, so that
6980 // argument ordering is enforced correctly, and so that register allocation
6981 // can see that some registers may be clobbered and must be preserved
6982 // across calls to the intrinsic.
6983 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
6984 DL, NodeTys, Ops);
6985 SDValue patchableNode = SDValue(MN, 0);
6986 DAG.setRoot(patchableNode);
6987 setValue(&I, patchableNode);
6988 return;
6989 }
6990 case Intrinsic::xray_typedevent: {
6991 // Here we want to make sure that the intrinsic behaves as if it has a
6992 // specific calling convention, and only for x86_64.
6993 // FIXME: Support other platforms later.
6994 const auto &Triple = DAG.getTarget().getTargetTriple();
6995 if (Triple.getArch() != Triple::x86_64)
6996 return;
6997
6998 SDLoc DL = getCurSDLoc();
6999 SmallVector<SDValue, 8> Ops;
7000
7001 // We want to say that we always want the arguments in registers.
7002 // It's unclear to me how manipulating the selection DAG here forces callers
7003 // to provide arguments in registers instead of on the stack.
7004 SDValue LogTypeId = getValue(I.getArgOperand(0));
7005 SDValue LogEntryVal = getValue(I.getArgOperand(1));
7006 SDValue StrSizeVal = getValue(I.getArgOperand(2));
7007 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7008 SDValue Chain = getRoot();
7009 Ops.push_back(LogTypeId);
7010 Ops.push_back(LogEntryVal);
7011 Ops.push_back(StrSizeVal);
7012 Ops.push_back(Chain);
7013
7014 // We need to enforce the calling convention for the callsite, so that
7015 // argument ordering is enforced correctly, and so that register allocation
7016 // can see that some registers may be clobbered and must be preserved
7017 // across calls to the intrinsic.
7018 MachineSDNode *MN = DAG.getMachineNode(
7019 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
7020 SDValue patchableNode = SDValue(MN, 0);
7021 DAG.setRoot(patchableNode);
7022 setValue(&I, patchableNode);
7023 return;
7024 }
7025 case Intrinsic::experimental_deoptimize:
7026 LowerDeoptimizeCall(&I);
7027 return;
7028 case Intrinsic::experimental_stepvector:
7029 visitStepVector(I);
7030 return;
7031 case Intrinsic::vector_reduce_fadd:
7032 case Intrinsic::vector_reduce_fmul:
7033 case Intrinsic::vector_reduce_add:
7034 case Intrinsic::vector_reduce_mul:
7035 case Intrinsic::vector_reduce_and:
7036 case Intrinsic::vector_reduce_or:
7037 case Intrinsic::vector_reduce_xor:
7038 case Intrinsic::vector_reduce_smax:
7039 case Intrinsic::vector_reduce_smin:
7040 case Intrinsic::vector_reduce_umax:
7041 case Intrinsic::vector_reduce_umin:
7042 case Intrinsic::vector_reduce_fmax:
7043 case Intrinsic::vector_reduce_fmin:
7044 visitVectorReduce(I, Intrinsic);
7045 return;
7046
7047 case Intrinsic::icall_branch_funnel: {
7048 SmallVector<SDValue, 16> Ops;
7049 Ops.push_back(getValue(I.getArgOperand(0)));
7050
7051 int64_t Offset;
7052 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7053 I.getArgOperand(1), Offset, DAG.getDataLayout()));
7054 if (!Base)
7055 report_fatal_error(
7056 "llvm.icall.branch.funnel operand must be a GlobalValue");
7057 Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));
7058
7059 struct BranchFunnelTarget {
7060 int64_t Offset;
7061 SDValue Target;
7062 };
7063 SmallVector<BranchFunnelTarget, 8> Targets;
7064
7065 for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
7066 auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7067 I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7068 if (ElemBase != Base)
7069 report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7070 "to the same GlobalValue");
7071
7072 SDValue Val = getValue(I.getArgOperand(Op + 1));
7073 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7074 if (!GA)
7075 report_fatal_error(
7076 "llvm.icall.branch.funnel operand must be a GlobalValue");
7077 Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7078 GA->getGlobal(), getCurSDLoc(),
7079 Val.getValueType(), GA->getOffset())});
7080 }
7081 llvm::sort(Targets,
7082 [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7083 return T1.Offset < T2.Offset;
7084 });
7085
7086 for (auto &T : Targets) {
7087 Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
7088 Ops.push_back(T.Target);
7089 }
7090
7091 Ops.push_back(DAG.getRoot()); // Chain
7092 SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
7093 getCurSDLoc(), MVT::Other, Ops),
7094 0);
7095 DAG.setRoot(N);
7096 setValue(&I, N);
7097 HasTailCall = true;
7098 return;
7099 }
7100
7101 case Intrinsic::wasm_landingpad_index:
7102 // Information this intrinsic contained has been transferred to
7103 // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
7104 // delete it now.
7105 return;
7106
7107 case Intrinsic::aarch64_settag:
7108 case Intrinsic::aarch64_settag_zero: {
7109 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7110 bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
7111 SDValue Val = TSI.EmitTargetCodeForSetTag(
7112 DAG, getCurSDLoc(), getRoot(), getValue(I.getArgOperand(0)),
7113 getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
7114 ZeroMemory);
7115 DAG.setRoot(Val);
7116 setValue(&I, Val);
7117 return;
7118 }
7119 case Intrinsic::ptrmask: {
7120 SDValue Ptr = getValue(I.getOperand(0));
7121 SDValue Const = getValue(I.getOperand(1));
7122
7123 EVT PtrVT = Ptr.getValueType();
7124 setValue(&I, DAG.getNode(ISD::AND, getCurSDLoc(), PtrVT, Ptr,
7125 DAG.getZExtOrTrunc(Const, getCurSDLoc(), PtrVT)));
7126 return;
7127 }
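// Illustrative sketch (not part of the original source): the ptrmask case
// above lowers
//   %q = call i8* @llvm.ptrmask.p0i8.i64(i8* %p, i64 -64)
// to (AND %p, -64) in the pointer's value type, i.e. a plain alignment mask,
// with the i64 mask zero-extended or truncated to match.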
7128 case Intrinsic::get_active_lane_mask: {
7129 auto DL = getCurSDLoc();
7130 SDValue Index = getValue(I.getOperand(0));
7131 SDValue TripCount = getValue(I.getOperand(1));
7132 Type *ElementTy = I.getOperand(0)->getType();
7133 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7134 unsigned VecWidth = VT.getVectorNumElements();
7135
7136 SmallVector<SDValue, 16> OpsTripCount;
7137 SmallVector<SDValue, 16> OpsIndex;
7138 SmallVector<SDValue, 16> OpsStepConstants;
7139 for (unsigned i = 0; i < VecWidth; i++) {
7140 OpsTripCount.push_back(TripCount);
7141 OpsIndex.push_back(Index);
7142 OpsStepConstants.push_back(
7143 DAG.getConstant(i, DL, EVT::getEVT(ElementTy)));
7144 }
7145
7146 EVT CCVT = EVT::getVectorVT(I.getContext(), MVT::i1, VecWidth);
7147
7148 auto VecTy = EVT::getEVT(FixedVectorType::get(ElementTy, VecWidth));
7149 SDValue VectorIndex = DAG.getBuildVector(VecTy, DL, OpsIndex);
7150 SDValue VectorStep = DAG.getBuildVector(VecTy, DL, OpsStepConstants);
7151 SDValue VectorInduction = DAG.getNode(
7152 ISD::UADDO, DL, DAG.getVTList(VecTy, CCVT), VectorIndex, VectorStep);
7153 SDValue VectorTripCount = DAG.getBuildVector(VecTy, DL, OpsTripCount);
7154 SDValue SetCC = DAG.getSetCC(DL, CCVT, VectorInduction.getValue(0),
7155 VectorTripCount, ISD::CondCode::SETULT);
7156 setValue(&I, DAG.getNode(ISD::AND, DL, CCVT,
7157 DAG.getNOT(DL, VectorInduction.getValue(1), CCVT),
7158 SetCC));
7159 return;
7160 }
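// Illustrative sketch (not part of the original source): for
//   %m = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %i, i32 %n)
// the expansion above computes {%i+0, %i+1, %i+2, %i+3} via UADDO and yields
//   lane[j] = !carry[j] & ((%i + j) <u %n)
// so lanes at or past the trip count, or past an index wrap, read false.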
7161 case Intrinsic::experimental_vector_insert: {
7162 auto DL = getCurSDLoc();
7163
7164 SDValue Vec = getValue(I.getOperand(0));
7165 SDValue SubVec = getValue(I.getOperand(1));
7166 SDValue Index = getValue(I.getOperand(2));
7167
7168 // The intrinsic's index type is i64, but the SDNode requires an index type
7169 // suitable for the target. Convert the index as required.
7170 MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7171 if (Index.getValueType() != VectorIdxTy)
7172 Index = DAG.getVectorIdxConstant(
7173 cast<ConstantSDNode>(Index)->getZExtValue(), DL);
7174
7175 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7176 setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ResultVT, Vec, SubVec,
7177 Index));
7178 return;
7179 }
7180 case Intrinsic::experimental_vector_extract: {
7181 auto DL = getCurSDLoc();
7182
7183 SDValue Vec = getValue(I.getOperand(0));
7184 SDValue Index = getValue(I.getOperand(1));
7185 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7186
7187 // The intrinsic's index type is i64, but the SDNode requires an index type
7188 // suitable for the target. Convert the index as required.
7189 MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7190 if (Index.getValueType() != VectorIdxTy)
7191 Index = DAG.getVectorIdxConstant(
7192 cast<ConstantSDNode>(Index)->getZExtValue(), DL);
7193
7194 setValue(&I, DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, Index));
7195 return;
7196 }
7197 case Intrinsic::experimental_vector_reverse:
7198 visitVectorReverse(I);
7199 return;
7200 case Intrinsic::experimental_vector_splice:
7201 visitVectorSplice(I);
7202 return;
7203 }
7204}
7205
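// Illustrative sketch (not part of the original source): a constrained call
//   %r = call double @llvm.experimental.constrained.fadd.f64(double %a,
//            double %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
// becomes a STRICT_FADD node below, and its out-chain lands on
// PendingConstrainedFPStrict so later code cannot be reordered across it.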
7206void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
7207 const ConstrainedFPIntrinsic &FPI) {
7208 SDLoc sdl = getCurSDLoc();
7209
7210 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7211 SmallVector<EVT, 4> ValueVTs;
7212 ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
7213 ValueVTs.push_back(MVT::Other); // Out chain
7214
7215 // We do not need to serialize constrained FP intrinsics against
7216 // each other or against (nonvolatile) loads, so they can be
7217 // chained like loads.
7218 SDValue Chain = DAG.getRoot();
7219 SmallVector<SDValue, 4> Opers;
7220 Opers.push_back(Chain);
7221 if (FPI.isUnaryOp()) {
7222 Opers.push_back(getValue(FPI.getArgOperand(0)));
7223 } else if (FPI.isTernaryOp()) {
7224 Opers.push_back(getValue(FPI.getArgOperand(0)));
7225 Opers.push_back(getValue(FPI.getArgOperand(1)));
7226 Opers.push_back(getValue(FPI.getArgOperand(2)));
7227 } else {
7228 Opers.push_back(getValue(FPI.getArgOperand(0)));
7229 Opers.push_back(getValue(FPI.getArgOperand(1)));
7230 }
7231
7232 auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
7233 assert(Result.getNode()->getNumValues() == 2);
7234
7235 // Push node to the appropriate list so that future instructions can be
7236 // chained up correctly.
7237 SDValue OutChain = Result.getValue(1);
7238 switch (EB) {
7239 case fp::ExceptionBehavior::ebIgnore:
7240 // The only reason why ebIgnore nodes still need to be chained is that
7241 // they might depend on the current rounding mode, and therefore must
7242 // not be moved across instruction that may change that mode.
7243 LLVM_FALLTHROUGH;
7244 case fp::ExceptionBehavior::ebMayTrap:
7245 // These must not be moved across calls or instructions that may change
7246 // floating-point exception masks.
7247 PendingConstrainedFP.push_back(OutChain);
7248 break;
7249 case fp::ExceptionBehavior::ebStrict:
7250 // These must not be moved across calls or instructions that may change
7251 // floating-point exception masks or read floating-point exception flags.
7252 // In addition, they cannot be optimized out even if unused.
7253 PendingConstrainedFPStrict.push_back(OutChain);
7254 break;
7255 }
7256 };
7257
7258 SDVTList VTs = DAG.getVTList(ValueVTs);
7259 fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
7260
7261 SDNodeFlags Flags;
7262 if (EB == fp::ExceptionBehavior::ebIgnore)
7263 Flags.setNoFPExcept(true);
7264
7265 if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
7266 Flags.copyFMF(*FPOp);
7267
7268 unsigned Opcode;
7269 switch (FPI.getIntrinsicID()) {
7270 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
7271#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
7272 case Intrinsic::INTRINSIC: \
7273 Opcode = ISD::STRICT_##DAGN; \
7274 break;
7275#include "llvm/IR/ConstrainedOps.def"
7276 case Intrinsic::experimental_constrained_fmuladd: {
7277 Opcode = ISD::STRICT_FMA;
7278 // Break fmuladd into fmul and fadd.
7279 if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
7280 !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(),
7281 ValueVTs[0])) {
7282 Opers.pop_back();
7283 SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
7284 pushOutChain(Mul, EB);
7285 Opcode = ISD::STRICT_FADD;
7286 Opers.clear();
7287 Opers.push_back(Mul.getValue(1));
7288 Opers.push_back(Mul.getValue(0));
7289 Opers.push_back(getValue(FPI.getArgOperand(2)));
7290 }
7291 break;
7292 }
7293 }
7294
7295 // A few strict DAG nodes carry additional operands that are not
7296 // set up by the default code above.
7297 switch (Opcode) {
7298 default: break;
7299 case ISD::STRICT_FP_ROUND:
7300 Opers.push_back(
7301 DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
7302 break;
7303 case ISD::STRICT_FSETCC:
7304 case ISD::STRICT_FSETCCS: {
7305 auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
7306 ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
7307 if (TM.Options.NoNaNsFPMath)
7308 Condition = getFCmpCodeWithoutNaN(Condition);
7309 Opers.push_back(DAG.getCondCode(Condition));
7310 break;
7311 }
7312 }
7313
7314 SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7315 pushOutChain(Result, EB);
7316
7317 SDValue FPResult = Result.getValue(0);
7318 setValue(&FPI, FPResult);
7319}
7320
7321static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
7322 Optional<unsigned> ResOPC;
7323 switch (VPIntrin.getIntrinsicID()) {
7324#define BEGIN_REGISTER_VP_INTRINSIC(INTRIN, ...) case Intrinsic::INTRIN:
7325#define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) ResOPC = ISD::VPSDID;
7326#define END_REGISTER_VP_INTRINSIC(...) break;
7327#include "llvm/IR/VPIntrinsics.def"
7328 }
7329
7330 if (!ResOPC.hasValue())
7331 llvm_unreachable(
7332 "Inconsistency: no SDNode available for this VPIntrinsic!");
7333
7334 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
7335 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
7336 if (VPIntrin.getFastMathFlags().allowReassoc())
7337 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
7338 : ISD::VP_REDUCE_FMUL;
7339 }
7340
7341 return ResOPC.getValue();
7342}
7343
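// Illustrative sketch (not part of the original source): VPIntrinsics.def
// pairs each intrinsic with its SDNode (e.g. llvm.vp.add -> ISD::VP_ADD);
// the special case above additionally relaxes the sequential reductions
// vp.reduce.fadd/fmul to VP_REDUCE_FADD/FMUL when 'reassoc' is set.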
7344void SelectionDAGBuilder::visitVPLoadGather(const VPIntrinsic &VPIntrin, EVT VT,
7345 SmallVector<SDValue, 7> &OpValues,
7346 bool isGather) {
7347 SDLoc DL = getCurSDLoc();
7348 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7349 Value *PtrOperand = VPIntrin.getArgOperand(0);
7350 MaybeAlign Alignment = DAG.getEVTAlign(VT);
7351 AAMDNodes AAInfo;
7352 VPIntrin.getAAMetadata(AAInfo);
7353 const MDNode *Ranges = VPIntrin.getMetadata(LLVMContext::MD_range);
7354 SDValue LD;
7355 bool AddToChain = true;
7356 if (!isGather) {
7357 // Do not serialize variable-length loads of constant memory with
7358 // anything.
7359 MemoryLocation ML;
7360 if (VT.isScalableVector())
7361 ML = MemoryLocation::getAfter(PtrOperand);
7362 else
7363 ML = MemoryLocation(
7364 PtrOperand,
7365 LocationSize::precise(
7366 DAG.getDataLayout().getTypeStoreSize(VPIntrin.getType())),
7367 AAInfo);
7368 AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7369 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7370 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7371 MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7372 VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo, Ranges);
7373 LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
7374 MMO, false /*IsExpanding */);
7375 } else {
7376 unsigned AS =
7377 PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7378 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7379 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
7380 MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7381 SDValue Base, Index, Scale;
7382 ISD::MemIndexType IndexType;
7383 bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7384 this, VPIntrin.getParent());
7385 if (!UniformBase) {
7386 Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7387 Index = getValue(PtrOperand);
7388 IndexType = ISD::SIGNED_UNSCALED;
7389 Scale =
7390 DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7391 }
7392 EVT IdxVT = Index.getValueType();
7393 EVT EltTy = IdxVT.getVectorElementType();
7394 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7395 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7396 Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7397 }
7398 LD = DAG.getGatherVP(
7399 DAG.getVTList(VT, MVT::Other), VT, DL,
7400 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
7401 IndexType);
7402 }
7403 if (AddToChain)
7404 PendingLoads.push_back(LD.getValue(1));
7405 setValue(&VPIntrin, LD);
7406}
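// Illustrative sketch (not part of the original source): a predicated load
//   %v = call <4 x i32> @llvm.vp.load.v4i32.p0v4i32(<4 x i32>* %p,
//                                                   <4 x i1> %m, i32 %evl)
// takes the non-gather path above; its out-chain joins PendingLoads only
// when the location is not known to be constant memory.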
7407
7408void SelectionDAGBuilder::visitVPStoreScatter(const VPIntrinsic &VPIntrin,
7409 SmallVector<SDValue, 7> &OpValues,
7410 bool isScatter) {
7411 SDLoc DL = getCurSDLoc();
7412 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7413 Value *PtrOperand = VPIntrin.getArgOperand(1);
7414 EVT VT = OpValues[0].getValueType();
7415 MaybeAlign Alignment = DAG.getEVTAlign(VT);
7416 AAMDNodes AAInfo;
7417 VPIntrin.getAAMetadata(AAInfo);
7418 SDValue ST;
7419 if (!isScatter) {
7420 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7421 MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
7422 VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
7423 ST =
7424 DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], OpValues[1],
7425 OpValues[2], OpValues[3], MMO, false /* IsTruncating */);
7426 } else {
7427 unsigned AS =
7428 PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7429 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7430 MachinePointerInfo(AS), MachineMemOperand::MOStore,
7431 MemoryLocation::UnknownSize, *Alignment, AAInfo);
7432 SDValue Base, Index, Scale;
7433 ISD::MemIndexType IndexType;
7434 bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7435 this, VPIntrin.getParent());
7436 if (!UniformBase) {
7437 Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7438 Index = getValue(PtrOperand);
7439 IndexType = ISD::SIGNED_UNSCALED;
7440 Scale =
7441 DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7442 }
7443 EVT IdxVT = Index.getValueType();
7444 EVT EltTy = IdxVT.getVectorElementType();
7445 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7446 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7447 Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7448 }
7449 ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
7450 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
7451 OpValues[2], OpValues[3]},
7452 MMO, IndexType);
7453 }
7454 DAG.setRoot(ST);
7455 setValue(&VPIntrin, ST);
7456}
7457
7458void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
7459 const VPIntrinsic &VPIntrin) {
7460 SDLoc DL = getCurSDLoc();
7461 unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
7462
7463 SmallVector<EVT, 4> ValueVTs;
7464 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7465 ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
7466 SDVTList VTs = DAG.getVTList(ValueVTs);
7467
7468 auto EVLParamPos =
7469 VPIntrinsic::getVectorLengthParamPos(VPIntrin.getIntrinsicID());
7470
7471 MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
7472 assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
7473 "Unexpected target EVL type");
7474
7475 // Request operands.
7476 SmallVector<SDValue, 7> OpValues;
7477 for (unsigned I = 0; I < VPIntrin.getNumArgOperands(); ++I) {
7478 auto Op = getValue(VPIntrin.getArgOperand(I));
7479 if (I == EVLParamPos)
7480 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
7481 OpValues.push_back(Op);
7482 }
7483
7484 switch (Opcode) {
7485 default: {
7486 SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues);
7487 setValue(&VPIntrin, Result);
7488 break;
7489 }
7490 case ISD::VP_LOAD:
7491 case ISD::VP_GATHER:
7492 visitVPLoadGather(VPIntrin, ValueVTs[0], OpValues,
7493 Opcode == ISD::VP_GATHER);
7494 break;
7495 case ISD::VP_STORE:
7496 case ISD::VP_SCATTER:
7497 visitVPStoreScatter(VPIntrin, OpValues, Opcode == ISD::VP_SCATTER);
7498 break;
7499 }
7500}
7501
7502SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
7503 const BasicBlock *EHPadBB,
7504 MCSymbol *&BeginLabel) {
7505 MachineFunction &MF = DAG.getMachineFunction();
7506 MachineModuleInfo &MMI = MF.getMMI();
7507
7508 // Insert a label before the invoke call to mark the try range. This can be
7509 // used to detect deletion of the invoke via the MachineModuleInfo.
7510 BeginLabel = MMI.getContext().createTempSymbol();
7511
7512 // For SjLj, keep track of which landing pads go with which invokes
7513 // so as to maintain the ordering of pads in the LSDA.
7514 unsigned CallSiteIndex = MMI.getCurrentCallSite();
7515 if (CallSiteIndex) {
7516 MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
7517 LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
7518
7519 // Now that the call site is handled, stop tracking it.
7520 MMI.setCurrentCallSite(0);
7521 }
7522
7523 return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
7524}
7525
7526SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
7527 const BasicBlock *EHPadBB,
7528 MCSymbol *BeginLabel) {
7529 assert(BeginLabel && "BeginLabel should've been set");
7530
7531 MachineFunction &MF = DAG.getMachineFunction();
7532 MachineModuleInfo &MMI = MF.getMMI();
7533
7534 // Insert a label at the end of the invoke call to mark the try range. This
7535 // can be used to detect deletion of the invoke via the MachineModuleInfo.
7536 MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
7537 Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
7538
7539 // Inform MachineModuleInfo of range.
7540 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
7541 // There is a platform (e.g. wasm) that uses funclet style IR but does not
7542 // actually use outlined funclets and their LSDA info style.
7543 if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
7544 assert(II && "II should've been set");
7545 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
7546 EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
7547 } else if (!isScopedEHPersonality(Pers)) {
7548 assert(EHPadBB);
7549 MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
7550 }
7551
7552 return Chain;
7553}
7554
7555std::pair<SDValue, SDValue>
7556SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
7557 const BasicBlock *EHPadBB) {
7558 MCSymbol *BeginLabel = nullptr;
7559
7560 if (EHPadBB) {
7561 // Both PendingLoads and PendingExports must be flushed here;
7562 // this call might not return.
7563 (void)getRoot();
7564 DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
7565 CLI.setChain(getRoot());
7566 }
7567
7568 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7569 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7570
7571 assert((CLI.IsTailCall || Result.second.getNode()) &&
7572 "Non-null chain expected with non-tail call!");
7573 assert((Result.second.getNode() || !Result.first.getNode()) &&
7574 "Null value expected with tail call!");
7575
7576 if (!Result.second.getNode()) {
7577 // As a special case, a null chain means that a tail call has been emitted
7578 // and the DAG root is already updated.
7579 HasTailCall = true;
7580
7581 // Since there's no actual continuation from this block, nothing can be
7582 // relying on us setting vregs for them.
7583 PendingExports.clear();
7584 } else {
7585 DAG.setRoot(Result.second);
7586 }
7587
7588 if (EHPadBB) {
7589 DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
7590 BeginLabel));
7591 }
7592
7593 return Result;
7594}
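// Illustrative sketch (not part of the original source): for
//   invoke void @f() to label %cont unwind label %lpad
// lowerInvokable brackets the lowered call with the lowerStartEH/lowerEndEH
// labels above, letting MachineModuleInfo tie the try range to %lpad's
// landing pad (or detect that the invoke was deleted).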
7595
7596void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
7597 bool isTailCall,
7598 bool isMustTailCall,
7599 const BasicBlock *EHPadBB) {
7600 auto &DL = DAG.getDataLayout();
7601 FunctionType *FTy = CB.getFunctionType();
7602 Type *RetTy = CB.getType();
7603
7604 TargetLowering::ArgListTy Args;
7605 Args.reserve(CB.arg_size());
7606
7607 const Value *SwiftErrorVal = nullptr;
7608 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7609
7610 if (isTailCall) {
7611 // Avoid emitting tail calls in functions with the disable-tail-calls
7612 // attribute.
7613 auto *Caller = CB.getParent()->getParent();
7614 if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
7615 "true" && !isMustTailCall)
7616 isTailCall = false;
7617
7618 // We can't tail call inside a function with a swifterror argument. Lowering
7619 // does not support this yet. It would have to move into the swifterror
7620 // register before the call.
7621 if (TLI.supportSwiftError() &&
7622 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
7623 isTailCall = false;
7624 }
7625
7626 for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
7627 TargetLowering::ArgListEntry Entry;
7628 const Value *V = *I;
7629
7630 // Skip empty types
7631 if (V->getType()->isEmptyTy())
7632 continue;
7633
7634 SDValue ArgNode = getValue(V);
7635 Entry.Node = ArgNode; Entry.Ty = V->getType();
7636
7637 Entry.setAttributes(&CB, I - CB.arg_begin());
7638
7639 // Use swifterror virtual register as input to the call.
7640 if (Entry.IsSwiftError && TLI.supportSwiftError()) {
7641 SwiftErrorVal = V;
7642 // We find the virtual register for the actual swifterror argument.
7643 // Instead of using the Value, we use the virtual register instead.
7644 Entry.Node =
7645 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
7646 EVT(TLI.getPointerTy(DL)));
7647 }
7648
7649 Args.push_back(Entry);
7650
7651 // If we have an explicit sret argument that is an Instruction, (i.e., it
7652 // might point to function-local memory), we can't meaningfully tail-call.
7653 if (Entry.IsSRet && isa<Instruction>(V))
7654 isTailCall = false;
7655 }
7656
7657 // If call site has a cfguardtarget operand bundle, create and add an
7658 // additional ArgListEntry.
7659 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
7660 TargetLowering::ArgListEntry Entry;
7661 Value *V = Bundle->Inputs[0];
7662 SDValue ArgNode = getValue(V);
7663 Entry.Node = ArgNode;
7664 Entry.Ty = V->getType();
7665 Entry.IsCFGuardTarget = true;
7666 Args.push_back(Entry);
7667 }
7668
7669 // Check if target-independent constraints permit a tail call here.
7670 // Target-dependent constraints are checked within TLI->LowerCallTo.
7671 if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
7672 isTailCall = false;
7673
7674 // Disable tail calls if there is an swifterror argument. Targets have not
7675 // been updated to support tail calls.
7676 if (TLI.supportSwiftError() && SwiftErrorVal)
7677 isTailCall = false;
7678
7679 TargetLowering::CallLoweringInfo CLI(DAG);
7680 CLI.setDebugLoc(getCurSDLoc())
7681 .setChain(getRoot())
7682 .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
7683 .setTailCall(isTailCall)
7684 .setConvergent(CB.isConvergent())
7685 .setIsPreallocated(
7686 CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
7687 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
7688
7689 if (Result.first.getNode()) {
7690 Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
7691 setValue(&CB, Result.first);
7692 }
7693
7694 // The last element of CLI.InVals has the SDValue for swifterror return.
7695 // Here we copy it to a virtual register and update SwiftErrorMap for
7696 // book-keeping.
7697 if (SwiftErrorVal && TLI.supportSwiftError()) {
7698 // Get the last element of InVals.
7699 SDValue Src = CLI.InVals.back();
7700 Register VReg =
7701 SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
7702 SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
7703 DAG.setRoot(CopyNode);
7704 }
7705}
7706
7707static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
7708 SelectionDAGBuilder &Builder) {
7709 // Check to see if this load can be trivially constant folded, e.g. if the
7710 // input is from a string literal.
7711 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
7712 // Cast pointer to the type we really want to load.
7713 Type *LoadTy =
7714 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
7715 if (LoadVT.isVector())
7716 LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
7717
7718 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
7719 PointerType::getUnqual(LoadTy));
7720
7721 if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
7722 const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
7723 return Builder.getValue(LoadCst);
7724 }
7725
7726 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
7727 // still constant memory, the input chain can be the entry node.
7728 SDValue Root;
7729 bool ConstantMemory = false;
7730
7731 // Do not serialize (non-volatile) loads of constant memory with anything.
7732 if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
7733 Root = Builder.DAG.getEntryNode();
7734 ConstantMemory = true;
7735 } else {
7736 // Do not serialize non-volatile loads against each other.
7737 Root = Builder.DAG.getRoot();
7738 }
7739
7740 SDValue Ptr = Builder.getValue(PtrVal);
7741 SDValue LoadVal =
7742 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
7743 MachinePointerInfo(PtrVal), Align(1));
7744
7745 if (!ConstantMemory)
7746 Builder.PendingLoads.push_back(LoadVal.getValue(1));
7747 return LoadVal;
7748}
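// Illustrative sketch (not part of the original source): in
//   memcmp(%buf, getelementptr([4 x i8], [4 x i8]* @str, i64 0, i64 0), 4)
// the @str side constant-folds via ConstantFoldLoadFromConstPtr to an i32
// immediate, while %buf becomes a single align-1 i32 load on the chain.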
7749
7750/// Record the value for an instruction that produces an integer result,
7751/// converting the type where necessary.
7752void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
7753 SDValue Value,
7754 bool IsSigned) {
7755 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7756 I.getType(), true);
7757 if (IsSigned)
7758 Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
7759 else
7760 Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
7761 setValue(&I, Value);
7762}
7763
7764/// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
7765/// true and lower it. Otherwise return false, and it will be lowered like a
7766/// normal call.
7767/// The caller already checked that \p I calls the appropriate LibFunc with a
7768/// correct prototype.
7769bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
7770 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
7771 const Value *Size = I.getArgOperand(2);
7772 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
7773 if (CSize && CSize->getZExtValue() == 0) {
7774 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7775 I.getType(), true);
7776 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
7777 return true;
7778 }
7779
7780 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7781 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
7782 DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
7783 getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
7784 if (Res.first.getNode()) {
7785 processIntegerCallValue(I, Res.first, true);
7786 PendingLoads.push_back(Res.second);
7787 return true;
7788 }
7789
7790 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
7791 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
7792 if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
7793 return false;
7794
7795 // If the target has a fast compare for the given size, it will return a
7796 // preferred load type for that size. Require that the load VT is legal and
7797 // that the target supports unaligned loads of that type. Otherwise, return
7798 // INVALID.
7799 auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
7800 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7801 MVT LVT = TLI.hasFastEqualityCompare(NumBits);
7802 if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
7803 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
7804 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
7805 // TODO: Check alignment of src and dest ptrs.
7806 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
7807 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
7808 if (!TLI.isTypeLegal(LVT) ||
7809 !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
7810 !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
7811 LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
7812 }
7813
7814 return LVT;
7815 };
7816
7817 // This turns into unaligned loads. We only do this if the target natively
7818 // supports the MVT we'll be loading or if it is small enough (<= 4) that
7819 // we'll only produce a small number of byte loads.
7820 MVT LoadVT;
7821 unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
7822 switch (NumBitsToCompare) {
7823 default:
7824 return false;
7825 case 16:
7826 LoadVT = MVT::i16;
7827 break;
7828 case 32:
7829 LoadVT = MVT::i32;
7830 break;
7831 case 64:
7832 case 128:
7833 case 256:
7834 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
7835 break;
7836 }
7837
7838 if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
7839 return false;
7840
7841 SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
7842 SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
7843
7844 // Bitcast to a wide integer type if the loads are vectors.
7845 if (LoadVT.isVector()) {
7846 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
7847 LoadL = DAG.getBitcast(CmpVT, LoadL);
7848 LoadR = DAG.getBitcast(CmpVT, LoadR);
7849 }
7850
7851 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
7852 processIntegerCallValue(I, Cmp, false);
7853 return true;
7854}
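// Illustrative sketch (not part of the original source): with fast unaligned
// i32 loads, a zero-equality test such as
//   if (memcmp(p, q, 4) == 0) ...
// lowers to (setne (load i32 p), (load i32 q)) zero-extended to the memcmp
// result type, avoiding the libcall entirely.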
7855
7856/// See if we can lower a memchr call into an optimized form. If so, return
7857/// true and lower it. Otherwise return false, and it will be lowered like a
7858/// normal call.
7859/// The caller already checked that \p I calls the appropriate LibFunc with a
7860/// correct prototype.
7861bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
7862 const Value *Src = I.getArgOperand(0);
7863 const Value *Char = I.getArgOperand(1);
7864 const Value *Length = I.getArgOperand(2);
7865
7866 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7867 std::pair<SDValue, SDValue> Res =
7868 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
7869 getValue(Src), getValue(Char), getValue(Length),
7870 MachinePointerInfo(Src));
7871 if (Res.first.getNode()) {
7872 setValue(&I, Res.first);
7873 PendingLoads.push_back(Res.second);
7874 return true;
7875 }
7876
7877 return false;
7878}
7879
7880/// See if we can lower a mempcpy call into an optimized form. If so, return
7881/// true and lower it. Otherwise return false, and it will be lowered like a
7882/// normal call.
7883/// The caller already checked that \p I calls the appropriate LibFunc with a
7884/// correct prototype.
7885bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
7886 SDValue Dst = getValue(I.getArgOperand(0));
7887 SDValue Src = getValue(I.getArgOperand(1));
7888 SDValue Size = getValue(I.getArgOperand(2));
7889
7890 Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
7891 Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
7892 // DAG::getMemcpy needs Alignment to be defined.
7893 Align Alignment = std::min(DstAlign, SrcAlign);
7894
7895 bool isVol = false;
7896 SDLoc sdl = getCurSDLoc();
7897
7898 // In the mempcpy context we need to pass in a false value for isTailCall
7899 // because the return pointer needs to be adjusted by the size of
7900 // the copied memory.
7901 SDValue Root = isVol ? getRoot() : getMemoryRoot();
7902 AAMDNodes AAInfo;
7903 I.getAAMetadata(AAInfo);
7904 SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, isVol, false,
7905 /*isTailCall=*/false,
7906 MachinePointerInfo(I.getArgOperand(0)),
7907 MachinePointerInfo(I.getArgOperand(1)), AAInfo);
7908 assert(MC.getNode() != nullptr &&
7909 "** memcpy should not be lowered as TailCall in mempcpy context **");
7910 DAG.setRoot(MC);
7911
7912 // Check if Size needs to be truncated or extended.
7913 Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
7914
7915 // Adjust return pointer to point just past the last dst byte.
7916 SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
7917 Dst, Size);
7918 setValue(&I, DstPlusSize);
7919 return true;
7920}
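// Illustrative sketch (not part of the original source): mempcpy(dst, src, n)
// is emitted above as an ordinary memcpy node followed by (ADD dst, n),
// matching mempcpy's contract of returning one past the last byte written.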
7921
7922/// See if we can lower a strcpy call into an optimized form. If so, return
7923/// true and lower it, otherwise return false and it will be lowered like a
7924/// normal call.
7925/// The caller already checked that \p I calls the appropriate LibFunc with a
7926/// correct prototype.
7927bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
7928 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7929
7930 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7931 std::pair<SDValue, SDValue> Res =
7932 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
7933 getValue(Arg0), getValue(Arg1),
7934 MachinePointerInfo(Arg0),
7935 MachinePointerInfo(Arg1), isStpcpy);
7936 if (Res.first.getNode()) {
7937 setValue(&I, Res.first);
7938 DAG.setRoot(Res.second);
7939 return true;
7940 }
7941
7942 return false;
7943}
7944
7945/// See if we can lower a strcmp call into an optimized form. If so, return
7946/// true and lower it, otherwise return false and it will be lowered like a
7947/// normal call.
7948/// The caller already checked that \p I calls the appropriate LibFunc with a
7949/// correct prototype.
7950bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
7951 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7952
7953 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7954 std::pair<SDValue, SDValue> Res =
7955 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
7956 getValue(Arg0), getValue(Arg1),
7957 MachinePointerInfo(Arg0),
7958 MachinePointerInfo(Arg1));
7959 if (Res.first.getNode()) {
7960 processIntegerCallValue(I, Res.first, true);
7961 PendingLoads.push_back(Res.second);
7962 return true;
7963 }
7964
7965 return false;
7966}
7967
7968/// See if we can lower a strlen call into an optimized form. If so, return
7969/// true and lower it, otherwise return false and it will be lowered like a
7970/// normal call.
7971/// The caller already checked that \p I calls the appropriate LibFunc with a
7972/// correct prototype.
7973bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
7974 const Value *Arg0 = I.getArgOperand(0);
7975
7976 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7977 std::pair<SDValue, SDValue> Res =
7978 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
7979 getValue(Arg0), MachinePointerInfo(Arg0));
7980 if (Res.first.getNode()) {
7981 processIntegerCallValue(I, Res.first, false);
7982 PendingLoads.push_back(Res.second);
7983 return true;
7984 }
7985
7986 return false;
7987}
7988
7989/// See if we can lower a strnlen call into an optimized form. If so, return
7990/// true and lower it, otherwise return false and it will be lowered like a
7991/// normal call.
7992/// The caller already checked that \p I calls the appropriate LibFunc with a
7993/// correct prototype.
7994bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
7995 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7996
7997 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7998 std::pair<SDValue, SDValue> Res =
7999 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
8000 getValue(Arg0), getValue(Arg1),
8001 MachinePointerInfo(Arg0));
8002 if (Res.first.getNode()) {
8003 processIntegerCallValue(I, Res.first, false);
8004 PendingLoads.push_back(Res.second);
8005 return true;
8006 }
8007
8008 return false;
8009}
8010
8011/// See if we can lower a unary floating-point operation into an SDNode with
8012/// the specified Opcode. If so, return true and lower it, otherwise return
8013/// false and it will be lowered like a normal call.
8014/// The caller already checked that \p I calls the appropriate LibFunc with a
8015/// correct prototype.
8016bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
8017 unsigned Opcode) {
8018 // We already checked this call's prototype; verify it doesn't modify errno.
8019 if (!I.onlyReadsMemory())
8020 return false;
8021
8022 SDNodeFlags Flags;
8023 Flags.copyFMF(cast<FPMathOperator>(I));
8024
8025 SDValue Tmp = getValue(I.getArgOperand(0));
8026 setValue(&I,
8027 DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
8028 return true;
8029}
8030
8031/// See if we can lower a binary floating-point operation into an SDNode with
8032/// the specified Opcode. If so, return true and lower it. Otherwise return
8033/// false, and it will be lowered like a normal call.
8034/// The caller already checked that \p I calls the appropriate LibFunc with a
8035/// correct prototype.
8036bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
8037 unsigned Opcode) {
8038 // We already checked this call's prototype; verify it doesn't modify errno.
8039 if (!I.onlyReadsMemory())
8040 return false;
8041
8042 SDNodeFlags Flags;
8043 Flags.copyFMF(cast<FPMathOperator>(I));
8044
8045 SDValue Tmp0 = getValue(I.getArgOperand(0));
8046 SDValue Tmp1 = getValue(I.getArgOperand(1));
8047 EVT VT = Tmp0.getValueType();
8048 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
8049 return true;
8050}
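// Illustrative sketch (not part of the original source): a readonly libcall
//   %y = call double @sin(double %x)
// reaches visitUnaryFloatCall through the LibFunc_sin case below and folds
// to a single ISD::FSIN node carrying the call's fast-math flags.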
8051
8052void SelectionDAGBuilder::visitCall(const CallInst &I) {
8053 // Handle inline assembly differently.
8054 if (I.isInlineAsm()) {
8055 visitInlineAsm(I);
8056 return;
8057 }
8058
8059 if (Function *F = I.getCalledFunction()) {
8060 if (F->hasFnAttribute("dontcall")) {
8061 unsigned LocCookie = 0;
8062 if (MDNode *MD = I.getMetadata("srcloc"))
8063 LocCookie =
8064 mdconst::extract<ConstantInt>(MD->getOperand(0))->getZExtValue();
8065 DiagnosticInfoDontCall D(F->getName(), LocCookie);
8066 DAG.getContext()->diagnose(D);
8067 }
8068
8069 if (F->isDeclaration()) {
8070 // Is this an LLVM intrinsic or a target-specific intrinsic?
8071 unsigned IID = F->getIntrinsicID();
8072 if (!IID)
8073 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
8074 IID = II->getIntrinsicID(F);
8075
8076 if (IID) {
8077 visitIntrinsicCall(I, IID);
8078 return;
8079 }
8080 }
8081
8082 // Check for well-known libc/libm calls. If the function is internal, it
8083 // can't be a library call. Don't do the check if marked as nobuiltin for
8084 // some reason or the call site requires strict floating point semantics.
8085 LibFunc Func;
8086 if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
8087 F->hasName() && LibInfo->getLibFunc(*F, Func) &&
8088 LibInfo->hasOptimizedCodeGen(Func)) {
8089 switch (Func) {
8090 default: break;
8091 case LibFunc_bcmp:
8092 if (visitMemCmpBCmpCall(I))
8093 return;
8094 break;
8095 case LibFunc_copysign:
8096 case LibFunc_copysignf:
8097 case LibFunc_copysignl:
8098 // We already checked this call's prototype; verify it doesn't modify
8099 // errno.
8100 if (I.onlyReadsMemory()) {
8101 SDValue LHS = getValue(I.getArgOperand(0));
8102 SDValue RHS = getValue(I.getArgOperand(1));
8103 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
8104 LHS.getValueType(), LHS, RHS));
8105 return;
8106 }
8107 break;
8108 case LibFunc_fabs:
8109 case LibFunc_fabsf:
8110 case LibFunc_fabsl:
8111 if (visitUnaryFloatCall(I, ISD::FABS))
8112 return;
8113 break;
8114 case LibFunc_fmin:
8115 case LibFunc_fminf:
8116 case LibFunc_fminl:
8117 if (visitBinaryFloatCall(I, ISD::FMINNUM))
8118 return;
8119 break;
8120 case LibFunc_fmax:
8121 case LibFunc_fmaxf:
8122 case LibFunc_fmaxl:
8123 if (visitBinaryFloatCall(I, ISD::FMAXNUM))
8124 return;
8125 break;
8126 case LibFunc_sin:
8127 case LibFunc_sinf:
8128 case LibFunc_sinl:
8129 if (visitUnaryFloatCall(I, ISD::FSIN))
8130 return;
8131 break;
8132 case LibFunc_cos:
8133 case LibFunc_cosf:
8134 case LibFunc_cosl:
8135 if (visitUnaryFloatCall(I, ISD::FCOS))
8136 return;
8137 break;
8138 case LibFunc_sqrt:
8139 case LibFunc_sqrtf:
8140 case LibFunc_sqrtl:
8141 case LibFunc_sqrt_finite:
8142 case LibFunc_sqrtf_finite:
8143 case LibFunc_sqrtl_finite:
8144 if (visitUnaryFloatCall(I, ISD::FSQRT))
8145 return;
8146 break;
8147 case LibFunc_floor:
8148 case LibFunc_floorf:
8149 case LibFunc_floorl:
8150 if (visitUnaryFloatCall(I, ISD::FFLOOR))
8151 return;
8152 break;
8153 case LibFunc_nearbyint:
8154 case LibFunc_nearbyintf:
8155 case LibFunc_nearbyintl:
8156 if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
8157 return;
8158 break;
8159 case LibFunc_ceil:
8160 case LibFunc_ceilf:
8161 case LibFunc_ceill:
8162 if (visitUnaryFloatCall(I, ISD::FCEIL))
8163 return;
8164 break;
8165 case LibFunc_rint:
8166 case LibFunc_rintf:
8167 case LibFunc_rintl:
8168 if (visitUnaryFloatCall(I, ISD::FRINT))
8169 return;
8170 break;
8171 case LibFunc_round:
8172 case LibFunc_roundf:
8173 case LibFunc_roundl:
8174 if (visitUnaryFloatCall(I, ISD::FROUND))
8175 return;
8176 break;
8177 case LibFunc_trunc:
8178 case LibFunc_truncf:
8179 case LibFunc_truncl:
8180 if (visitUnaryFloatCall(I, ISD::FTRUNC))
8181 return;
8182 break;
8183 case LibFunc_log2:
8184 case LibFunc_log2f:
8185 case LibFunc_log2l:
8186 if (visitUnaryFloatCall(I, ISD::FLOG2))
8187 return;
8188 break;
8189 case LibFunc_exp2:
8190 case LibFunc_exp2f:
8191 case LibFunc_exp2l:
8192 if (visitUnaryFloatCall(I, ISD::FEXP2))
8193 return;
8194 break;
8195 case LibFunc_memcmp:
8196 if (visitMemCmpBCmpCall(I))
8197 return;
8198 break;
8199 case LibFunc_mempcpy:
8200 if (visitMemPCpyCall(I))
8201 return;
8202 break;
8203 case LibFunc_memchr:
8204 if (visitMemChrCall(I))
8205 return;
8206 break;
8207 case LibFunc_strcpy:
8208 if (visitStrCpyCall(I, false))
8209 return;
8210 break;
8211 case LibFunc_stpcpy:
8212 if (visitStrCpyCall(I, true))
8213 return;
8214 break;
8215 case LibFunc_strcmp:
8216 if (visitStrCmpCall(I))
8217 return;
8218 break;
8219 case LibFunc_strlen:
8220 if (visitStrLenCall(I))
8221 return;
8222 break;
8223 case LibFunc_strnlen:
8224 if (visitStrNLenCall(I))
8225 return;
8226 break;
8227 }
8228 }
8229 }
8230
8231 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
8232 // have to do anything here to lower funclet bundles.
8233 // CFGuardTarget bundles are lowered in LowerCallTo.
8234 assert(!I.hasOperandBundlesOtherThan(
8235 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
8236 LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
8237 LLVMContext::OB_clang_arc_attachedcall}) &&
8238 "Cannot lower calls with arbitrary operand bundles!");
8239
8240 SDValue Callee = getValue(I.getCalledOperand());
8241
8242 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
8243 LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
8244 else
8245 // Check if we can potentially perform a tail call. More detailed checking
8246 // is be done within LowerCallTo, after more information about the call is
8247 // known.
8248 LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
8249}
8250
8251namespace {
8252
8253/// AsmOperandInfo - This contains information for each constraint that we are
8254/// lowering.
8255class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
8256public:
8257 /// CallOperand - If this is the result output operand or a clobber
8258 /// this is null, otherwise it is the incoming operand to the CallInst.
8259 /// This gets modified as the asm is processed.
8260 SDValue CallOperand;
8261
8262 /// AssignedRegs - If this is a register or register class operand, this
8263 /// contains the set of registers corresponding to the operand.
8264 RegsForValue AssignedRegs;
8265
8266 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
8267 : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
8268 }
8269
8270 /// Whether or not this operand accesses memory
8271 bool hasMemory(const TargetLowering &TLI) const {
8272 // Indirect operand accesses access memory.
8273 if (isIndirect)
8274 return true;
8275
8276 for (const auto &Code : Codes)
8277 if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
8278 return true;
8279
8280 return false;
8281 }
8282
8283 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
8284 /// corresponds to. If there is no Value* for this operand, it returns
8285 /// MVT::Other.
8286 EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
8287 const DataLayout &DL) const {
8288 if (!CallOperandVal) return MVT::Other;
8289
8290 if (isa<BasicBlock>(CallOperandVal))
8291 return TLI.getProgramPointerTy(DL);
8292
8293 llvm::Type *OpTy = CallOperandVal->getType();
8294
8295 // FIXME: code duplicated from TargetLowering::ParseConstraints().
8296 // If this is an indirect operand, the operand is a pointer to the
8297 // accessed type.
8298 if (isIndirect) {
8299 PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
8300 if (!PtrTy)
8301 report_fatal_error("Indirect operand for inline asm not a pointer!");
8302 OpTy = PtrTy->getElementType();
8303 }
8304
8305 // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
8306 if (StructType *STy = dyn_cast<StructType>(OpTy))
8307 if (STy->getNumElements() == 1)
8308 OpTy = STy->getElementType(0);
8309
8310 // If OpTy is not a single value, it may be a struct/union that we
8311 // can tile with integers.
8312 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
8313 unsigned BitSize = DL.getTypeSizeInBits(OpTy);
8314 switch (BitSize) {
8315 default: break;
8316 case 1:
8317 case 8:
8318 case 16:
8319 case 32:
8320 case 64:
8321 case 128:
8322 OpTy = IntegerType::get(Context, BitSize);
8323 break;
8324 }
8325 }
8326
8327 return TLI.getAsmOperandValueType(DL, OpTy, true);
8328 }
8329};
8330
8331
8332} // end anonymous namespace
8333
8334/// Make sure that the output operand \p OpInfo and its corresponding input
8335/// operand \p MatchingOpInfo have compatible constraint types (otherwise error
8336/// out).
8337static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
8338 SDISelAsmOperandInfo &MatchingOpInfo,
8339 SelectionDAG &DAG) {
8340 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
8341 return;
8342
8343 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
8344 const auto &TLI = DAG.getTargetLoweringInfo();
8345
8346 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
8347 TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
8348 OpInfo.ConstraintVT);
8349 std::pair<unsigned, const TargetRegisterClass *> InputRC =
8350 TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
8351 MatchingOpInfo.ConstraintVT);
8352 if ((OpInfo.ConstraintVT.isInteger() !=
8353 MatchingOpInfo.ConstraintVT.isInteger()) ||
8354 (MatchRC.second != InputRC.second)) {
8355 // FIXME: error out in a more elegant fashion
8356 report_fatal_error("Unsupported asm: input constraint"
8357 " with a matching output constraint of"
8358 " incompatible type!");
8359 }
8360 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
8361}
8362
8363/// Get a direct memory input to behave well as an indirect operand.
8364/// This may introduce stores, hence the need for a \p Chain.
8365/// \return The (possibly updated) chain.
8366static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
8367 SDISelAsmOperandInfo &OpInfo,
8368 SelectionDAG &DAG) {
8369 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8370
8371 // If we don't have an indirect input, put it in the constpool if we can,
8372 // otherwise spill it to a stack slot.
8373 // TODO: This isn't quite right. We need to handle these according to
8374 // the addressing mode that the constraint wants. Also, this may take
8375 // an additional register for the computation and we don't want that
8376 // either.
8377
8378 // If the operand is a float, integer, or vector constant, spill to a
8379 // constant pool entry to get its address.
8380 const Value *OpVal = OpInfo.CallOperandVal;
8381 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
8382 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
8383 OpInfo.CallOperand = DAG.getConstantPool(
8384 cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
8385 return Chain;
8386 }
8387
8388 // Otherwise, create a stack slot and emit a store to it before the asm.
8389 Type *Ty = OpVal->getType();
8390 auto &DL = DAG.getDataLayout();
8391 uint64_t TySize = DL.getTypeAllocSize(Ty);
8392 MachineFunction &MF = DAG.getMachineFunction();
8393 int SSFI = MF.getFrameInfo().CreateStackObject(
8394 TySize, DL.getPrefTypeAlign(Ty), false);
8395 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
8396 Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
8397 MachinePointerInfo::getFixedStack(MF, SSFI),
8398 TLI.getMemValueType(DL, Ty));
8399 OpInfo.CallOperand = StackSlot;
8400
8401 return Chain;
8402}
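// Illustrative sketch (not part of the original source): an i32 constant fed
// to a memory ("m") constraint has no address, so the code above parks it in
// the constant pool; any other direct value gets a stack slot plus a store
// chained in before the asm.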
8403
8404/// GetRegistersForValue - Assign registers (virtual or physical) for the
8405/// specified operand. We prefer to assign virtual registers, to allow the
8406/// register allocator to handle the assignment process. However, if the asm
8407/// uses features that we can't model on machineinstrs, we have SDISel do the
8408/// allocation. This produces generally horrible, but correct, code.
8409///
8410/// OpInfo describes the operand
8411/// RefOpInfo describes the matching operand if any, the operand otherwise
8412static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
8413 SDISelAsmOperandInfo &OpInfo,
8414 SDISelAsmOperandInfo &RefOpInfo) {
8415 LLVMContext &Context = *DAG.getContext();
8416 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8417
8418 MachineFunction &MF = DAG.getMachineFunction();
8419 SmallVector<unsigned, 4> Regs;
8420 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8421
8422 // No work to do for memory operations.
8423 if (OpInfo.ConstraintType == TargetLowering::C_Memory)
8424 return;
8425
8426 // If this is a constraint for a single physreg, or a constraint for a
8427 // register class, find it.
8428 unsigned AssignedReg;
8429 const TargetRegisterClass *RC;
8430 std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
8431 &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
8432 // RC is unset only on failure. Return immediately.
8433 if (!RC)
8434 return;
8435
8436 // Get the actual register value type. This is important, because the user
8437 // may have asked for (e.g.) the AX register in i32 type. We need to
8438 // remember that AX is actually i16 to get the right extension.
8439 const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
8440
8441 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
8442 // If this is an FP operand in an integer register (or vice versa), or more
8443 // generally if the operand value disagrees with the register class we plan
8444 // to stick it in, fix the operand type.
8445 //
8446 // If this is an input value, the bitcast to the new type is done now.
8447 // Bitcast for output value is done at the end of visitInlineAsm().
8448 if ((OpInfo.Type == InlineAsm::isOutput ||
8449 OpInfo.Type == InlineAsm::isInput) &&
8450 !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
8451 // Try to convert to the first EVT that the reg class contains. If the
8452 // types are identical size, use a bitcast to convert (e.g. two differing
8453 // vector types). Note: output bitcast is done at the end of
8454 // visitInlineAsm().
8455 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
8456 // Exclude indirect inputs while they are unsupported because the code
8457 // to perform the load is missing and thus OpInfo.CallOperand still
8458 // refers to the input address rather than the pointed-to value.
8459 if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
8460 OpInfo.CallOperand =
8461 DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
8462 OpInfo.ConstraintVT = RegVT;
8463 // If the operand is an FP value and we want it in integer registers,
8464 // use the corresponding integer type. This turns an f64 value into
8465 // i64, which can be passed with two i32 values on a 32-bit machine.
8466 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
8467 MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
8468 if (OpInfo.Type == InlineAsm::isInput)
8469 OpInfo.CallOperand =
8470 DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
8471 OpInfo.ConstraintVT = VT;
8472 }
8473 }
8474 }
8475
8476 // No need to allocate a matching input constraint since the constraint it's
8477 // matching to has already been allocated.
8478 if (OpInfo.isMatchingInputConstraint())
8479 return;
8480
8481 EVT ValueVT = OpInfo.ConstraintVT;
8482 if (OpInfo.ConstraintVT == MVT::Other)
8483 ValueVT = RegVT;
8484
8485 // Initialize NumRegs.
8486 unsigned NumRegs = 1;
8487 if (OpInfo.ConstraintVT != MVT::Other)
8488 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
8489
8490 // If this is a constraint for a specific physical register, like {r17},
8491 // assign it now.
8492
8493 // If this is associated to a specific register, initialize the iterator to
8494 // the correct place. If virtual, make sure we have enough registers.
8495
8496 // Initialize iterator if necessary
8497 TargetRegisterClass::iterator I = RC->begin();
8498 MachineRegisterInfo &RegInfo = MF.getRegInfo();
8499
8500 // Do not check for single registers.
8501 if (AssignedReg) {
8502 for (; *I != AssignedReg; ++I)
8503 assert(I != RC->end() && "AssignedReg should be member of RC");
8504 }
8505
8506 for (; NumRegs; --NumRegs, ++I) {
8507 assert(I != RC->end() && "Ran out of registers to allocate!");
8508 Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
8509 Regs.push_back(R);
8510 }
8511
8512 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
8513}
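// Illustrative sketch (not part of the original source): "=r"(v) with v : i32
// resolves to GR32 on x86-64; since no fixed register was named, the loop
// above creates fresh virtual GR32 registers and records them in
// AssignedRegs, leaving the actual choice to the register allocator.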
8514
8515static unsigned
8516findMatchingInlineAsmOperand(unsigned OperandNo,
8517 const std::vector<SDValue> &AsmNodeOperands) {
8518 // Scan until we find the definition we already emitted of this operand.
8519 unsigned CurOp = InlineAsm::Op_FirstOperand;
8520 for (; OperandNo; --OperandNo) {
8521 // Advance to the next operand.
8522 unsigned OpFlag =
8523 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
8524 assert((InlineAsm::isRegDefKind(OpFlag) ||
8525 InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
8526 InlineAsm::isMemKind(OpFlag)) &&
8527 "Skipped past definitions?");
8528 CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
8529 }
8530 return CurOp;
8531}
8532
8533namespace {
8534
8535class ExtraFlags {
8536 unsigned Flags = 0;
8537
8538public:
8539 explicit ExtraFlags(const CallBase &Call) {
8540 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
8541 if (IA->hasSideEffects())
8542 Flags |= InlineAsm::Extra_HasSideEffects;
8543 if (IA->isAlignStack())
8544 Flags |= InlineAsm::Extra_IsAlignStack;
8545 if (Call.isConvergent())
8546 Flags |= InlineAsm::Extra_IsConvergent;
8547 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
8548 }
8549
8550 void update(const TargetLowering::AsmOperandInfo &OpInfo) {
8551 // Ideally, we would only check against memory constraints. However, the
8552 // meaning of an Other constraint can be target-specific and we can't easily
8553 // reason about it. Therefore, be conservative and set MayLoad/MayStore
8554 // for Other constraints as well.
8555 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
8556 OpInfo.ConstraintType == TargetLowering::C_Other) {
8557 if (OpInfo.Type == InlineAsm::isInput)
8558 Flags |= InlineAsm::Extra_MayLoad;
8559 else if (OpInfo.Type == InlineAsm::isOutput)
8560 Flags |= InlineAsm::Extra_MayStore;
8561 else if (OpInfo.Type == InlineAsm::isClobber)
8562 Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
8563 }
8564 }
8565
8566 unsigned get() const { return Flags; }
8567};
8568
8569} // end anonymous namespace
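// Usage sketch (mirroring the first pass below): the flags are seeded from the
// InlineAsm itself, widened once per constraint, and finally packed into the
// extra-info immediate operand of the INLINEASM node:
//   ExtraFlags ExtraInfo(Call);
//   for (auto &T : TargetConstraints)
//     ExtraInfo.update(T);
//   AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo.get(), ...));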
8570
8571/// visitInlineAsm - Handle a call to an InlineAsm object.
8572void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
8573 const BasicBlock *EHPadBB) {
8574 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
8575
8576 /// ConstraintOperands - Information about all of the constraints.
8577 SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
8578
8579 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8580 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
8581 DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
8582
8583 // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
8584 // AsmDialect, MayLoad, MayStore).
8585 bool HasSideEffect = IA->hasSideEffects();
8586 ExtraFlags ExtraInfo(Call);
8587
8588 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
8589 unsigned ResNo = 0; // ResNo - The result number of the next output.
8590 unsigned NumMatchingOps = 0;
8591 for (auto &T : TargetConstraints) {
8592 ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
8593 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
8594
8595 // Compute the value type for each operand.
8596 if (OpInfo.Type == InlineAsm::isInput ||
8597 (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
8598 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);
8599
8600 // Process the call argument. BasicBlocks are labels, currently appearing
8601 // only in asm's.
8602 if (isa<CallBrInst>(Call) &&
8603 ArgNo - 1 >= (cast<CallBrInst>(&Call)->getNumArgOperands() -
8604 cast<CallBrInst>(&Call)->getNumIndirectDests() -
8605 NumMatchingOps) &&
8606 (NumMatchingOps == 0 ||
8607 ArgNo - 1 < (cast<CallBrInst>(&Call)->getNumArgOperands() -
8608 NumMatchingOps))) {
8609 const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
8610 EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
8611 OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
8612 } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
8613 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
8614 } else {
8615 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
8616 }
8617
8618 EVT VT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI,
8619 DAG.getDataLayout());
8620 OpInfo.ConstraintVT = VT.isSimple() ? VT.getSimpleVT() : MVT::Other;
8621 } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
8622 // The return value of the call is this value. As such, there is no
8623 // corresponding argument.
8624 assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
8625 if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
8626 OpInfo.ConstraintVT = TLI.getSimpleValueType(
8627 DAG.getDataLayout(), STy->getElementType(ResNo));
8628 } else {
8629 assert(ResNo == 0 && "Asm only has one result!");
8630 OpInfo.ConstraintVT = TLI.getAsmOperandValueType(
8631 DAG.getDataLayout(), Call.getType()).getSimpleVT();
8632 }
8633 ++ResNo;
8634 } else {
8635 OpInfo.ConstraintVT = MVT::Other;
8636 }
8637
8638 if (OpInfo.hasMatchingInput())
8639 ++NumMatchingOps;
8640
8641 if (!HasSideEffect)
8642 HasSideEffect = OpInfo.hasMemory(TLI);
8643
8644 // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
8645 // FIXME: Could we compute this on OpInfo rather than T?
8646
8647 // Compute the constraint code and ConstraintType to use.
8648 TLI.ComputeConstraintToUse(T, SDValue());
8649
8650 if (T.ConstraintType == TargetLowering::C_Immediate &&
8651 OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
8652 // We've delayed emitting a diagnostic for constraints like "n" because
8653 // inlining could cause an integer to show up.
8654 return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
8655 "' expects an integer constant "
8656 "expression");
8657
8658 ExtraInfo.update(T);
8659 }
8660
8661 // We won't need to flush pending loads if this asm doesn't touch
8662 // memory and is nonvolatile.
8663 SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
8664
8665 bool EmitEHLabels = isa<InvokeInst>(Call) && IA->canThrow();
8666 if (EmitEHLabels) {
8667 assert(EHPadBB && "InvokeInst must have an EHPadBB");
8668 }
8669 bool IsCallBr = isa<CallBrInst>(Call);
8670
8671 if (IsCallBr || EmitEHLabels) {
8672 // If this is a callbr or invoke we need to flush pending exports since
8673 // inlineasm_br and invoke are terminators.
8674 // We need to do this before nodes are glued to the inlineasm_br node.
8675 Chain = getControlRoot();
8676 }
8677
8678 MCSymbol *BeginLabel = nullptr;
8679 if (EmitEHLabels) {
8680 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
8681 }
8682
8683 // Second pass over the constraints: compute which constraint option to use.
8684 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8685 // If this is an output operand with a matching input operand, look up the
8686 // matching input. If their types mismatch, e.g. one is an integer, the
8687 // other is floating point, or their sizes are different, flag it as an
8688 // error.
8689 if (OpInfo.hasMatchingInput()) {
8690 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
8691 patchMatchingInput(OpInfo, Input, DAG);
8692 }
8693
8694 // Compute the constraint code and ConstraintType to use.
8695 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
8696
8697 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8698 OpInfo.Type == InlineAsm::isClobber)
8699 continue;
8700
8701 // If this is a memory input, and if the operand is not indirect, do what we
8702 // need to provide an address for the memory input.
8703 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8704 !OpInfo.isIndirect) {
8705 assert((OpInfo.isMultipleAlternative ||
8706 (OpInfo.Type == InlineAsm::isInput)) &&
8707 "Can only indirectify direct input operands!");
8708
8709 // Memory operands really want the address of the value.
8710 Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
8711
8712 // There is no longer a Value* corresponding to this operand.
8713 OpInfo.CallOperandVal = nullptr;
8714
8715 // It is now an indirect operand.
8716 OpInfo.isIndirect = true;
8717 }
8718
8719 }
8720
8721 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
8722 std::vector<SDValue> AsmNodeOperands;
8723 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
8724 AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
8725 IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
8726
8727 // If we have a !srcloc metadata node associated with it, we want to attach
8728 // this to the ultimately generated inline asm machineinstr. To do this, we
8729 // pass in the third operand as this (potentially null) inline asm MDNode.
8730 const MDNode *SrcLoc = Call.getMetadata("srcloc");
8731 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
8732
8733 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
8734 // bits as operand 3.
8735 AsmNodeOperands.push_back(DAG.getTargetConstant(
8736 ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8737
8738 // Third pass: Loop over operands to prepare DAG-level operands. As part of
8739 // this, assign virtual and physical registers for inputs and outputs.
8740 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8741 // Assign Registers.
8742 SDISelAsmOperandInfo &RefOpInfo =
8743 OpInfo.isMatchingInputConstraint()
8744 ? ConstraintOperands[OpInfo.getMatchedOperand()]
8745 : OpInfo;
8746 GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
8747
8748 auto DetectWriteToReservedRegister = [&]() {
8749 const MachineFunction &MF = DAG.getMachineFunction();
8750 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8751 for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
8752 if (Register::isPhysicalRegister(Reg) &&
8753 TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
8754 const char *RegName = TRI.getName(Reg);
8755 emitInlineAsmError(Call, "write to reserved register '" +
8756 Twine(RegName) + "'");
8757 return true;
8758 }
8759 }
8760 return false;
8761 };
8762
8763 switch (OpInfo.Type) {
8764 case InlineAsm::isOutput:
8765 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8766 unsigned ConstraintID =
8767 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8768 assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8769 "Failed to convert memory constraint code to constraint id.");
8770
8771 // Add information to the INLINEASM node to know about this output.
8772 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8773 OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
8774 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
8775 MVT::i32));
8776 AsmNodeOperands.push_back(OpInfo.CallOperand);
8777 } else {
8778 // Otherwise, this outputs to a register (directly for C_Register /
8779 // C_RegisterClass, and a target-defined fashion for
8780 // C_Immediate/C_Other). Find a register that we can use.
8781 if (OpInfo.AssignedRegs.Regs.empty()) {
8782 emitInlineAsmError(
8783 Call, "couldn't allocate output register for constraint '" +
8784 Twine(OpInfo.ConstraintCode) + "'");
8785 return;
8786 }
8787
8788 if (DetectWriteToReservedRegister())
8789 return;
8790
8791 // Add information to the INLINEASM node to know that this register is
8792 // set.
8793 OpInfo.AssignedRegs.AddInlineAsmOperands(
8794 OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
8795 : InlineAsm::Kind_RegDef,
8796 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
8797 }
8798 break;
8799
8800 case InlineAsm::isInput: {
8801 SDValue InOperandVal = OpInfo.CallOperand;
8802
8803 if (OpInfo.isMatchingInputConstraint()) {
8804 // If this is required to match an output register we have already set,
8805 // just use its register.
8806 auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
8807 AsmNodeOperands);
8808 unsigned OpFlag =
8809 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
8810 if (InlineAsm::isRegDefKind(OpFlag) ||
8811 InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
8812 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
8813 if (OpInfo.isIndirect) {
8814 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
8815 emitInlineAsmError(Call, "inline asm not supported yet: "
8816 "don't know how to handle tied "
8817 "indirect register inputs");
8818 return;
8819 }
8820
8821 SmallVector<unsigned, 4> Regs;
8822 MachineFunction &MF = DAG.getMachineFunction();
8823 MachineRegisterInfo &MRI = MF.getRegInfo();
8824 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8825 auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
8826 Register TiedReg = R->getReg();
8827 MVT RegVT = R->getSimpleValueType(0);
8828 const TargetRegisterClass *RC = TiedReg.isVirtual() ?
8829 MRI.getRegClass(TiedReg) : TRI.getMinimalPhysRegClass(TiedReg);
8830 unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
8831 for (unsigned i = 0; i != NumRegs; ++i)
8832 Regs.push_back(MRI.createVirtualRegister(RC));
8833
8834 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
8835
8836 SDLoc dl = getCurSDLoc();
8837 // Use the produced MatchedRegs object to copy the input value into the tied registers.
8838 MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag, &Call);
8839 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
8840 true, OpInfo.getMatchedOperand(), dl,
8841 DAG, AsmNodeOperands);
8842 break;
8843 }
8844
8845 assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
8846 assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
8847 "Unexpected number of operands");
8848 // Add information to the INLINEASM node to know about this input.
8849 // See InlineAsm.h isUseOperandTiedToDef.
8850 OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
8851 OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
8852 OpInfo.getMatchedOperand());
8853 AsmNodeOperands.push_back(DAG.getTargetConstant(
8854 OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8855 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
8856 break;
8857 }
8858
8859 // Treat indirect 'X' constraint as memory.
8860 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
8861 OpInfo.isIndirect)
8862 OpInfo.ConstraintType = TargetLowering::C_Memory;
8863
8864 if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
8865 OpInfo.ConstraintType == TargetLowering::C_Other) {
8866 std::vector<SDValue> Ops;
8867 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
8868 Ops, DAG);
8869 if (Ops.empty()) {
8870 if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
8871 if (isa<ConstantSDNode>(InOperandVal)) {
8872 emitInlineAsmError(Call, "value out of range for constraint '" +
8873 Twine(OpInfo.ConstraintCode) + "'");
8874 return;
8875 }
8876
8877 emitInlineAsmError(Call,
8878 "invalid operand for inline asm constraint '" +
8879 Twine(OpInfo.ConstraintCode) + "'");
8880 return;
8881 }
8882
8883 // Add information to the INLINEASM node to know about this input.
8884 unsigned ResOpType =
8885 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
8886 AsmNodeOperands.push_back(DAG.getTargetConstant(
8887 ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8888 llvm::append_range(AsmNodeOperands, Ops);
8889 break;
8890 }
8891
8892 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8893 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
8894 assert(InOperandVal.getValueType() ==
8895 TLI.getPointerTy(DAG.getDataLayout()) &&
8896 "Memory operands expect pointer values");
8897
8898 unsigned ConstraintID =
8899 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8900 assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8901 "Failed to convert memory constraint code to constraint id.");
8902
8903 // Add information to the INLINEASM node to know about this input.
8904 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8905 ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
8906 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
8907 getCurSDLoc(),
8908 MVT::i32));
8909 AsmNodeOperands.push_back(InOperandVal);
8910 break;
8911 }
8912
8913 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
8914 OpInfo.ConstraintType == TargetLowering::C_Register) &&
8915 "Unknown constraint type!");
8916
8917 // TODO: Support this.
8918 if (OpInfo.isIndirect) {
8919 emitInlineAsmError(
8920 Call, "Don't know how to handle indirect register inputs yet "
8921 "for constraint '" +
8922 Twine(OpInfo.ConstraintCode) + "'");
8923 return;
8924 }
8925
8926 // Copy the input into the appropriate registers.
8927 if (OpInfo.AssignedRegs.Regs.empty()) {
8928 emitInlineAsmError(Call,
8929 "couldn't allocate input reg for constraint '" +
8930 Twine(OpInfo.ConstraintCode) + "'");
8931 return;
8932 }
8933
8934 if (DetectWriteToReservedRegister())
8935 return;
8936
8937 SDLoc dl = getCurSDLoc();
8938
8939 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
8940 &Call);
8941
8942 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
8943 dl, DAG, AsmNodeOperands);
8944 break;
8945 }
8946 case InlineAsm::isClobber:
8947 // Add the clobbered value to the operand list, so that the register
8948 // allocator is aware that the physreg got clobbered.
8949 if (!OpInfo.AssignedRegs.Regs.empty())
8950 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
8951 false, 0, getCurSDLoc(), DAG,
8952 AsmNodeOperands);
8953 break;
8954 }
8955 }
8956
8957 // Finish up input operands. Set the input chain and add the flag last.
8958 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
8959 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
8960
8961 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
8962 Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
8963 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
8964 Flag = Chain.getValue(1);
8965
8966 // Do additional work to generate outputs.
8967
8968 SmallVector<EVT, 1> ResultVTs;
8969 SmallVector<SDValue, 1> ResultValues;
8970 SmallVector<SDValue, 8> OutChains;
8971
8972 llvm::Type *CallResultType = Call.getType();
8973 ArrayRef<Type *> ResultTypes;
8974 if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
8975 ResultTypes = StructResult->elements();
8976 else if (!CallResultType->isVoidTy())
8977 ResultTypes = makeArrayRef(CallResultType);
8978
8979 auto CurResultType = ResultTypes.begin();
8980 auto handleRegAssign = [&](SDValue V) {
8981 assert(CurResultType != ResultTypes.end() && "Unexpected value");
8982 assert((*CurResultType)->isSized() && "Unexpected unsized type");
8983 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
8984 ++CurResultType;
8985 // If the type of the inline asm call site return value differs from, but has
8986 // the same size as, the type of the asm output, bitcast it. One example of this
8987 // is for vectors with different width / number of elements. This can
8988 // happen for register classes that can contain multiple different value
8989 // types. The preg or vreg allocated may not have the same VT as was
8990 // expected.
8991 //
8992 // This can also happen for a return value that disagrees with the register
8993 // class it is put in, eg. a double in a general-purpose register on a
8994 // 32-bit machine.
8995 if (ResultVT != V.getValueType() &&
8996 ResultVT.getSizeInBits() == V.getValueSizeInBits())
8997 V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
8998 else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
8999 V.getValueType().isInteger()) {
9000 // If a result value was tied to an input value, the computed result
9001 // may have a wider width than the expected result. Extract the
9002 // relevant portion.
9003 V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
9004 }
9005 assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
9006 ResultVTs.push_back(ResultVT);
9007 ResultValues.push_back(V);
9008 };
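// Worked example (illustrative, echoing the comment above): an f64 call-site
// result delivered as an i64 from a general-purpose register on a 32-bit
// machine has equal size and takes the BITCAST path, while an i32 result tied
// to an i64 input comes back too wide and takes the TRUNCATE path.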
9009
9010 // Deal with output operands.
9011 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9012 if (OpInfo.Type == InlineAsm::isOutput) {
9013 SDValue Val;
9014 // Skip trivial output operands.
9015 if (OpInfo.AssignedRegs.Regs.empty())
9016 continue;
9017
9018 switch (OpInfo.ConstraintType) {
9019 case TargetLowering::C_Register:
9020 case TargetLowering::C_RegisterClass:
9021 Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
9022 Chain, &Flag, &Call);
9023 break;
9024 case TargetLowering::C_Immediate:
9025 case TargetLowering::C_Other:
9026 Val = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
9027 OpInfo, DAG);
9028 break;
9029 case TargetLowering::C_Memory:
9030 break; // Already handled.
9031 case TargetLowering::C_Unknown:
9032 assert(false && "Unexpected unknown constraint");
9033 }
9034
9035 // Indirect outputs manifest as stores. Record output chains.
9036 if (OpInfo.isIndirect) {
9037 const Value *Ptr = OpInfo.CallOperandVal;
9038 assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
9039 SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
9040 MachinePointerInfo(Ptr));
9041 OutChains.push_back(Store);
9042 } else {
9043 // Generate CopyFromRegs to associated registers.
9044 assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
9045 if (Val.getOpcode() == ISD::MERGE_VALUES) {
9046 for (const SDValue &V : Val->op_values())
9047 handleRegAssign(V);
9048 } else
9049 handleRegAssign(Val);
9050 }
9051 }
9052 }
9053
9054 // Set results.
9055 if (!ResultValues.empty()) {
9056 assert(CurResultType == ResultTypes.end() &&
9057 "Mismatch in number of ResultTypes");
9058 assert(ResultValues.size() == ResultTypes.size() &&
9059 "Mismatch in number of output operands in asm result");
9060
9061 SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
9062 DAG.getVTList(ResultVTs), ResultValues);
9063 setValue(&Call, V);
9064 }
9065
9066 // Collect store chains.
9067 if (!OutChains.empty())
9068 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
9069
9070 if (EmitEHLabels) {
9071 Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
9072 }
9073
9074 // Only update the root if the inline assembly has a memory effect.
9075 if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
9076 EmitEHLabels)
9077 DAG.setRoot(Chain);
9078}
9079
9080void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
9081 const Twine &Message) {
9082 LLVMContext &Ctx = *DAG.getContext();
9083 Ctx.emitError(&Call, Message);
9084
9085 // Make sure we leave the DAG in a valid state
9086 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9087 SmallVector<EVT, 1> ValueVTs;
9088 ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
9089
9090 if (ValueVTs.empty())
9091 return;
9092
9093 SmallVector<SDValue, 1> Ops;
9094 for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
9095 Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
9096
9097 setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
9098}
9099
9100void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
9101 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
9102 MVT::Other, getRoot(),
9103 getValue(I.getArgOperand(0)),
9104 DAG.getSrcValue(I.getArgOperand(0))));
9105}
9106
9107void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
9108 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9109 const DataLayout &DL = DAG.getDataLayout();
9110 SDValue V = DAG.getVAArg(
9111 TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
9112 getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
9113 DL.getABITypeAlign(I.getType()).value());
9114 DAG.setRoot(V.getValue(1));
9115
9116 if (I.getType()->isPointerTy())
9117 V = DAG.getPtrExtOrTrunc(
9118 V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
9119 setValue(&I, V);
9120}
9121
9122void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
9123 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
9124 MVT::Other, getRoot(),
9125 getValue(I.getArgOperand(0)),
9126 DAG.getSrcValue(I.getArgOperand(0))));
9127}
9128
9129void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
9130 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
9131 MVT::Other, getRoot(),
9132 getValue(I.getArgOperand(0)),
9133 getValue(I.getArgOperand(1)),
9134 DAG.getSrcValue(I.getArgOperand(0)),
9135 DAG.getSrcValue(I.getArgOperand(1))));
9136}
9137
9138SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
9139 const Instruction &I,
9140 SDValue Op) {
9141 const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
9142 if (!Range)
9143 return Op;
9144
9145 ConstantRange CR = getConstantRangeFromMetadata(*Range);
9146 if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
9147 return Op;
9148
9149 APInt Lo = CR.getUnsignedMin();
9150 if (!Lo.isMinValue())
9151 return Op;
9152
9153 APInt Hi = CR.getUnsignedMax();
9154 unsigned Bits = std::max(Hi.getActiveBits(),
9155 static_cast<unsigned>(IntegerType::MIN_INT_BITS));
9156
9157 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
9158
9159 SDLoc SL = getCurSDLoc();
9160
9161 SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
9162 DAG.getValueType(SmallVT));
9163 unsigned NumVals = Op.getNode()->getNumValues();
9164 if (NumVals == 1)
9165 return ZExt;
9166
9167 SmallVector<SDValue, 4> Ops;
9168
9169 Ops.push_back(ZExt);
9170 for (unsigned I = 1; I != NumVals; ++I)
9171 Ops.push_back(Op.getValue(I));
9172
9173 return DAG.getMergeValues(Ops, SL);
9174}
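// Example (illustrative): for range metadata !{i64 0, i64 256} on an i32
// value, Lo == 0 and Hi == 255, so Bits == 8 and the node becomes
//   AssertZext i32 %Op, ValueType:i8
// allowing later combines to drop redundant zero-extensions of %Op.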
9175
9176 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
9177/// the call being lowered.
9178///
9179/// This is a helper for lowering intrinsics that follow a target calling
9180/// convention or require stack pointer adjustment. Only a subset of the
9181/// intrinsic's operands need to participate in the calling convention.
9182void SelectionDAGBuilder::populateCallLoweringInfo(
9183 TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
9184 unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
9185 bool IsPatchPoint) {
9186 TargetLowering::ArgListTy Args;
9187 Args.reserve(NumArgs);
9188
9189 // Populate the argument list.
9190 // Attributes for args start at offset 1, after the return attribute.
9191 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
9192 ArgI != ArgE; ++ArgI) {
9193 const Value *V = Call->getOperand(ArgI);
9194
9195 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
9196
9197 TargetLowering::ArgListEntry Entry;
9198 Entry.Node = getValue(V);
9199 Entry.Ty = V->getType();
9200 Entry.setAttributes(Call, ArgI);
9201 Args.push_back(Entry);
9202 }
9203
9204 CLI.setDebugLoc(getCurSDLoc())
9205 .setChain(getRoot())
9206 .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
9207 .setDiscardResult(Call->use_empty())
9208 .setIsPatchPoint(IsPatchPoint)
9209 .setIsPreallocated(
9210 Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
9211}
9212
9213/// Add a stack map intrinsic call's live variable operands to a stackmap
9214/// or patchpoint target node's operand list.
9215///
9216/// Constants are converted to TargetConstants purely as an optimization to
9217/// avoid constant materialization and register allocation.
9218///
9219/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
9220 /// generate address computation nodes, and so FinalizeISel can convert the
9221/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
9222/// address materialization and register allocation, but may also be required
9223/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
9224/// alloca in the entry block, then the runtime may assume that the alloca's
9225/// StackMap location can be read immediately after compilation and that the
9226/// location is valid at any point during execution (this is similar to the
9227/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
9228/// only available in a register, then the runtime would need to trap when
9229/// execution reaches the StackMap in order to read the alloca's location.
9230static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
9231 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
9232 SelectionDAGBuilder &Builder) {
9233 for (unsigned i = StartIdx, e = Call.arg_size(); i != e; ++i) {
9234 SDValue OpVal = Builder.getValue(Call.getArgOperand(i));
9235 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
9236 Ops.push_back(
9237 Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
9238 Ops.push_back(
9239 Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
9240 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
9241 const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
9242 Ops.push_back(Builder.DAG.getTargetFrameIndex(
9243 FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
9244 } else
9245 Ops.push_back(OpVal);
9246 }
9247}
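// Example (illustrative): a live value that is the constant 42 becomes the
// operand pair (StackMaps::ConstantOp, 42); a static alloca becomes a
// TargetFrameIndex that FinalizeISel can turn into a DirectMemRefOp location;
// any other value is passed through unchanged.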
9248
9249/// Lower llvm.experimental.stackmap directly to its target opcode.
9250void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
9251 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
9252 // [live variables...])
9253
9254 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
9255
9256 SDValue Chain, InFlag, Callee, NullPtr;
9257 SmallVector<SDValue, 32> Ops;
9258
9259 SDLoc DL = getCurSDLoc();
9260 Callee = getValue(CI.getCalledOperand());
9261 NullPtr = DAG.getIntPtrConstant(0, DL, true);
9262
9263 // The stackmap intrinsic only records the live variables (the arguments
9264 // passed to it) and emits NOPs (if requested). Unlike the patchpoint
9265 // intrinsic, this won't be lowered to a function call. This means we don't
9266 // have to worry about calling conventions and target specific lowering code.
9267 // Instead we perform the call lowering right here.
9268 //
9269 // chain, flag = CALLSEQ_START(chain, 0, 0)
9270 // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
9271 // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
9272 //
9273 Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
9274 InFlag = Chain.getValue(1);
9275
9276 // Add the <id> and <numBytes> constants.
9277 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
9278 Ops.push_back(DAG.getTargetConstant(
9279 cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
9280 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
9281 Ops.push_back(DAG.getTargetConstant(
9282 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
9283 MVT::i32));
9284
9285 // Push live variables for the stack map.
9286 addStackMapLiveVars(CI, 2, DL, Ops, *this);
9287
9288 // We are not pushing any register mask info here on the operands list,
9289 // because the stackmap doesn't clobber anything.
9290
9291 // Push the chain and the glue flag.
9292 Ops.push_back(Chain);
9293 Ops.push_back(InFlag);
9294
9295 // Create the STACKMAP node.
9296 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
9297 SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
9298 Chain = SDValue(SM, 0);
9299 InFlag = Chain.getValue(1);
9300
9301 Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
9302
9303 // Stackmaps don't generate values, so nothing goes into the NodeMap.
9304
9305 // Set the root to the target-lowered call chain.
9306 DAG.setRoot(Chain);
9307
9308 // Inform the Frame Information that we have a stackmap in this function.
9309 FuncInfo.MF->getFrameInfo().setHasStackMap();
9310}
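// Example of a call this lowers (hypothetical IR, for illustration):
//   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 7, i32 4,
//                                                         i32 %x, i64 %y)
// records id 7, guarantees at least 4 bytes of shadow (nop padding), and lists
// %x and %y as live values in the emitted stack map.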
9311
9312/// Lower llvm.experimental.patchpoint directly to its target opcode.
9313void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
9314 const BasicBlock *EHPadBB) {
9315 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
9316 // i32 <numBytes>,
9317 // i8* <target>,
9318 // i32 <numArgs>,
9319 // [Args...],
9320 // [live variables...])
9321
9322 CallingConv::ID CC = CB.getCallingConv();
9323 bool IsAnyRegCC = CC == CallingConv::AnyReg;
9324 bool HasDef = !CB.getType()->isVoidTy();
9325 SDLoc dl = getCurSDLoc();
9326 SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
9327
9328 // Handle immediate and symbolic callees.
9329 if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
9330 Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
9331 /*isTarget=*/true);
9332 else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
9333 Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
9334 SDLoc(SymbolicCallee),
9335 SymbolicCallee->getValueType(0));
9336
9337 // Get the real number of arguments participating in the call <numArgs>
9338 SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
9339 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
9340
9341 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
9342 // Intrinsics include all meta-operands up to but not including CC.
9343 unsigned NumMetaOpers = PatchPointOpers::CCPos;
9344 assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
9345 "Not enough arguments provided to the patchpoint intrinsic");
9346
9347 // For AnyRegCC the arguments are lowered later on manually.
9348 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
9349 Type *ReturnTy =
9350 IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
9351
9352 TargetLowering::CallLoweringInfo CLI(DAG);
9353 populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
9354 ReturnTy, true);
9355 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
9356
9357 SDNode *CallEnd = Result.second.getNode();
9358 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
9359 CallEnd = CallEnd->getOperand(0).getNode();
9360
9361 /// Get a call instruction from the call sequence chain.
9362 /// Tail calls are not allowed.
9363 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
9364 "Expected a callseq node.");
9365 SDNode *Call = CallEnd->getOperand(0).getNode();
9366 bool HasGlue = Call->getGluedNode();
9367
9368 // Replace the target specific call node with the patchable intrinsic.
9369 SmallVector<SDValue, 8> Ops;
9370
9371 // Add the <id> and <numBytes> constants.
9372 SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
9373 Ops.push_back(DAG.getTargetConstant(
9374 cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
9375 SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
9376 Ops.push_back(DAG.getTargetConstant(
9377 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
9378 MVT::i32));
9379
9380 // Add the callee.
9381 Ops.push_back(Callee);
9382
9383 // Adjust <numArgs> to account for any arguments that have been passed on the
9384 // stack instead.
9385 // Call Node: Chain, Target, {Args}, RegMask, [Glue]
9386 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
9387 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
9388 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
9389
9390 // Add the calling convention
9391 Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
9392
9393 // Add the arguments we omitted previously. The register allocator should
9394 // place these in any free register.
9395 if (IsAnyRegCC)
9396 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
9397 Ops.push_back(getValue(CB.getArgOperand(i)));
9398
9399 // Push the arguments from the call instruction up to the register mask.
9400 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
9401 Ops.append(Call->op_begin() + 2, e);
9402
9403 // Push live variables for the stack map.
9404 addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
9405
9406 // Push the register mask info.
9407 if (HasGlue)
9408 Ops.push_back(*(Call->op_end()-2));
9409 else
9410 Ops.push_back(*(Call->op_end()-1));
9411
9412 // Push the chain (this is originally the first operand of the call, but
9413 // now becomes the last or second-to-last operand).
9414 Ops.push_back(*(Call->op_begin()));
9415
9416 // Push the glue flag (last operand).
9417 if (HasGlue)
9418 Ops.push_back(*(Call->op_end()-1));
9419
9420 SDVTList NodeTys;
9421 if (IsAnyRegCC && HasDef) {
9422 // Create the return types based on the intrinsic definition
9423 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9424 SmallVector<EVT, 3> ValueVTs;
9425 ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
9426 assert(ValueVTs.size() == 1 && "Expected only one return value type.");
9427
9428 // There is always a chain and a glue type at the end
9429 ValueVTs.push_back(MVT::Other);
9430 ValueVTs.push_back(MVT::Glue);
9431 NodeTys = DAG.getVTList(ValueVTs);
9432 } else
9433 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
9434
9435 // Replace the target specific call node with a PATCHPOINT node.
9436 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
9437 dl, NodeTys, Ops);
9438
9439 // Update the NodeMap.
9440 if (HasDef) {
9441 if (IsAnyRegCC)
9442 setValue(&CB, SDValue(MN, 0));
9443 else
9444 setValue(&CB, Result.first);
9445 }
9446
9447 // Fix up the consumers of the intrinsic. The chain and glue may be used in the
9448 // call sequence. Furthermore the location of the chain and glue can change
9449 // when the AnyReg calling convention is used and the intrinsic returns a
9450 // value.
9451 if (IsAnyRegCC && HasDef) {
9452 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
9453 SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
9454 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
9455 } else
9456 DAG.ReplaceAllUsesWith(Call, MN);
9457 DAG.DeleteNode(Call);
9458
9459 // Inform the Frame Information that we have a patchpoint in this function.
9460 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
9461}
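// Example of a call this lowers (hypothetical IR, for illustration):
//   %r = call i64 @llvm.experimental.patchpoint.i64(i64 2, i32 12, i8* %f,
//                                                   i32 1, i64 %a)
// reserves 12 bytes of patchable code, calls %f with the one real argument %a,
// and records any remaining operands as stack map live values.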
9462
9463void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
9464 unsigned Intrinsic) {
9465 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9466 SDValue Op1 = getValue(I.getArgOperand(0));
9467 SDValue Op2;
9468 if (I.getNumArgOperands() > 1)
9469 Op2 = getValue(I.getArgOperand(1));
9470 SDLoc dl = getCurSDLoc();
9471 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
9472 SDValue Res;
9473 SDNodeFlags SDFlags;
9474 if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
9475 SDFlags.copyFMF(*FPMO);
9476
9477 switch (Intrinsic) {
9478 case Intrinsic::vector_reduce_fadd:
9479 if (SDFlags.hasAllowReassociation())
9480 Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
9481 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
9482 SDFlags);
9483 else
9484 Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
9485 break;
9486 case Intrinsic::vector_reduce_fmul:
9487 if (SDFlags.hasAllowReassociation())
9488 Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
9489 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
9490 SDFlags);
9491 else
9492 Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
9493 break;
9494 case Intrinsic::vector_reduce_add:
9495 Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
9496 break;
9497 case Intrinsic::vector_reduce_mul:
9498 Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
9499 break;
9500 case Intrinsic::vector_reduce_and:
9501 Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
9502 break;
9503 case Intrinsic::vector_reduce_or:
9504 Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
9505 break;
9506 case Intrinsic::vector_reduce_xor:
9507 Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
9508 break;
9509 case Intrinsic::vector_reduce_smax:
9510 Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
9511 break;
9512 case Intrinsic::vector_reduce_smin:
9513 Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
9514 break;
9515 case Intrinsic::vector_reduce_umax:
9516 Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
9517 break;
9518 case Intrinsic::vector_reduce_umin:
9519 Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
9520 break;
9521 case Intrinsic::vector_reduce_fmax:
9522 Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
9523 break;
9524 case Intrinsic::vector_reduce_fmin:
9525 Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
9526 break;
9527 default:
9528 llvm_unreachable("Unhandled vector reduce intrinsic");
9529 }
9530 setValue(&I, Res);
9531}
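// Illustrative note: with the reassoc fast-math flag, vector_reduce_fadd(%acc,
// %v) is emitted as FADD(%acc, VECREDUCE_FADD(%v)); without it, the
// order-preserving VECREDUCE_SEQ_FADD(%acc, %v) node is used instead, and the
// same split applies to fmul.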
9532
9533/// Returns an AttributeList representing the attributes applied to the return
9534/// value of the given call.
9535static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
9536 SmallVector<Attribute::AttrKind, 2> Attrs;
9537 if (CLI.RetSExt)
9538 Attrs.push_back(Attribute::SExt);
9539 if (CLI.RetZExt)
9540 Attrs.push_back(Attribute::ZExt);
9541 if (CLI.IsInReg)
9542 Attrs.push_back(Attribute::InReg);
9543
9544 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
9545 Attrs);
9546}
9547
9548/// TargetLowering::LowerCallTo - This is the default LowerCallTo
9549/// implementation, which just calls LowerCall.
9550/// FIXME: When all targets are
9551/// migrated to using LowerCall, this hook should be integrated into SDISel.
9552std::pair<SDValue, SDValue>
9553TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
9554 // Handle the incoming return values from the call.
9555 CLI.Ins.clear();
9556 Type *OrigRetTy = CLI.RetTy;
9557 SmallVector<EVT, 4> RetTys;
9558 SmallVector<uint64_t, 4> Offsets;
9559 auto &DL = CLI.DAG.getDataLayout();
9560 ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
9561
9562 if (CLI.IsPostTypeLegalization) {
9563 // If we are lowering a libcall after legalization, split the return type.
9564 SmallVector<EVT, 4> OldRetTys;
9565 SmallVector<uint64_t, 4> OldOffsets;
9566 RetTys.swap(OldRetTys);
9567 Offsets.swap(OldOffsets);
9568
9569 for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
9570 EVT RetVT = OldRetTys[i];
9571 uint64_t Offset = OldOffsets[i];
9572 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
9573 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
9574 unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
9575 RetTys.append(NumRegs, RegisterVT);
9576 for (unsigned j = 0; j != NumRegs; ++j)
9577 Offsets.push_back(Offset + j * RegisterVTByteSZ);
9578 }
9579 }
9580
9581 SmallVector<ISD::OutputArg, 4> Outs;
9582 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
9583
9584 bool CanLowerReturn =
9585 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
9586 CLI.IsVarArg, Outs, CLI.RetTy->getContext());
9587
9588 SDValue DemoteStackSlot;
9589 int DemoteStackIdx = -100;
9590 if (!CanLowerReturn) {
9591 // FIXME: equivalent assert?
9592 // assert(!CS.hasInAllocaArgument() &&
9593 // "sret demotion is incompatible with inalloca");
9594 uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
9595 Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
9596 MachineFunction &MF = CLI.DAG.getMachineFunction();
9597 DemoteStackIdx =
9598 MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
9599 Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
9600 DL.getAllocaAddrSpace());
9601
9602 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
9603 ArgListEntry Entry;
9604 Entry.Node = DemoteStackSlot;
9605 Entry.Ty = StackSlotPtrType;
9606 Entry.IsSExt = false;
9607 Entry.IsZExt = false;
9608 Entry.IsInReg = false;
9609 Entry.IsSRet = true;
9610 Entry.IsNest = false;
9611 Entry.IsByVal = false;
9612 Entry.IsByRef = false;
9613 Entry.IsReturned = false;
9614 Entry.IsSwiftSelf = false;
9615 Entry.IsSwiftAsync = false;
9616 Entry.IsSwiftError = false;
9617 Entry.IsCFGuardTarget = false;
9618 Entry.Alignment = Alignment;
9619 CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
9620 CLI.NumFixedArgs += 1;
9621 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
9622
9623 // sret demotion isn't compatible with tail calls, since the sret argument
9624 // points into the caller's stack frame.
9625 CLI.IsTailCall = false;
9626 } else {
9627 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
9628 CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
9629 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9630 ISD::ArgFlagsTy Flags;
9631 if (NeedsRegBlock) {
9632 Flags.setInConsecutiveRegs();
9633 if (I == RetTys.size() - 1)
9634 Flags.setInConsecutiveRegsLast();
9635 }
9636 EVT VT = RetTys[I];
9637 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9638 CLI.CallConv, VT);
9639 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9640 CLI.CallConv, VT);
9641 for (unsigned i = 0; i != NumRegs; ++i) {
9642 ISD::InputArg MyFlags;
9643 MyFlags.Flags = Flags;
9644 MyFlags.VT = RegisterVT;
9645 MyFlags.ArgVT = VT;
9646 MyFlags.Used = CLI.IsReturnValueUsed;
9647 if (CLI.RetTy->isPointerTy()) {
9648 MyFlags.Flags.setPointer();
9649 MyFlags.Flags.setPointerAddrSpace(
9650 cast<PointerType>(CLI.RetTy)->getAddressSpace());
9651 }
9652 if (CLI.RetSExt)
9653 MyFlags.Flags.setSExt();
9654 if (CLI.RetZExt)
9655 MyFlags.Flags.setZExt();
9656 if (CLI.IsInReg)
9657 MyFlags.Flags.setInReg();
9658 CLI.Ins.push_back(MyFlags);
9659 }
9660 }
9661 }
9662
9663 // We push in swifterror return as the last element of CLI.Ins.
9664 ArgListTy &Args = CLI.getArgs();
9665 if (supportSwiftError()) {
9666 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
9667 if (Args[i].IsSwiftError) {
9668 ISD::InputArg MyFlags;
9669 MyFlags.VT = getPointerTy(DL);
9670 MyFlags.ArgVT = EVT(getPointerTy(DL));
9671 MyFlags.Flags.setSwiftError();
9672 CLI.Ins.push_back(MyFlags);
9673 }
9674 }
9675 }
9676
9677 // Handle all of the outgoing arguments.
9678 CLI.Outs.clear();
9679 CLI.OutVals.clear();
9680 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
9681 SmallVector<EVT, 4> ValueVTs;
9682 ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
9683 // FIXME: Split arguments if CLI.IsPostTypeLegalization
9684 Type *FinalType = Args[i].Ty;
9685 if (Args[i].IsByVal)
9686 FinalType = Args[i].IndirectType;
9687 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
9688 FinalType, CLI.CallConv, CLI.IsVarArg, DL);
9689 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
9690 ++Value) {
9691 EVT VT = ValueVTs[Value];
9692 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
9693 SDValue Op = SDValue(Args[i].Node.getNode(),
9694 Args[i].Node.getResNo() + Value);
9695 ISD::ArgFlagsTy Flags;
9696
9697 // Certain targets (such as MIPS), may have a different ABI alignment
9698 // for a type depending on the context. Give the target a chance to
9699 // specify the alignment it wants.
9700 const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
9701 Flags.setOrigAlign(OriginalAlignment);
9702
9703 if (Args[i].Ty->isPointerTy()) {
9704 Flags.setPointer();
9705 Flags.setPointerAddrSpace(
9706 cast<PointerType>(Args[i].Ty)->getAddressSpace());
9707 }
9708 if (Args[i].IsZExt)
9709 Flags.setZExt();
9710 if (Args[i].IsSExt)
9711 Flags.setSExt();
9712 if (Args[i].IsInReg) {
9713 // If we are using the vectorcall calling convention, a structure that is
9714 // passed InReg is surely an HVA (homogeneous vector aggregate).
9715 if (CLI.CallConv == CallingConv::X86_VectorCall &&
9716 isa<StructType>(FinalType)) {
9717 // The first value of a structure is marked as HvaStart.
9718 if (0 == Value)
9719 Flags.setHvaStart();
9720 Flags.setHva();
9721 }
9722 // Set InReg Flag
9723 Flags.setInReg();
9724 }
9725 if (Args[i].IsSRet)
9726 Flags.setSRet();
9727 if (Args[i].IsSwiftSelf)
9728 Flags.setSwiftSelf();
9729 if (Args[i].IsSwiftAsync)
9730 Flags.setSwiftAsync();
9731 if (Args[i].IsSwiftError)
9732 Flags.setSwiftError();
9733 if (Args[i].IsCFGuardTarget)
9734 Flags.setCFGuardTarget();
9735 if (Args[i].IsByVal)
9736 Flags.setByVal();
9737 if (Args[i].IsByRef)
9738 Flags.setByRef();
9739 if (Args[i].IsPreallocated) {
9740 Flags.setPreallocated();
9741 // Set the byval flag for CCAssignFn callbacks that don't know about
9742 // preallocated. This way we can know how many bytes we should've
9743 // allocated and how many bytes a callee cleanup function will pop. If
9744 // we port preallocated to more targets, we'll have to add custom
9745 // preallocated handling in the various CC lowering callbacks.
9746 Flags.setByVal();
9747 }
9748 if (Args[i].IsInAlloca) {
9749 Flags.setInAlloca();
9750 // Set the byval flag for CCAssignFn callbacks that don't know about
9751 // inalloca. This way we can know how many bytes we should've allocated
9752 // and how many bytes a callee cleanup function will pop. If we port
9753 // inalloca to more targets, we'll have to add custom inalloca handling
9754 // in the various CC lowering callbacks.
9755 Flags.setByVal();
9756 }
9757 Align MemAlign;
9758 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
9759 unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
9760 Flags.setByValSize(FrameSize);
9761
8762 // For ByVal, alignment should come from FE. BE will guess if this info is not there but there are cases it cannot get right.
9763 if (auto MA = Args[i].Alignment)
9764 MemAlign = *MA;
9765 else
9766 MemAlign = Align(getByValTypeAlignment(Args[i].IndirectType, DL));
9767 } else if (auto MA = Args[i].Alignment) {
9768 MemAlign = *MA;
9769 } else {
9770 MemAlign = OriginalAlignment;
9771 }
9772 Flags.setMemAlign(MemAlign);
9773 if (Args[i].IsNest)
9774 Flags.setNest();
9775 if (NeedsRegBlock)
9776 Flags.setInConsecutiveRegs();
9777
9778 MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9779 CLI.CallConv, VT);
9780 unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9781 CLI.CallConv, VT);
9782 SmallVector<SDValue, 4> Parts(NumParts);
9783 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
9784
9785 if (Args[i].IsSExt)
9786 ExtendKind = ISD::SIGN_EXTEND;
9787 else if (Args[i].IsZExt)
9788 ExtendKind = ISD::ZERO_EXTEND;
9789
9790 // Conservatively only handle 'returned' on non-vectors that can be lowered,
9791 // for now.
9792 if (Args[i].IsReturned && !Op.getValueType().isVector() &&
9793 CanLowerReturn) {
9794 assert((CLI.RetTy == Args[i].Ty ||
9795 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
9796 CLI.RetTy->getPointerAddressSpace() ==
9797 Args[i].Ty->getPointerAddressSpace())) &&
9798 RetTys.size() == NumValues && "unexpected use of 'returned'");
9799 // Before passing 'returned' to the target lowering code, ensure that
9800 // either the register MVT and the actual EVT are the same size or that
9801 // the return value and argument are extended in the same way; in these
9802 // cases it's safe to pass the argument register value unchanged as the
9803 // return register value (although it's at the target's option whether
9804 // to do so)
9805 // TODO: allow code generation to take advantage of partially preserved
9806 // registers rather than clobbering the entire register when the
9807 // parameter extension method is not compatible with the return
9808 // extension method
9809 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
9810 (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
9811 CLI.RetZExt == Args[i].IsZExt))
9812 Flags.setReturned();
9813 }
9814
9815 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
9816 CLI.CallConv, ExtendKind);
9817
9818 for (unsigned j = 0; j != NumParts; ++j) {
9819 // If it isn't the first piece, the alignment must be 1.
9820 // For scalable vectors the scalable part is currently handled
9821 // by individual targets, so we just use the known minimum size here.
9822 ISD::OutputArg MyFlags(
9823 Flags, Parts[j].getValueType().getSimpleVT(), VT,
9824 i < CLI.NumFixedArgs, i,
9825 j * Parts[j].getValueType().getStoreSize().getKnownMinSize());
9826 if (NumParts > 1 && j == 0)
9827 MyFlags.Flags.setSplit();
9828 else if (j != 0) {
9829 MyFlags.Flags.setOrigAlign(Align(1));
9830 if (j == NumParts - 1)
9831 MyFlags.Flags.setSplitEnd();
9832 }
9833
9834 CLI.Outs.push_back(MyFlags);
9835 CLI.OutVals.push_back(Parts[j]);
9836 }
9837
9838 if (NeedsRegBlock && Value == NumValues - 1)
9839 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
9840 }
9841 }
9842
9843 SmallVector<SDValue, 4> InVals;
9844 CLI.Chain = LowerCall(CLI, InVals);
9845
9846 // Update CLI.InVals to use outside of this function.
9847 CLI.InVals = InVals;
9848
9849 // Verify that the target's LowerCall behaved as expected.
9850 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
9851 "LowerCall didn't return a valid chain!");
9852 assert((!CLI.IsTailCall || InVals.empty()) &&
9853 "LowerCall emitted a return value for a tail call!");
9854 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
9855 "LowerCall didn't emit the correct number of values!");
9856
9857 // For a tail call, the return value is merely live-out and there aren't
9858 // any nodes in the DAG representing it. Return a special value to
9859 // indicate that a tail call has been emitted and no more Instructions
9860 // should be processed in the current block.
9861 if (CLI.IsTailCall) {
9862 CLI.DAG.setRoot(CLI.Chain);
9863 return std::make_pair(SDValue(), SDValue());
9864 }
9865
9866#ifndef NDEBUG
9867 for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
9868 assert(InVals[i].getNode() && "LowerCall emitted a null value!");
9869 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
9870 "LowerCall emitted a value with the wrong type!");
9871 }
9872#endif
9873
9874 SmallVector<SDValue, 4> ReturnValues;
9875 if (!CanLowerReturn) {
9876 // The instruction result is the result of loading from the
9877 // hidden sret parameter.
9878 SmallVector<EVT, 1> PVTs;
9879 Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());
9880
9881 ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
9882 assert(PVTs.size() == 1 && "Pointers should fit in one register");
9883 EVT PtrVT = PVTs[0];
9884
9885 unsigned NumValues = RetTys.size();
9886 ReturnValues.resize(NumValues);
9887 SmallVector<SDValue, 4> Chains(NumValues);
9888
9889 // An aggregate return value cannot wrap around the address space, so
9890 // offsets to its parts don't wrap either.
9891 SDNodeFlags Flags;
9892 Flags.setNoUnsignedWrap(true);
9893
9894 MachineFunction &MF = CLI.DAG.getMachineFunction();
9895 Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
9896 for (unsigned i = 0; i < NumValues; ++i) {
9897 SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
9898 CLI.DAG.getConstant(Offsets[i], CLI.DL,
9899 PtrVT), Flags);
9900 SDValue L = CLI.DAG.getLoad(
9901 RetTys[i], CLI.DL, CLI.Chain, Add,
9902 MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
9903 DemoteStackIdx, Offsets[i]),
9904 HiddenSRetAlign);
9905 ReturnValues[i] = L;
9906 Chains[i] = L.getValue(1);
9907 }
9908
9909 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
9910 } else {
9911 // Collect the legal value parts into potentially illegal values
9912 // that correspond to the original function's return values.
9913 Optional<ISD::NodeType> AssertOp;
9914 if (CLI.RetSExt)
9915 AssertOp = ISD::AssertSext;
9916 else if (CLI.RetZExt)
9917 AssertOp = ISD::AssertZext;
9918 unsigned CurReg = 0;
9919 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9920 EVT VT = RetTys[I];
9921 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9922 CLI.CallConv, VT);
9923 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9924 CLI.CallConv, VT);
9925
9926 ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
9927 NumRegs, RegisterVT, VT, nullptr,
9928 CLI.CallConv, AssertOp));
9929 CurReg += NumRegs;
9930 }
9931
9932 // For a function returning void, there is no return value. We can't create
9933 // such a node, so we just return a null return value in that case; nothing
9934 // will actually look at the value.
9935 if (ReturnValues.empty())
9936 return std::make_pair(SDValue(), CLI.Chain);
9937 }
9938
9939 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
9940 CLI.DAG.getVTList(RetTys), ReturnValues);
9941 return std::make_pair(Res, CLI.Chain);
9942}
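// Sketch of the sret-demotion path above (hypothetical IR, for illustration):
// when CanLowerReturn is false the call is effectively rewritten as
//   %slot = alloca %RetTy                        ; DemoteStackIdx object
//   call void @callee(%RetTy* sret(%RetTy) %slot, ...)
//   %ret = load %RetTy, %RetTy* %slot            ; one load per value part
// with the loads joined by a TokenFactor that becomes the new chain.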
9943
9944/// Places new result values for the node in Results (their number
9945/// and types must exactly match those of the original return values of
9946/// the node), or leaves Results empty, which indicates that the node is not
9947/// to be custom lowered after all.
9948void TargetLowering::LowerOperationWrapper(SDNode *N,
9949 SmallVectorImpl<SDValue> &Results,
9950 SelectionDAG &DAG) const {
9951 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
9952
9953 if (!Res.getNode())
9954 return;
9955
9956 // If the original node has one result, take the return value from
9957 // LowerOperation as is. It might not be result number 0.
9958 if (N->getNumValues() == 1) {
9959 Results.push_back(Res);
9960 return;
9961 }
9962
9963 // If the original node has multiple results, then the return node should
9964 // have the same number of results.
9965 assert((N->getNumValues() == Res->getNumValues()) &&
9966 "Lowering returned the wrong number of results!");
9967
9968 // Places new result values based on N result number.
9969 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
9970 Results.push_back(Res.getValue(I));
9971}
9972
9973SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9974 llvm_unreachable("LowerOperation not implemented for this target!");
9975}
9976
9977void
9978SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
9979 SDValue Op = getNonRegisterValue(V);
9980 assert((Op.getOpcode() != ISD::CopyFromReg ||
9981 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
9982 "Copy from a reg to the same reg!");
9983 assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
9984
9985 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9986 // If this is an InlineAsm we have to match the registers required, not the
9987 // notional registers required by the type.
9988
9989 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
9990 None); // This is not an ABI copy.
9991 SDValue Chain = DAG.getEntryNode();
9992
9993 ISD::NodeType ExtendType = ISD::ANY_EXTEND;
9994 auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
9995 if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
9996 ExtendType = PreferredExtendIt->second;
9997 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
9998 PendingExports.push_back(Chain);
9999}
10000
10001#include "llvm/CodeGen/SelectionDAGISel.h"
10002
10003 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
10004 /// entry block, return true. Arguments used by a switch count as escaping
10005 /// the entry block, since the switch may expand into multiple basic blocks.
10006static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
10007 // With FastISel active, we may be splitting blocks, so force creation
10008 // of virtual registers for all non-dead arguments.
10009 if (FastISel)
10010 return A->use_empty();
10011
10012 const BasicBlock &Entry = A->getParent()->front();
10013 for (const User *U : A->users())
10014 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
10015 return false; // Use not in entry block.
10016
10017 return true;
10018}
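// For illustration, a hypothetical IR fragment that fails this predicate
// because the argument escapes the entry block:
//   define i32 @f(i32 %x) {
//   entry:
//     br label %next
//   next:
//     %y = add i32 %x, 1   ; use of %x outside the entry block
//     ret i32 %y
//   }
// LowerArguments therefore creates a virtual register for %x rather than
// treating it as entry-block-only.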
10019
10020using ArgCopyElisionMapTy =
10021 DenseMap<const Argument *,
10022 std::pair<const AllocaInst *, const StoreInst *>>;
10023
10024/// Scan the entry block of the function in FuncInfo for arguments that look
10025/// like copies into a local alloca. Record any copied arguments in
10026/// ArgCopyElisionCandidates.
10027static void
10028findArgumentCopyElisionCandidates(const DataLayout &DL,
10029 FunctionLoweringInfo *FuncInfo,
10030 ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
10031 // Record the state of every static alloca used in the entry block. Argument
10032 // allocas are all used in the entry block, so we need approximately as many
10033 // entries as we have arguments.
10034 enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
10035 SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
10036 unsigned NumArgs = FuncInfo->Fn->arg_size();
10037 StaticAllocas.reserve(NumArgs * 2);
10038
10039 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
10040 if (!V)
10041 return nullptr;
10042 V = V->stripPointerCasts();
10043 const auto *AI = dyn_cast<AllocaInst>(V);
10044 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
10045 return nullptr;
10046 auto Iter = StaticAllocas.insert({AI, Unknown});
10047 return &Iter.first->second;
10048 };
10049
10050 // Look for stores of arguments to static allocas. Look through bitcasts and
10051 // GEPs to handle type coercions, as long as the alloca is fully initialized
10052 // by the store. Any non-store use of an alloca escapes it and any subsequent
10053 // unanalyzed store might write it.
10054 // FIXME: Handle structs initialized with multiple stores.
10055 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
10056 // Look for stores, and handle non-store uses conservatively.
10057 const auto *SI = dyn_cast<StoreInst>(&I);
10058 if (!SI) {
10059 // We will look through cast uses, so ignore them completely.
10060 if (I.isCast())
10061 continue;
10062 // Ignore debug info and pseudo op intrinsics, they don't escape or store
10063 // to allocas.
10064 if (I.isDebugOrPseudoInst())
10065 continue;
10066 // This is an unknown instruction. Assume it escapes or writes to all
10067 // static alloca operands.
10068 for (const Use &U : I.operands()) {
10069 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
10070 *Info = StaticAllocaInfo::Clobbered;
10071 }
10072 continue;
10073 }
10074
10075 // If the stored value is a static alloca, mark it as escaped.
10076 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
10077 *Info = StaticAllocaInfo::Clobbered;
10078
10079 // Check if the destination is a static alloca.
10080 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
10081 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
10082 if (!Info)
10083 continue;
10084 const AllocaInst *AI = cast<AllocaInst>(Dst);
10085
10086 // Skip allocas that have been initialized or clobbered.
10087 if (*Info != StaticAllocaInfo::Unknown)
10088 continue;
10089
10090 // Check if the stored value is an argument, and that this store fully
10091 // initializes the alloca.
10092 // If the argument type has padding bits we can't directly forward a pointer
10093 // as the upper bits may contain garbage.
10094 // Don't elide copies from the same argument twice.
10095 const Value *Val = SI->getValueOperand()->stripPointerCasts();
10096 const auto *Arg = dyn_cast<Argument>(Val);
10097 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
10098 Arg->getType()->isEmptyTy() ||
10099 DL.getTypeStoreSize(Arg->getType()) !=
10100 DL.getTypeAllocSize(AI->getAllocatedType()) ||
10101 !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
10102 ArgCopyElisionCandidates.count(Arg)) {
10103 *Info = StaticAllocaInfo::Clobbered;
10104 continue;
10105 }
10106
10107 LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
10108 << '\n');
10109
10110 // Mark this alloca and store for argument copy elision.
10111 *Info = StaticAllocaInfo::Elidable;
10112 ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
10113
10114 // Stop scanning if we've seen all arguments. This will happen early in -O0
10115 // builds, which is useful, because -O0 builds have large entry blocks and
10116 // many allocas.
10117 if (ArgCopyElisionCandidates.size() == NumArgs)
10118 break;
10119 }
10120}
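// For illustration, the canonical elidable pattern this scan matches is a
// whole-argument spill in the entry block (hypothetical names):
//   %x.addr = alloca i64
//   store i64 %x, i64* %x.addr
// Provided %x.addr is neither clobbered nor escaped before the store and the
// store covers the entire alloca, {%x.addr, store} is recorded as the
// elision candidate for %x.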
10121
10122/// Try to elide argument copies from memory into a local alloca. Succeeds if
10123/// ArgVal is a load from a suitable fixed stack object.
10124static void tryToElideArgumentCopy(
10125 FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
10126 DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
10127 SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
10128 ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
10129 SDValue ArgVal, bool &ArgHasUses) {
10130 // Check if this is a load from a fixed stack object.
10131 auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
10132 if (!LNode)
10133 return;
10134 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
10135 if (!FINode)
10136 return;
10137
10138 // Check that the fixed stack object is the right size and alignment.
10139 // Look at the alignment that the user wrote on the alloca instead of looking
10140 // at the stack object.
10141 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
10142 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
10143 const AllocaInst *AI = ArgCopyIter->second.first;
10144 int FixedIndex = FINode->getIndex();
10145 int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
10146 int OldIndex = AllocaIndex;
10147 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
10148 if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
10149 LLVM_DEBUG(
10150 dbgs() << "  argument copy elision failed due to bad fixed stack "
10151 "object size\n");
10152 return;
10153 }
10154 Align RequiredAlignment = AI->getAlign();
10155 if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
10156 LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
10157 "greater than stack argument alignment ("
10158 << DebugStr(RequiredAlignment) << " vs "
10159 << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
10160 return;
10161 }
10162
10163 // Perform the elision. Delete the old stack object and replace its only use
10164 // in the variable info map. Mark the stack object as mutable.
10165 LLVM_DEBUG({
10166 dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
10167 << "  Replacing frame index " << OldIndex << " with " << FixedIndex
10168 << '\n';
10169 });
10170 MFI.RemoveStackObject(OldIndex);
10171 MFI.setIsImmutableObjectIndex(FixedIndex, false);
10172 AllocaIndex = FixedIndex;
10173 ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
10174 Chains.push_back(ArgVal.getValue(1));
10175
10176 // Avoid emitting code for the store implementing the copy.
10177 const StoreInst *SI = ArgCopyIter->second.second;
10178 ElidedArgCopyInstrs.insert(SI);
10179
10180 // Check for uses of the argument again so that we can avoid exporting ArgVal
10181 // if it isn't used by anything other than the store.
10182 for (const Value *U : Arg.users()) {
10183 if (U != SI) {
10184 ArgHasUses = true;
10185 break;
10186 }
10187 }
10188}
10189
10190void SelectionDAGISel::LowerArguments(const Function &F) {
10191 SelectionDAG &DAG = SDB->DAG;
10192 SDLoc dl = SDB->getCurSDLoc();
10193 const DataLayout &DL = DAG.getDataLayout();
10194 SmallVector<ISD::InputArg, 16> Ins;
10195
10196 // In Naked functions we aren't going to save any registers.
10197 if (F.hasFnAttribute(Attribute::Naked))
10198 return;
10199
10200 if (!FuncInfo->CanLowerReturn) {
10201 // Put in an sret pointer parameter before all the other parameters.
10202 SmallVector<EVT, 1> ValueVTs;
10203 ComputeValueVTs(*TLI, DAG.getDataLayout(),
10204 F.getReturnType()->getPointerTo(
10205 DAG.getDataLayout().getAllocaAddrSpace()),
10206 ValueVTs);
10207
10208 // NOTE: Assuming that a pointer will never break down to more than one VT
10209 // or one register.
10210 ISD::ArgFlagsTy Flags;
10211 Flags.setSRet();
10212 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
10213 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
10214 ISD::InputArg::NoArgIndex, 0);
10215 Ins.push_back(RetArg);
10216 }
10217
10218 // Look for stores of arguments to static allocas. Mark such arguments with a
10219 // flag to ask the target to give us the memory location of that argument if
10220 // available.
10221 ArgCopyElisionMapTy ArgCopyElisionCandidates;
10222 findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
10223 ArgCopyElisionCandidates);
10224
10225 // Set up the incoming argument description vector.
10226 for (const Argument &Arg : F.args()) {
10227 unsigned ArgNo = Arg.getArgNo();
10228 SmallVector<EVT, 4> ValueVTs;
10229 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
10230 bool isArgValueUsed = !Arg.use_empty();
10231 unsigned PartBase = 0;
10232 Type *FinalType = Arg.getType();
10233 if (Arg.hasAttribute(Attribute::ByVal))
10234 FinalType = Arg.getParamByValType();
10235 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
10236 FinalType, F.getCallingConv(), F.isVarArg(), DL);
10237 for (unsigned Value = 0, NumValues = ValueVTs.size();
10238 Value != NumValues; ++Value) {
10239 EVT VT = ValueVTs[Value];
10240 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
10241 ISD::ArgFlagsTy Flags;
10242
10243
10244 if (Arg.getType()->isPointerTy()) {
10245 Flags.setPointer();
10246 Flags.setPointerAddrSpace(
10247 cast<PointerType>(Arg.getType())->getAddressSpace());
10248 }
10249 if (Arg.hasAttribute(Attribute::ZExt))
10250 Flags.setZExt();
10251 if (Arg.hasAttribute(Attribute::SExt))
10252 Flags.setSExt();
10253 if (Arg.hasAttribute(Attribute::InReg)) {
10254 // If we are using the vectorcall calling convention, a structure that is
10255 // passed InReg is surely an HVA (homogeneous vector aggregate).
10256 if (F.getCallingConv() == CallingConv::X86_VectorCall &&
10257 isa<StructType>(Arg.getType())) {
10258 // The first value of a structure is marked as the start of the HVA.
10259 if (0 == Value)
10260 Flags.setHvaStart();
10261 Flags.setHva();
10262 }
10263 // Set InReg Flag
10264 Flags.setInReg();
10265 }
10266 if (Arg.hasAttribute(Attribute::StructRet))
10267 Flags.setSRet();
10268 if (Arg.hasAttribute(Attribute::SwiftSelf))
10269 Flags.setSwiftSelf();
10270 if (Arg.hasAttribute(Attribute::SwiftAsync))
10271 Flags.setSwiftAsync();
10272 if (Arg.hasAttribute(Attribute::SwiftError))
10273 Flags.setSwiftError();
10274 if (Arg.hasAttribute(Attribute::ByVal))
10275 Flags.setByVal();
10276 if (Arg.hasAttribute(Attribute::ByRef))
10277 Flags.setByRef();
10278 if (Arg.hasAttribute(Attribute::InAlloca)) {
10279 Flags.setInAlloca();
10280 // Set the byval flag for CCAssignFn callbacks that don't know about
10281 // inalloca. This way we can know how many bytes we should've allocated
10282 // and how many bytes a callee cleanup function will pop. If we port
10283 // inalloca to more targets, we'll have to add custom inalloca handling
10284 // in the various CC lowering callbacks.
10285 Flags.setByVal();
10286 }
10287 if (Arg.hasAttribute(Attribute::Preallocated)) {
10288 Flags.setPreallocated();
10289 // Set the byval flag for CCAssignFn callbacks that don't know about
10290 // preallocated. This way we can know how many bytes we should've
10291 // allocated and how many bytes a callee cleanup function will pop. If
10292 // we port preallocated to more targets, we'll have to add custom
10293 // preallocated handling in the various CC lowering callbacks.
10294 Flags.setByVal();
10295 }
10296
10297 // Certain targets (such as MIPS), may have a different ABI alignment
10298 // for a type depending on the context. Give the target a chance to
10299 // specify the alignment it wants.
10300 const Align OriginalAlignment(
10301 TLI->getABIAlignmentForCallingConv(ArgTy, DL));
10302 Flags.setOrigAlign(OriginalAlignment);
10303
10304 Align MemAlign;
10305 Type *ArgMemTy = nullptr;
10306 if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
10307 Flags.isByRef()) {
10308 if (!ArgMemTy)
10309 ArgMemTy = Arg.getPointeeInMemoryValueType();
10310
10311 uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
10312
10313 // For in-memory arguments, size and alignment should be passed from the
10314 // frontend. The backend will guess if this info is not there, but there are
10315 // cases it cannot get right.
10316 if (auto ParamAlign = Arg.getParamStackAlign())
10317 MemAlign = *ParamAlign;
10318 else if ((ParamAlign = Arg.getParamAlign()))
10319 MemAlign = *ParamAlign;
10320 else
10321 MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
10322 if (Flags.isByRef())
10323 Flags.setByRefSize(MemSize);
10324 else
10325 Flags.setByValSize(MemSize);
10326 } else if (auto ParamAlign = Arg.getParamStackAlign()) {
10327 MemAlign = *ParamAlign;
10328 } else {
10329 MemAlign = OriginalAlignment;
10330 }
10331 Flags.setMemAlign(MemAlign);
10332
10333 if (Arg.hasAttribute(Attribute::Nest))
10334 Flags.setNest();
10335 if (NeedsRegBlock)
10336 Flags.setInConsecutiveRegs();
10337 if (ArgCopyElisionCandidates.count(&Arg))
10338 Flags.setCopyElisionCandidate();
10339 if (Arg.hasAttribute(Attribute::Returned))
10340 Flags.setReturned();
10341
10342 MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
10343 *CurDAG->getContext(), F.getCallingConv(), VT);
10344 unsigned NumRegs = TLI->getNumRegistersForCallingConv(
10345 *CurDAG->getContext(), F.getCallingConv(), VT);
10346 for (unsigned i = 0; i != NumRegs; ++i) {
10347 // For scalable vectors, use the minimum size; individual targets
10348 // are responsible for handling scalable vector arguments and
10349 // return values.
10350 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
10351 ArgNo, PartBase+i*RegisterVT.getStoreSize().getKnownMinSize());
10352 if (NumRegs > 1 && i == 0)
10353 MyFlags.Flags.setSplit();
10354 // If it isn't the first piece, the alignment must be 1.
10355 else if (i > 0) {
10356 MyFlags.Flags.setOrigAlign(Align(1));
10357 if (i == NumRegs - 1)
10358 MyFlags.Flags.setSplitEnd();
10359 }
10360 Ins.push_back(MyFlags);
10361 }
10362 if (NeedsRegBlock && Value == NumValues - 1)
10363 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
10364 PartBase += VT.getStoreSize().getKnownMinSize();
10365 }
10366 }
10367
10368 // Call the target to set up the argument values.
10369 SmallVector<SDValue, 8> InVals;
10370 SDValue NewRoot = TLI->LowerFormalArguments(
10371 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
10372
10373 // Verify that the target's LowerFormalArguments behaved as expected.
10374 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
10375 "LowerFormalArguments didn't return a valid chain!");
10376 assert(InVals.size() == Ins.size() &&
10377 "LowerFormalArguments didn't emit the correct number of values!");
10378 LLVM_DEBUG({
10379 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
10380 assert(InVals[i].getNode() &&
10381 "LowerFormalArguments emitted a null value!");
10382 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
10383 "LowerFormalArguments emitted a value with the wrong type!");
10384 }
10385 });
10386
10387 // Update the DAG with the new chain value resulting from argument lowering.
10388 DAG.setRoot(NewRoot);
10389
10390 // Set up the argument values.
10391 unsigned i = 0;
10392 if (!FuncInfo->CanLowerReturn) {
10393 // Create a virtual register for the sret pointer, and put in a copy
10394 // from the sret argument into it.
10395 SmallVector<EVT, 1> ValueVTs;
10396 ComputeValueVTs(*TLI, DAG.getDataLayout(),
10397 F.getReturnType()->getPointerTo(
10398 DAG.getDataLayout().getAllocaAddrSpace()),
10399 ValueVTs);
10400 MVT VT = ValueVTs[0].getSimpleVT();
10401 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
10402 Optional<ISD::NodeType> AssertOp = None;
10403 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
10404 nullptr, F.getCallingConv(), AssertOp);
10405
10406 MachineFunction& MF = SDB->DAG.getMachineFunction();
10407 MachineRegisterInfo& RegInfo = MF.getRegInfo();
10408 Register SRetReg =
10409 RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
10410 FuncInfo->DemoteRegister = SRetReg;
10411 NewRoot =
10412 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
10413 DAG.setRoot(NewRoot);
10414
10415 // i indexes lowered arguments. Bump it past the hidden sret argument.
10416 ++i;
10417 }
10418
10419 SmallVector<SDValue, 4> Chains;
10420 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
10421 for (const Argument &Arg : F.args()) {
10422 SmallVector<SDValue, 4> ArgValues;
10423 SmallVector<EVT, 4> ValueVTs;
10424 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
10425 unsigned NumValues = ValueVTs.size();
10426 if (NumValues == 0)
10427 continue;
10428
10429 bool ArgHasUses = !Arg.use_empty();
10430
10431 // Elide the copying store if the target loaded this argument from a
10432 // suitable fixed stack object.
10433 if (Ins[i].Flags.isCopyElisionCandidate()) {
10434 tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
10435 ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
10436 InVals[i], ArgHasUses);
10437 }
10438
10439 // If this argument is unused then remember its value. It is used to generate
10440 // debugging information.
10441 bool isSwiftErrorArg =
10442 TLI->supportSwiftError() &&
10443 Arg.hasAttribute(Attribute::SwiftError);
10444 if (!ArgHasUses && !isSwiftErrorArg) {
10445 SDB->setUnusedArgValue(&Arg, InVals[i]);
10446
10447 // Also remember any frame index for use in FastISel.
10448 if (FrameIndexSDNode *FI =
10449 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
10450 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
10451 }
10452
10453 for (unsigned Val = 0; Val != NumValues; ++Val) {
10454 EVT VT = ValueVTs[Val];
10455 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
10456 F.getCallingConv(), VT);
10457 unsigned NumParts = TLI->getNumRegistersForCallingConv(
10458 *CurDAG->getContext(), F.getCallingConv(), VT);
10459
10460 // Even an apparent 'unused' swifterror argument needs to be returned. So
10461 // we do generate a copy for it that can be used on return from the
10462 // function.
10463 if (ArgHasUses || isSwiftErrorArg) {
10464 Optional<ISD::NodeType> AssertOp;
10465 if (Arg.hasAttribute(Attribute::SExt))
10466 AssertOp = ISD::AssertSext;
10467 else if (Arg.hasAttribute(Attribute::ZExt))
10468 AssertOp = ISD::AssertZext;
10469
10470 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
10471 PartVT, VT, nullptr,
10472 F.getCallingConv(), AssertOp));
10473 }
10474
10475 i += NumParts;
10476 }
10477
10478 // We don't need to do anything else for unused arguments.
10479 if (ArgValues.empty())
10480 continue;
10481
10482 // Note down frame index.
10483 if (FrameIndexSDNode *FI =
10484 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
10485 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
10486
10487 SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
10488 SDB->getCurSDLoc());
10489
10490 SDB->setValue(&Arg, Res);
10491 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
10492 // We want to associate the argument with the frame index, among
10493 // involved operands, that correspond to the lowest address. The
10494 // getCopyFromParts function, called earlier, is swapping the order of
10495 // the operands to BUILD_PAIR depending on endianness. The result of
10496 // that swapping is that the least significant bits of the argument will
10497 // be in the first operand of the BUILD_PAIR node, and the most
10498 // significant bits will be in the second operand.
10499 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
10500 if (LoadSDNode *LNode =
10501 dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
10502 if (FrameIndexSDNode *FI =
10503 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
10504 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
10505 }
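// Worked example for the BUILD_PAIR case above (hypothetical target): an
// i64 argument split into two i32 stack loads becomes
//   Res = BUILD_PAIR(lo, hi)
// with the least significant half in operand 0. On a big-endian target the
// high half lives at the lower address, so LowAddressOp selects operand 1
// and the frame index is taken from that load's base pointer.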
10506
10507 // Analyses past this point are naive and don't expect an assertion.
10508 if (Res.getOpcode() == ISD::AssertZext)
10509 Res = Res.getOperand(0);
10510
10511 // Update the SwiftErrorVRegDefMap.
10512 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
10513 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
10514 if (Register::isVirtualRegister(Reg))
10515 SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
10516 Reg);
10517 }
10518
10519 // If this argument is live outside of the entry block, insert a copy from
10520 // wherever we got it to the vreg that other BB's will reference it as.
10521 if (Res.getOpcode() == ISD::CopyFromReg) {
10522 // If we can, though, try to skip creating an unnecessary vreg.
10523 // FIXME: This isn't very clean... it would be nice to make this more
10524 // general.
10525 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
10526 if (Register::isVirtualRegister(Reg)) {
10527 FuncInfo->ValueMap[&Arg] = Reg;
10528 continue;
10529 }
10530 }
10531 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
10532 FuncInfo->InitializeRegForValue(&Arg);
10533 SDB->CopyToExportRegsIfNeeded(&Arg);
10534 }
10535 }
10536
10537 if (!Chains.empty()) {
10538 Chains.push_back(NewRoot);
10539 NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
10540 }
10541
10542 DAG.setRoot(NewRoot);
10543
10544 assert(i == InVals.size() && "Argument register count mismatch!");
10545
10546 // If any argument copy elisions occurred and we have debug info, update the
10547 // stale frame indices used in the dbg.declare variable info table.
10548 MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
10549 if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
10550 for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
10551 auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
10552 if (I != ArgCopyElisionFrameIndexMap.end())
10553 VI.Slot = I->second;
10554 }
10555 }
10556
10557 // Finally, if the target has anything special to do, allow it to do so.
10558 emitFunctionEntryCode();
10559}
10560
10561/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
10562/// ensure constants are generated when needed. Remember the virtual registers
10563/// that need to be added to the Machine PHI nodes as input. We cannot just
10564/// directly add them, because expansion might result in multiple MBB's for one
10565/// BB. As such, the start of the BB might correspond to a different MBB than
10566/// the end.
10567void
10568SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
10569 const Instruction *TI = LLVMBB->getTerminator();
10570
10571 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
10572
10573 // Check PHI nodes in successors that expect a value to be available from this
10574 // block.
10575 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
10576 const BasicBlock *SuccBB = TI->getSuccessor(succ);
10577 if (!isa<PHINode>(SuccBB->begin())) continue;
10578 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
10579
10580 // If this terminator has multiple identical successors (common for
10581 // switches), only handle each succ once.
10582 if (!SuccsHandled.insert(SuccMBB).second)
10583 continue;
10584
10585 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
10586
10587 // At this point we know that there is a 1-1 correspondence between LLVM PHI
10588 // nodes and Machine PHI nodes, but the incoming operands have not been
10589 // emitted yet.
10590 for (const PHINode &PN : SuccBB->phis()) {
10591 // Ignore dead phi's.
10592 if (PN.use_empty())
10593 continue;
10594
10595 // Skip empty types
10596 if (PN.getType()->isEmptyTy())
10597 continue;
10598
10599 unsigned Reg;
10600 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
10601
10602 if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
10603 unsigned &RegOut = ConstantsOut[C];
10604 if (RegOut == 0) {
10605 RegOut = FuncInfo.CreateRegs(C);
10606 CopyValueToVirtualRegister(C, RegOut);
10607 }
10608 Reg = RegOut;
10609 } else {
10610 DenseMap<const Value *, Register>::iterator I =
10611 FuncInfo.ValueMap.find(PHIOp);
10612 if (I != FuncInfo.ValueMap.end())
10613 Reg = I->second;
10614 else {
10615 assert(isa<AllocaInst>(PHIOp) &&
10616 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
10617 "Didn't codegen value into a register!??");
10618 Reg = FuncInfo.CreateRegs(PHIOp);
10619 CopyValueToVirtualRegister(PHIOp, Reg);
10620 }
10621 }
10622
10623 // Remember that this register needs to be added to the machine PHI node as
10624 // the input for this MBB.
10625 SmallVector<EVT, 4> ValueVTs;
10626 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10627 ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
10628 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
10629 EVT VT = ValueVTs[vti];
10630 unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
10631 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
10632 FuncInfo.PHINodesToUpdate.push_back(
10633 std::make_pair(&*MBBI++, Reg + i));
10634 Reg += NumRegisters;
10635 }
10636 }
10637 }
10638
10639 ConstantsOut.clear();
10640}
10641
10642 /// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
10643 /// if SuccMBB is null.
10644MachineBasicBlock *
10645SelectionDAGBuilder::StackProtectorDescriptor::
10646AddSuccessorMBB(const BasicBlock *BB,
10647 MachineBasicBlock *ParentMBB,
10648 bool IsLikely,
10649 MachineBasicBlock *SuccMBB) {
10650 // If SuccBB has not been created yet, create it.
10651 if (!SuccMBB) {
10652 MachineFunction *MF = ParentMBB->getParent();
10653 MachineFunction::iterator BBI(ParentMBB);
10654 SuccMBB = MF->CreateMachineBasicBlock(BB);
10655 MF->insert(++BBI, SuccMBB);
10656 }
10657 // Add it as a successor of ParentMBB.
10658 ParentMBB->addSuccessor(
10659 SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
10660 return SuccMBB;
10661}
10662
10663MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
10664 MachineFunction::iterator I(MBB);
10665 if (++I == FuncInfo.MF->end())
10666 return nullptr;
10667 return &*I;
10668}
10669
10670/// During lowering new call nodes can be created (such as memset, etc.).
10671/// Those will become new roots of the current DAG, but complications arise
10672/// when they are tail calls. In such cases, the call lowering will update
10673/// the root, but the builder still needs to know that a tail call has been
10674/// lowered in order to avoid generating an additional return.
10675void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
10676 // If the node is null, we do have a tail call.
10677 if (MaybeTC.getNode() != nullptr)
10678 DAG.setRoot(MaybeTC);
10679 else
10680 HasTailCall = true;
10681}
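// For illustration, the typical caller pattern elsewhere in this file is:
//   SDValue MC = DAG.getMemcpy(...); // may be lowered as a tail libcall
//   updateDAGForMaybeTailCall(MC);   // null node => HasTailCall = true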
10682
10683void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
10684 MachineBasicBlock *SwitchMBB,
10685 MachineBasicBlock *DefaultMBB) {
10686 MachineFunction *CurMF = FuncInfo.MF;
10687 MachineBasicBlock *NextMBB = nullptr;
10688 MachineFunction::iterator BBI(W.MBB);
10689 if (++BBI != FuncInfo.MF->end())
10690 NextMBB = &*BBI;
10691
10692 unsigned Size = W.LastCluster - W.FirstCluster + 1;
10693
10694 BranchProbabilityInfo *BPI = FuncInfo.BPI;
10695
10696 if (Size == 2 && W.MBB == SwitchMBB) {
10697 // If any two of the cases have the same destination, and if one value
10698 // is the same as the other, but has one bit unset that the other has set,
10699 // use bit manipulation to do two compares at once. For example:
10700 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
10701 // TODO: This could be extended to merge any 2 cases in switches with 3
10702 // cases.
10703 // TODO: Handle cases where W.CaseBB != SwitchBB.
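// Worked derivation of the example above: Small = 4 (0b100) and Big = 6
// (0b110) give CommonBit = 4 ^ 6 = 0b010, a power of two, and
// BigValue | SmallValue = 6, so the two equality tests collapse into the
// single compare (X | 0b010) == 0b110, i.e. "(X|2) == 6".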
10704 CaseCluster &Small = *W.FirstCluster;
10705 CaseCluster &Big = *W.LastCluster;
10706
10707 if (Small.Low == Small.High && Big.Low == Big.High &&
10708 Small.MBB == Big.MBB) {
10709 const APInt &SmallValue = Small.Low->getValue();
10710 const APInt &BigValue = Big.Low->getValue();
10711
10712 // Check that there is only one bit different.
10713 APInt CommonBit = BigValue ^ SmallValue;
10714 if (CommonBit.isPowerOf2()) {
10715 SDValue CondLHS = getValue(Cond);
10716 EVT VT = CondLHS.getValueType();
10717 SDLoc DL = getCurSDLoc();
10718
10719 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
10720 DAG.getConstant(CommonBit, DL, VT));
10721 SDValue Cond = DAG.getSetCC(
10722 DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
10723 ISD::SETEQ);
10724
10725 // Update successor info.
10726 // Both Small and Big will jump to Small.BB, so we sum up the
10727 // probabilities.
10728 addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
10729 if (BPI)
10730 addSuccessorWithProb(
10731 SwitchMBB, DefaultMBB,
10732 // The default destination is the first successor in IR.
10733 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
10734 else
10735 addSuccessorWithProb(SwitchMBB, DefaultMBB);
10736
10737 // Insert the true branch.
10738 SDValue BrCond =
10739 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
10740 DAG.getBasicBlock(Small.MBB));
10741 // Insert the false branch.
10742 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
10743 DAG.getBasicBlock(DefaultMBB));
10744
10745 DAG.setRoot(BrCond);
10746 return;
10747 }
10748 }
10749 }
10750
10751 if (TM.getOptLevel() != CodeGenOpt::None) {
10752 // Here, we order cases by probability so the most likely case will be
10753 // checked first. However, two clusters can have the same probability in
10754 // which case their relative ordering is non-deterministic. So we use Low
10755 // as a tie-breaker as clusters are guaranteed to never overlap.
10756 llvm::sort(W.FirstCluster, W.LastCluster + 1,
10757 [](const CaseCluster &a, const CaseCluster &b) {
10758 return a.Prob != b.Prob ?
10759 a.Prob > b.Prob :
10760 a.Low->getValue().slt(b.Low->getValue());
10761 });
10762
10763 // Rearrange the case blocks so that the last one falls through if possible
10764 // without changing the order of probabilities.
10765 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
10766 --I;
10767 if (I->Prob > W.LastCluster->Prob)
10768 break;
10769 if (I->Kind == CC_Range && I->MBB == NextMBB) {
10770 std::swap(*I, *W.LastCluster);
10771 break;
10772 }
10773 }
10774 }
10775
10776 // Compute total probability.
10777 BranchProbability DefaultProb = W.DefaultProb;
10778 BranchProbability UnhandledProbs = DefaultProb;
10779 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
10780 UnhandledProbs += I->Prob;
10781
10782 MachineBasicBlock *CurMBB = W.MBB;
10783 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
10784 bool FallthroughUnreachable = false;
10785 MachineBasicBlock *Fallthrough;
10786 if (I == W.LastCluster) {
10787 // For the last cluster, fall through to the default destination.
10788 Fallthrough = DefaultMBB;
10789 FallthroughUnreachable = isa<UnreachableInst>(
10790 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
10791 } else {
10792 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
10793 CurMF->insert(BBI, Fallthrough);
10794 // Put Cond in a virtual register to make it available from the new blocks.
10795 ExportFromCurrentBlock(Cond);
10796 }
10797 UnhandledProbs -= I->Prob;
10798
10799 switch (I->Kind) {
10800 case CC_JumpTable: {
10801 // FIXME: Optimize away range check based on pivot comparisons.
10802 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
10803 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
10804
10805 // The jump block hasn't been inserted yet; insert it here.
10806 MachineBasicBlock *JumpMBB = JT->MBB;
10807 CurMF->insert(BBI, JumpMBB);
10808
10809 auto JumpProb = I->Prob;
10810 auto FallthroughProb = UnhandledProbs;
10811
10812 // If the default statement is a target of the jump table, we evenly
10813 // distribute the default probability to successors of CurMBB. Also
10814 // update the probability on the edge from JumpMBB to Fallthrough.
10815 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
10816 SE = JumpMBB->succ_end();
10817 SI != SE; ++SI) {
10818 if (*SI == DefaultMBB) {
10819 JumpProb += DefaultProb / 2;
10820 FallthroughProb -= DefaultProb / 2;
10821 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
10822 JumpMBB->normalizeSuccProbs();
10823 break;
10824 }
10825 }
10826
10827 if (FallthroughUnreachable) {
10828 // Skip the range check if the fallthrough block is unreachable.
10829 JTH->OmitRangeCheck = true;
10830 }
10831
10832 if (!JTH->OmitRangeCheck)
10833 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
10834 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
10835 CurMBB->normalizeSuccProbs();
10836
10837 // The jump table header will be inserted in our current block, do the
10838 // range check, and fall through to our fallthrough block.
10839 JTH->HeaderBB = CurMBB;
10840 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
10841
10842 // If we're in the right place, emit the jump table header right now.
10843 if (CurMBB == SwitchMBB) {
10844 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
10845 JTH->Emitted = true;
10846 }
10847 break;
10848 }
10849 case CC_BitTests: {
10850 // FIXME: Optimize away range check based on pivot comparisons.
10851 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
10852
10853 // The bit test blocks haven't been inserted yet; insert them here.
10854 for (BitTestCase &BTC : BTB->Cases)
10855 CurMF->insert(BBI, BTC.ThisBB);
10856
10857 // Fill in fields of the BitTestBlock.
10858 BTB->Parent = CurMBB;
10859 BTB->Default = Fallthrough;
10860
10861 BTB->DefaultProb = UnhandledProbs;
10862 // If the cases in bit test don't form a contiguous range, we evenly
10863 // distribute the probability on the edge to Fallthrough to two
10864 // successors of CurMBB.
10865 if (!BTB->ContiguousRange) {
10866 BTB->Prob += DefaultProb / 2;
10867 BTB->DefaultProb -= DefaultProb / 2;
10868 }
10869
10870 if (FallthroughUnreachable) {
10871 // Skip the range check if the fallthrough block is unreachable.
10872 BTB->OmitRangeCheck = true;
10873 }
10874
10875 // If we're in the right place, emit the bit test header right now.
10876 if (CurMBB == SwitchMBB) {
10877 visitBitTestHeader(*BTB, SwitchMBB);
10878 BTB->Emitted = true;
10879 }
10880 break;
10881 }
10882 case CC_Range: {
10883 const Value *RHS, *LHS, *MHS;
10884 ISD::CondCode CC;
10885 if (I->Low == I->High) {
10886 // Check Cond == I->Low.
10887 CC = ISD::SETEQ;
10888 LHS = Cond;
10889 RHS = I->Low;
10890 MHS = nullptr;
10891 } else {
10892 // Check I->Low <= Cond <= I->High.
10893 CC = ISD::SETLE;
10894 LHS = I->Low;
10895 MHS = Cond;
10896 RHS = I->High;
10897 }
10898
10899 // If Fallthrough is unreachable, fold away the comparison.
10900 if (FallthroughUnreachable)
10901 CC = ISD::SETTRUE;
10902
10903 // The false probability is the sum of all unhandled cases.
10904 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
10905 getCurSDLoc(), I->Prob, UnhandledProbs);
10906
10907 if (CurMBB == SwitchMBB)
10908 visitSwitchCase(CB, SwitchMBB);
10909 else
10910 SL->SwitchCases.push_back(CB);
10911
10912 break;
10913 }
10914 }
10915 CurMBB = Fallthrough;
10916 }
10917}
10918
10919unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
10920 CaseClusterIt First,
10921 CaseClusterIt Last) {
10922 return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
10923 if (X.Prob != CC.Prob)
10924 return X.Prob > CC.Prob;
10925
10926 // Ties are broken by comparing the case value.
10927 return X.Low->getValue().slt(CC.Low->getValue());
10928 });
10929}
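// Worked example with hypothetical probabilities: for clusters of Prob
// {0.4, 0.3, 0.3} and CC being the 0.3 cluster with the largest Low value,
// the count_if above returns 2: the 0.4 cluster outranks CC on probability
// and the other 0.3 cluster wins the Low-value tie-break. splitWorkItem
// compares these ranks to decide whether moving a cluster across the pivot
// demotes it.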
10930
10931void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
10932 const SwitchWorkListItem &W,
10933 Value *Cond,
10934 MachineBasicBlock *SwitchMBB) {
10935 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
10936 "Clusters not sorted?");
10937
10938 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
10939
10940 // Balance the tree based on branch probabilities to create a near-optimal (in
10941 // terms of search time given key frequency) binary search tree. See e.g. Kurt
10942 // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
10943 CaseClusterIt LastLeft = W.FirstCluster;
10944 CaseClusterIt FirstRight = W.LastCluster;
10945 auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
10946 auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
10947
10948 // Move LastLeft and FirstRight towards each other from opposite directions to
10949 // find a partitioning of the clusters which balances the probability on both
10950 // sides. If LeftProb and RightProb are equal, alternate which side is
10951 // taken to ensure 0-probability nodes are distributed evenly.
10952 unsigned I = 0;
10953 while (LastLeft + 1 < FirstRight) {
10954 if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
10955 LeftProb += (++LastLeft)->Prob;
10956 else
10957 RightProb += (--FirstRight)->Prob;
10958 I++;
10959 }
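  // Worked example with hypothetical probabilities: for four clusters of
  // Prob {0.1, 0.2, 0.3, 0.4} and W.DefaultProb = 0, the loop repeatedly
  // grows the lighter side, ending with LastLeft covering {0.1, 0.2, 0.3}
  // (LeftProb = 0.6) and FirstRight at the lone {0.4} cluster.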
10960
10961 while (true) {
10962 // Our binary search tree differs from a typical BST in that ours can have up
10963 // to three values in each leaf. The pivot selection above doesn't take that
10964 // into account, which means the tree might require more nodes and be less
10965 // efficient. We compensate for this here.
10966
10967 unsigned NumLeft = LastLeft - W.FirstCluster + 1;
10968 unsigned NumRight = W.LastCluster - FirstRight + 1;
10969
10970 if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
10971 // If one side has less than 3 clusters, and the other has more than 3,
10972 // consider taking a cluster from the other side.
10973
10974 if (NumLeft < NumRight) {
10975 // Consider moving the first cluster on the right to the left side.
10976 CaseCluster &CC = *FirstRight;
10977 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10978 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10979 if (LeftSideRank <= RightSideRank) {
10980 // Moving the cluster to the left does not demote it.
10981 ++LastLeft;
10982 ++FirstRight;
10983 continue;
10984 }
10985 } else {
10986 assert(NumRight < NumLeft);
10987 // Consider moving the last element on the left to the right side.
10988 CaseCluster &CC = *LastLeft;
10989 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10990 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10991 if (RightSideRank <= LeftSideRank) {
10992 // Moving the cluster to the right does not demote it.
10993 --LastLeft;
10994 --FirstRight;
10995 continue;
10996 }
10997 }
10998 }
10999 break;
11000 }
11001
11002 assert(LastLeft + 1 == FirstRight);
11003 assert(LastLeft >= W.FirstCluster);
11004 assert(FirstRight <= W.LastCluster);
11005
11006 // Use the first element on the right as pivot since we will make less-than
11007 // comparisons against it.
11008 CaseClusterIt PivotCluster = FirstRight;
11009 assert(PivotCluster > W.FirstCluster);
11010 assert(PivotCluster <= W.LastCluster);
11011
11012 CaseClusterIt FirstLeft = W.FirstCluster;
11013 CaseClusterIt LastRight = W.LastCluster;
11014
11015 const ConstantInt *Pivot = PivotCluster->Low;
11016
11017 // New blocks will be inserted immediately after the current one.
11018 MachineFunction::iterator BBI(W.MBB);
11019 ++BBI;
11020
11021 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
11022 // we can branch to its destination directly if it's squeezed exactly in
11023 // between the known lower bound and Pivot - 1.
11024 MachineBasicBlock *LeftMBB;
11025 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
11026 FirstLeft->Low == W.GE &&
11027 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
11028 LeftMBB = FirstLeft->MBB;
11029 } else {
11030 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11031 FuncInfo.MF->insert(BBI, LeftMBB);
11032 WorkList.push_back(
11033 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
11034 // Put Cond in a virtual register to make it available from the new blocks.
11035 ExportFromCurrentBlock(Cond);
11036 }
11037
11038 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
11039 // single cluster, RHS.Low == Pivot, and we can branch to its destination
11040 // directly if RHS.High equals the current upper bound.
11041 MachineBasicBlock *RightMBB;
11042 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
11043 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
11044 RightMBB = FirstRight->MBB;
11045 } else {
11046 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11047 FuncInfo.MF->insert(BBI, RightMBB);
11048 WorkList.push_back(
11049 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
11050 // Put Cond in a virtual register to make it available from the new blocks.
11051 ExportFromCurrentBlock(Cond);
11052 }
11053
11054 // Create the CaseBlock record that will be used to lower the branch.
11055 CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
11056 getCurSDLoc(), LeftProb, RightProb);
11057
11058 if (W.MBB == SwitchMBB)
11059 visitSwitchCase(CB, SwitchMBB);
11060 else
11061 SL->SwitchCases.push_back(CB);
11062}
11063
11064 // Scale CaseProb after peeling a case with the probability of PeeledCaseProb
11065 // from the switch statement.
11066static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
11067 BranchProbability PeeledCaseProb) {
11068 if (PeeledCaseProb == BranchProbability::getOne())
11069 return BranchProbability::getZero();
11070 BranchProbability SwitchProb = PeeledCaseProb.getCompl();
11071
11072 uint32_t Numerator = CaseProb.getNumerator();
11073 uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
11074 return BranchProbability(Numerator, std::max(Numerator, Denominator));
11075}
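// Worked example: this computes CaseProb / (1 - PeeledCaseProb). With
// PeeledCaseProb = 0.8 (so SwitchProb = 0.2), a remaining cluster of
// probability 0.1 rescales to 0.1 / 0.2 = 0.5, its conditional probability
// given that the peeled case was not taken. The std::max guard clamps the
// result to at most 1 when rounding in scale() would overshoot.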
11076
11077// Try to peel the top probability case if it exceeds the threshold.
11078// Return current MachineBasicBlock for the switch statement if the peeling
11079// does not occur.
11080// If the peeling is performed, return the newly created MachineBasicBlock
11081// for the peeled switch statement. Also update Clusters to remove the peeled
11082// case. PeeledCaseProb is the BranchProbability for the peeled case.
11083MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
11084 const SwitchInst &SI, CaseClusterVector &Clusters,
11085 BranchProbability &PeeledCaseProb) {
11086 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11087 // Don't perform if there is only one cluster or optimizing for size.
11088 if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
11089 TM.getOptLevel() == CodeGenOpt::None ||
11090 SwitchMBB->getParent()->getFunction().hasMinSize())
11091 return SwitchMBB;
11092
11093 BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
11094 unsigned PeeledCaseIndex = 0;
11095 bool SwitchPeeled = false;
11096 for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
11097 CaseCluster &CC = Clusters[Index];
11098 if (CC.Prob < TopCaseProb)
11099 continue;
11100 TopCaseProb = CC.Prob;
11101 PeeledCaseIndex = Index;
11102 SwitchPeeled = true;
11103 }
11104 if (!SwitchPeeled)
11105 return SwitchMBB;
11106
11107 LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
11108 << TopCaseProb << "\n");
11109
11110 // Record the MBB for the peeled switch statement.
11111 MachineFunction::iterator BBI(SwitchMBB);
11112 ++BBI;
11113 MachineBasicBlock *PeeledSwitchMBB =
11114 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
11115 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
11116
11117 ExportFromCurrentBlock(SI.getCondition());
11118 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
11119 SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
11120 nullptr, nullptr, TopCaseProb.getCompl()};
11121 lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
11122
11123 Clusters.erase(PeeledCaseIt);
11124 for (CaseCluster &CC : Clusters) {
11125 LLVM_DEBUG(
11126 dbgs() << "Scale the probability for one cluster, before scaling: "
11127 << CC.Prob << "\n");
11128 CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
11129 LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
11130 }
11131 PeeledCaseProb = TopCaseProb;
11132 return PeeledSwitchMBB;
11133}
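// For illustration, with hypothetical probabilities: given clusters
// {A: 0.7, B: 0.2, C: 0.1} and a peel threshold at or below 70%, A is
// peeled into its own compare emitted in SwitchMBB, while B and C are later
// lowered out of PeeledSwitchMBB with probabilities rescaled by
// scaleCaseProbality to 0.2/0.3 and 0.1/0.3.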
11134
11135void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
11136 // Extract cases from the switch.
11137 BranchProbabilityInfo *BPI = FuncInfo.BPI;
11138 CaseClusterVector Clusters;
11139 Clusters.reserve(SI.getNumCases());
11140 for (auto I : SI.cases()) {
11141 MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
11142 const ConstantInt *CaseVal = I.getCaseValue();
11143 BranchProbability Prob =
11144 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
11145 : BranchProbability(1, SI.getNumCases() + 1);
11146 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
11147 }
11148
11149 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
11150
11151 // Cluster adjacent cases with the same destination. We do this at all
11152 // optimization levels because it's cheap to do and will make codegen faster
11153 // if there are many clusters.
11154 sortAndRangeify(Clusters);
11155
11156 // The branch probability of the peeled case.
11157 BranchProbability PeeledCaseProb = BranchProbability::getZero();
11158 MachineBasicBlock *PeeledSwitchMBB =
11159 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
11160
11161 // If there is only the default destination, jump there directly.
11162 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11163 if (Clusters.empty()) {
11164 assert(PeeledSwitchMBB == SwitchMBB);
11165 SwitchMBB->addSuccessor(DefaultMBB);
11166 if (DefaultMBB != NextBlock(SwitchMBB)) {
11167 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
11168 getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
11169 }
11170 return;
11171 }
11172
11173 SL->findJumpTables(Clusters, &SI, DefaultMBB, DAG.getPSI(), DAG.getBFI());
11174 SL->findBitTestClusters(Clusters, &SI);
11175
11176 LLVM_DEBUG({
11177 dbgs() << "Case clusters: ";
11178 for (const CaseCluster &C : Clusters) {
11179 if (C.Kind == CC_JumpTable)
11180 dbgs() << "JT:";
11181 if (C.Kind == CC_BitTests)
11182 dbgs() << "BT:";
11183
11184 C.Low->getValue().print(dbgs(), true);
11185 if (C.Low != C.High) {
11186 dbgs() << '-';
11187 C.High->getValue().print(dbgs(), true);
11188 }
11189 dbgs() << ' ';
11190 }
11191 dbgs() << '\n';
11192 });
11193
11194 assert(!Clusters.empty());
11195 SwitchWorkList WorkList;
11196 CaseClusterIt First = Clusters.begin();
11197 CaseClusterIt Last = Clusters.end() - 1;
11198 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
11199 // Scale the branch probability for DefaultMBB if the peel occurs and
11200 // DefaultMBB is not replaced.
11201 if (PeeledCaseProb != BranchProbability::getZero() &&
11202 DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
11203 DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
11204 WorkList.push_back(
11205 {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
11206
11207 while (!WorkList.empty()) {
11208 SwitchWorkListItem W = WorkList.pop_back_val();
11209 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
11210
11211 if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
11212 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
11213 // For optimized builds, lower large range as a balanced binary tree.
11214 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
11215 continue;
11216 }
11217
11218 lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
11219 }
11220}
11221
11222void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
11223 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11224 auto DL = getCurSDLoc();
11225 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11226 setValue(&I, DAG.getStepVector(DL, ResultVT));
11227}
11228
11229void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
11230 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11231 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11232
11233 SDLoc DL = getCurSDLoc();
11234 SDValue V = getValue(I.getOperand(0));
11235 assert(VT == V.getValueType() && "Malformed vector.reverse!");
11236
11237 if (VT.isScalableVector()) {
11238 setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
11239 return;
11240 }
11241
11242 // Use VECTOR_SHUFFLE for the fixed-length vector
11243 // to maintain existing behavior.
11244 SmallVector<int, 8> Mask;
11245 unsigned NumElts = VT.getVectorMinNumElements();
11246 for (unsigned i = 0; i != NumElts; ++i)
11247 Mask.push_back(NumElts - 1 - i);
11248
11249 setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
11250}
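// For illustration: a fixed-length <4 x i32> input yields
// Mask = [3, 2, 1, 0] from the loop above, expressing the reversal as a
// VECTOR_SHUFFLE of V against undef.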
11251
11252void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
11253 SmallVector<EVT, 4> ValueVTs;
11254 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
11255 ValueVTs);
11256 unsigned NumValues = ValueVTs.size();
11257 if (NumValues == 0) return;
11258
11259 SmallVector<SDValue, 4> Values(NumValues);
11260 SDValue Op = getValue(I.getOperand(0));
11261
11262 for (unsigned i = 0; i != NumValues; ++i)
11263 Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
11264 SDValue(Op.getNode(), Op.getResNo() + i));
11265
11266 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
11267 DAG.getVTList(ValueVTs), Values));
11268}
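// For illustration, with a hypothetical aggregate type: freezing a value of
// IR type {i32, float} gives ValueVTs = {i32, f32}; each component is
// wrapped in its own ISD::FREEZE node and MERGE_VALUES reassembles the
// two results.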
11269
11270void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
11271 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11272 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11273
11274 SDLoc DL = getCurSDLoc();
11275 SDValue V1 = getValue(I.getOperand(0));
11276 SDValue V2 = getValue(I.getOperand(1));
11277 int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
11278
11279 // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
11280 if (VT.isScalableVector()) {
11281 MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
11282 setValue(&I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
11283 DAG.getConstant(Imm, DL, IdxVT)));
11284 return;
11285 }
11286
11287 unsigned NumElts = VT.getVectorNumElements();
11288
11289 if ((-Imm > NumElts) || (Imm >= NumElts)) {
11290 // Result is undefined if immediate is out-of-bounds.
11291 setValue(&I, DAG.getUNDEF(VT));
11292 return;
11293 }
11294
11295 uint64_t Idx = (NumElts + Imm) % NumElts;
11296
11297 // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
11298 SmallVector<int, 8> Mask;
11299 for (unsigned i = 0; i < NumElts; ++i)
11300 Mask.push_back(Idx + i);
11301 setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
11302}
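// Worked example for the fixed-length path above: with <4 x i32> operands
// and Imm = -1, Idx = (4 + -1) % 4 = 3 and Mask = [3, 4, 5, 6], selecting
// the last element of V1 followed by the first three elements of V2, i.e.
// the trailing-splice semantics of the vector.splice intrinsic.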