Bug Summary

File: include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1106, column 10
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SelectionDAGBuilder.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/CodeGen/SelectionDAG -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp -faddrsig

/build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

1//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements routines for translating from LLVM IR into SelectionDAG IR.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SelectionDAGBuilder.h"
15#include "SDNodeDbgValue.h"
16#include "llvm/ADT/APFloat.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/BitVector.h"
20#include "llvm/ADT/DenseMap.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/Optional.h"
23#include "llvm/ADT/STLExtras.h"
24#include "llvm/ADT/SmallPtrSet.h"
25#include "llvm/ADT/SmallSet.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/ADT/StringRef.h"
28#include "llvm/ADT/Triple.h"
29#include "llvm/ADT/Twine.h"
30#include "llvm/Analysis/AliasAnalysis.h"
31#include "llvm/Analysis/BranchProbabilityInfo.h"
32#include "llvm/Analysis/ConstantFolding.h"
33#include "llvm/Analysis/EHPersonalities.h"
34#include "llvm/Analysis/Loads.h"
35#include "llvm/Analysis/MemoryLocation.h"
36#include "llvm/Analysis/TargetLibraryInfo.h"
37#include "llvm/Analysis/ValueTracking.h"
38#include "llvm/Analysis/VectorUtils.h"
39#include "llvm/CodeGen/Analysis.h"
40#include "llvm/CodeGen/FunctionLoweringInfo.h"
41#include "llvm/CodeGen/GCMetadata.h"
42#include "llvm/CodeGen/ISDOpcodes.h"
43#include "llvm/CodeGen/MachineBasicBlock.h"
44#include "llvm/CodeGen/MachineFrameInfo.h"
45#include "llvm/CodeGen/MachineFunction.h"
46#include "llvm/CodeGen/MachineInstr.h"
47#include "llvm/CodeGen/MachineInstrBuilder.h"
48#include "llvm/CodeGen/MachineJumpTableInfo.h"
49#include "llvm/CodeGen/MachineMemOperand.h"
50#include "llvm/CodeGen/MachineModuleInfo.h"
51#include "llvm/CodeGen/MachineOperand.h"
52#include "llvm/CodeGen/MachineRegisterInfo.h"
53#include "llvm/CodeGen/RuntimeLibcalls.h"
54#include "llvm/CodeGen/SelectionDAG.h"
55#include "llvm/CodeGen/SelectionDAGNodes.h"
56#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
57#include "llvm/CodeGen/StackMaps.h"
58#include "llvm/CodeGen/TargetFrameLowering.h"
59#include "llvm/CodeGen/TargetInstrInfo.h"
60#include "llvm/CodeGen/TargetLowering.h"
61#include "llvm/CodeGen/TargetOpcodes.h"
62#include "llvm/CodeGen/TargetRegisterInfo.h"
63#include "llvm/CodeGen/TargetSubtargetInfo.h"
64#include "llvm/CodeGen/ValueTypes.h"
65#include "llvm/CodeGen/WinEHFuncInfo.h"
66#include "llvm/IR/Argument.h"
67#include "llvm/IR/Attributes.h"
68#include "llvm/IR/BasicBlock.h"
69#include "llvm/IR/CFG.h"
70#include "llvm/IR/CallSite.h"
71#include "llvm/IR/CallingConv.h"
72#include "llvm/IR/Constant.h"
73#include "llvm/IR/ConstantRange.h"
74#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfoMetadata.h"
77#include "llvm/IR/DebugLoc.h"
78#include "llvm/IR/DerivedTypes.h"
79#include "llvm/IR/Function.h"
80#include "llvm/IR/GetElementPtrTypeIterator.h"
81#include "llvm/IR/InlineAsm.h"
82#include "llvm/IR/InstrTypes.h"
83#include "llvm/IR/Instruction.h"
84#include "llvm/IR/Instructions.h"
85#include "llvm/IR/IntrinsicInst.h"
86#include "llvm/IR/Intrinsics.h"
87#include "llvm/IR/LLVMContext.h"
88#include "llvm/IR/Metadata.h"
89#include "llvm/IR/Module.h"
90#include "llvm/IR/Operator.h"
91#include "llvm/IR/PatternMatch.h"
92#include "llvm/IR/Statepoint.h"
93#include "llvm/IR/Type.h"
94#include "llvm/IR/User.h"
95#include "llvm/IR/Value.h"
96#include "llvm/MC/MCContext.h"
97#include "llvm/MC/MCSymbol.h"
98#include "llvm/Support/AtomicOrdering.h"
99#include "llvm/Support/BranchProbability.h"
100#include "llvm/Support/Casting.h"
101#include "llvm/Support/CodeGen.h"
102#include "llvm/Support/CommandLine.h"
103#include "llvm/Support/Compiler.h"
104#include "llvm/Support/Debug.h"
105#include "llvm/Support/ErrorHandling.h"
106#include "llvm/Support/MachineValueType.h"
107#include "llvm/Support/MathExtras.h"
108#include "llvm/Support/raw_ostream.h"
109#include "llvm/Target/TargetIntrinsicInfo.h"
110#include "llvm/Target/TargetMachine.h"
111#include "llvm/Target/TargetOptions.h"
112#include <algorithm>
113#include <cassert>
114#include <cstddef>
115#include <cstdint>
116#include <cstring>
117#include <iterator>
118#include <limits>
119#include <numeric>
120#include <tuple>
121#include <utility>
122#include <vector>
123
124using namespace llvm;
125using namespace PatternMatch;
126
127#define DEBUG_TYPE "isel"
128
129/// LimitFloatPrecision - Generate low-precision inline sequences for
130/// some float libcalls (6, 8 or 12 bits).
131static unsigned LimitFloatPrecision;
132
133static cl::opt<unsigned, true>
134 LimitFPPrecision("limit-float-precision",
135 cl::desc("Generate low-precision inline sequences "
136 "for some float libcalls"),
137 cl::location(LimitFloatPrecision), cl::Hidden,
138 cl::init(0));
139
140static cl::opt<unsigned> SwitchPeelThreshold(
141 "switch-peel-threshold", cl::Hidden, cl::init(66),
142 cl::desc("Set the case probability threshold for peeling the case from a "
143 "switch statement. A value greater than 100 will void this "
144 "optimization"));
145
146// Limit the width of DAG chains. This is important in general to prevent
147// DAG-based analysis from blowing up. For example, alias analysis and
148// load clustering may not complete in reasonable time. It is difficult to
149// recognize and avoid this situation within each individual analysis, and
150// future analyses are likely to have the same behavior. Limiting DAG width is
151// the safe approach and will be especially important with global DAGs.
152//
153// MaxParallelChains default is arbitrarily high to avoid affecting
154// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
155// sequence over this should have been converted to llvm.memcpy by the
156// frontend. It is easy to induce this behavior with .ll code such as:
157// %buffer = alloca [4096 x i8]
158// %data = load [4096 x i8]* %argPtr
159// store [4096 x i8] %data, [4096 x i8]* %buffer
160static const unsigned MaxParallelChains = 64;
161
162// Return the calling convention if the Value passed requires ABI mangling as it
163// is a parameter to a function or a return value from a function which is not
164// an intrinsic.
165static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
166 if (auto *R = dyn_cast<ReturnInst>(V))
167 return R->getParent()->getParent()->getCallingConv();
168
169 if (auto *CI = dyn_cast<CallInst>(V)) {
170 const bool IsInlineAsm = CI->isInlineAsm();
171 const bool IsIndirectFunctionCall =
172 !IsInlineAsm && !CI->getCalledFunction();
173
174 // It is possible that the call instruction is an inline asm statement or an
175 // indirect function call in which case the return value of
176 // getCalledFunction() would be nullptr.
177 const bool IsInstrinsicCall =
178 !IsInlineAsm && !IsIndirectFunctionCall &&
179 CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;
180
181 if (!IsInlineAsm && !IsInstrinsicCall)
182 return CI->getCallingConv();
183 }
184
185 return None;
186}
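
As a rough illustration (not part of the analyzed file), the call-instruction cases above reduce to the following check; needsABIRegCopyCC is a hypothetical helper name, and the point is that getCalledFunction() is tested for null before it is dereferenced:

    // Sketch only: inline asm and direct intrinsic calls need no ABI
    // mangling; indirect calls and direct non-intrinsic calls do.
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Intrinsics.h"

    static bool needsABIRegCopyCC(const llvm::CallInst *CI) {
      if (CI->isInlineAsm())
        return false;
      const llvm::Function *Callee = CI->getCalledFunction();
      if (Callee && Callee->getIntrinsicID() != llvm::Intrinsic::not_intrinsic)
        return false; // direct call to an intrinsic
      return true;    // indirect call or direct non-intrinsic call
    }
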
187
188static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
189 const SDValue *Parts, unsigned NumParts,
190 MVT PartVT, EVT ValueVT, const Value *V,
191 Optional<CallingConv::ID> CC);
192
193/// getCopyFromParts - Create a value that contains the specified legal parts
194/// combined into the value they represent. If the parts combine to a type
195/// larger than ValueVT then AssertOp can be used to specify whether the extra
196/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
197/// (ISD::AssertSext).
198static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
199 const SDValue *Parts, unsigned NumParts,
200 MVT PartVT, EVT ValueVT, const Value *V,
201 Optional<CallingConv::ID> CC = None,
202 Optional<ISD::NodeType> AssertOp = None) {
203 if (ValueVT.isVector())
204 return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
205 CC);
206
207 assert(NumParts > 0 && "No parts to assemble!");
208 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
209 SDValue Val = Parts[0];
210
211 if (NumParts > 1) {
212 // Assemble the value from multiple parts.
213 if (ValueVT.isInteger()) {
214 unsigned PartBits = PartVT.getSizeInBits();
215 unsigned ValueBits = ValueVT.getSizeInBits();
216
217 // Assemble the power of 2 part.
218 unsigned RoundParts = NumParts & (NumParts - 1) ?
219 1 << Log2_32(NumParts) : NumParts;
220 unsigned RoundBits = PartBits * RoundParts;
221 EVT RoundVT = RoundBits == ValueBits ?
222 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
223 SDValue Lo, Hi;
224
225 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
226
227 if (RoundParts > 2) {
228 Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
229 PartVT, HalfVT, V);
230 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
231 RoundParts / 2, PartVT, HalfVT, V);
232 } else {
233 Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
234 Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
235 }
236
237 if (DAG.getDataLayout().isBigEndian())
238 std::swap(Lo, Hi);
239
240 Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
241
242 if (RoundParts < NumParts) {
243 // Assemble the trailing non-power-of-2 part.
244 unsigned OddParts = NumParts - RoundParts;
245 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
246 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
247 OddVT, V, CC);
248
249 // Combine the round and odd parts.
250 Lo = Val;
251 if (DAG.getDataLayout().isBigEndian())
252 std::swap(Lo, Hi);
253 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
254 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
255 Hi =
256 DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
257 DAG.getConstant(Lo.getValueSizeInBits(), DL,
258 TLI.getPointerTy(DAG.getDataLayout())));
259 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
260 Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
261 }
262 } else if (PartVT.isFloatingPoint()) {
263 // FP split into multiple FP parts (for ppcf128)
264 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
265 "Unexpected split");
266 SDValue Lo, Hi;
267 Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
268 Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
269 if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
270 std::swap(Lo, Hi);
271 Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
272 } else {
273 // FP split into integer parts (soft fp)
274 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
275 !PartVT.isVector() && "Unexpected split");
276 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
277 Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
278 }
279 }
280
281 // There is now one part, held in Val. Correct it to match ValueVT.
282 // PartEVT is the type of the register class that holds the value.
283 // ValueVT is the type of the inline asm operation.
284 EVT PartEVT = Val.getValueType();
285
286 if (PartEVT == ValueVT)
287 return Val;
288
289 if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
290 ValueVT.bitsLT(PartEVT)) {
291 // For an FP value in an integer part, we need to truncate to the right
292 // width first.
293 PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
294 Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
295 }
296
297 // Handle types that have the same size.
298 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
299 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
300
301 // Handle types with different sizes.
302 if (PartEVT.isInteger() && ValueVT.isInteger()) {
303 if (ValueVT.bitsLT(PartEVT)) {
304 // For a truncate, see if we have any information to
305 // indicate whether the truncated bits will always be
306 // zero or sign-extension.
307 if (AssertOp.hasValue())
308 Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
309 DAG.getValueType(ValueVT));
310 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
311 }
312 return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
313 }
314
315 if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
316 // FP_ROUND's are always exact here.
317 if (ValueVT.bitsLT(Val.getValueType()))
318 return DAG.getNode(
319 ISD::FP_ROUND, DL, ValueVT, Val,
320 DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
321
322 return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
323 }
324
325 llvm_unreachable("Unknown mismatch!");
326}
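
As a rough illustration (not from this file), the power-of-2 handling above first assembles the largest power-of-two run of parts and then ORs in the odd tail; roundDownToPow2Parts below is a hypothetical helper that restates just the rounding step:

    // Sketch only: NumParts & (NumParts - 1) is nonzero exactly when NumParts
    // is not a power of two, e.g. 5 parts -> 4 round parts plus 1 odd part.
    #include "llvm/Support/MathExtras.h"

    static unsigned roundDownToPow2Parts(unsigned NumParts) {
      return (NumParts & (NumParts - 1)) ? 1u << llvm::Log2_32(NumParts)
                                         : NumParts;
    }
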
327
328static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
329 const Twine &ErrMsg) {
330 const Instruction *I = dyn_cast_or_null<Instruction>(V);
331 if (!V)
332 return Ctx.emitError(ErrMsg);
333
334 const char *AsmError = ", possible invalid constraint for vector type";
335 if (const CallInst *CI = dyn_cast<CallInst>(I))
336 if (isa<InlineAsm>(CI->getCalledValue()))
337 return Ctx.emitError(I, ErrMsg + AsmError);
338
339 return Ctx.emitError(I, ErrMsg);
340}
341
342/// getCopyFromPartsVector - Create a value that contains the specified legal
343/// parts combined into the value they represent. If the parts combine to a
344/// type larger than ValueVT then AssertOp can be used to specify whether the
345/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
346/// ValueVT (ISD::AssertSext).
347static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
348 const SDValue *Parts, unsigned NumParts,
349 MVT PartVT, EVT ValueVT, const Value *V,
350 Optional<CallingConv::ID> CallConv) {
351 assert(ValueVT.isVector() && "Not a vector value");
352 assert(NumParts > 0 && "No parts to assemble!");
353 const bool IsABIRegCopy = CallConv.hasValue();
354
355 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
356 SDValue Val = Parts[0];
357
358 // Handle a multi-element vector.
359 if (NumParts > 1) {
360 EVT IntermediateVT;
361 MVT RegisterVT;
362 unsigned NumIntermediates;
363 unsigned NumRegs;
364
365 if (IsABIRegCopy) {
366 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
367 *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
368 NumIntermediates, RegisterVT);
369 } else {
370 NumRegs =
371 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
372 NumIntermediates, RegisterVT);
373 }
374
375 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
376 NumParts = NumRegs; // Silence a compiler warning.
377 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
378 assert(RegisterVT.getSizeInBits() ==
379 Parts[0].getSimpleValueType().getSizeInBits() &&
380 "Part type sizes don't match!");
381
382 // Assemble the parts into intermediate operands.
383 SmallVector<SDValue, 8> Ops(NumIntermediates);
384 if (NumIntermediates == NumParts) {
385 // If the register was not expanded, truncate or copy the value,
386 // as appropriate.
387 for (unsigned i = 0; i != NumParts; ++i)
388 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
389 PartVT, IntermediateVT, V);
390 } else if (NumParts > 0) {
391 // If the intermediate type was expanded, build the intermediate
392 // operands from the parts.
393 assert(NumParts % NumIntermediates == 0 &&
394 "Must expand into a divisible number of parts!");
395 unsigned Factor = NumParts / NumIntermediates;
396 for (unsigned i = 0; i != NumIntermediates; ++i)
397 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
398 PartVT, IntermediateVT, V);
399 }
400
401 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
402 // intermediate operands.
403 EVT BuiltVectorTy =
404 EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
405 (IntermediateVT.isVector()
406 ? IntermediateVT.getVectorNumElements() * NumParts
407 : NumIntermediates));
408 Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
409 : ISD::BUILD_VECTOR,
410 DL, BuiltVectorTy, Ops);
411 }
412
413 // There is now one part, held in Val. Correct it to match ValueVT.
414 EVT PartEVT = Val.getValueType();
415
416 if (PartEVT == ValueVT)
417 return Val;
418
419 if (PartEVT.isVector()) {
420 // If the element type of the source/dest vectors are the same, but the
421 // parts vector has more elements than the value vector, then we have a
422 // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
423 // elements we want.
424 if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
425 assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
426 "Cannot narrow, it would be a lossy transformation");
427 return DAG.getNode(
428 ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
429 DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
430 }
431
432 // Vector/Vector bitcast.
433 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
434 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
435
436 assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
437 "Cannot handle this kind of promotion");
438 // Promoted vector extract
439 return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
440
441 }
442
443 // Trivial bitcast if the types are the same size and the destination
444 // vector type is legal.
445 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
446 TLI.isTypeLegal(ValueVT))
447 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
448
449 if (ValueVT.getVectorNumElements() != 1) {
450 // Certain ABIs require that vectors are passed as integers. If the vectors
451 // are the same size, this is an obvious bitcast.
452 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
453 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
454 } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
455 // Bitcast Val back the original type and extract the corresponding
456 // vector we want.
457 unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
458 EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
459 ValueVT.getVectorElementType(), Elts);
460 Val = DAG.getBitcast(WiderVecType, Val);
461 return DAG.getNode(
462 ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
463 DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
464 }
465
466 diagnosePossiblyInvalidConstraint(
467 *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
468 return DAG.getUNDEF(ValueVT);
469 }
470
471 // Handle cases such as i8 -> <1 x i1>
472 EVT ValueSVT = ValueVT.getVectorElementType();
473 if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
474 Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
475 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
476
477 return DAG.getBuildVector(ValueVT, DL, Val);
478}
479
480static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
481 SDValue Val, SDValue *Parts, unsigned NumParts,
482 MVT PartVT, const Value *V,
483 Optional<CallingConv::ID> CallConv);
484
485/// getCopyToParts - Create a series of nodes that contain the specified value
486/// split into legal parts. If the parts contain more bits than Val, then, for
487/// integers, ExtendKind can be used to specify how to generate the extra bits.
488static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
489 SDValue *Parts, unsigned NumParts, MVT PartVT,
490 const Value *V,
491 Optional<CallingConv::ID> CallConv = None,
492 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
493 EVT ValueVT = Val.getValueType();
494
495 // Handle the vector case separately.
496 if (ValueVT.isVector())
497 return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
498 CallConv);
499
500 unsigned PartBits = PartVT.getSizeInBits();
501 unsigned OrigNumParts = NumParts;
502 assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
503 "Copying to an illegal type!");
504
505 if (NumParts == 0)
506 return;
507
508 assert(!ValueVT.isVector() && "Vector case handled elsewhere");
509 EVT PartEVT = PartVT;
510 if (PartEVT == ValueVT) {
511 assert(NumParts == 1 && "No-op copy with multiple parts!");
512 Parts[0] = Val;
513 return;
514 }
515
516 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
517 // If the parts cover more bits than the value has, promote the value.
518 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
519 assert(NumParts == 1 && "Do not know what to promote to!");
520 Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
521 } else {
522 if (ValueVT.isFloatingPoint()) {
523 // FP values need to be bitcast, then extended if they are being put
524 // into a larger container.
525 ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
526 Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
527 }
528 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
529 ValueVT.isInteger() &&
530 "Unknown mismatch!");
531 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
532 Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
533 if (PartVT == MVT::x86mmx)
534 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
535 }
536 } else if (PartBits == ValueVT.getSizeInBits()) {
537 // Different types of the same size.
538 assert(NumParts == 1 && PartEVT != ValueVT);
539 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
540 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
541 // If the parts cover fewer bits than the value has, truncate the value.
542 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
543 ValueVT.isInteger() &&
544 "Unknown mismatch!");
545 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
546 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
547 if (PartVT == MVT::x86mmx)
548 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
549 }
550
551 // The value may have changed - recompute ValueVT.
552 ValueVT = Val.getValueType();
553 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
554 "Failed to tile the value with PartVT!");
555
556 if (NumParts == 1) {
557 if (PartEVT != ValueVT) {
558 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
559 "scalar-to-vector conversion failed");
560 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
561 }
562
563 Parts[0] = Val;
564 return;
565 }
566
567 // Expand the value into multiple parts.
568 if (NumParts & (NumParts - 1)) {
569 // The number of parts is not a power of 2. Split off and copy the tail.
570 assert(PartVT.isInteger() && ValueVT.isInteger() &&
571 "Do not know what to expand to!");
572 unsigned RoundParts = 1 << Log2_32(NumParts);
573 unsigned RoundBits = RoundParts * PartBits;
574 unsigned OddParts = NumParts - RoundParts;
575 SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
576 DAG.getIntPtrConstant(RoundBits, DL));
577 getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
578 CallConv);
579
580 if (DAG.getDataLayout().isBigEndian())
581 // The odd parts were reversed by getCopyToParts - unreverse them.
582 std::reverse(Parts + RoundParts, Parts + NumParts);
583
584 NumParts = RoundParts;
585 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
586 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
587 }
588
589 // The number of parts is a power of 2. Repeatedly bisect the value using
590 // EXTRACT_ELEMENT.
591 Parts[0] = DAG.getNode(ISD::BITCAST, DL,
592 EVT::getIntegerVT(*DAG.getContext(),
593 ValueVT.getSizeInBits()),
594 Val);
595
596 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
597 for (unsigned i = 0; i < NumParts; i += StepSize) {
598 unsigned ThisBits = StepSize * PartBits / 2;
599 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
600 SDValue &Part0 = Parts[i];
601 SDValue &Part1 = Parts[i+StepSize/2];
602
603 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
604 ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
605 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
606 ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
607
608 if (ThisBits == PartBits && ThisVT != PartVT) {
609 Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
610 Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
611 }
612 }
613 }
614
615 if (DAG.getDataLayout().isBigEndian())
616 std::reverse(Parts, Parts + OrigNumParts);
617}
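
As a rough illustration (not from this file), the EXTRACT_ELEMENT bisection above fills Parts in low-to-high order by repeatedly halving; splitIntoFourParts is a hypothetical stand-in that applies the same index pattern to a plain 64-bit value split into four 16-bit parts:

    // Sketch only: after the two halving rounds, Parts[k] holds bits
    // [16k, 16k+15] of Val (the DAG code reverses Parts for big-endian).
    #include <cstdint>
    #include <vector>

    static std::vector<uint64_t> splitIntoFourParts(uint64_t Val) {
      std::vector<uint64_t> Parts(4, 0);
      Parts[0] = Val;
      for (unsigned StepSize = 4; StepSize > 1; StepSize /= 2) {
        unsigned ThisBits = StepSize * 16 / 2;
        for (unsigned i = 0; i < 4; i += StepSize) {
          uint64_t Whole = Parts[i];
          Parts[i + StepSize / 2] = Whole >> ThisBits;   // high half
          Parts[i] = Whole & ((1ULL << ThisBits) - 1);   // low half
        }
      }
      return Parts;
    }
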
618
619static SDValue widenVectorToPartType(SelectionDAG &DAG,
620 SDValue Val, const SDLoc &DL, EVT PartVT) {
621 if (!PartVT.isVector())
622 return SDValue();
623
624 EVT ValueVT = Val.getValueType();
625 unsigned PartNumElts = PartVT.getVectorNumElements();
626 unsigned ValueNumElts = ValueVT.getVectorNumElements();
627 if (PartNumElts > ValueNumElts &&
628 PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
629 EVT ElementVT = PartVT.getVectorElementType();
630 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
631 // undef elements.
632 SmallVector<SDValue, 16> Ops;
633 DAG.ExtractVectorElements(Val, Ops);
634 SDValue EltUndef = DAG.getUNDEF(ElementVT);
635 for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
636 Ops.push_back(EltUndef);
637
638 // FIXME: Use CONCAT for 2x -> 4x.
639 return DAG.getBuildVector(PartVT, DL, Ops);
640 }
641
642 return SDValue();
643}
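
As a rough illustration (not from this file), widenVectorToPartType only pads, never narrows: a 2-lane value headed for a 4-lane part keeps its lanes and gains trailing don't-care lanes. widenLanes is a hypothetical stand-in on plain data, with a placeholder value in place of DAG undef:

    // Sketch only: widening in the style of <2 x float> -> <4 x float>.
    #include <vector>

    static std::vector<float> widenLanes(std::vector<float> Value,
                                         unsigned PartNumElts) {
      const float UndefLane = 0.0f;       // placeholder for an undef element
      if (Value.size() >= PartNumElts)
        return Value;                     // only widening is handled here
      Value.resize(PartNumElts, UndefLane);
      return Value;
    }
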
644
645/// getCopyToPartsVector - Create a series of nodes that contain the specified
646/// value split into legal parts.
647static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
648 SDValue Val, SDValue *Parts, unsigned NumParts,
649 MVT PartVT, const Value *V,
650 Optional<CallingConv::ID> CallConv) {
651 EVT ValueVT = Val.getValueType();
652 assert(ValueVT.isVector() && "Not a vector");
653 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
654 const bool IsABIRegCopy = CallConv.hasValue();
655
656 if (NumParts == 1) {
657 EVT PartEVT = PartVT;
658 if (PartEVT == ValueVT) {
659 // Nothing to do.
660 } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
661 // Bitconvert vector->vector case.
662 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
663 } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
664 Val = Widened;
665 } else if (PartVT.isVector() &&
666 PartEVT.getVectorElementType().bitsGE(
667 ValueVT.getVectorElementType()) &&
668 PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
669
670 // Promoted vector extract
671 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
672 } else {
673 if (ValueVT.getVectorNumElements() == 1) {
674 Val = DAG.getNode(
675 ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
676 DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
677 } else {
678 assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
679 "lossy conversion of vector to scalar type");
680 EVT IntermediateType =
681 EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
682 Val = DAG.getBitcast(IntermediateType, Val);
683 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
684 }
685 }
686
687 assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
688 Parts[0] = Val;
689 return;
690 }
691
692 // Handle a multi-element vector.
693 EVT IntermediateVT;
694 MVT RegisterVT;
695 unsigned NumIntermediates;
696 unsigned NumRegs;
697 if (IsABIRegCopy) {
698 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
699 *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
700 NumIntermediates, RegisterVT);
701 } else {
702 NumRegs =
703 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
704 NumIntermediates, RegisterVT);
705 }
706
707 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
708 NumParts = NumRegs; // Silence a compiler warning.
709 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
710
711 unsigned IntermediateNumElts = IntermediateVT.isVector() ?
712 IntermediateVT.getVectorNumElements() : 1;
713
714 // Convert the vector to the appropriate type if necessary.
715 unsigned DestVectorNoElts = NumIntermediates * IntermediateNumElts;
716
717 EVT BuiltVectorTy = EVT::getVectorVT(
718 *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
719 MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
720 if (ValueVT != BuiltVectorTy) {
721 if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
722 Val = Widened;
723
724 Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
725 }
726
727 // Split the vector into intermediate operands.
728 SmallVector<SDValue, 8> Ops(NumIntermediates);
729 for (unsigned i = 0; i != NumIntermediates; ++i) {
730 if (IntermediateVT.isVector()) {
731 Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
732 DAG.getConstant(i * IntermediateNumElts, DL, IdxVT));
733 } else {
734 Ops[i] = DAG.getNode(
735 ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
736 DAG.getConstant(i, DL, IdxVT));
737 }
738 }
739
740 // Split the intermediate operands into legal parts.
741 if (NumParts == NumIntermediates) {
742 // If the register was not expanded, promote or copy the value,
743 // as appropriate.
744 for (unsigned i = 0; i != NumParts; ++i)
745 getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
746 } else if (NumParts > 0) {
747 // If the intermediate type was expanded, split the value into
748 // legal parts.
749 assert(NumIntermediates != 0 && "division by zero");
750 assert(NumParts % NumIntermediates == 0 &&
751 "Must expand into a divisible number of parts!");
752 unsigned Factor = NumParts / NumIntermediates;
753 for (unsigned i = 0; i != NumIntermediates; ++i)
754 getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
755 CallConv);
756 }
757}
758
759RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
760 EVT valuevt, Optional<CallingConv::ID> CC)
761 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
762 RegCount(1, regs.size()), CallConv(CC) {}
763
764RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
765 const DataLayout &DL, unsigned Reg, Type *Ty,
766 Optional<CallingConv::ID> CC) {
767 ComputeValueVTs(TLI, DL, Ty, ValueVTs);
768
769 CallConv = CC;
770
771 for (EVT ValueVT : ValueVTs) {
772 unsigned NumRegs =
773 isABIMangled()
774 ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
775 : TLI.getNumRegisters(Context, ValueVT);
776 MVT RegisterVT =
777 isABIMangled()
778 ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
779 : TLI.getRegisterType(Context, ValueVT);
780 for (unsigned i = 0; i != NumRegs; ++i)
781 Regs.push_back(Reg + i);
782 RegVTs.push_back(RegisterVT);
783 RegCount.push_back(NumRegs);
784 Reg += NumRegs;
785 }
786}
787
788SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
789 FunctionLoweringInfo &FuncInfo,
790 const SDLoc &dl, SDValue &Chain,
791 SDValue *Flag, const Value *V) const {
792 // A Value with type {} or [0 x %t] needs no registers.
793 if (ValueVTs.empty())
794 return SDValue();
795
796 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
797
798 // Assemble the legal parts into the final values.
799 SmallVector<SDValue, 4> Values(ValueVTs.size());
800 SmallVector<SDValue, 8> Parts;
801 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
802 // Copy the legal parts from the registers.
803 EVT ValueVT = ValueVTs[Value];
804 unsigned NumRegs = RegCount[Value];
805 MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
806 *DAG.getContext(),
807 CallConv.getValue(), RegVTs[Value])
808 : RegVTs[Value];
809
810 Parts.resize(NumRegs);
811 for (unsigned i = 0; i != NumRegs; ++i) {
812 SDValue P;
813 if (!Flag) {
814 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
815 } else {
816 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
817 *Flag = P.getValue(2);
818 }
819
820 Chain = P.getValue(1);
821 Parts[i] = P;
822
823 // If the source register was virtual and if we know something about it,
824 // add an assert node.
825 if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
826 !RegisterVT.isInteger() || RegisterVT.isVector())
827 continue;
828
829 const FunctionLoweringInfo::LiveOutInfo *LOI =
830 FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
831 if (!LOI)
832 continue;
833
834 unsigned RegSize = RegisterVT.getSizeInBits();
835 unsigned NumSignBits = LOI->NumSignBits;
836 unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
837
838 if (NumZeroBits == RegSize) {
839 // The current value is a zero.
840 // Explicitly express that as it would be easier for
841 // optimizations to kick in.
842 Parts[i] = DAG.getConstant(0, dl, RegisterVT);
843 continue;
844 }
845
846 // FIXME: We capture more information than the dag can represent. For
847 // now, just use the tightest assertzext/assertsext possible.
848 bool isSExt;
849 EVT FromVT(MVT::Other);
850 if (NumZeroBits) {
851 FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
852 isSExt = false;
853 } else if (NumSignBits > 1) {
854 FromVT =
855 EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
856 isSExt = true;
857 } else {
858 continue;
859 }
860 // Add an assertion node.
861 assert(FromVT != MVT::Other);
862 Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
863 RegisterVT, P, DAG.getValueType(FromVT));
864 }
865
866 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
867 RegisterVT, ValueVT, V, CallConv);
868 Part += NumRegs;
869 Parts.clear();
870 }
871
872 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
873}
874
875void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
876 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
877 const Value *V,
878 ISD::NodeType PreferredExtendType) const {
879 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
880 ISD::NodeType ExtendKind = PreferredExtendType;
881
882 // Get the list of the value's legal parts.
883 unsigned NumRegs = Regs.size();
884 SmallVector<SDValue, 8> Parts(NumRegs);
885 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
886 unsigned NumParts = RegCount[Value];
887
888 MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
889 *DAG.getContext(),
890 CallConv.getValue(), RegVTs[Value])
891 : RegVTs[Value];
892
893 if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
894 ExtendKind = ISD::ZERO_EXTEND;
895
896 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
897 NumParts, RegisterVT, V, CallConv, ExtendKind);
898 Part += NumParts;
899 }
900
901 // Copy the parts into the registers.
902 SmallVector<SDValue, 8> Chains(NumRegs);
903 for (unsigned i = 0; i != NumRegs; ++i) {
904 SDValue Part;
905 if (!Flag) {
906 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
907 } else {
908 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
909 *Flag = Part.getValue(1);
910 }
911
912 Chains[i] = Part.getValue(0);
913 }
914
915 if (NumRegs == 1 || Flag)
916 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
917 // flagged to it. That is, the CopyToReg nodes and the user are considered
918 // a single scheduling unit. If we create a TokenFactor and return it as
919 // chain, then the TokenFactor is both a predecessor (operand) of the
920 // user as well as a successor (the TF operands are flagged to the user).
921 // c1, f1 = CopyToReg
922 // c2, f2 = CopyToReg
923 // c3 = TokenFactor c1, c2
924 // ...
925 // = op c3, ..., f2
926 Chain = Chains[NumRegs-1];
927 else
928 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
929}
930
931void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
932 unsigned MatchingIdx, const SDLoc &dl,
933 SelectionDAG &DAG,
934 std::vector<SDValue> &Ops) const {
935 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
936
937 unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
938 if (HasMatching)
939 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
940 else if (!Regs.empty() &&
941 TargetRegisterInfo::isVirtualRegister(Regs.front())) {
942 // Put the register class of the virtual registers in the flag word. That
943 // way, later passes can recompute register class constraints for inline
944 // assembly as well as normal instructions.
945 // Don't do this for tied operands that can use the regclass information
946 // from the def.
947 const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
948 const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
949 Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
950 }
951
952 SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
953 Ops.push_back(Res);
954
955 if (Code == InlineAsm::Kind_Clobber) {
956 // Clobbers should always have a 1:1 mapping with registers, and may
957 // reference registers that have illegal (e.g. vector) types. Hence, we
958 // shouldn't try to apply any sort of splitting logic to them.
959 assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
960 "No 1:1 mapping from clobbers to regs?");
961 unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
962 (void)SP;
963 for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
964 Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
965 assert(
966 (Regs[I] != SP ||
967 DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
968 "If we clobbered the stack pointer, MFI should know about it.");
969 }
970 return;
971 }
972
973 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
974 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
975 MVT RegisterVT = RegVTs[Value];
976 for (unsigned i = 0; i != NumRegs; ++i) {
977 assert(Reg < Regs.size() && "Mismatch in # registers expected")((Reg < Regs.size() && "Mismatch in # registers expected"
) ? static_cast<void> (0) : __assert_fail ("Reg < Regs.size() && \"Mismatch in # registers expected\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 977, __PRETTY_FUNCTION__))
;
978 unsigned TheReg = Regs[Reg++];
979 Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
980 }
981 }
982}
983
984SmallVector<std::pair<unsigned, unsigned>, 4>
985RegsForValue::getRegsAndSizes() const {
986 SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
987 unsigned I = 0;
988 for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
989 unsigned RegCount = std::get<0>(CountAndVT);
990 MVT RegisterVT = std::get<1>(CountAndVT);
991 unsigned RegisterSize = RegisterVT.getSizeInBits();
992 for (unsigned E = I + RegCount; I != E; ++I)
993 OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
994 }
995 return OutVec;
996}
997
998void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
999 const TargetLibraryInfo *li) {
1000 AA = aa;
1001 GFI = gfi;
1002 LibInfo = li;
1003 DL = &DAG.getDataLayout();
1004 Context = DAG.getContext();
1005 LPadToCallSiteMap.clear();
1006}
1007
1008void SelectionDAGBuilder::clear() {
1009 NodeMap.clear();
1010 UnusedArgNodeMap.clear();
1011 PendingLoads.clear();
1012 PendingExports.clear();
1013 CurInst = nullptr;
1014 HasTailCall = false;
1015 SDNodeOrder = LowestSDNodeOrder;
1016 StatepointLowering.clear();
1017}
1018
1019void SelectionDAGBuilder::clearDanglingDebugInfo() {
1020 DanglingDebugInfoMap.clear();
1021}
1022
1023SDValue SelectionDAGBuilder::getRoot() {
1024 if (PendingLoads.empty())
1025 return DAG.getRoot();
1026
1027 if (PendingLoads.size() == 1) {
1028 SDValue Root = PendingLoads[0];
1029 DAG.setRoot(Root);
1030 PendingLoads.clear();
1031 return Root;
1032 }
1033
1034 // Otherwise, we have to make a token factor node.
1035 SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
1036 PendingLoads);
1037 PendingLoads.clear();
1038 DAG.setRoot(Root);
1039 return Root;
1040}
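
The chain-merging policy in getRoot above has three cases: no pending loads (keep the existing root), exactly one (it becomes the root), or several (join them under a TokenFactor). Purely as an illustration of that policy — using placeholder strings rather than the real SDValue/SelectionDAG API, so every name below is hypothetical — a standalone sketch could look like this:

#include <string>
#include <vector>

using Chain = std::string; // stand-in for an SDValue chain, for illustration only

// Hypothetical analogue of SelectionDAGBuilder::getRoot(): fold all pending
// load chains into the single chain that becomes the new DAG root.
Chain mergeRoot(Chain CurrentRoot, std::vector<Chain> &PendingLoads) {
  if (PendingLoads.empty())
    return CurrentRoot;                  // nothing outstanding: keep the root
  if (PendingLoads.size() == 1) {
    Chain Root = PendingLoads.front();   // a single load simply becomes the root
    PendingLoads.clear();
    return Root;
  }
  // Otherwise join every pending chain under one TokenFactor-like node.
  Chain Root = "TokenFactor(";
  for (const Chain &C : PendingLoads)
    Root += C + ",";
  Root.back() = ')';                     // replace the trailing comma
  PendingLoads.clear();
  return Root;
}

The real implementation additionally installs the merged chain as the new root via DAG.setRoot before returning it.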
1041
1042SDValue SelectionDAGBuilder::getControlRoot() {
1043 SDValue Root = DAG.getRoot();
1044
1045 if (PendingExports.empty())
1046 return Root;
1047
1048 // Turn all of the CopyToReg chains into one factored node.
1049 if (Root.getOpcode() != ISD::EntryToken) {
1050 unsigned i = 0, e = PendingExports.size();
1051 for (; i != e; ++i) {
1052 assert(PendingExports[i].getNode()->getNumOperands() > 1);
1053 if (PendingExports[i].getNode()->getOperand(0) == Root)
1054 break; // Don't add the root if we already indirectly depend on it.
1055 }
1056
1057 if (i == e)
1058 PendingExports.push_back(Root);
1059 }
1060
1061 Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
1062 PendingExports);
1063 PendingExports.clear();
1064 DAG.setRoot(Root);
1065 return Root;
1066}
1067
1068void SelectionDAGBuilder::visit(const Instruction &I) {
1069 // Set up outgoing PHI node register values before emitting the terminator.
1070 if (I.isTerminator()) {
1. Taking true branch
1071 HandlePHINodesInSuccessorBlocks(I.getParent());
2. Calling 'SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks'
1072 }
1073
1074 // Increase the SDNodeOrder if dealing with a non-debug instruction.
1075 if (!isa<DbgInfoIntrinsic>(I))
1076 ++SDNodeOrder;
1077
1078 CurInst = &I;
1079
1080 visit(I.getOpcode(), I);
1081
1082 if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
1083 // Propagate the fast-math-flags of this IR instruction to the DAG node that
1084 // maps to this instruction.
1085 // TODO: We could handle all flags (nsw, etc) here.
1086 // TODO: If an IR instruction maps to >1 node, only the final node will have
1087 // flags set.
1088 if (SDNode *Node = getNodeForIRValue(&I)) {
1089 SDNodeFlags IncomingFlags;
1090 IncomingFlags.copyFMF(*FPMO);
1091 if (!Node->getFlags().isDefined())
1092 Node->setFlags(IncomingFlags);
1093 else
1094 Node->intersectFlagsWith(IncomingFlags);
1095 }
1096 }
1097
1098 if (!I.isTerminator() && !HasTailCall &&
1099 !isStatepoint(&I)) // statepoints handle their exports internally
1100 CopyToExportRegsIfNeeded(&I);
1101
1102 CurInst = nullptr;
1103}
1104
1105void SelectionDAGBuilder::visitPHI(const PHINode &) {
1106 llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1107}
1108
1109void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1110 // Note: this doesn't use InstVisitor, because it has to work with
1111 // ConstantExpr's in addition to instructions.
1112 switch (Opcode) {
1113 default: llvm_unreachable("Unknown instruction type encountered!");
1114 // Build the switch statement using the Instruction.def file.
1115#define HANDLE_INST(NUM, OPCODE, CLASS) \
1116 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1117#include "llvm/IR/Instruction.def"
1118 }
1119}
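
The #include of llvm/IR/Instruction.def inside the switch above is the X-macro pattern: the .def file invokes HANDLE_INST once per opcode, and the locally defined macro turns each invocation into a case label plus a visit call. A minimal, self-contained imitation of the pattern (with a made-up two-entry opcode list standing in for the real Instruction.def contents) is:

#include <cstdio>

void visitAdd() { std::puts("visit Add"); }
void visitRet() { std::puts("visit Ret"); }

// Stand-in for llvm/IR/Instruction.def: the real file is a long list of
// HANDLE_INST(NUM, OPCODE, CLASS) invocations that the includer defines.
#define MY_INST_LIST                                                          \
  HANDLE_INST(1, Add)                                                         \
  HANDLE_INST(2, Ret)

void dispatch(unsigned Opcode) {
  switch (Opcode) {
  default:
    std::puts("unknown opcode");
    break;
// Define HANDLE_INST so each list entry becomes "case NUM: visitOPCODE(); break;"
#define HANDLE_INST(NUM, OPCODE) case NUM: visit##OPCODE(); break;
  MY_INST_LIST
#undef HANDLE_INST
  }
}

int main() { dispatch(1); dispatch(2); dispatch(7); }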
1120
1121void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1122 const DIExpression *Expr) {
1123 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1124 const DbgValueInst *DI = DDI.getDI();
1125 DIVariable *DanglingVariable = DI->getVariable();
1126 DIExpression *DanglingExpr = DI->getExpression();
1127 if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1128 LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
1129 return true;
1130 }
1131 return false;
1132 };
1133
1134 for (auto &DDIMI : DanglingDebugInfoMap) {
1135 DanglingDebugInfoVector &DDIV = DDIMI.second;
1136 DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
1137 }
1138}
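
DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end()) above is the erase-remove idiom (remove_if here is LLVM's range helper over std::remove_if): remove_if compacts the elements to keep at the front and returns the new logical end, and erase then drops the leftover tail. A small generic example of the same idiom:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> V = {1, 2, 3, 4, 5, 6};
  // remove_if shifts the elements to keep toward the front and returns the new
  // logical end; erase then trims everything after it.
  V.erase(std::remove_if(V.begin(), V.end(), [](int X) { return X % 2 == 0; }),
          V.end());
  for (int X : V)
    std::printf("%d ", X); // prints: 1 3 5
  std::printf("\n");
}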
1139
1140// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1141// generate the debug data structures now that we've seen its definition.
1142void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1143 SDValue Val) {
1144 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1145 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1146 return;
1147
1148 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1149 for (auto &DDI : DDIV) {
1150 const DbgValueInst *DI = DDI.getDI();
1151 assert(DI && "Ill-formed DanglingDebugInfo");
1152 DebugLoc dl = DDI.getdl();
1153 unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1154 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1155 DILocalVariable *Variable = DI->getVariable();
1156 DIExpression *Expr = DI->getExpression();
1157 assert(Variable->isValidLocationForIntrinsic(dl) &&
1158 "Expected inlined-at fields to agree");
1159 SDDbgValue *SDV;
1160 if (Val.getNode()) {
1161 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
1162 LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
1163 << DbgSDNodeOrder << "] for:\n " << *DI << "\n");
1164 LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
1165 // Increase the SDNodeOrder for the DbgValue here to make sure it is
1166 // inserted after the definition of Val when emitting the instructions
1167 // after ISel. An alternative could be to teach
1168 // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1169 LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1170 << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1171 << ValSDNodeOrder << "\n");
1172 SDV = getDbgValue(Val, Variable, Expr, dl,
1173 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1174 DAG.AddDbgValue(SDV, Val.getNode(), false);
1175 } else
1176 LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
1177 << "in EmitFuncArgumentDbgValue\n");
1178 } else
1179 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1180 }
1181 DDIV.clear();
1182}
1183
1184/// getCopyFromRegs - If there was virtual register allocated for the value V
1185/// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
1186SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1187 DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
1188 SDValue Result;
1189
1190 if (It != FuncInfo.ValueMap.end()) {
1191 unsigned InReg = It->second;
1192
1193 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1194 DAG.getDataLayout(), InReg, Ty,
1195 None); // This is not an ABI copy.
1196 SDValue Chain = DAG.getEntryNode();
1197 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1198 V);
1199 resolveDanglingDebugInfo(V, Result);
1200 }
1201
1202 return Result;
1203}
1204
1205/// getValue - Return an SDValue for the given Value.
1206SDValue SelectionDAGBuilder::getValue(const Value *V) {
1207 // If we already have an SDValue for this value, use it. It's important
1208 // to do this first, so that we don't create a CopyFromReg if we already
1209 // have a regular SDValue.
1210 SDValue &N = NodeMap[V];
1211 if (N.getNode()) return N;
1212
1213 // If there's a virtual register allocated and initialized for this
1214 // value, use it.
1215 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1216 return copyFromReg;
1217
1218 // Otherwise create a new SDValue and remember it.
1219 SDValue Val = getValueImpl(V);
1220 NodeMap[V] = Val;
1221 resolveDanglingDebugInfo(V, Val);
1222 return Val;
1223}
1224
1225// Return true if SDValue exists for the given Value
1226bool SelectionDAGBuilder::findValue(const Value *V) const {
1227 return (NodeMap.find(V) != NodeMap.end()) ||
1228 (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
1229}
1230
1231/// getNonRegisterValue - Return an SDValue for the given Value, but
1232/// don't look in FuncInfo.ValueMap for a virtual register.
1233SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1234 // If we already have an SDValue for this value, use it.
1235 SDValue &N = NodeMap[V];
1236 if (N.getNode()) {
16. Assuming the condition is false
17. Taking false branch
1237 if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1238 // Remove the debug location from the node as the node is about to be used
1239 // in a location which may differ from the original debug location. This
1240 // is relevant to Constant and ConstantFP nodes because they can appear
1241 // as constant expressions inside PHI nodes.
1242 N->setDebugLoc(DebugLoc());
1243 }
1244 return N;
1245 }
1246
1247 // Otherwise create a new SDValue and remember it.
1248 SDValue Val = getValueImpl(V);
1249 NodeMap[V] = Val;
1250 resolveDanglingDebugInfo(V, Val);
1251 return Val;
18. Null pointer value stored to 'Op.Node'
1252}
1253
1254/// getValueImpl - Helper function for getValue and getNonRegisterValue.
1255/// Create an SDValue for the given value.
1256SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1257 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1258
1259 if (const Constant *C = dyn_cast<Constant>(V)) {
1260 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1261
1262 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1263 return DAG.getConstant(*CI, getCurSDLoc(), VT);
1264
1265 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1266 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1267
1268 if (isa<ConstantPointerNull>(C)) {
1269 unsigned AS = V->getType()->getPointerAddressSpace();
1270 return DAG.getConstant(0, getCurSDLoc(),
1271 TLI.getPointerTy(DAG.getDataLayout(), AS));
1272 }
1273
1274 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1275 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1276
1277 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1278 return DAG.getUNDEF(VT);
1279
1280 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1281 visit(CE->getOpcode(), *CE);
1282 SDValue N1 = NodeMap[V];
1283 assert(N1.getNode() && "visit didn't populate the NodeMap!");
1284 return N1;
1285 }
1286
1287 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1288 SmallVector<SDValue, 4> Constants;
1289 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1290 OI != OE; ++OI) {
1291 SDNode *Val = getValue(*OI).getNode();
1292 // If the operand is an empty aggregate, there are no values.
1293 if (!Val) continue;
1294 // Add each leaf value from the operand to the Constants list
1295 // to form a flattened list of all the values.
1296 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1297 Constants.push_back(SDValue(Val, i));
1298 }
1299
1300 return DAG.getMergeValues(Constants, getCurSDLoc());
1301 }
1302
1303 if (const ConstantDataSequential *CDS =
1304 dyn_cast<ConstantDataSequential>(C)) {
1305 SmallVector<SDValue, 4> Ops;
1306 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1307 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1308 // Add each leaf value from the operand to the Constants list
1309 // to form a flattened list of all the values.
1310 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1311 Ops.push_back(SDValue(Val, i));
1312 }
1313
1314 if (isa<ArrayType>(CDS->getType()))
1315 return DAG.getMergeValues(Ops, getCurSDLoc());
1316 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1317 }
1318
1319 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1320 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1321 "Unknown struct or array constant!");
1322
1323 SmallVector<EVT, 4> ValueVTs;
1324 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1325 unsigned NumElts = ValueVTs.size();
1326 if (NumElts == 0)
1327 return SDValue(); // empty struct
1328 SmallVector<SDValue, 4> Constants(NumElts);
1329 for (unsigned i = 0; i != NumElts; ++i) {
1330 EVT EltVT = ValueVTs[i];
1331 if (isa<UndefValue>(C))
1332 Constants[i] = DAG.getUNDEF(EltVT);
1333 else if (EltVT.isFloatingPoint())
1334 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1335 else
1336 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1337 }
1338
1339 return DAG.getMergeValues(Constants, getCurSDLoc());
1340 }
1341
1342 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1343 return DAG.getBlockAddress(BA, VT);
1344
1345 VectorType *VecTy = cast<VectorType>(V->getType());
1346 unsigned NumElements = VecTy->getNumElements();
1347
1348 // Now that we know the number and type of the elements, get that number of
1349 // elements into the Ops array based on what kind of constant it is.
1350 SmallVector<SDValue, 16> Ops;
1351 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1352 for (unsigned i = 0; i != NumElements; ++i)
1353 Ops.push_back(getValue(CV->getOperand(i)));
1354 } else {
1355 assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
1356 EVT EltVT =
1357 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1358
1359 SDValue Op;
1360 if (EltVT.isFloatingPoint())
1361 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1362 else
1363 Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1364 Ops.assign(NumElements, Op);
1365 }
1366
1367 // Create a BUILD_VECTOR node.
1368 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1369 }
1370
1371 // If this is a static alloca, generate it as the frameindex instead of
1372 // computation.
1373 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1374 DenseMap<const AllocaInst*, int>::iterator SI =
1375 FuncInfo.StaticAllocaMap.find(AI);
1376 if (SI != FuncInfo.StaticAllocaMap.end())
1377 return DAG.getFrameIndex(SI->second,
1378 TLI.getFrameIndexTy(DAG.getDataLayout()));
1379 }
1380
1381 // If this is an instruction which fast-isel has deferred, select it now.
1382 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1383 unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1384
1385 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1386 Inst->getType(), getABIRegCopyCC(V));
1387 SDValue Chain = DAG.getEntryNode();
1388 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1389 }
1390
1391 llvm_unreachable("Can't get register for value!");
1392}
1393
1394void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1395 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1396 bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1397 bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1398 bool IsSEH = isAsynchronousEHPersonality(Pers);
1399 bool IsWasmCXX = Pers == EHPersonality::Wasm_CXX;
1400 MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1401 if (!IsSEH)
1402 CatchPadMBB->setIsEHScopeEntry();
1403 // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1404 if (IsMSVCCXX || IsCoreCLR)
1405 CatchPadMBB->setIsEHFuncletEntry();
1406 // Wasm does not need catchpads anymore
1407 if (!IsWasmCXX)
1408 DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
1409 getControlRoot()));
1410}
1411
1412void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1413 // Update machine-CFG edge.
1414 MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1415 FuncInfo.MBB->addSuccessor(TargetMBB);
1416
1417 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1418 bool IsSEH = isAsynchronousEHPersonality(Pers);
1419 if (IsSEH) {
1420 // If this is not a fall-through branch or optimizations are switched off,
1421 // emit the branch.
1422 if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1423 TM.getOptLevel() == CodeGenOpt::None)
1424 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1425 getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1426 return;
1427 }
1428
1429 // Figure out the funclet membership for the catchret's successor.
1430 // This will be used by the FuncletLayout pass to determine how to order the
1431 // BB's.
1432 // A 'catchret' returns to the outer scope's color.
1433 Value *ParentPad = I.getCatchSwitchParentPad();
1434 const BasicBlock *SuccessorColor;
1435 if (isa<ConstantTokenNone>(ParentPad))
1436 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1437 else
1438 SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1439 assert(SuccessorColor && "No parent funclet for catchret!");
1440 MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1441 assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1442
1443 // Create the terminator node.
1444 SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1445 getControlRoot(), DAG.getBasicBlock(TargetMBB),
1446 DAG.getBasicBlock(SuccessorColorMBB));
1447 DAG.setRoot(Ret);
1448}
1449
1450void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1451 // Don't emit any special code for the cleanuppad instruction. It just marks
1452 // the start of an EH scope/funclet.
1453 FuncInfo.MBB->setIsEHScopeEntry();
1454 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1455 if (Pers != EHPersonality::Wasm_CXX) {
1456 FuncInfo.MBB->setIsEHFuncletEntry();
1457 FuncInfo.MBB->setIsCleanupFuncletEntry();
1458 }
1459}
1460
1461/// When an invoke or a cleanupret unwinds to the next EH pad, there are
1462/// many places it could ultimately go. In the IR, we have a single unwind
1463/// destination, but in the machine CFG, we enumerate all the possible blocks.
1464/// This function skips over imaginary basic blocks that hold catchswitch
1465/// instructions, and finds all the "real" machine
1466/// basic block destinations. As those destinations may not be successors of
1467/// EHPadBB, here we also calculate the edge probability to those destinations.
1468/// The passed-in Prob is the edge probability to EHPadBB.
1469static void findUnwindDestinations(
1470 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1471 BranchProbability Prob,
1472 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1473 &UnwindDests) {
1474 EHPersonality Personality =
1475 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1476 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1477 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1478 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1479 bool IsSEH = isAsynchronousEHPersonality(Personality);
1480
1481 while (EHPadBB) {
1482 const Instruction *Pad = EHPadBB->getFirstNonPHI();
1483 BasicBlock *NewEHPadBB = nullptr;
1484 if (isa<LandingPadInst>(Pad)) {
1485 // Stop on landingpads. They are not funclets.
1486 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1487 break;
1488 } else if (isa<CleanupPadInst>(Pad)) {
1489 // Stop on cleanup pads. Cleanups are always funclet entries for all known
1490 // personalities.
1491 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1492 UnwindDests.back().first->setIsEHScopeEntry();
1493 if (!IsWasmCXX)
1494 UnwindDests.back().first->setIsEHFuncletEntry();
1495 break;
1496 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1497 // Add the catchpad handlers to the possible destinations.
1498 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1499 UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1500 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1501 if (IsMSVCCXX || IsCoreCLR)
1502 UnwindDests.back().first->setIsEHFuncletEntry();
1503 if (!IsSEH)
1504 UnwindDests.back().first->setIsEHScopeEntry();
1505 }
1506 NewEHPadBB = CatchSwitch->getUnwindDest();
1507 } else {
1508 continue;
1509 }
1510
1511 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1512 if (BPI && NewEHPadBB)
1513 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1514 EHPadBB = NewEHPadBB;
1515 }
1516}
1517
1518void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1519 // Update successor info.
1520 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1521 auto UnwindDest = I.getUnwindDest();
1522 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1523 BranchProbability UnwindDestProb =
1524 (BPI && UnwindDest)
1525 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1526 : BranchProbability::getZero();
1527 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1528 for (auto &UnwindDest : UnwindDests) {
1529 UnwindDest.first->setIsEHPad();
1530 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1531 }
1532 FuncInfo.MBB->normalizeSuccProbs();
1533
1534 // Create the terminator node.
1535 SDValue Ret =
1536 DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1537 DAG.setRoot(Ret);
1538}
1539
1540void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1541 report_fatal_error("visitCatchSwitch not yet implemented!");
1542}
1543
1544void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1545 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1546 auto &DL = DAG.getDataLayout();
1547 SDValue Chain = getControlRoot();
1548 SmallVector<ISD::OutputArg, 8> Outs;
1549 SmallVector<SDValue, 8> OutVals;
1550
1551 // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1552 // lower
1553 //
1554 // %val = call <ty> @llvm.experimental.deoptimize()
1555 // ret <ty> %val
1556 //
1557 // differently.
1558 if (I.getParent()->getTerminatingDeoptimizeCall()) {
1559 LowerDeoptimizingReturn();
1560 return;
1561 }
1562
1563 if (!FuncInfo.CanLowerReturn) {
1564 unsigned DemoteReg = FuncInfo.DemoteRegister;
1565 const Function *F = I.getParent()->getParent();
1566
1567 // Emit a store of the return value through the virtual register.
1568 // Leave Outs empty so that LowerReturn won't try to load return
1569 // registers the usual way.
1570 SmallVector<EVT, 1> PtrValueVTs;
1571 ComputeValueVTs(TLI, DL,
1572 F->getReturnType()->getPointerTo(
1573 DAG.getDataLayout().getAllocaAddrSpace()),
1574 PtrValueVTs);
1575
1576 SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1577 DemoteReg, PtrValueVTs[0]);
1578 SDValue RetOp = getValue(I.getOperand(0));
1579
1580 SmallVector<EVT, 4> ValueVTs;
1581 SmallVector<uint64_t, 4> Offsets;
1582 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
1583 unsigned NumValues = ValueVTs.size();
1584
1585 SmallVector<SDValue, 4> Chains(NumValues);
1586 for (unsigned i = 0; i != NumValues; ++i) {
1587 // An aggregate return value cannot wrap around the address space, so
1588 // offsets to its parts don't wrap either.
1589 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);
1590 Chains[i] = DAG.getStore(
1591 Chain, getCurSDLoc(), SDValue(RetOp.getNode(), RetOp.getResNo() + i),
1592 // FIXME: better loc info would be nice.
1593 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
1594 }
1595
1596 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1597 MVT::Other, Chains);
1598 } else if (I.getNumOperands() != 0) {
1599 SmallVector<EVT, 4> ValueVTs;
1600 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1601 unsigned NumValues = ValueVTs.size();
1602 if (NumValues) {
1603 SDValue RetOp = getValue(I.getOperand(0));
1604
1605 const Function *F = I.getParent()->getParent();
1606
1607 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1608 if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1609 Attribute::SExt))
1610 ExtendKind = ISD::SIGN_EXTEND;
1611 else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1612 Attribute::ZExt))
1613 ExtendKind = ISD::ZERO_EXTEND;
1614
1615 LLVMContext &Context = F->getContext();
1616 bool RetInReg = F->getAttributes().hasAttribute(
1617 AttributeList::ReturnIndex, Attribute::InReg);
1618
1619 for (unsigned j = 0; j != NumValues; ++j) {
1620 EVT VT = ValueVTs[j];
1621
1622 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1623 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1624
1625 CallingConv::ID CC = F->getCallingConv();
1626
1627 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1628 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1629 SmallVector<SDValue, 4> Parts(NumParts);
1630 getCopyToParts(DAG, getCurSDLoc(),
1631 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1632 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1633
1634 // 'inreg' on function refers to return value
1635 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1636 if (RetInReg)
1637 Flags.setInReg();
1638
1639 // Propagate extension type if any
1640 if (ExtendKind == ISD::SIGN_EXTEND)
1641 Flags.setSExt();
1642 else if (ExtendKind == ISD::ZERO_EXTEND)
1643 Flags.setZExt();
1644
1645 for (unsigned i = 0; i < NumParts; ++i) {
1646 Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1647 VT, /*isfixed=*/true, 0, 0));
1648 OutVals.push_back(Parts[i]);
1649 }
1650 }
1651 }
1652 }
1653
1654 // Push in swifterror virtual register as the last element of Outs. This makes
1655 // sure swifterror virtual register will be returned in the swifterror
1656 // physical register.
1657 const Function *F = I.getParent()->getParent();
1658 if (TLI.supportSwiftError() &&
1659 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1660 assert(FuncInfo.SwiftErrorArg && "Need a swift error argument");
1661 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1662 Flags.setSwiftError();
1663 Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1664 EVT(TLI.getPointerTy(DL)) /*argvt*/,
1665 true /*isfixed*/, 1 /*origidx*/,
1666 0 /*partOffs*/));
1667 // Create SDNode for the swifterror virtual register.
1668 OutVals.push_back(
1669 DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVRegUseAt(
1670 &I, FuncInfo.MBB, FuncInfo.SwiftErrorArg).first,
1671 EVT(TLI.getPointerTy(DL))));
1672 }
1673
1674 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
1675 CallingConv::ID CallConv =
1676 DAG.getMachineFunction().getFunction().getCallingConv();
1677 Chain = DAG.getTargetLoweringInfo().LowerReturn(
1678 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1679
1680 // Verify that the target's LowerReturn behaved as expected.
1681 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1682 "LowerReturn didn't return a valid chain!");
1683
1684 // Update the DAG with the new chain value resulting from return lowering.
1685 DAG.setRoot(Chain);
1686}
1687
1688/// CopyToExportRegsIfNeeded - If the given value has virtual registers
1689/// created for it, emit nodes to copy the value into the virtual
1690/// registers.
1691void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1692 // Skip empty types
1693 if (V->getType()->isEmptyTy())
1694 return;
1695
1696 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1697 if (VMI != FuncInfo.ValueMap.end()) {
1698 assert(!V->use_empty() && "Unused value assigned virtual registers!");
1699 CopyValueToVirtualRegister(V, VMI->second);
1700 }
1701}
1702
1703/// ExportFromCurrentBlock - If this condition isn't known to be exported from
1704/// the current basic block, add it to ValueMap now so that we'll get a
1705/// CopyTo/FromReg.
1706void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1707 // No need to export constants.
1708 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1709
1710 // Already exported?
1711 if (FuncInfo.isExportedInst(V)) return;
1712
1713 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1714 CopyValueToVirtualRegister(V, Reg);
1715}
1716
1717bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1718 const BasicBlock *FromBB) {
1719 // The operands of the setcc have to be in this block. We don't know
1720 // how to export them from some other block.
1721 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1722 // Can export from current BB.
1723 if (VI->getParent() == FromBB)
1724 return true;
1725
1726 // Is already exported, noop.
1727 return FuncInfo.isExportedInst(V);
1728 }
1729
1730 // If this is an argument, we can export it if the BB is the entry block or
1731 // if it is already exported.
1732 if (isa<Argument>(V)) {
1733 if (FromBB == &FromBB->getParent()->getEntryBlock())
1734 return true;
1735
1736 // Otherwise, can only export this if it is already exported.
1737 return FuncInfo.isExportedInst(V);
1738 }
1739
1740 // Otherwise, constants can always be exported.
1741 return true;
1742}
1743
1744/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
1745BranchProbability
1746SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
1747 const MachineBasicBlock *Dst) const {
1748 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1749 const BasicBlock *SrcBB = Src->getBasicBlock();
1750 const BasicBlock *DstBB = Dst->getBasicBlock();
1751 if (!BPI) {
1752 // If BPI is not available, set the default probability as 1 / N, where N is
1753 // the number of successors.
1754 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
1755 return BranchProbability(1, SuccSize);
1756 }
1757 return BPI->getEdgeProbability(SrcBB, DstBB);
1758}
1759
1760void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
1761 MachineBasicBlock *Dst,
1762 BranchProbability Prob) {
1763 if (!FuncInfo.BPI)
1764 Src->addSuccessorWithoutProb(Dst);
1765 else {
1766 if (Prob.isUnknown())
1767 Prob = getEdgeProbability(Src, Dst);
1768 Src->addSuccessor(Dst, Prob);
1769 }
1770}
1771
1772static bool InBlock(const Value *V, const BasicBlock *BB) {
1773 if (const Instruction *I = dyn_cast<Instruction>(V))
1774 return I->getParent() == BB;
1775 return true;
1776}
1777
1778/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1779/// This function emits a branch and is used at the leaves of an OR or an
1780/// AND operator tree.
1781void
1782SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1783 MachineBasicBlock *TBB,
1784 MachineBasicBlock *FBB,
1785 MachineBasicBlock *CurBB,
1786 MachineBasicBlock *SwitchBB,
1787 BranchProbability TProb,
1788 BranchProbability FProb,
1789 bool InvertCond) {
1790 const BasicBlock *BB = CurBB->getBasicBlock();
1791
1792 // If the leaf of the tree is a comparison, merge the condition into
1793 // the caseblock.
1794 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1795 // The operands of the cmp have to be in this block. We don't know
1796 // how to export them from some other block. If this is the first block
1797 // of the sequence, no exporting is needed.
1798 if (CurBB == SwitchBB ||
1799 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1800 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1801 ISD::CondCode Condition;
1802 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1803 ICmpInst::Predicate Pred =
1804 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
1805 Condition = getICmpCondCode(Pred);
1806 } else {
1807 const FCmpInst *FC = cast<FCmpInst>(Cond);
1808 FCmpInst::Predicate Pred =
1809 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
1810 Condition = getFCmpCondCode(Pred);
1811 if (TM.Options.NoNaNsFPMath)
1812 Condition = getFCmpCodeWithoutNaN(Condition);
1813 }
1814
1815 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
1816 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1817 SwitchCases.push_back(CB);
1818 return;
1819 }
1820 }
1821
1822 // Create a CaseBlock record representing this branch.
1823 ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
1824 CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
1825 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1826 SwitchCases.push_back(CB);
1827}
1828
1829void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
1830 MachineBasicBlock *TBB,
1831 MachineBasicBlock *FBB,
1832 MachineBasicBlock *CurBB,
1833 MachineBasicBlock *SwitchBB,
1834 Instruction::BinaryOps Opc,
1835 BranchProbability TProb,
1836 BranchProbability FProb,
1837 bool InvertCond) {
1838 // Skip over not part of the tree and remember to invert op and operands at
1839 // next level.
1840 Value *NotCond;
1841 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
1842 InBlock(NotCond, CurBB->getBasicBlock())) {
1843 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
1844 !InvertCond);
1845 return;
1846 }
1847
1848 const Instruction *BOp = dyn_cast<Instruction>(Cond);
1849 // Compute the effective opcode for Cond, taking into account whether it needs
1850 // to be inverted, e.g.
1851 // and (not (or A, B)), C
1852 // gets lowered as
1853 // and (and (not A, not B), C)
1854 unsigned BOpc = 0;
1855 if (BOp) {
1856 BOpc = BOp->getOpcode();
1857 if (InvertCond) {
1858 if (BOpc == Instruction::And)
1859 BOpc = Instruction::Or;
1860 else if (BOpc == Instruction::Or)
1861 BOpc = Instruction::And;
1862 }
1863 }
1864
1865 // If this node is not part of the or/and tree, emit it as a branch.
1866 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1867 BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
1868 BOp->getParent() != CurBB->getBasicBlock() ||
1869 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1870 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1871 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
1872 TProb, FProb, InvertCond);
1873 return;
1874 }
1875
1876 // Create TmpBB after CurBB.
1877 MachineFunction::iterator BBI(CurBB);
1878 MachineFunction &MF = DAG.getMachineFunction();
1879 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1880 CurBB->getParent()->insert(++BBI, TmpBB);
1881
1882 if (Opc == Instruction::Or) {
1883 // Codegen X | Y as:
1884 // BB1:
1885 // jmp_if_X TBB
1886 // jmp TmpBB
1887 // TmpBB:
1888 // jmp_if_Y TBB
1889 // jmp FBB
1890 //
1891
1892 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1893 // The requirement is that
1894 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
1895 // = TrueProb for original BB.
1896 // Assuming the original probabilities are A and B, one choice is to set
1897 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
1898 // A/(1+B) and 2B/(1+B). This choice assumes that
1899 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
1900 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
1901 // TmpBB, but the math is more complicated.
1902
1903 auto NewTrueProb = TProb / 2;
1904 auto NewFalseProb = TProb / 2 + FProb;
1905 // Emit the LHS condition.
1906 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
1907 NewTrueProb, NewFalseProb, InvertCond);
1908
1909 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
1910 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
1911 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1912 // Emit the RHS condition into TmpBB.
1913 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1914 Probs[0], Probs[1], InvertCond);
1915 } else {
1916 assert(Opc == Instruction::And && "Unknown merge op!");
1917 // Codegen X & Y as:
1918 // BB1:
1919 // jmp_if_X TmpBB
1920 // jmp FBB
1921 // TmpBB:
1922 // jmp_if_Y TBB
1923 // jmp FBB
1924 //
1925 // This requires creation of TmpBB after CurBB.
1926
1927 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1928 // The requirement is that
1929 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
1930 // = FalseProb for original BB.
1931 // Assuming the original probabilities are A and B, one choice is to set
1932 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
1933 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
1934 // TrueProb for BB1 * FalseProb for TmpBB.
1935
1936 auto NewTrueProb = TProb + FProb / 2;
1937 auto NewFalseProb = FProb / 2;
1938 // Emit the LHS condition.
1939 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
1940 NewTrueProb, NewFalseProb, InvertCond);
1941
1942 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
1943 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
1944 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1945 // Emit the RHS condition into TmpBB.
1946 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1947 Probs[0], Probs[1], InvertCond);
1948 }
1949}
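
The probability bookkeeping in both branches above is easier to check with concrete numbers. Taking the Or case with assumed edge probabilities A = TProb = 0.3 and B = FProb = 0.7, BB1 gets A/2 = 0.15 and A/2 + B = 0.85, and normalizing {A/2, B} gives TmpBB A/(1+B) (about 0.176) and 2B/(1+B) (about 0.824), which satisfies the stated requirement TrueProb(BB1) + FalseProb(BB1) * TrueProb(TmpBB) = TrueProb of the original block. A tiny check in plain doubles (the real code uses the fixed-point BranchProbability class, not floating point; the numbers here are assumptions for illustration):

#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
  // Assumed original edge probabilities for the Or case; A + B must be 1.
  double A = 0.3, B = 0.7;
  double BB1True = A / 2;                 // BB1 -> TBB
  double BB1False = A / 2 + B;            // BB1 -> TmpBB
  double TmpTrue = (A / 2) / (A / 2 + B); // TmpBB -> TBB, i.e. A/(1+B)
  double TmpFalse = B / (A / 2 + B);      // TmpBB -> FBB, i.e. 2B/(1+B)
  // Requirement from the comment: TrueProb(BB1) + FalseProb(BB1)*TrueProb(TmpBB)
  // must equal the original block's TrueProb.
  assert(std::fabs(BB1True + BB1False * TmpTrue - A) < 1e-12);
  std::printf("BB1: %.3f / %.3f   TmpBB: %.3f / %.3f\n",
              BB1True, BB1False, TmpTrue, TmpFalse);
}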
1950
1951/// If the set of cases should be emitted as a series of branches, return true.
1952/// If we should emit this as a bunch of and/or'd together conditions, return
1953/// false.
1954bool
1955SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
1956 if (Cases.size() != 2) return true;
1957
1958 // If this is two comparisons of the same values or'd or and'd together, they
1959 // will get folded into a single comparison, so don't emit two blocks.
1960 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1961 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1962 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1963 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1964 return false;
1965 }
1966
1967 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
1968 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
1969 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1970 Cases[0].CC == Cases[1].CC &&
1971 isa<Constant>(Cases[0].CmpRHS) &&
1972 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1973 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1974 return false;
1975 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1976 return false;
1977 }
1978
1979 return true;
1980}
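
The two null-comparison cases recognized above correspond to folding two pointer checks into one: (X != null) | (Y != null) becomes (X|Y) != 0, and (X == null) & (Y == null) becomes (X|Y) == 0. A C++-level illustration of the equivalent source pattern (assuming the usual all-zero null pointer representation; the actual fold happens on the DAG, not in source, and these function names are made up):

#include <cassert>
#include <cstdint>

// (X != null) | (Y != null)  -->  (X | Y) != 0
bool eitherNonNull(const int *X, const int *Y) {
  return (reinterpret_cast<std::uintptr_t>(X) |
          reinterpret_cast<std::uintptr_t>(Y)) != 0;
}

// (X == null) & (Y == null)  -->  (X | Y) == 0
bool bothNull(const int *X, const int *Y) {
  return (reinterpret_cast<std::uintptr_t>(X) |
          reinterpret_cast<std::uintptr_t>(Y)) == 0;
}

int main() {
  int A = 0;
  assert(eitherNonNull(&A, nullptr));
  assert(bothNull(nullptr, nullptr));
}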
1981
1982void SelectionDAGBuilder::visitBr(const BranchInst &I) {
1983 MachineBasicBlock *BrMBB = FuncInfo.MBB;
1984
1985 // Update machine-CFG edges.
1986 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1987
1988 if (I.isUnconditional()) {
1989 // Update machine-CFG edges.
1990 BrMBB->addSuccessor(Succ0MBB);
1991
1992 // If this is not a fall-through branch or optimizations are switched off,
1993 // emit the branch.
1994 if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
1995 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
1996 MVT::Other, getControlRoot(),
1997 DAG.getBasicBlock(Succ0MBB)));
1998
1999 return;
2000 }
2001
2002 // If this condition is one of the special cases we handle, do special stuff
2003 // now.
2004 const Value *CondVal = I.getCondition();
2005 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2006
2007 // If this is a series of conditions that are or'd or and'd together, emit
2008 // this as a sequence of branches instead of setcc's with and/or operations.
2009 // As long as jumps are not expensive, this should improve performance.
2010 // For example, instead of something like:
2011 // cmp A, B
2012 // C = seteq
2013 // cmp D, E
2014 // F = setle
2015 // or C, F
2016 // jnz foo
2017 // Emit:
2018 // cmp A, B
2019 // je foo
2020 // cmp D, E
2021 // jle foo
2022 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
2023 Instruction::BinaryOps Opcode = BOp->getOpcode();
2024 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
2025 !I.getMetadata(LLVMContext::MD_unpredictable) &&
2026 (Opcode == Instruction::And || Opcode == Instruction::Or)) {
2027 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
2028 Opcode,
2029 getEdgeProbability(BrMBB, Succ0MBB),
2030 getEdgeProbability(BrMBB, Succ1MBB),
2031 /*InvertCond=*/false);
2032 // If the compares in later blocks need to use values not currently
2033 // exported from this block, export them now. This block should always
2034 // be the first entry.
2035 assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2036
2037 // Allow some cases to be rejected.
2038 if (ShouldEmitAsBranches(SwitchCases)) {
2039 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
2040 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
2041 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
2042 }
2043
2044 // Emit the branch for this block.
2045 visitSwitchCase(SwitchCases[0], BrMBB);
2046 SwitchCases.erase(SwitchCases.begin());
2047 return;
2048 }
2049
2050 // Okay, we decided not to do this, remove any inserted MBB's and clear
2051 // SwitchCases.
2052 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
2053 FuncInfo.MF->erase(SwitchCases[i].ThisBB);
2054
2055 SwitchCases.clear();
2056 }
2057 }
2058
2059 // Create a CaseBlock record representing this branch.
2060 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2061 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2062
2063 // Use visitSwitchCase to actually insert the fast branch sequence for this
2064 // cond branch.
2065 visitSwitchCase(CB, BrMBB);
2066}
2067
2068/// visitSwitchCase - Emits the necessary code to represent a single node in
2069/// the binary search tree resulting from lowering a switch instruction.
2070void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2071 MachineBasicBlock *SwitchBB) {
2072 SDValue Cond;
2073 SDValue CondLHS = getValue(CB.CmpLHS);
2074 SDLoc dl = CB.DL;
2075
2076 // Build the setcc now.
2077 if (!CB.CmpMHS) {
2078 // Fold "(X == true)" to X and "(X == false)" to !X to
2079 // handle common cases produced by branch lowering.
2080 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2081 CB.CC == ISD::SETEQ)
2082 Cond = CondLHS;
2083 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2084 CB.CC == ISD::SETEQ) {
2085 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2086 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2087 } else
2088 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
2089 } else {
2090 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2091
2092 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2093 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2094
2095 SDValue CmpOp = getValue(CB.CmpMHS);
2096 EVT VT = CmpOp.getValueType();
2097
2098 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2099 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2100 ISD::SETLE);
2101 } else {
2102 SDValue SUB = DAG.getNode(ISD::SUB, dl,
2103 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2104 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2105 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2106 }
2107 }
2108
2109 // Update successor info
2110 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2111 // TrueBB and FalseBB are always different unless the incoming IR is
2112 // degenerate. This only happens when running llc on weird IR.
2113 if (CB.TrueBB != CB.FalseBB)
2114 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2115 SwitchBB->normalizeSuccProbs();
2116
2117 // If the lhs block is the next block, invert the condition so that we can
2118 // fall through to the lhs instead of the rhs block.
2119 if (CB.TrueBB == NextBlock(SwitchBB)) {
2120 std::swap(CB.TrueBB, CB.FalseBB);
2121 SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2122 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2123 }
2124
2125 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2126 MVT::Other, getControlRoot(), Cond,
2127 DAG.getBasicBlock(CB.TrueBB));
2128
2129 // Insert the false branch. Do this even if it's a fall through branch,
2130 // this makes it easier to do DAG optimizations which require inverting
2131 // the branch condition.
2132 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2133 DAG.getBasicBlock(CB.FalseBB));
2134
2135 DAG.setRoot(BrCond);
2136}
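
Both the (X == false) fold and the fall-through inversion above rely on the same trick: an i1 value is inverted by XOR-ing it with the constant 1, avoiding a separate comparison node. In plain integer terms:

#include <cassert>

int main() {
  for (unsigned Cond = 0; Cond <= 1; ++Cond) {
    unsigned Inverted = Cond ^ 1u; // same effect as the ISD::XOR with constant 1
    assert(Inverted == (Cond == 0 ? 1u : 0u));
  }
}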
2137
2138/// visitJumpTable - Emit JumpTable node in the current MBB
2139void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
2140 // Emit the code for the jump table
2141 assert(JT.Reg != -1U && "Should lower JT Header first!");
2142 EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2143 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2144 JT.Reg, PTy);
2145 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2146 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2147 MVT::Other, Index.getValue(1),
2148 Table, Index);
2149 DAG.setRoot(BrJumpTable);
2150}
2151
2152/// visitJumpTableHeader - This function emits necessary code to produce index
2153/// in the JumpTable from switch case.
2154void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
2155 JumpTableHeader &JTH,
2156 MachineBasicBlock *SwitchBB) {
2157 SDLoc dl = getCurSDLoc();
2158
2159 // Subtract the lowest switch case value from the value being switched on and
2160 // conditional branch to default mbb if the result is greater than the
2161 // difference between smallest and largest cases.
2162 SDValue SwitchOp = getValue(JTH.SValue);
2163 EVT VT = SwitchOp.getValueType();
2164 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2165 DAG.getConstant(JTH.First, dl, VT));
2166
2167 // The SDNode we just created, which holds the value being switched on minus
2168 // the smallest case value, needs to be copied to a virtual register so it
2169 // can be used as an index into the jump table in a subsequent basic block.
2170 // This value may be smaller or larger than the target's pointer type, and
2171 // therefore require extension or truncating.
2172 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2173 SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2174
2175 unsigned JumpTableReg =
2176 FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2177 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2178 JumpTableReg, SwitchOp);
2179 JT.Reg = JumpTableReg;
2180
2181 // Emit the range check for the jump table, and branch to the default block
2182 // for the switch statement if the value being switched on exceeds the largest
2183 // case in the switch.
2184 SDValue CMP = DAG.getSetCC(
2185 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2186 Sub.getValueType()),
2187 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2188
2189 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2190 MVT::Other, CopyTo, CMP,
2191 DAG.getBasicBlock(JT.Default));
2192
2193 // Avoid emitting unnecessary branches to the next block.
2194 if (JT.MBB != NextBlock(SwitchBB))
2195 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2196 DAG.getBasicBlock(JT.MBB));
2197
2198 DAG.setRoot(BrCond);
2199}
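// Illustrative sketch, not part of the original source: visitJumpTable and
// visitJumpTableHeader above lower a dense switch as "index = value - first;
// branch to the default block if index is unsigned-greater-than last - first;
// otherwise dispatch through the table". The standalone helper below mirrors
// that arithmetic with plain integers; dispatchViaJumpTable and its parameters
// are hypothetical names used only for this example (assumes <cstdint>).

#include <cstdint>

static int dispatchViaJumpTable(int64_t Value, int64_t First, int64_t Last,
                                const int *TableTargets, int DefaultTarget) {
  // Rebase onto the smallest case value. The comparison is unsigned, so a
  // Value below First wraps around and also fails the range check.
  uint64_t Index = (uint64_t)Value - (uint64_t)First;
  uint64_t Range = (uint64_t)Last - (uint64_t)First;
  if (Index > Range)            // the ISD::SETUGT range check in the header
    return DefaultTarget;       // JT.Default
  return TableTargets[Index];   // the ISD::BR_JT dispatch through the table
}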
2200
2201/// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
2202/// variable if one exists.
2203static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2204 SDValue &Chain) {
2205 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2206 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2207 MachineFunction &MF = DAG.getMachineFunction();
2208 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2209 MachineSDNode *Node =
2210 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2211 if (Global) {
2212 MachinePointerInfo MPInfo(Global);
2213 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2214 MachineMemOperand::MODereferenceable;
2215 MachineMemOperand *MemRef = MF.getMachineMemOperand(
2216 MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlignment(PtrTy));
2217 DAG.setNodeMemRefs(Node, {MemRef});
2218 }
2219 return SDValue(Node, 0);
2220}
2221
2222/// Codegen a new tail for a stack protector check ParentMBB which has had its
2223/// tail spliced into a stack protector check success bb.
2224///
2225/// For a high level explanation of how this fits into the stack protector
2226/// generation see the comment on the declaration of class
2227/// StackProtectorDescriptor.
2228void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2229 MachineBasicBlock *ParentBB) {
2230
2231 // First create the loads to the guard/stack slot for the comparison.
2232 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2233 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2234
2235 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2236 int FI = MFI.getStackProtectorIndex();
2237
2238 SDValue Guard;
2239 SDLoc dl = getCurSDLoc();
2240 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2241 const Module &M = *ParentBB->getParent()->getFunction().getParent();
2242 unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2243
2244 // Generate code to load the content of the guard slot.
2245 SDValue GuardVal = DAG.getLoad(
2246 PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
2247 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2248 MachineMemOperand::MOVolatile);
2249
2250 if (TLI.useStackGuardXorFP())
2251 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2252
2253 // Retrieve guard check function, nullptr if instrumentation is inlined.
2254 if (const Value *GuardCheck = TLI.getSSPStackGuardCheck(M)) {
2255 // The target provides a guard check function to validate the guard value.
2256 // Generate a call to that function with the content of the guard slot as
2257 // argument.
2258 auto *Fn = cast<Function>(GuardCheck);
2259 FunctionType *FnTy = Fn->getFunctionType();
2260    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2261
2262 TargetLowering::ArgListTy Args;
2263 TargetLowering::ArgListEntry Entry;
2264 Entry.Node = GuardVal;
2265 Entry.Ty = FnTy->getParamType(0);
2266 if (Fn->hasAttribute(1, Attribute::AttrKind::InReg))
2267 Entry.IsInReg = true;
2268 Args.push_back(Entry);
2269
2270 TargetLowering::CallLoweringInfo CLI(DAG);
2271 CLI.setDebugLoc(getCurSDLoc())
2272 .setChain(DAG.getEntryNode())
2273 .setCallee(Fn->getCallingConv(), FnTy->getReturnType(),
2274 getValue(GuardCheck), std::move(Args));
2275
2276 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2277 DAG.setRoot(Result.second);
2278 return;
2279 }
2280
2281 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2282 // Otherwise, emit a volatile load to retrieve the stack guard value.
2283 SDValue Chain = DAG.getEntryNode();
2284 if (TLI.useLoadStackGuardNode()) {
2285 Guard = getLoadStackGuard(DAG, dl, Chain);
2286 } else {
2287 const Value *IRGuard = TLI.getSDagStackGuard(M);
2288 SDValue GuardPtr = getValue(IRGuard);
2289
2290 Guard =
2291 DAG.getLoad(PtrTy, dl, Chain, GuardPtr, MachinePointerInfo(IRGuard, 0),
2292 Align, MachineMemOperand::MOVolatile);
2293 }
2294
2295 // Perform the comparison via a subtract/getsetcc.
2296 EVT VT = Guard.getValueType();
2297 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, GuardVal);
2298
2299 SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2300 *DAG.getContext(),
2301 Sub.getValueType()),
2302 Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
2303
2304  // If the sub is not 0, then we know the guard and the stack slot are not
2305  // equal, so branch to the failure MBB.
2306 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2307 MVT::Other, GuardVal.getOperand(0),
2308 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2309 // Otherwise branch to success MBB.
2310 SDValue Br = DAG.getNode(ISD::BR, dl,
2311 MVT::Other, BrCond,
2312 DAG.getBasicBlock(SPD.getSuccessMBB()));
2313
2314 DAG.setRoot(Br);
2315}
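// Illustrative sketch, not part of the original source: when no guard-check
// function is provided, the code above loads the canary from the stack slot,
// loads the reference guard value, subtracts the two and branches to the
// failure block on a non-zero result. The hypothetical helper below shows
// that comparison with plain integers (assumes <cstdint>).

#include <cstdint>

static bool stackGuardMismatch(uint64_t GuardFromSlot, uint64_t ReferenceGuard) {
  // Mirrors the ISD::SUB + ISD::SETNE sequence; true means "branch to FailureMBB".
  return (GuardFromSlot - ReferenceGuard) != 0;
}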
2316
2317/// Codegen the failure basic block for a stack protector check.
2318///
2319/// A failure stack protector machine basic block consists simply of a call to
2320/// __stack_chk_fail().
2321///
2322/// For a high level explanation of how this fits into the stack protector
2323/// generation see the comment on the declaration of class
2324/// StackProtectorDescriptor.
2325void
2326SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2327 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2328 SDValue Chain =
2329 TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2330 None, false, getCurSDLoc(), false, false).second;
2331 DAG.setRoot(Chain);
2332}
2333
2334/// visitBitTestHeader - This function emits the necessary code to produce a
2335/// value suitable for "bit tests".
2336void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2337 MachineBasicBlock *SwitchBB) {
2338 SDLoc dl = getCurSDLoc();
2339
2340 // Subtract the minimum value
2341 SDValue SwitchOp = getValue(B.SValue);
2342 EVT VT = SwitchOp.getValueType();
2343 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2344 DAG.getConstant(B.First, dl, VT));
2345
2346 // Check range
2347 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2348 SDValue RangeCmp = DAG.getSetCC(
2349 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2350 Sub.getValueType()),
2351 Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
2352
2353 // Determine the type of the test operands.
2354 bool UsePtrType = false;
2355 if (!TLI.isTypeLegal(VT))
2356 UsePtrType = true;
2357 else {
2358 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2359 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2360        // Switch table case ranges are encoded into a series of masks.
2361 // Just use pointer type, it's guaranteed to fit.
2362 UsePtrType = true;
2363 break;
2364 }
2365 }
2366 if (UsePtrType) {
2367 VT = TLI.getPointerTy(DAG.getDataLayout());
2368 Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2369 }
2370
2371 B.RegVT = VT.getSimpleVT();
2372 B.Reg = FuncInfo.CreateReg(B.RegVT);
2373 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2374
2375 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2376
2377 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2378 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2379 SwitchBB->normalizeSuccProbs();
2380
2381 SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
2382 MVT::Other, CopyTo, RangeCmp,
2383 DAG.getBasicBlock(B.Default));
2384
2385 // Avoid emitting unnecessary branches to the next block.
2386 if (MBB != NextBlock(SwitchBB))
2387 BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
2388 DAG.getBasicBlock(MBB));
2389
2390 DAG.setRoot(BrRange);
2391}
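// Illustrative sketch, not part of the original source: the UsePtrType
// decision above falls back to the pointer-sized type when the operand type
// is not legal or some case mask does not fit into it. The hypothetical
// helper below spells out the fit test that isUIntN performs for a 64-bit
// mask value (assumes <cstdint>).

#include <cstdint>

static bool maskFitsInType(uint64_t Mask, unsigned Bits) {
  if (Bits >= 64)
    return true;                // any 64-bit mask fits in 64 or more bits
  return (Mask >> Bits) == 0;   // no bits set at or above position Bits
}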
2392
2393/// visitBitTestCase - This function produces one "bit test".
2394void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2395 MachineBasicBlock* NextMBB,
2396 BranchProbability BranchProbToNext,
2397 unsigned Reg,
2398 BitTestCase &B,
2399 MachineBasicBlock *SwitchBB) {
2400 SDLoc dl = getCurSDLoc();
2401 MVT VT = BB.RegVT;
2402 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2403 SDValue Cmp;
2404 unsigned PopCount = countPopulation(B.Mask);
2405 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2406 if (PopCount == 1) {
2407 // Testing for a single bit; just compare the shift count with what it
2408 // would need to be to shift a 1 bit in that position.
2409 Cmp = DAG.getSetCC(
2410 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2411 ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2412 ISD::SETEQ);
2413 } else if (PopCount == BB.Range) {
2414 // There is only one zero bit in the range, test for it directly.
2415 Cmp = DAG.getSetCC(
2416 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2417 ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2418 ISD::SETNE);
2419 } else {
2420 // Make desired shift
2421 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2422 DAG.getConstant(1, dl, VT), ShiftOp);
2423
2424 // Emit bit tests and jumps
2425 SDValue AndOp = DAG.getNode(ISD::AND, dl,
2426 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2427 Cmp = DAG.getSetCC(
2428 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2429 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2430 }
2431
2432 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2433 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2434 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2435 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2436 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
2437 // one as they are relative probabilities (and thus work more like weights),
2438 // and hence we need to normalize them to let the sum of them become one.
2439 SwitchBB->normalizeSuccProbs();
2440
2441 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2442 MVT::Other, getControlRoot(),
2443 Cmp, DAG.getBasicBlock(B.TargetBB));
2444
2445 // Avoid emitting unnecessary branches to the next block.
2446 if (NextMBB != NextBlock(SwitchBB))
2447 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2448 DAG.getBasicBlock(NextMBB));
2449
2450 DAG.setRoot(BrAnd);
2451}
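// Illustrative sketch, not part of the original source: one bit test takes
// the already-rebased switch value (value - B.First) and checks whether it
// selects a case belonging to this target block, either by comparing against
// the single set bit's position or by testing (1 << value) against the mask.
// The hypothetical helper below uses the GCC/Clang builtins in place of
// LLVM's countPopulation/countTrailingZeros and assumes ShiftVal < 64, which
// the range check in the bit-test header guarantees (assumes <cstdint>).

#include <cstdint>

static bool bitTestHits(uint64_t ShiftVal, uint64_t Mask) {
  if (__builtin_popcountll(Mask) == 1)                     // single case: compare
    return ShiftVal == (uint64_t)__builtin_ctzll(Mask);    // the shift count directly
  return ((1ULL << ShiftVal) & Mask) != 0;                 // general SHL + AND + SETNE
}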
2452
2453void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2454 MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2455
2456 // Retrieve successors. Look through artificial IR level blocks like
2457 // catchswitch for successors.
2458 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2459 const BasicBlock *EHPadBB = I.getSuccessor(1);
2460
2461 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2462 // have to do anything here to lower funclet bundles.
2463  assert(!I.hasOperandBundlesOtherThan(
2464             {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2465         "Cannot lower invokes with arbitrary operand bundles yet!");
2466
2467 const Value *Callee(I.getCalledValue());
2468 const Function *Fn = dyn_cast<Function>(Callee);
2469 if (isa<InlineAsm>(Callee))
2470 visitInlineAsm(&I);
2471 else if (Fn && Fn->isIntrinsic()) {
2472 switch (Fn->getIntrinsicID()) {
2473 default:
2474 llvm_unreachable("Cannot invoke this intrinsic")::llvm::llvm_unreachable_internal("Cannot invoke this intrinsic"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2474)
;
2475 case Intrinsic::donothing:
2476 // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2477 break;
2478 case Intrinsic::experimental_patchpoint_void:
2479 case Intrinsic::experimental_patchpoint_i64:
2480 visitPatchpoint(&I, EHPadBB);
2481 break;
2482 case Intrinsic::experimental_gc_statepoint:
2483 LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
2484 break;
2485 }
2486 } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2487 // Currently we do not lower any intrinsic calls with deopt operand bundles.
2488 // Eventually we will support lowering the @llvm.experimental.deoptimize
2489 // intrinsic, and right now there are no plans to support other intrinsics
2490 // with deopt state.
2491 LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2492 } else {
2493 LowerCallTo(&I, getValue(Callee), false, EHPadBB);
2494 }
2495
2496 // If the value of the invoke is used outside of its defining block, make it
2497 // available as a virtual register.
2498 // We already took care of the exported value for the statepoint instruction
2499  // during the call to LowerStatepoint.
2500 if (!isStatepoint(I)) {
2501 CopyToExportRegsIfNeeded(&I);
2502 }
2503
2504 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2505 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2506 BranchProbability EHPadBBProb =
2507 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2508 : BranchProbability::getZero();
2509 findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2510
2511 // Update successor info.
2512 addSuccessorWithProb(InvokeMBB, Return);
2513 for (auto &UnwindDest : UnwindDests) {
2514 UnwindDest.first->setIsEHPad();
2515 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2516 }
2517 InvokeMBB->normalizeSuccProbs();
2518
2519 // Drop into normal successor.
2520 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2521 MVT::Other, getControlRoot(),
2522 DAG.getBasicBlock(Return)));
2523}
2524
2525void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2526 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!")::llvm::llvm_unreachable_internal("SelectionDAGBuilder shouldn't visit resume instructions!"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp"
, 2526)
;
2527}
2528
2529void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2530  assert(FuncInfo.MBB->isEHPad() &&
2531         "Call to landingpad not in landing pad!");
2532
2533 // If there aren't registers to copy the values into (e.g., during SjLj
2534 // exceptions), then don't bother to create these DAG nodes.
2535 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2536 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2537 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2538 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2539 return;
2540
2541 // If landingpad's return type is token type, we don't create DAG nodes
2542 // for its exception pointer and selector value. The extraction of exception
2543 // pointer or selector value from token type landingpads is not currently
2544 // supported.
2545 if (LP.getType()->isTokenTy())
2546 return;
2547
2548 SmallVector<EVT, 2> ValueVTs;
2549 SDLoc dl = getCurSDLoc();
2550 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2551  assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2552
2553 // Get the two live-in registers as SDValues. The physregs have already been
2554 // copied into virtual registers.
2555 SDValue Ops[2];
2556 if (FuncInfo.ExceptionPointerVirtReg) {
2557 Ops[0] = DAG.getZExtOrTrunc(
2558 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2559 FuncInfo.ExceptionPointerVirtReg,
2560 TLI.getPointerTy(DAG.getDataLayout())),
2561 dl, ValueVTs[0]);
2562 } else {
2563 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2564 }
2565 Ops[1] = DAG.getZExtOrTrunc(
2566 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2567 FuncInfo.ExceptionSelectorVirtReg,
2568 TLI.getPointerTy(DAG.getDataLayout())),
2569 dl, ValueVTs[1]);
2570
2571 // Merge into one.
2572 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2573 DAG.getVTList(ValueVTs), Ops);
2574 setValue(&LP, Res);
2575}
2576
2577void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
2578#ifndef NDEBUG
2579 for (const CaseCluster &CC : Clusters)
2580    assert(CC.Low == CC.High && "Input clusters must be single-case");
2581#endif
2582
2583 llvm::sort(Clusters, [](const CaseCluster &a, const CaseCluster &b) {
2584 return a.Low->getValue().slt(b.Low->getValue());
2585 });
2586
2587 // Merge adjacent clusters with the same destination.
2588 const unsigned N = Clusters.size();
2589 unsigned DstIndex = 0;
2590 for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
2591 CaseCluster &CC = Clusters[SrcIndex];
2592 const ConstantInt *CaseVal = CC.Low;
2593 MachineBasicBlock *Succ = CC.MBB;
2594
2595 if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
2596 (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
2597 // If this case has the same successor and is a neighbour, merge it into
2598 // the previous cluster.
2599 Clusters[DstIndex - 1].High = CaseVal;
2600 Clusters[DstIndex - 1].Prob += CC.Prob;
2601 } else {
2602 std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
2603 sizeof(Clusters[SrcIndex]));
2604 }
2605 }
2606 Clusters.resize(DstIndex);
2607}
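// Illustrative sketch, not part of the original source: sortAndRangeify sorts
// single-case clusters by value and then folds neighbouring cases with the
// same destination into ranges. The standalone version below repeats that
// logic on a plain struct (Dest stands in for the MachineBasicBlock and the
// branch probabilities are omitted); all names are hypothetical and only the
// standard library is assumed.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct SimpleCluster { int64_t Low, High; int Dest; };

static void sortAndMergeClusters(std::vector<SimpleCluster> &Clusters) {
  std::sort(Clusters.begin(), Clusters.end(),
            [](const SimpleCluster &A, const SimpleCluster &B) {
              return A.Low < B.Low;
            });
  std::size_t Dst = 0;
  for (std::size_t Src = 0; Src < Clusters.size(); ++Src) {
    if (Dst != 0 && Clusters[Dst - 1].Dest == Clusters[Src].Dest &&
        Clusters[Src].Low - Clusters[Dst - 1].High == 1)
      Clusters[Dst - 1].High = Clusters[Src].High; // extend the previous range
    else
      Clusters[Dst++] = Clusters[Src];             // start a new cluster
  }
  Clusters.resize(Dst);
}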
2608
2609void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2610 MachineBasicBlock *Last) {
2611 // Update JTCases.
2612 for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2613 if (JTCases[i].first.HeaderBB == First)
2614 JTCases[i].first.HeaderBB = Last;
2615
2616 // Update BitTestCases.
2617 for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2618 if (BitTestCases[i].Parent == First)
2619 BitTestCases[i].Parent = Last;
2620}
2621
2622void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2623 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2624
2625 // Update machine-CFG edges with unique successors.
2626 SmallSet<BasicBlock*, 32> Done;
2627 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2628 BasicBlock *BB = I.getSuccessor(i);
2629 bool Inserted = Done.insert(BB).second;
2630 if (!Inserted)
2631 continue;
2632
2633 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2634 addSuccessorWithProb(IndirectBrMBB, Succ);
2635 }
2636 IndirectBrMBB->normalizeSuccProbs();
2637
2638 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2639 MVT::Other, getControlRoot(),
2640 getValue(I.getAddress())));
2641}
2642
2643void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2644 if (!DAG.getTarget().Options.TrapUnreachable)
2645 return;
2646
2647 // We may be able to ignore unreachable behind a noreturn call.
2648 if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
2649 const BasicBlock &BB = *I.getParent();
2650 if (&I != &BB.front()) {
2651 BasicBlock::const_iterator PredI =
2652 std::prev(BasicBlock::const_iterator(&I));
2653 if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2654 if (Call->doesNotReturn())
2655 return;
2656 }
2657 }
2658 }
2659
2660 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2661}
2662
2663void SelectionDAGBuilder::visitFSub(const User &I) {
2664 // -0.0 - X --> fneg
2665 Type *Ty = I.getType();
2666 if (isa<Constant>(I.getOperand(0)) &&
2667 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2668 SDValue Op2 = getValue(I.getOperand(1));
2669 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2670 Op2.getValueType(), Op2));
2671 return;
2672 }
2673
2674 visitBinary(I, ISD::FSUB);
2675}
2676
2677/// Checks if the given instruction performs a vector reduction, in which case
2678/// we have the freedom to alter the elements in the result as long as the
2679/// reduction of them stays unchanged.
2680static bool isVectorReductionOp(const User *I) {
2681 const Instruction *Inst = dyn_cast<Instruction>(I);
2682 if (!Inst || !Inst->getType()->isVectorTy())
2683 return false;
2684
2685 auto OpCode = Inst->getOpcode();
2686 switch (OpCode) {
2687 case Instruction::Add:
2688 case Instruction::Mul:
2689 case Instruction::And:
2690 case Instruction::Or:
2691 case Instruction::Xor:
2692 break;
2693 case Instruction::FAdd:
2694 case Instruction::FMul:
2695 if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2696 if (FPOp->getFastMathFlags().isFast())
2697 break;
2698    LLVM_FALLTHROUGH;
2699 default:
2700 return false;
2701 }
2702
2703 unsigned ElemNum = Inst->getType()->getVectorNumElements();
2704 // Ensure the reduction size is a power of 2.
2705 if (!isPowerOf2_32(ElemNum))
2706 return false;
2707
2708 unsigned ElemNumToReduce = ElemNum;
2709
2710 // Do DFS search on the def-use chain from the given instruction. We only
2711 // allow four kinds of operations during the search until we reach the
2712 // instruction that extracts the first element from the vector:
2713 //
2714 // 1. The reduction operation of the same opcode as the given instruction.
2715 //
2716 // 2. PHI node.
2717 //
2718 // 3. ShuffleVector instruction together with a reduction operation that
2719 // does a partial reduction.
2720 //
2721 // 4. ExtractElement that extracts the first element from the vector, and we
2722 // stop searching the def-use chain here.
2723 //
2724 // 3 & 4 above perform a reduction on all elements of the vector. We push defs
2725 // from 1-3 to the stack to continue the DFS. The given instruction is not
2726  // a reduction operation if we meet any instructions other than those
2727 // listed above.
2728
2729 SmallVector<const User *, 16> UsersToVisit{Inst};
2730 SmallPtrSet<const User *, 16> Visited;
2731 bool ReduxExtracted = false;
2732
2733 while (!UsersToVisit.empty()) {
2734 auto User = UsersToVisit.back();
2735 UsersToVisit.pop_back();
2736 if (!Visited.insert(User).second)
2737 continue;
2738
2739 for (const auto &U : User->users()) {
2740 auto Inst = dyn_cast<Instruction>(U);
2741 if (!Inst)
2742 return false;
2743
2744 if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
2745 if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2746 if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast())
2747 return false;
2748 UsersToVisit.push_back(U);
2749 } else if (const ShuffleVectorInst *ShufInst =
2750 dyn_cast<ShuffleVectorInst>(U)) {
2751 // Detect the following pattern: A ShuffleVector instruction together
2752        // with a reduction that does a partial reduction on the first and second
2753 // ElemNumToReduce / 2 elements, and store the result in
2754 // ElemNumToReduce / 2 elements in another vector.
2755
2756 unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
2757 if (ResultElements < ElemNum)
2758 return false;
2759
2760 if (ElemNumToReduce == 1)
2761 return false;
2762 if (!isa<UndefValue>(U->getOperand(1)))
2763 return false;
2764 for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
2765 if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
2766 return false;
2767 for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
2768 if (ShufInst->getMaskValue(i) != -1)
2769 return false;
2770
2771 // There is only one user of this ShuffleVector instruction, which
2772 // must be a reduction operation.
2773 if (!U->hasOneUse())
2774 return false;
2775
2776 auto U2 = dyn_cast<Instruction>(*U->user_begin());
2777 if (!U2 || U2->getOpcode() != OpCode)
2778 return false;
2779
2780 // Check operands of the reduction operation.
2781 if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
2782 (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
2783 UsersToVisit.push_back(U2);
2784 ElemNumToReduce /= 2;
2785 } else
2786 return false;
2787 } else if (isa<ExtractElementInst>(U)) {
2788 // At this moment we should have reduced all elements in the vector.
2789 if (ElemNumToReduce != 1)
2790 return false;
2791
2792 const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
2793 if (!Val || !Val->isZero())
2794 return false;
2795
2796 ReduxExtracted = true;
2797 } else
2798 return false;
2799 }
2800 }
2801 return ReduxExtracted;
2802}
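// Illustrative sketch, not part of the original source: the pattern the
// function above detects is a power-of-two sized vector being reduced by
// repeatedly adding its upper half onto its lower half (the shufflevector +
// add pairs), with only lane 0 of the final vector extracted. The
// hypothetical helper below shows the same pairwise halving on a plain
// std::vector<int> (assumes <cstddef> and <vector>).

#include <cstddef>
#include <vector>

static int pairwiseReduceAdd(std::vector<int> V) {
  // Assumes V.size() is a power of two, matching the isPowerOf2_32 check above.
  for (std::size_t Half = V.size() / 2; Half > 0; Half /= 2)
    for (std::size_t I = 0; I < Half; ++I)
      V[I] += V[I + Half];       // one shufflevector + add halving step
  return V.empty() ? 0 : V[0];   // the final extractelement of lane 0
}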
2803
2804void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
2805 SDNodeFlags Flags;
2806 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
2807 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
2808 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
2809 }
2810 if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
2811 Flags.setExact(ExactOp->isExact());
2812 }
2813 if (isVectorReductionOp(&I)) {
2814 Flags.setVectorReduction(true);
2815    LLVM_DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");
2816 }
2817
2818 SDValue Op1 = getValue(I.getOperand(0));
2819 SDValue Op2 = getValue(I.getOperand(1));
2820 SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
2821 Op1, Op2, Flags);
2822 setValue(&I, BinNodeValue);
2823}
2824
2825void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2826 SDValue Op1 = getValue(I.getOperand(0));
2827 SDValue Op2 = getValue(I.getOperand(1));
2828
2829 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
2830 Op1.getValueType(), DAG.getDataLayout());
2831
2832 // Coerce the shift amount to the right type if we can.
2833 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2834 unsigned ShiftSize = ShiftTy.getSizeInBits();
2835 unsigned Op2Size = Op2.getValueSizeInBits();
2836 SDLoc DL = getCurSDLoc();
2837
2838 // If the operand is smaller than the shift count type, promote it.
2839 if (ShiftSize > Op2Size)
2840 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2841
2842 // If the operand is larger than the shift count type but the shift
2843 // count type has enough bits to represent any shift value, truncate
2844 // it now. This is a common case and it exposes the truncate to
2845 // optimization early.
2846 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
2847 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2848 // Otherwise we'll need to temporarily settle for some other convenient
2849 // type. Type legalization will make adjustments once the shiftee is split.
2850 else
2851 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2852 }
2853
2854 bool nuw = false;
2855 bool nsw = false;
2856 bool exact = false;
2857
2858 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2859
2860 if (const OverflowingBinaryOperator *OFBinOp =
2861 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2862 nuw = OFBinOp->hasNoUnsignedWrap();
2863 nsw = OFBinOp->hasNoSignedWrap();
2864 }
2865 if (const PossiblyExactOperator *ExactOp =
2866 dyn_cast<const PossiblyExactOperator>(&I))
2867 exact = ExactOp->isExact();
2868 }
2869 SDNodeFlags Flags;
2870 Flags.setExact(exact);
2871 Flags.setNoSignedWrap(nsw);
2872 Flags.setNoUnsignedWrap(nuw);
2873 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2874 Flags);
2875 setValue(&I, Res);
2876}
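// Illustrative sketch, not part of the original source: the comment above
// explains that truncating the shift amount is safe when the shift-amount
// type can still represent any in-range shift count for the value being
// shifted. The hypothetical helper below spells out that condition: a
// ShiftBits-wide amount type is wide enough for a ValueBits-wide value
// exactly when ShiftBits >= ceil(log2(ValueBits)).

static bool shiftAmountTypeIsWideEnough(unsigned ShiftBits, unsigned ValueBits) {
  unsigned Needed = 0;
  while ((1u << Needed) < ValueBits) // Needed becomes ceil(log2(ValueBits))
    ++Needed;
  return ShiftBits >= Needed;        // e.g. 6 bits suffice for a 64-bit value
}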
2877
2878void SelectionDAGBuilder::visitSDiv(const User &I) {
2879 SDValue Op1 = getValue(I.getOperand(0));
2880 SDValue Op2 = getValue(I.getOperand(1));
2881
2882 SDNodeFlags Flags;
2883 Flags.setExact(isa<PossiblyExactOperator>(&I) &&
2884 cast<PossiblyExactOperator>(&I)->isExact());
2885 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
2886 Op2, Flags));
2887}
2888
2889void SelectionDAGBuilder::visitICmp(const User &I) {
2890 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2891 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2892 predicate = IC->getPredicate();
2893 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2894 predicate = ICmpInst::Predicate(IC->getPredicate());
2895 SDValue Op1 = getValue(I.getOperand(0));
2896 SDValue Op2 = getValue(I.getOperand(1));
2897 ISD::CondCode Opcode = getICmpCondCode(predicate);
2898
2899 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2900 I.getType());
2901 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2902}
2903
2904void SelectionDAGBuilder::visitFCmp(const User &I) {
2905 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2906 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2907 predicate = FC->getPredicate();
2908 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2909 predicate = FCmpInst::Predicate(FC->getPredicate());
2910 SDValue Op1 = getValue(I.getOperand(0));
2911 SDValue Op2 = getValue(I.getOperand(1));
2912
2913 ISD::CondCode Condition = getFCmpCondCode(predicate);
2914 auto *FPMO = dyn_cast<FPMathOperator>(&I);
2915 if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
2916 Condition = getFCmpCodeWithoutNaN(Condition);
2917
2918 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2919 I.getType());
2920 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2921}
2922
2923// Check that every user of the select's condition is itself a select
2924// instruction (i.e. the compare feeds only selects).
2925static bool hasOnlySelectUsers(const Value *Cond) {
2926 return llvm::all_of(Cond->users(), [](const Value *V) {
2927 return isa<SelectInst>(V);
2928 });
2929}
2930
2931void SelectionDAGBuilder::visitSelect(const User &I) {
2932 SmallVector<EVT, 4> ValueVTs;
2933 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
2934 ValueVTs);
2935 unsigned NumValues = ValueVTs.size();
2936 if (NumValues == 0) return;
2937
2938 SmallVector<SDValue, 4> Values(NumValues);
2939 SDValue Cond = getValue(I.getOperand(0));
2940 SDValue LHSVal = getValue(I.getOperand(1));
2941 SDValue RHSVal = getValue(I.getOperand(2));
2942 auto BaseOps = {Cond};
2943 ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2944 ISD::VSELECT : ISD::SELECT;
2945
2946 // Min/max matching is only viable if all output VTs are the same.
2947 if (is_splat(ValueVTs)) {
2948 EVT VT = ValueVTs[0];
2949 LLVMContext &Ctx = *DAG.getContext();
2950 auto &TLI = DAG.getTargetLoweringInfo();
2951
2952 // We care about the legality of the operation after it has been type
2953 // legalized.
2954 while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal &&
2955 VT != TLI.getTypeToTransformTo(Ctx, VT))
2956 VT = TLI.getTypeToTransformTo(Ctx, VT);
2957
2958 // If the vselect is legal, assume we want to leave this as a vector setcc +
2959 // vselect. Otherwise, if this is going to be scalarized, we want to see if
2960 // min/max is legal on the scalar type.
2961 bool UseScalarMinMax = VT.isVector() &&
2962 !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
2963
2964 Value *LHS, *RHS;
2965 auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
2966 ISD::NodeType Opc = ISD::DELETED_NODE;
2967 switch (SPR.Flavor) {
2968 case SPF_UMAX: Opc = ISD::UMAX; break;
2969 case SPF_UMIN: Opc = ISD::UMIN; break;
2970 case SPF_SMAX: Opc = ISD::SMAX; break;
2971 case SPF_SMIN: Opc = ISD::SMIN; break;
2972 case SPF_FMINNUM:
2973 switch (SPR.NaNBehavior) {
2974      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2975 case SPNB_RETURNS_NAN: Opc = ISD::FMINIMUM; break;
2976 case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
2977 case SPNB_RETURNS_ANY: {
2978 if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
2979 Opc = ISD::FMINNUM;
2980 else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
2981 Opc = ISD::FMINIMUM;
2982 else if (UseScalarMinMax)
2983 Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
2984 ISD::FMINNUM : ISD::FMINIMUM;
2985 break;
2986 }
2987 }
2988 break;
2989 case SPF_FMAXNUM:
2990 switch (SPR.NaNBehavior) {
2991      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2992 case SPNB_RETURNS_NAN: Opc = ISD::FMAXIMUM; break;
2993 case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
2994 case SPNB_RETURNS_ANY:
2995
2996 if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
2997 Opc = ISD::FMAXNUM;
2998 else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
2999 Opc = ISD::FMAXIMUM;
3000 else if (UseScalarMinMax)
3001 Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
3002 ISD::FMAXNUM : ISD::FMAXIMUM;
3003 break;
3004 }
3005 break;
3006 default: break;
3007 }
3008
3009 if (Opc != ISD::DELETED_NODE &&
3010 (TLI.isOperationLegalOrCustom(Opc, VT) ||
3011 (UseScalarMinMax &&
3012 TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3013 // If the underlying comparison instruction is used by any other
3014 // instruction, the consumed instructions won't be destroyed, so it is
3015 // not profitable to convert to a min/max.
3016 hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3017 OpCode = Opc;
3018 LHSVal = getValue(LHS);
3019 RHSVal = getValue(RHS);
3020 BaseOps = {};
3021 }
3022 }
3023
3024 for (unsigned i = 0; i != NumValues; ++i) {
3025 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3026 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3027 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3028 Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
3029 LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
3030 Ops);
3031 }
3032
3033 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3034 DAG.getVTList(ValueVTs), Values));
3035}
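// Illustrative sketch, not part of the original source: matchSelectPattern in
// the function above recognizes a compare whose result feeds a select of the
// compare's own operands and classifies it as a min/max flavor, so a single
// min/max node can replace the setcc + select pair. The hypothetical helper
// below shows the scalar shape of one such pattern (assumes <cstdint>).

#include <cstdint>

static int64_t signedMinViaSelect(int64_t A, int64_t B) {
  bool Cond = A < B;    // the ICmp SLT feeding the select
  return Cond ? A : B;  // the select; together these act as an SMIN of A and B
}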
3036
3037void SelectionDAGBuilder::visitTrunc(const User &I) {
3038 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3039 SDValue N = getValue(I.getOperand(0));
3040 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3041 I.getType());
3042 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3043}
3044
3045void SelectionDAGBuilder::visitZExt(const User &I) {
3046 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3047  // ZExt also can't be a cast to bool for the same reason, so there is nothing much to do.
3048 SDValue N = getValue(I.getOperand(0));
3049 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3050 I.getType());
3051 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3052}
3053
3054void SelectionDAGBuilder::visitSExt(const User &I) {
3055 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3056  // SExt also can't be a cast to bool for the same reason, so there is nothing much to do.
3057 SDValue N = getValue(I.getOperand(0));
3058 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3059 I.getType());
3060 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3061}
3062
3063void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3064 // FPTrunc is never a no-op cast, no need to check
3065 SDValue N = getValue(I.getOperand(0));
3066 SDLoc dl = getCurSDLoc();
3067 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3068 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3069 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3070 DAG.getTargetConstant(
3071 0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3072}
3073
3074void SelectionDAGBuilder::visitFPExt(const User &I) {
3075 // FPExt is never a no-op cast, no need to check
3076 SDValue N = getValue(I.getOperand(0));
3077 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3078 I.getType());
3079 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3080}
3081
3082void SelectionDAGBuilder::visitFPToUI(const User &I) {
3083 // FPToUI is never a no-op cast, no need to check
3084 SDValue N = getValue(I.getOperand(0));
3085 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3086 I.getType());
3087 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3088}
3089
3090void SelectionDAGBuilder::visitFPToSI(const User &I) {
3091 // FPToSI is never a no-op cast, no need to check
3092 SDValue N = getValue(I.getOperand(0));
3093 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3094 I.getType());
3095 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3096}
3097
3098void SelectionDAGBuilder::visitUIToFP(const User &I) {
3099 // UIToFP is never a no-op cast, no need to check
3100 SDValue N = getValue(I.getOperand(0));
3101 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3102 I.getType());
3103 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3104}
3105
3106void SelectionDAGBuilder::visitSIToFP(const User &I) {
3107 // SIToFP is never a no-op cast, no need to check
3108 SDValue N = getValue(I.getOperand(0));
3109 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3110 I.getType());
3111 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3112}
3113
3114void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3115 // What to do depends on the size of the integer and the size of the pointer.
3116 // We can either truncate, zero extend, or no-op, accordingly.
3117 SDValue N = getValue(I.getOperand(0));
3118 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3119 I.getType());
3120 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3121}
3122
3123void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3124 // What to do depends on the size of the integer and the size of the pointer.
3125 // We can either truncate, zero extend, or no-op, accordingly.
3126 SDValue N = getValue(I.getOperand(0));
3127 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3128 I.getType());
3129 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3130}
3131
3132void SelectionDAGBuilder::visitBitCast(const User &I) {
3133 SDValue N = getValue(I.getOperand(0));
3134 SDLoc dl = getCurSDLoc();
3135 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3136 I.getType());
3137
3138 // BitCast assures us that source and destination are the same size so this is
3139 // either a BITCAST or a no-op.
3140 if (DestVT != N.getValueType())
3141 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3142 DestVT, N)); // convert types.
3143 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3144 // might fold any kind of constant expression to an integer constant and that
3145 // is not what we are looking for. Only recognize a bitcast of a genuine
3146 // constant integer as an opaque constant.
3147 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3148 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3149 /*isOpaque*/true));
3150 else
3151 setValue(&I, N); // noop cast.
3152}
3153
3154void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3155 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3156 const Value *SV = I.getOperand(0);
3157 SDValue N = getValue(SV);
3158 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3159
3160 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3161 unsigned DestAS = I.getType()->getPointerAddressSpace();
3162
3163 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3164 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3165
3166 setValue(&I, N);
3167}
3168
3169void SelectionDAGBuilder::visitInsertElement(const User &I) {
3170 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3171 SDValue InVec = getValue(I.getOperand(0));
3172 SDValue InVal = getValue(I.getOperand(1));
3173 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3174 TLI.getVectorIdxTy(DAG.getDataLayout()));
3175 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3176 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3177 InVec, InVal, InIdx));
3178}
3179
3180void SelectionDAGBuilder::visitExtractElement(const User &I) {
3181 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3182 SDValue InVec = getValue(I.getOperand(0));
3183 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3184 TLI.getVectorIdxTy(DAG.getDataLayout()));
3185 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3186 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3187 InVec, InIdx));
3188}
3189
3190void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3191 SDValue Src1 = getValue(I.getOperand(0));
3192 SDValue Src2 = getValue(I.getOperand(1));
3193 SDLoc DL = getCurSDLoc();
3194
3195 SmallVector<int, 8> Mask;
3196 ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
3197 unsigned MaskNumElts = Mask.size();
3198
3199 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3200 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3201 EVT SrcVT = Src1.getValueType();
3202 unsigned SrcNumElts = SrcVT.getVectorNumElements();
3203
3204 if (SrcNumElts == MaskNumElts) {
3205 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3206 return;
3207 }
3208
3209 // Normalize the shuffle vector since mask and vector length don't match.
3210 if (SrcNumElts < MaskNumElts) {
3211 // Mask is longer than the source vectors. We can use concatenate vector to
3212    // make the mask and vector lengths match.
3213
3214 if (MaskNumElts % SrcNumElts == 0) {
3215 // Mask length is a multiple of the source vector length.
3216 // Check if the shuffle is some kind of concatenation of the input
3217 // vectors.
3218 unsigned NumConcat = MaskNumElts / SrcNumElts;
3219 bool IsConcat = true;
3220 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3221 for (unsigned i = 0; i != MaskNumElts; ++i) {
3222 int Idx = Mask[i];
3223 if (Idx < 0)
3224 continue;
3225 // Ensure the indices in each SrcVT sized piece are sequential and that
3226 // the same source is used for the whole piece.
3227 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3228 (ConcatSrcs[i / SrcNumElts] >= 0 &&
3229 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3230 IsConcat = false;
3231 break;
3232 }
3233 // Remember which source this index came from.
3234 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3235 }
3236
3237 // The shuffle is concatenating multiple vectors together. Just emit
3238 // a CONCAT_VECTORS operation.
3239 if (IsConcat) {
3240 SmallVector<SDValue, 8> ConcatOps;
3241 for (auto Src : ConcatSrcs) {
3242 if (Src < 0)
3243 ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3244 else if (Src == 0)
3245 ConcatOps.push_back(Src1);
3246 else
3247 ConcatOps.push_back(Src2);
3248 }
3249 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3250 return;
3251 }
3252 }
3253
3254 unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3255 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3256 EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3257 PaddedMaskNumElts);
3258
3259 // Pad both vectors with undefs to make them the same length as the mask.
3260 SDValue UndefVal = DAG.getUNDEF(SrcVT);
3261
3262 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3263 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3264 MOps1[0] = Src1;
3265 MOps2[0] = Src2;
3266
3267 Src1 = Src1.isUndef()
3268 ? DAG.getUNDEF(PaddedVT)
3269 : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3270 Src2 = Src2.isUndef()
3271 ? DAG.getUNDEF(PaddedVT)
3272 : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3273
3274 // Readjust mask for new input vector length.
3275 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3276 for (unsigned i = 0; i != MaskNumElts; ++i) {
3277 int Idx = Mask[i];
3278 if (Idx >= (int)SrcNumElts)
3279 Idx -= SrcNumElts - PaddedMaskNumElts;
3280 MappedOps[i] = Idx;
3281 }
3282
3283 SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3284
3285 // If the concatenated vector was padded, extract a subvector with the
3286 // correct number of elements.
3287 if (MaskNumElts != PaddedMaskNumElts)
3288 Result = DAG.getNode(
3289 ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3290 DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
3291
3292 setValue(&I, Result);
3293 return;
3294 }
3295
3296 if (SrcNumElts > MaskNumElts) {
3297 // Analyze the access pattern of the vector to see if we can extract
3298 // two subvectors and do the shuffle.
3299 int StartIdx[2] = { -1, -1 }; // StartIdx to extract from
3300 bool CanExtract = true;
3301 for (int Idx : Mask) {
3302 unsigned Input = 0;
3303 if (Idx < 0)
3304 continue;
3305
3306 if (Idx >= (int)SrcNumElts) {
3307 Input = 1;
3308 Idx -= SrcNumElts;
3309 }
3310
3311 // If all the indices come from the same MaskNumElts sized portion of
3312 // the sources we can use extract. Also make sure the extract wouldn't
3313 // extract past the end of the source.
3314 int NewStartIdx = alignDown(Idx, MaskNumElts);
3315 if (NewStartIdx + MaskNumElts > SrcNumElts ||
3316 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3317 CanExtract = false;
3318 // Make sure we always update StartIdx as we use it to track if all
3319 // elements are undef.
3320 StartIdx[Input] = NewStartIdx;
3321 }
3322
3323 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3324 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3325 return;
3326 }
3327 if (CanExtract) {
3328 // Extract appropriate subvector and generate a vector shuffle
3329 for (unsigned Input = 0; Input < 2; ++Input) {
3330 SDValue &Src = Input == 0 ? Src1 : Src2;
3331 if (StartIdx[Input] < 0)
3332 Src = DAG.getUNDEF(VT);
3333 else {
3334 Src = DAG.getNode(
3335 ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3336 DAG.getConstant(StartIdx[Input], DL,
3337 TLI.getVectorIdxTy(DAG.getDataLayout())));
3338 }
3339 }
3340
3341 // Calculate new mask.
3342 SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3343 for (int &Idx : MappedOps) {
3344 if (Idx >= (int)SrcNumElts)
3345 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3346 else if (Idx >= 0)
3347 Idx -= StartIdx[0];
3348 }
3349
3350 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3351 return;
3352 }
3353 }
3354
3355  // We can't use either concat vectors or extract subvectors, so fall back
3356  // to replacing the shuffle with per-element extracts feeding a
3357  // build vector.
3358 EVT EltVT = VT.getVectorElementType();
3359 EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
3360 SmallVector<SDValue,8> Ops;
3361 for (int Idx : Mask) {
3362 SDValue Res;
3363
3364 if (Idx < 0) {
3365 Res = DAG.getUNDEF(EltVT);
3366 } else {
3367 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3368 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3369
3370 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
3371 EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
3372 }
3373
3374 Ops.push_back(Res);
3375 }
3376
3377 setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3378}
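// Illustrative sketch, not part of the original source: when the sources are
// wider than the mask and two MaskNumElts-wide subvectors can be extracted,
// the function above rewrites the mask so it indexes the extracted pieces
// instead of the original wide vectors. The hypothetical helper below repeats
// that remapping on a plain mask where negative entries mean "undef" and are
// left untouched (assumes <vector>).

#include <vector>

static void remapShuffleMask(std::vector<int> &Mask, unsigned SrcNumElts,
                             unsigned MaskNumElts, int StartIdx0,
                             int StartIdx1) {
  for (int &Idx : Mask) {
    if (Idx >= (int)SrcNumElts)
      Idx -= SrcNumElts + StartIdx1 - MaskNumElts; // now indexes extracted Src2
    else if (Idx >= 0)
      Idx -= StartIdx0;                            // now indexes extracted Src1
  }
}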
3379
3380void SelectionDAGBuilder::visitInsertValue(const User &I) {
3381 ArrayRef<unsigned> Indices;
3382 if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
3383 Indices = IV->getIndices();
3384 else
3385 Indices = cast<ConstantExpr>(&I)->getIndices();
3386
3387 const Value *Op0 = I.getOperand(0);
3388 const Value *Op1 = I.getOperand(1);
3389 Type *AggTy = I.getType();
3390 Type *ValTy = Op1->getType();
3391 bool IntoUndef = isa<UndefValue>(Op0);
3392 bool FromUndef = isa<UndefValue>(Op1);
3393
3394 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3395
3396 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3397 SmallVector<EVT, 4> AggValueVTs;
3398 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3399 SmallVector<EVT, 4> ValValueVTs;
3400 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3401
3402 unsigned NumAggValues = AggValueVTs.size();
3403 unsigned NumValValues = ValValueVTs.size();
3404 SmallVector<SDValue, 4> Values(NumAggValues);
3405
3406 // Ignore an insertvalue that produces an empty object
3407 if (!NumAggValues) {
3408 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3409 return;
3410 }
3411
3412 SDValue Agg = getValue(Op0);
3413 unsigned i = 0;
3414 // Copy the beginning value(s) from the original aggregate.
3415 for (; i != LinearIndex; ++i)
3416 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3417 SDValue(Agg.getNode(), Agg.getResNo() + i);
3418 // Copy values from the inserted value(s).
3419 if (NumValValues) {
3420 SDValue Val = getValue(Op1);
3421 for (; i != LinearIndex + NumValValues; ++i)
3422 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3423 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3424 }
3425 // Copy remaining value(s) from the original aggregate.
3426 for (; i != NumAggValues; ++i)
3427 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3428 SDValue(Agg.getNode(), Agg.getResNo() + i);
3429
3430 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3431 DAG.getVTList(AggValueVTs), Values));
3432}
3433
3434void SelectionDAGBuilder::visitExtractValue(const User &I) {
3435 ArrayRef<unsigned> Indices;
3436 if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
3437 Indices = EV->getIndices();
3438 else
3439 Indices = cast<ConstantExpr>(&I)->getIndices();
3440
3441 const Value *Op0 = I.getOperand(0);
3442 Type *AggTy = Op0->getType();
3443 Type *ValTy = I.getType();
3444 bool OutOfUndef = isa<UndefValue>(Op0);
3445
3446 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3447
3448 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3449 SmallVector<EVT, 4> ValValueVTs;
3450 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3451
3452 unsigned NumValValues = ValValueVTs.size();
3453
3454  // Ignore an extractvalue that produces an empty object
3455 if (!NumValValues) {
3456 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3457 return;
3458 }
3459
3460 SmallVector<SDValue, 4> Values(NumValValues);
3461
3462 SDValue Agg = getValue(Op0);
3463 // Copy out the selected value(s).
3464 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3465 Values[i - LinearIndex] =
3466 OutOfUndef ?
3467 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3468 SDValue(Agg.getNode(), Agg.getResNo() + i);
3469
3470 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3471 DAG.getVTList(ValValueVTs), Values));
3472}
3473
3474void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3475 Value *Op0 = I.getOperand(0);
3476 // Note that the pointer operand may be a vector of pointers. Take the scalar
3477 // element which holds a pointer.
3478 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3479 SDValue N = getValue(Op0);
3480 SDLoc dl = getCurSDLoc();
3481
3482 // Normalize Vector GEP - all scalar operands should be converted to the
3483 // splat vector.
3484 unsigned VectorWidth = I.getType()->isVectorTy() ?
3485 cast<VectorType>(I.getType())->getVectorNumElements() : 0;
3486
3487 if (VectorWidth && !N.getValueType().isVector()) {
3488 LLVMContext &Context = *DAG.getContext();
3489 EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
3490 N = DAG.getSplatBuildVector(VT, dl, N);
3491 }
3492
3493 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3494 GTI != E; ++GTI) {
3495 const Value *Idx = GTI.getOperand();
3496 if (StructType *StTy = GTI.getStructTypeOrNull()) {
3497 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3498 if (Field) {
3499 // N = N + Offset
3500 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3501
3502 // In an inbounds GEP with an offset that is nonnegative even when
3503 // interpreted as signed, assume there is no unsigned overflow.
3504 SDNodeFlags Flags;
3505 if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3506 Flags.setNoUnsignedWrap(true);
3507
3508 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3509 DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3510 }
3511 } else {
3512 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3513 MVT IdxTy = MVT::getIntegerVT(IdxSize);
3514 APInt ElementSize(IdxSize, DL->getTypeAllocSize(GTI.getIndexedType()));
3515
3516 // If this is a scalar constant or a splat vector of constants,
3517 // handle it quickly.
3518 const auto *CI = dyn_cast<ConstantInt>(Idx);
3519 if (!CI && isa<ConstantDataVector>(Idx) &&
3520 cast<ConstantDataVector>(Idx)->getSplatValue())
3521 CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
3522
3523 if (CI) {
3524 if (CI->isZero())
3525 continue;
3526 APInt Offs = ElementSize * CI->getValue().sextOrTrunc(IdxSize);
3527 LLVMContext &Context = *DAG.getContext();
3528 SDValue OffsVal = VectorWidth ?
3529 DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorWidth)) :
3530 DAG.getConstant(Offs, dl, IdxTy);
3531
3532        // In an inbounds GEP with an offset that is nonnegative even when
3533 // interpreted as signed, assume there is no unsigned overflow.
3534 SDNodeFlags Flags;
3535 if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3536 Flags.setNoUnsignedWrap(true);
3537
3538 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3539 continue;
3540 }
3541
3542 // N = N + Idx * ElementSize;
3543 SDValue IdxN = getValue(Idx);
3544
3545 if (!IdxN.getValueType().isVector() && VectorWidth) {
3546 EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(), VectorWidth);
3547 IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3548 }
3549
3550 // If the index is smaller or larger than intptr_t, truncate or extend
3551 // it.
3552 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3553
3554 // If this is a multiply by a power of two, turn it into a shl
3555 // immediately. This is a very common case.
3556 if (ElementSize != 1) {
3557 if (ElementSize.isPowerOf2()) {
3558 unsigned Amt = ElementSize.logBase2();
3559 IdxN = DAG.getNode(ISD::SHL, dl,
3560 N.getValueType(), IdxN,
3561 DAG.getConstant(Amt, dl, IdxN.getValueType()));
3562 } else {
3563 SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
3564 IdxN = DAG.getNode(ISD::MUL, dl,
3565 N.getValueType(), IdxN, Scale);
3566 }
3567 }
3568
3569 N = DAG.getNode(ISD::ADD, dl,
3570 N.getValueType(), N, IdxN);
3571 }
3572 }
3573
3574 setValue(&I, N);
3575}
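// Illustrative sketch, not part of the original source: the address built
// above is the base pointer plus a constant offset for every struct field and
// Index * ElementSize for every array index, with the multiply turned into a
// shift when ElementSize is a power of two. The hypothetical helper below
// shows the same arithmetic on plain integers, using the GCC/Clang
// __builtin_ctzll in place of APInt::logBase2 (assumes <cstdint>).

#include <cstdint>

static uint64_t gepAddress(uint64_t Base, uint64_t FieldOffset, int64_t Index,
                           uint64_t ElementSize) {
  uint64_t Addr = Base + FieldOffset;                         // the constant-offset ADD
  if (ElementSize != 0 && (ElementSize & (ElementSize - 1)) == 0)
    Addr += (uint64_t)Index << __builtin_ctzll(ElementSize);  // the SHL fast path
  else
    Addr += (uint64_t)Index * ElementSize;                    // the general MUL path
  return Addr;
}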
3576
3577void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3578 // If this is a fixed sized alloca in the entry block of the function,
3579 // allocate it statically on the stack.
3580 if (FuncInfo.StaticAllocaMap.count(&I))
3581 return; // getValue will auto-populate this.
3582
3583 SDLoc dl = getCurSDLoc();
3584 Type *Ty = I.getAllocatedType();
3585 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3586 auto &DL = DAG.getDataLayout();
3587 uint64_t TySize = DL.getTypeAllocSize(Ty);
3588 unsigned Align =
3589 std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
3590
3591 SDValue AllocSize = getValue(I.getArraySize());
3592
3593 EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
3594 if (AllocSize.getValueType() != IntPtr)
3595 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3596
3597 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3598 AllocSize,
3599 DAG.getConstant(TySize, dl, IntPtr));
3600
3601 // Handle alignment. If the requested alignment is less than or equal to
3602 // the stack alignment, ignore it. If the requested alignment is greater than
3603 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
3604 unsigned StackAlign =
3605 DAG.getSubtarget().getFrameLowering()->getStackAlignment();
3606 if (Align <= StackAlign)
3607 Align = 0;
3608
3609 // Round the size of the allocation up to the stack alignment size
3610 // by adding SA-1 to the size. This doesn't overflow because we're computing
3611 // an address inside an alloca.
3612 SDNodeFlags Flags;
3613 Flags.setNoUnsignedWrap(true);
3614 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
3615 DAG.getConstant(StackAlign - 1, dl, IntPtr), Flags);
3616
3617 // Mask out the low bits for alignment purposes.
3618 AllocSize =
3619 DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
3620 DAG.getConstant(~(uint64_t)(StackAlign - 1), dl, IntPtr));
3621
3622 SDValue Ops[] = {getRoot(), AllocSize, DAG.getConstant(Align, dl, IntPtr)};
3623 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3624 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3625 setValue(&I, DSA);
3626 DAG.setRoot(DSA.getValue(1));
3627
3628 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3629}
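
The ADD/AND pair above is the usual round-up-to-alignment idiom, (Size + StackAlign - 1) & ~(StackAlign - 1). A small self-contained sketch, assuming a 16-byte stack alignment:

#include <cstdint>
#include <cassert>

// Round Size up to a multiple of Align, assuming Align is a power of two.
// This mirrors the ADD/AND pair emitted above for DYNAMIC_STACKALLOC.
static uint64_t roundUpToAlignment(uint64_t Size, uint64_t Align) {
  assert(Align && (Align & (Align - 1)) == 0 && "alignment must be a power of two");
  return (Size + Align - 1) & ~(Align - 1);
}

int main() {
  const uint64_t StackAlign = 16;               // assumed stack alignment
  assert(roundUpToAlignment(1, StackAlign) == 16);
  assert(roundUpToAlignment(16, StackAlign) == 16);
  assert(roundUpToAlignment(17, StackAlign) == 32);
  return 0;
}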
3630
3631void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3632 if (I.isAtomic())
3633 return visitAtomicLoad(I);
3634
3635 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3636 const Value *SV = I.getOperand(0);
3637 if (TLI.supportSwiftError()) {
3638 // Swifterror values can come from either a function parameter with
3639 // swifterror attribute or an alloca with swifterror attribute.
3640 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3641 if (Arg->hasSwiftErrorAttr())
3642 return visitLoadFromSwiftError(I);
3643 }
3644
3645 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3646 if (Alloca->isSwiftError())
3647 return visitLoadFromSwiftError(I);
3648 }
3649 }
3650
3651 SDValue Ptr = getValue(SV);
3652
3653 Type *Ty = I.getType();
3654
3655 bool isVolatile = I.isVolatile();
3656 bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3657 bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr;
3658 bool isDereferenceable = isDereferenceablePointer(SV, DAG.getDataLayout());
3659 unsigned Alignment = I.getAlignment();
3660
3661 AAMDNodes AAInfo;
3662 I.getAAMetadata(AAInfo);
3663 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3664
3665 SmallVector<EVT, 4> ValueVTs;
3666 SmallVector<uint64_t, 4> Offsets;
3667 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
3668 unsigned NumValues = ValueVTs.size();
3669 if (NumValues == 0)
3670 return;
3671
3672 SDValue Root;
3673 bool ConstantMemory = false;
3674 if (isVolatile || NumValues > MaxParallelChains)
3675 // Serialize volatile loads with other side effects.
3676 Root = getRoot();
3677 else if (AA && AA->pointsToConstantMemory(MemoryLocation(
3678 SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) {
3679 // Do not serialize (non-volatile) loads of constant memory with anything.
3680 Root = DAG.getEntryNode();
3681 ConstantMemory = true;
3682 } else {
3683 // Do not serialize non-volatile loads against each other.
3684 Root = DAG.getRoot();
3685 }
3686
3687 SDLoc dl = getCurSDLoc();
3688
3689 if (isVolatile)
3690 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
3691
3692 // An aggregate load cannot wrap around the address space, so offsets to its
3693 // parts don't wrap either.
3694 SDNodeFlags Flags;
3695 Flags.setNoUnsignedWrap(true);
3696
3697 SmallVector<SDValue, 4> Values(NumValues);
3698 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3699 EVT PtrVT = Ptr.getValueType();
3700 unsigned ChainI = 0;
3701 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3702 // Serializing loads here may result in excessive register pressure, and
3703 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3704 // could recover a bit by hoisting nodes upward in the chain by recognizing
3705 // they are side-effect free or do not alias. The optimizer should really
3706 // avoid this case by converting large object/array copies to llvm.memcpy
3707 // (MaxParallelChains should always remain as failsafe).
3708 if (ChainI == MaxParallelChains) {
3709 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3710 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3711 makeArrayRef(Chains.data(), ChainI));
3712 Root = Chain;
3713 ChainI = 0;
3714 }
3715 SDValue A = DAG.getNode(ISD::ADD, dl,
3716 PtrVT, Ptr,
3717 DAG.getConstant(Offsets[i], dl, PtrVT),
3718 Flags);
3719 auto MMOFlags = MachineMemOperand::MONone;
3720 if (isVolatile)
3721 MMOFlags |= MachineMemOperand::MOVolatile;
3722 if (isNonTemporal)
3723 MMOFlags |= MachineMemOperand::MONonTemporal;
3724 if (isInvariant)
3725 MMOFlags |= MachineMemOperand::MOInvariant;
3726 if (isDereferenceable)
3727 MMOFlags |= MachineMemOperand::MODereferenceable;
3728 MMOFlags |= TLI.getMMOFlags(I);
3729
3730 SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, A,
3731 MachinePointerInfo(SV, Offsets[i]), Alignment,
3732 MMOFlags, AAInfo, Ranges);
3733
3734 Values[i] = L;
3735 Chains[ChainI] = L.getValue(1);
3736 }
3737
3738 if (!ConstantMemory) {
3739 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3740 makeArrayRef(Chains.data(), ChainI));
3741 if (isVolatile)
3742 DAG.setRoot(Chain);
3743 else
3744 PendingLoads.push_back(Chain);
3745 }
3746
3747 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
3748 DAG.getVTList(ValueVTs), Values));
3749}
3750
3751void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
3752 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3753        "call visitStoreToSwiftError when backend supports swifterror");
3754
3755 SmallVector<EVT, 4> ValueVTs;
3756 SmallVector<uint64_t, 4> Offsets;
3757 const Value *SrcV = I.getOperand(0);
3758 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3759 SrcV->getType(), ValueVTs, &Offsets);
3760 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3761        "expect a single EVT for swifterror");
3762
3763 SDValue Src = getValue(SrcV);
3764 // Create a virtual register, then update the virtual register.
3765 unsigned VReg; bool CreatedVReg;
3766 std::tie(VReg, CreatedVReg) = FuncInfo.getOrCreateSwiftErrorVRegDefAt(&I);
3767 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
3768 // Chain can be getRoot or getControlRoot.
3769 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
3770 SDValue(Src.getNode(), Src.getResNo()));
3771 DAG.setRoot(CopyNode);
3772 if (CreatedVReg)
3773 FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
3774}
3775
3776void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
3777 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3778        "call visitLoadFromSwiftError when backend supports swifterror");
3779
3780 assert(!I.isVolatile() &&
3781        I.getMetadata(LLVMContext::MD_nontemporal) == nullptr &&
3782        I.getMetadata(LLVMContext::MD_invariant_load) == nullptr &&
3783        "Support volatile, non temporal, invariant for load_from_swift_error");
3784
3785 const Value *SV = I.getOperand(0);
3786 Type *Ty = I.getType();
3787 AAMDNodes AAInfo;
3788 I.getAAMetadata(AAInfo);
3789 assert((!AA || !AA->pointsToConstantMemory(MemoryLocation(
3790            SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) &&
3791        "load_from_swift_error should not be constant memory");
3792
3793 SmallVector<EVT, 4> ValueVTs;
3794 SmallVector<uint64_t, 4> Offsets;
3795 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
3796 ValueVTs, &Offsets);
3797 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3798        "expect a single EVT for swifterror");
3799
3800 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
3801 SDValue L = DAG.getCopyFromReg(
3802 getRoot(), getCurSDLoc(),
3803 FuncInfo.getOrCreateSwiftErrorVRegUseAt(&I, FuncInfo.MBB, SV).first,
3804 ValueVTs[0]);
3805
3806 setValue(&I, L);
3807}
3808
3809void SelectionDAGBuilder::visitStore(const StoreInst &I) {
3810 if (I.isAtomic())
3811 return visitAtomicStore(I);
3812
3813 const Value *SrcV = I.getOperand(0);
3814 const Value *PtrV = I.getOperand(1);
3815
3816 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3817 if (TLI.supportSwiftError()) {
3818 // Swifterror values can come from either a function parameter with
3819 // swifterror attribute or an alloca with swifterror attribute.
3820 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
3821 if (Arg->hasSwiftErrorAttr())
3822 return visitStoreToSwiftError(I);
3823 }
3824
3825 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
3826 if (Alloca->isSwiftError())
3827 return visitStoreToSwiftError(I);
3828 }
3829 }
3830
3831 SmallVector<EVT, 4> ValueVTs;
3832 SmallVector<uint64_t, 4> Offsets;
3833 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3834 SrcV->getType(), ValueVTs, &Offsets);
3835 unsigned NumValues = ValueVTs.size();
3836 if (NumValues == 0)
3837 return;
3838
3839 // Get the lowered operands. Note that we do this after
3840 // checking if NumValues is zero, because with zero results
3841 // the operands won't have values in the map.
3842 SDValue Src = getValue(SrcV);
3843 SDValue Ptr = getValue(PtrV);
3844
3845 SDValue Root = getRoot();
3846 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3847 SDLoc dl = getCurSDLoc();
3848 EVT PtrVT = Ptr.getValueType();
3849 unsigned Alignment = I.getAlignment();
3850 AAMDNodes AAInfo;
3851 I.getAAMetadata(AAInfo);
3852
3853 auto MMOFlags = MachineMemOperand::MONone;
3854 if (I.isVolatile())
3855 MMOFlags |= MachineMemOperand::MOVolatile;
3856 if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
3857 MMOFlags |= MachineMemOperand::MONonTemporal;
3858 MMOFlags |= TLI.getMMOFlags(I);
3859
3860 // An aggregate store cannot wrap around the address space, so offsets to its
3861 // parts don't wrap either.
3862 SDNodeFlags Flags;
3863 Flags.setNoUnsignedWrap(true);
3864
3865 unsigned ChainI = 0;
3866 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3867 // See visitLoad comments.
3868 if (ChainI == MaxParallelChains) {
3869 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3870 makeArrayRef(Chains.data(), ChainI));
3871 Root = Chain;
3872 ChainI = 0;
3873 }
3874 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3875 DAG.getConstant(Offsets[i], dl, PtrVT), Flags);
3876 SDValue St = DAG.getStore(
3877 Root, dl, SDValue(Src.getNode(), Src.getResNo() + i), Add,
3878 MachinePointerInfo(PtrV, Offsets[i]), Alignment, MMOFlags, AAInfo);
3879 Chains[ChainI] = St;
3880 }
3881
3882 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3883 makeArrayRef(Chains.data(), ChainI));
3884 DAG.setRoot(StoreNode);
3885}
3886
3887void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
3888 bool IsCompressing) {
3889 SDLoc sdl = getCurSDLoc();
3890
3891 auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3892 unsigned& Alignment) {
3893 // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
3894 Src0 = I.getArgOperand(0);
3895 Ptr = I.getArgOperand(1);
3896 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3897 Mask = I.getArgOperand(3);
3898 };
3899 auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3900 unsigned& Alignment) {
3901 // llvm.masked.compressstore.*(Src0, Ptr, Mask)
3902 Src0 = I.getArgOperand(0);
3903 Ptr = I.getArgOperand(1);
3904 Mask = I.getArgOperand(2);
3905 Alignment = 0;
3906 };
3907
3908 Value *PtrOperand, *MaskOperand, *Src0Operand;
3909 unsigned Alignment;
3910 if (IsCompressing)
3911 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3912 else
3913 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3914
3915 SDValue Ptr = getValue(PtrOperand);
3916 SDValue Src0 = getValue(Src0Operand);
3917 SDValue Mask = getValue(MaskOperand);
3918
3919 EVT VT = Src0.getValueType();
3920 if (!Alignment)
3921 Alignment = DAG.getEVTAlignment(VT);
3922
3923 AAMDNodes AAInfo;
3924 I.getAAMetadata(AAInfo);
3925
3926 MachineMemOperand *MMO =
3927 DAG.getMachineFunction().
3928 getMachineMemOperand(MachinePointerInfo(PtrOperand),
3929 MachineMemOperand::MOStore, VT.getStoreSize(),
3930 Alignment, AAInfo);
3931 SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
3932 MMO, false /* Truncating */,
3933 IsCompressing);
3934 DAG.setRoot(StoreNode);
3935 setValue(&I, StoreNode);
3936}
3937
3938// Get a uniform base for the Gather/Scatter intrinsic.
3939// The first argument of the Gather/Scatter intrinsic is a vector of pointers.
3940// We try to represent it as a base pointer + vector of indices.
3941// Usually, the vector of pointers comes from a 'getelementptr' instruction.
3942// The first operand of the GEP may be a single pointer or a vector of pointers
3943// Example:
3944// %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
3945// or
3946// %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
3947// %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
3948//
3949 // When the first GEP operand is a single pointer, it is the uniform base we
3950 // are looking for. If the first operand of the GEP is a splat vector, we
3951 // extract the splat value and use it as a uniform base.
3952// In all other cases the function returns 'false'.
3953static bool getUniformBase(const Value* &Ptr, SDValue& Base, SDValue& Index,
3954 SDValue &Scale, SelectionDAGBuilder* SDB) {
3955 SelectionDAG& DAG = SDB->DAG;
3956 LLVMContext &Context = *DAG.getContext();
3957
3958 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
3959 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
3960 if (!GEP)
3961 return false;
3962
3963 const Value *GEPPtr = GEP->getPointerOperand();
3964 if (!GEPPtr->getType()->isVectorTy())
3965 Ptr = GEPPtr;
3966 else if (!(Ptr = getSplatValue(GEPPtr)))
3967 return false;
3968
3969 unsigned FinalIndex = GEP->getNumOperands() - 1;
3970 Value *IndexVal = GEP->getOperand(FinalIndex);
3971
3972 // Ensure all the other indices are 0.
3973 for (unsigned i = 1; i < FinalIndex; ++i) {
3974 auto *C = dyn_cast<ConstantInt>(GEP->getOperand(i));
3975 if (!C || !C->isZero())
3976 return false;
3977 }
3978
3979 // The operands of the GEP may be defined in another basic block.
3980 // In this case we'll not find nodes for the operands.
3981 if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal))
3982 return false;
3983
3984 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3985 const DataLayout &DL = DAG.getDataLayout();
3986 Scale = DAG.getTargetConstant(DL.getTypeAllocSize(GEP->getResultElementType()),
3987 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
3988 Base = SDB->getValue(Ptr);
3989 Index = SDB->getValue(IndexVal);
3990
3991 if (!Index.getValueType().isVector()) {
3992 unsigned GEPWidth = GEP->getType()->getVectorNumElements();
3993 EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
3994 Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
3995 }
3996 return true;
3997}
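
When getUniformBase succeeds, every lane's pointer is expressible as Base + Index[lane] * Scale. A hedged standalone sketch of the addressing this decomposition implies (plain arrays and made-up values stand in for the SDValues):

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical decomposition of a <4 x i32*> vector of pointers into a
  // uniform base, a per-lane index vector, and a scale (the element size).
  uint64_t Base = 0x1000;
  int64_t Index[4] = {0, 1, 2, 7};
  uint64_t Scale = 4;                           // sizeof(i32)

  for (int Lane = 0; Lane < 4; ++Lane) {
    uint64_t Addr = Base + uint64_t(Index[Lane]) * Scale;
    std::printf("lane %d -> 0x%llx\n", Lane, (unsigned long long)Addr);
  }
  return 0;
}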
3998
3999void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4000 SDLoc sdl = getCurSDLoc();
4001
4002 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4003 const Value *Ptr = I.getArgOperand(1);
4004 SDValue Src0 = getValue(I.getArgOperand(0));
4005 SDValue Mask = getValue(I.getArgOperand(3));
4006 EVT VT = Src0.getValueType();
4007 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
4008 if (!Alignment)
4009 Alignment = DAG.getEVTAlignment(VT);
4010 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4011
4012 AAMDNodes AAInfo;
4013 I.getAAMetadata(AAInfo);
4014
4015 SDValue Base;
4016 SDValue Index;
4017 SDValue Scale;
4018 const Value *BasePtr = Ptr;
4019 bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
4020
4021 const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
4022 MachineMemOperand *MMO = DAG.getMachineFunction().
4023 getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
4024 MachineMemOperand::MOStore, VT.getStoreSize(),
4025 Alignment, AAInfo);
4026 if (!UniformBase) {
4027 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4028 Index = getValue(Ptr);
4029 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4030 }
4031 SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index, Scale };
4032 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4033 Ops, MMO);
4034 DAG.setRoot(Scatter);
4035 setValue(&I, Scatter);
4036}
4037
4038void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4039 SDLoc sdl = getCurSDLoc();
4040
4041 auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4042 unsigned& Alignment) {
4043 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4044 Ptr = I.getArgOperand(0);
4045 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
4046 Mask = I.getArgOperand(2);
4047 Src0 = I.getArgOperand(3);
4048 };
4049 auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4050 unsigned& Alignment) {
4051 // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4052 Ptr = I.getArgOperand(0);
4053 Alignment = 0;
4054 Mask = I.getArgOperand(1);
4055 Src0 = I.getArgOperand(2);
4056 };
4057
4058 Value *PtrOperand, *MaskOperand, *Src0Operand;
4059 unsigned Alignment;
4060 if (IsExpanding)
4061 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4062 else
4063 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4064
4065 SDValue Ptr = getValue(PtrOperand);
4066 SDValue Src0 = getValue(Src0Operand);
4067 SDValue Mask = getValue(MaskOperand);
4068
4069 EVT VT = Src0.getValueType();
4070 if (!Alignment)
4071 Alignment = DAG.getEVTAlignment(VT);
4072
4073 AAMDNodes AAInfo;
4074 I.getAAMetadata(AAInfo);
4075 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4076
4077 // Do not serialize masked loads of constant memory with anything.
4078 bool AddToChain = !AA || !AA->pointsToConstantMemory(MemoryLocation(
4079 PtrOperand, DAG.getDataLayout().getTypeStoreSize(I.getType()), AAInfo));
4080 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4081
4082 MachineMemOperand *MMO =
4083 DAG.getMachineFunction().
4084 getMachineMemOperand(MachinePointerInfo(PtrOperand),
4085 MachineMemOperand::MOLoad, VT.getStoreSize(),
4086 Alignment, AAInfo, Ranges);
4087
4088 SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
4089 ISD::NON_EXTLOAD, IsExpanding);
4090 if (AddToChain)
4091 PendingLoads.push_back(Load.getValue(1));
4092 setValue(&I, Load);
4093}
4094
4095void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4096 SDLoc sdl = getCurSDLoc();
4097
4098 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4099 const Value *Ptr = I.getArgOperand(0);
4100 SDValue Src0 = getValue(I.getArgOperand(3));
4101 SDValue Mask = getValue(I.getArgOperand(2));
4102
4103 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4104 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4105 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
4106 if (!Alignment)
4107 Alignment = DAG.getEVTAlignment(VT);
4108
4109 AAMDNodes AAInfo;
4110 I.getAAMetadata(AAInfo);
4111 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4112
4113 SDValue Root = DAG.getRoot();
4114 SDValue Base;
4115 SDValue Index;
4116 SDValue Scale;
4117 const Value *BasePtr = Ptr;
4118 bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
4119 bool ConstantMemory = false;
4120 if (UniformBase &&
4121 AA && AA->pointsToConstantMemory(MemoryLocation(
4122 BasePtr, DAG.getDataLayout().getTypeStoreSize(I.getType()),
4123 AAInfo))) {
4124 // Do not serialize (non-volatile) loads of constant memory with anything.
4125 Root = DAG.getEntryNode();
4126 ConstantMemory = true;
4127 }
4128
4129 MachineMemOperand *MMO =
4130 DAG.getMachineFunction().
4131 getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
4132 MachineMemOperand::MOLoad, VT.getStoreSize(),
4133 Alignment, AAInfo, Ranges);
4134
4135 if (!UniformBase) {
4136 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4137 Index = getValue(Ptr);
4138 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4139 }
4140 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4141 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4142 Ops, MMO);
4143
4144 SDValue OutChain = Gather.getValue(1);
4145 if (!ConstantMemory)
4146 PendingLoads.push_back(OutChain);
4147 setValue(&I, Gather);
4148}
4149
4150void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4151 SDLoc dl = getCurSDLoc();
4152 AtomicOrdering SuccessOrder = I.getSuccessOrdering();
4153 AtomicOrdering FailureOrder = I.getFailureOrdering();
4154 SyncScope::ID SSID = I.getSyncScopeID();
4155
4156 SDValue InChain = getRoot();
4157
4158 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4159 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4160 SDValue L = DAG.getAtomicCmpSwap(
4161 ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
4162 getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
4163 getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
4164 /*Alignment=*/ 0, SuccessOrder, FailureOrder, SSID);
4165
4166 SDValue OutChain = L.getValue(2);
4167
4168 setValue(&I, L);
4169 DAG.setRoot(OutChain);
4170}
4171
4172void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4173 SDLoc dl = getCurSDLoc();
4174 ISD::NodeType NT;
4175 switch (I.getOperation()) {
4176 default: llvm_unreachable("Unknown atomicrmw operation");
4177 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4178 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
4179 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
4180 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
4181 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4182 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
4183 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
4184 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
4185 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
4186 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4187 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4188 }
4189 AtomicOrdering Order = I.getOrdering();
4190 SyncScope::ID SSID = I.getSyncScopeID();
4191
4192 SDValue InChain = getRoot();
4193
4194 SDValue L =
4195 DAG.getAtomic(NT, dl,
4196 getValue(I.getValOperand()).getSimpleValueType(),
4197 InChain,
4198 getValue(I.getPointerOperand()),
4199 getValue(I.getValOperand()),
4200 I.getPointerOperand(),
4201 /* Alignment=*/ 0, Order, SSID);
4202
4203 SDValue OutChain = L.getValue(1);
4204
4205 setValue(&I, L);
4206 DAG.setRoot(OutChain);
4207}
4208
4209void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4210 SDLoc dl = getCurSDLoc();
4211 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4212 SDValue Ops[3];
4213 Ops[0] = getRoot();
4214 Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
4215 TLI.getFenceOperandTy(DAG.getDataLayout()));
4216 Ops[2] = DAG.getConstant(I.getSyncScopeID(), dl,
4217 TLI.getFenceOperandTy(DAG.getDataLayout()));
4218 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4219}
4220
4221void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4222 SDLoc dl = getCurSDLoc();
4223 AtomicOrdering Order = I.getOrdering();
4224 SyncScope::ID SSID = I.getSyncScopeID();
4225
4226 SDValue InChain = getRoot();
4227
4228 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4229 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4230
4231 if (!TLI.supportsUnalignedAtomics() &&
4232 I.getAlignment() < VT.getStoreSize())
4233 report_fatal_error("Cannot generate unaligned atomic load");
4234
4235 MachineMemOperand *MMO =
4236 DAG.getMachineFunction().
4237 getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
4238 MachineMemOperand::MOVolatile |
4239 MachineMemOperand::MOLoad,
4240 VT.getStoreSize(),
4241 I.getAlignment() ? I.getAlignment() :
4242 DAG.getEVTAlignment(VT),
4243 AAMDNodes(), nullptr, SSID, Order);
4244
4245 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4246 SDValue L =
4247 DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
4248 getValue(I.getPointerOperand()), MMO);
4249
4250 SDValue OutChain = L.getValue(1);
4251
4252 setValue(&I, L);
4253 DAG.setRoot(OutChain);
4254}
4255
4256void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4257 SDLoc dl = getCurSDLoc();
4258
4259 AtomicOrdering Order = I.getOrdering();
4260 SyncScope::ID SSID = I.getSyncScopeID();
4261
4262 SDValue InChain = getRoot();
4263
4264 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4265 EVT VT =
4266 TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4267
4268 if (I.getAlignment() < VT.getStoreSize())
4269 report_fatal_error("Cannot generate unaligned atomic store");
4270
4271 SDValue OutChain =
4272 DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
4273 InChain,
4274 getValue(I.getPointerOperand()),
4275 getValue(I.getValueOperand()),
4276 I.getPointerOperand(), I.getAlignment(),
4277 Order, SSID);
4278
4279 DAG.setRoot(OutChain);
4280}
4281
4282/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4283/// node.
4284void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4285 unsigned Intrinsic) {
4286 // Ignore the callsite's attributes. A specific call site may be marked with
4287 // readnone, but the lowering code will expect the chain based on the
4288 // definition.
4289 const Function *F = I.getCalledFunction();
4290 bool HasChain = !F->doesNotAccessMemory();
4291 bool OnlyLoad = HasChain && F->onlyReadsMemory();
4292
4293 // Build the operand list.
4294 SmallVector<SDValue, 8> Ops;
4295 if (HasChain) { // If this intrinsic has side-effects, chainify it.
4296 if (OnlyLoad) {
4297 // We don't need to serialize loads against other loads.
4298 Ops.push_back(DAG.getRoot());
4299 } else {
4300 Ops.push_back(getRoot());
4301 }
4302 }
4303
4304 // Info is set by getTgtMemIntrinsic.
4305 TargetLowering::IntrinsicInfo Info;
4306 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4307 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4308 DAG.getMachineFunction(),
4309 Intrinsic);
4310
4311 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4312 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4313 Info.opc == ISD::INTRINSIC_W_CHAIN)
4314 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4315 TLI.getPointerTy(DAG.getDataLayout())));
4316
4317 // Add all operands of the call to the operand list.
4318 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4319 SDValue Op = getValue(I.getArgOperand(i));
4320 Ops.push_back(Op);
4321 }
4322
4323 SmallVector<EVT, 4> ValueVTs;
4324 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4325
4326 if (HasChain)
4327 ValueVTs.push_back(MVT::Other);
4328
4329 SDVTList VTs = DAG.getVTList(ValueVTs);
4330
4331 // Create the node.
4332 SDValue Result;
4333 if (IsTgtIntrinsic) {
4334 // This is a target intrinsic that touches memory.
4335 Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs,
4336 Ops, Info.memVT,
4337 MachinePointerInfo(Info.ptrVal, Info.offset), Info.align,
4338 Info.flags, Info.size);
4339 } else if (!HasChain) {
4340 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4341 } else if (!I.getType()->isVoidTy()) {
4342 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4343 } else {
4344 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4345 }
4346
4347 if (HasChain) {
4348 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4349 if (OnlyLoad)
4350 PendingLoads.push_back(Chain);
4351 else
4352 DAG.setRoot(Chain);
4353 }
4354
4355 if (!I.getType()->isVoidTy()) {
4356 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4357 EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4358 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4359 } else
4360 Result = lowerRangeToAssertZExt(DAG, I, Result);
4361
4362 setValue(&I, Result);
4363 }
4364}
4365
4366/// GetSignificand - Get the significand and build it into a floating-point
4367/// number with exponent of 1:
4368///
4369/// Op = (Op & 0x007fffff) | 0x3f800000;
4370///
4371 /// where Op is the i32 bit pattern of the floating-point value.
4372static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4373 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4374 DAG.getConstant(0x007fffff, dl, MVT::i32));
4375 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4376 DAG.getConstant(0x3f800000, dl, MVT::i32));
4377 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4378}
4379
4380/// GetExponent - Get the exponent:
4381///
4382/// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4383///
4384 /// where Op is the i32 bit pattern of the floating-point value.
4385static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4386 const TargetLowering &TLI, const SDLoc &dl) {
4387 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4388 DAG.getConstant(0x7f800000, dl, MVT::i32));
4389 SDValue t1 = DAG.getNode(
4390 ISD::SRL, dl, MVT::i32, t0,
4391 DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4392 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4393 DAG.getConstant(127, dl, MVT::i32));
4394 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4395}
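
Both helpers are plain IEEE-754 bit manipulation: keep the 23 mantissa bits and force an exponent of 1 to get a value in [1,2), or shift the biased exponent down and subtract 127. A standalone sketch of both, assuming 32-bit floats and using memcpy for the bitcast:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Value in [1,2) carrying Op's mantissa: (bits & 0x007fffff) | 0x3f800000.
static float significand(float Op) {
  uint32_t Bits;
  std::memcpy(&Bits, &Op, sizeof(Bits));
  Bits = (Bits & 0x007fffffu) | 0x3f800000u;
  float Res;
  std::memcpy(&Res, &Bits, sizeof(Res));
  return Res;
}

// Unbiased exponent as a float: (float)(((bits & 0x7f800000) >> 23) - 127).
static float exponent(float Op) {
  uint32_t Bits;
  std::memcpy(&Bits, &Op, sizeof(Bits));
  return float(int32_t((Bits & 0x7f800000u) >> 23) - 127);
}

int main() {
  float X = 24.0f;                              // 24 = 1.5 * 2^4
  std::printf("significand = %f, exponent = %f\n", significand(X), exponent(X));
  return 0;
}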
4396
4397/// getF32Constant - Get 32-bit floating point constant.
4398static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4399 const SDLoc &dl) {
4400 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4401 MVT::f32);
4402}
4403
4404static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4405 SelectionDAG &DAG) {
4406 // TODO: What fast-math-flags should be set on the floating-point nodes?
4407
4408 // IntegerPartOfX = (int32_t)t0;
4409 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4410
4411 // FractionalPartOfX = t0 - (float)IntegerPartOfX;
4412 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4413 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4414
4415 // IntegerPartOfX <<= 23;
4416 IntegerPartOfX = DAG.getNode(
4417 ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4418 DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4419 DAG.getDataLayout())));
4420
4421 SDValue TwoToFractionalPartOfX;
4422 if (LimitFloatPrecision <= 6) {
4423 // For floating-point precision of 6:
4424 //
4425 // TwoToFractionalPartOfX =
4426 // 0.997535578f +
4427 // (0.735607626f + 0.252464424f * x) * x;
4428 //
4429 // error 0.0144103317, which is 6 bits
4430 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4431 getF32Constant(DAG, 0x3e814304, dl));
4432 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4433 getF32Constant(DAG, 0x3f3c50c8, dl));
4434 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4435 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4436 getF32Constant(DAG, 0x3f7f5e7e, dl));
4437 } else if (LimitFloatPrecision <= 12) {
4438 // For floating-point precision of 12:
4439 //
4440 // TwoToFractionalPartOfX =
4441 // 0.999892986f +
4442 // (0.696457318f +
4443 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
4444 //
4445 // error 0.000107046256, which is 13 to 14 bits
4446 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4447 getF32Constant(DAG, 0x3da235e3, dl));
4448 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4449 getF32Constant(DAG, 0x3e65b8f3, dl));
4450 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4451 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4452 getF32Constant(DAG, 0x3f324b07, dl));
4453 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4454 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4455 getF32Constant(DAG, 0x3f7ff8fd, dl));
4456 } else { // LimitFloatPrecision <= 18
4457 // For floating-point precision of 18:
4458 //
4459 // TwoToFractionalPartOfX =
4460 // 0.999999982f +
4461 // (0.693148872f +
4462 // (0.240227044f +
4463 // (0.554906021e-1f +
4464 // (0.961591928e-2f +
4465 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4466 // error 2.47208000*10^(-7), which is better than 18 bits
4467 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4468 getF32Constant(DAG, 0x3924b03e, dl));
4469 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4470 getF32Constant(DAG, 0x3ab24b87, dl));
4471 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4472 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4473 getF32Constant(DAG, 0x3c1d8c17, dl));
4474 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4475 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4476 getF32Constant(DAG, 0x3d634a1d, dl));
4477 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4478 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4479 getF32Constant(DAG, 0x3e75fe14, dl));
4480 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4481 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4482 getF32Constant(DAG, 0x3f317234, dl));
4483 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4484 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4485 getF32Constant(DAG, 0x3f800000, dl));
4486 }
4487
4488 // Add the exponent into the result in integer domain.
4489 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4490 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4491 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4492}
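
The routine splits t0 into an integer part, which is shifted straight into the exponent field, and a fractional part approximated by one of the minimax polynomials above, with the two combined in the integer domain. A hedged sketch of the same idea in plain C++, using the 6-bit-precision coefficients quoted in the comments (accuracy is illustrative only):

#include <cstdint>
#include <cstring>
#include <cstdio>
#include <cmath>

// Approximate 2^T0 the way getLimitedPrecisionExp2 does for the 6-bit case:
// split off the integer part, evaluate a short polynomial on the fraction,
// then add the integer part into the exponent field in the integer domain.
static float limitedExp2(float T0) {
  int32_t IntegerPartOfX = int32_t(T0);            // FP_TO_SINT
  float X = T0 - float(IntegerPartOfX);            // fractional part
  float TwoToFrac =
      0.997535578f + (0.735607626f + 0.252464424f * X) * X;
  uint32_t Bits;
  std::memcpy(&Bits, &TwoToFrac, sizeof(Bits));
  Bits += uint32_t(IntegerPartOfX) << 23;          // IntegerPartOfX <<= 23; ADD
  float Res;
  std::memcpy(&Res, &Bits, sizeof(Res));
  return Res;
}

int main() {
  const float Tests[] = {0.0f, 0.5f, 3.25f};
  for (float T : Tests)
    std::printf("2^%g: approx %f, libm %f\n", T, limitedExp2(T), std::exp2(T));
  return 0;
}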
4493
4494/// expandExp - Lower an exp intrinsic. Handles the special sequences for
4495/// limited-precision mode.
4496static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4497 const TargetLowering &TLI) {
4498 if (Op.getValueType() == MVT::f32 &&
4499 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4500
4501 // Put the exponent in the right bit position for later addition to the
4502 // final result:
4503 //
4504 // #define LOG2OFe 1.4426950f
4505 // t0 = Op * LOG2OFe
4506
4507 // TODO: What fast-math-flags should be set here?
4508 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4509 getF32Constant(DAG, 0x3fb8aa3b, dl));
4510 return getLimitedPrecisionExp2(t0, dl, DAG);
4511 }
4512
4513 // No special expansion.
4514 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4515}
4516
4517/// expandLog - Lower a log intrinsic. Handles the special sequences for
4518/// limited-precision mode.
4519static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4520 const TargetLowering &TLI) {
4521 // TODO: What fast-math-flags should be set on the floating-point nodes?
4522
4523 if (Op.getValueType() == MVT::f32 &&
4524 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4525 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4526
4527 // Scale the exponent by log(2) [0.69314718f].
4528 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4529 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4530 getF32Constant(DAG, 0x3f317218, dl));
4531
4532 // Get the significand and build it into a floating-point number with
4533 // exponent of 1.
4534 SDValue X = GetSignificand(DAG, Op1, dl);
4535
4536 SDValue LogOfMantissa;
4537 if (LimitFloatPrecision <= 6) {
4538 // For floating-point precision of 6:
4539 //
4540 // LogofMantissa =
4541 // -1.1609546f +
4542 // (1.4034025f - 0.23903021f * x) * x;
4543 //
4544 // error 0.0034276066, which is better than 8 bits
4545 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4546 getF32Constant(DAG, 0xbe74c456, dl));
4547 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4548 getF32Constant(DAG, 0x3fb3a2b1, dl));
4549 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4550 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4551 getF32Constant(DAG, 0x3f949a29, dl));
4552 } else if (LimitFloatPrecision <= 12) {
4553 // For floating-point precision of 12:
4554 //
4555 // LogOfMantissa =
4556 // -1.7417939f +
4557 // (2.8212026f +
4558 // (-1.4699568f +
4559 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4560 //
4561 // error 0.000061011436, which is 14 bits
4562 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4563 getF32Constant(DAG, 0xbd67b6d6, dl));
4564 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4565 getF32Constant(DAG, 0x3ee4f4b8, dl));
4566 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4567 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4568 getF32Constant(DAG, 0x3fbc278b, dl));
4569 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4570 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4571 getF32Constant(DAG, 0x40348e95, dl));
4572 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4573 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4574 getF32Constant(DAG, 0x3fdef31a, dl));
4575 } else { // LimitFloatPrecision <= 18
4576 // For floating-point precision of 18:
4577 //
4578 // LogOfMantissa =
4579 // -2.1072184f +
4580 // (4.2372794f +
4581 // (-3.7029485f +
4582 // (2.2781945f +
4583 // (-0.87823314f +
4584 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4585 //
4586 // error 0.0000023660568, which is better than 18 bits
4587 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4588 getF32Constant(DAG, 0xbc91e5ac, dl));
4589 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4590 getF32Constant(DAG, 0x3e4350aa, dl));
4591 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4592 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4593 getF32Constant(DAG, 0x3f60d3e3, dl));
4594 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4595 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4596 getF32Constant(DAG, 0x4011cdf0, dl));
4597 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4598 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4599 getF32Constant(DAG, 0x406cfd1c, dl));
4600 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4601 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4602 getF32Constant(DAG, 0x408797cb, dl));
4603 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4604 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4605 getF32Constant(DAG, 0x4006dcab, dl));
4606 }
4607
4608 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4609 }
4610
4611 // No special expansion.
4612 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4613}
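
expandLog follows the same split as the helpers above: log(x) = exponent(x) * ln(2) + log(significand(x)), with the second term approximated by a small polynomial over [1,2). A short standalone sketch under those assumptions, using the 6-bit coefficients from the comment:

#include <cstdint>
#include <cstring>
#include <cstdio>
#include <cmath>

int main() {
  float Op = 10.0f;

  uint32_t Bits;
  std::memcpy(&Bits, &Op, sizeof(Bits));
  float Exp = float(int32_t((Bits & 0x7f800000u) >> 23) - 127);   // GetExponent
  uint32_t MantBits = (Bits & 0x007fffffu) | 0x3f800000u;         // GetSignificand
  float X;
  std::memcpy(&X, &MantBits, sizeof(X));

  // log(mantissa) ~= -1.1609546 + (1.4034025 - 0.23903021 * x) * x  (6-bit case)
  float LogOfMantissa = -1.1609546f + (1.4034025f - 0.23903021f * X) * X;
  float LogOfExponent = Exp * 0.69314718f;                        // exponent * ln(2)

  std::printf("approx log(10) = %f, libm = %f\n",
              LogOfExponent + LogOfMantissa, std::log(Op));
  return 0;
}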
4614
4615/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4616/// limited-precision mode.
4617static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4618 const TargetLowering &TLI) {
4619 // TODO: What fast-math-flags should be set on the floating-point nodes?
4620
4621 if (Op.getValueType() == MVT::f32 &&
4622 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4623 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4624
4625 // Get the exponent.
4626 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
4627
4628 // Get the significand and build it into a floating-point number with
4629 // exponent of 1.
4630 SDValue X = GetSignificand(DAG, Op1, dl);
4631
4632 // Different possible minimax approximations of significand in
4633 // floating-point for various degrees of accuracy over [1,2].
4634 SDValue Log2ofMantissa;
4635 if (LimitFloatPrecision <= 6) {
4636 // For floating-point precision of 6:
4637 //
4638 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
4639 //
4640 // error 0.0049451742, which is more than 7 bits
4641 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4642 getF32Constant(DAG, 0xbeb08fe0, dl));
4643 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4644 getF32Constant(DAG, 0x40019463, dl));
4645 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4646 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4647 getF32Constant(DAG, 0x3fd6633d, dl));
4648 } else if (LimitFloatPrecision <= 12) {
4649 // For floating-point precision of 12:
4650 //
4651 // Log2ofMantissa =
4652 // -2.51285454f +
4653 // (4.07009056f +
4654 // (-2.12067489f +
4655 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4656 //
4657 // error 0.0000876136000, which is better than 13 bits
4658 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4659 getF32Constant(DAG, 0xbda7262e, dl));
4660 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4661 getF32Constant(DAG, 0x3f25280b, dl));
4662 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4663 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4664 getF32Constant(DAG, 0x4007b923, dl));
4665 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4666 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4667 getF32Constant(DAG, 0x40823e2f, dl));
4668 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4669 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4670 getF32Constant(DAG, 0x4020d29c, dl));
4671 } else { // LimitFloatPrecision <= 18
4672 // For floating-point precision of 18:
4673 //
4674 // Log2ofMantissa =
4675 // -3.0400495f +
4676 // (6.1129976f +
4677 // (-5.3420409f +
4678 // (3.2865683f +
4679 // (-1.2669343f +
4680 // (0.27515199f -
4681 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4682 //
4683 // error 0.0000018516, which is better than 18 bits
4684 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4685 getF32Constant(DAG, 0xbcd2769e, dl));
4686 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4687 getF32Constant(DAG, 0x3e8ce0b9, dl));
4688 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4689 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4690 getF32Constant(DAG, 0x3fa22ae7, dl));
4691 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4692 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4693 getF32Constant(DAG, 0x40525723, dl));
4694 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4695 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4696 getF32Constant(DAG, 0x40aaf200, dl));
4697 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4698 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4699 getF32Constant(DAG, 0x40c39dad, dl));
4700 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4701 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4702 getF32Constant(DAG, 0x4042902c, dl));
4703 }
4704
4705 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
4706 }
4707
4708 // No special expansion.
4709 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
4710}
4711
4712/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
4713/// limited-precision mode.
4714static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4715 const TargetLowering &TLI) {
4716 // TODO: What fast-math-flags should be set on the floating-point nodes?
4717
4718 if (Op.getValueType() == MVT::f32 &&
4719 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4720 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4721
4722 // Scale the exponent by log10(2) [0.30102999f].
4723 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4724 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4725 getF32Constant(DAG, 0x3e9a209a, dl));
4726
4727 // Get the significand and build it into a floating-point number with
4728 // exponent of 1.
4729 SDValue X = GetSignificand(DAG, Op1, dl);
4730
4731 SDValue Log10ofMantissa;
4732 if (LimitFloatPrecision <= 6) {
4733 // For floating-point precision of 6:
4734 //
4735 // Log10ofMantissa =
4736 // -0.50419619f +
4737 // (0.60948995f - 0.10380950f * x) * x;
4738 //
4739 // error 0.0014886165, which is 6 bits
4740 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4741 getF32Constant(DAG, 0xbdd49a13, dl));
4742 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4743 getF32Constant(DAG, 0x3f1c0789, dl));
4744 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4745 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4746 getF32Constant(DAG, 0x3f011300, dl));
4747 } else if (LimitFloatPrecision <= 12) {
4748 // For floating-point precision of 12:
4749 //
4750 // Log10ofMantissa =
4751 // -0.64831180f +
4752 // (0.91751397f +
4753 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
4754 //
4755 // error 0.00019228036, which is better than 12 bits
4756 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4757 getF32Constant(DAG, 0x3d431f31, dl));
4758 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4759 getF32Constant(DAG, 0x3ea21fb2, dl));
4760 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4761 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4762 getF32Constant(DAG, 0x3f6ae232, dl));
4763 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4764 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4765 getF32Constant(DAG, 0x3f25f7c3, dl));
4766 } else { // LimitFloatPrecision <= 18
4767 // For floating-point precision of 18:
4768 //
4769 // Log10ofMantissa =
4770 // -0.84299375f +
4771 // (1.5327582f +
4772 // (-1.0688956f +
4773 // (0.49102474f +
4774 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
4775 //
4776 // error 0.0000037995730, which is better than 18 bits
4777 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4778 getF32Constant(DAG, 0x3c5d51ce, dl));
4779 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4780 getF32Constant(DAG, 0x3e00685a, dl));
4781 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4782 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4783 getF32Constant(DAG, 0x3efb6798, dl));
4784 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4785 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4786 getF32Constant(DAG, 0x3f88d192, dl));
4787 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4788 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4789 getF32Constant(DAG, 0x3fc4316c, dl));
4790 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4791 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4792 getF32Constant(DAG, 0x3f57ce70, dl));
4793 }
4794
4795 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4796 }
4797
4798 // No special expansion.
4799 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4800}
4801
4802/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4803/// limited-precision mode.
4804static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4805 const TargetLowering &TLI) {
4806 if (Op.getValueType() == MVT::f32 &&
4807 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
4808 return getLimitedPrecisionExp2(Op, dl, DAG);
4809
4810 // No special expansion.
4811 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4812}
4813
4814 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
4815/// limited-precision mode with x == 10.0f.
4816static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
4817 SelectionDAG &DAG, const TargetLowering &TLI) {
4818 bool IsExp10 = false;
4819 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4820 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4821 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4822 APFloat Ten(10.0f);
4823 IsExp10 = LHSC->isExactlyValue(Ten);
4824 }
4825 }
4826
4827 // TODO: What fast-math-flags should be set on the FMUL node?
4828 if (IsExp10) {
4829 // Put the exponent in the right bit position for later addition to the
4830 // final result:
4831 //
4832 // #define LOG2OF10 3.3219281f
4833 // t0 = Op * LOG2OF10;
4834 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4835 getF32Constant(DAG, 0x40549a78, dl));
4836 return getLimitedPrecisionExp2(t0, dl, DAG);
4837 }
4838
4839 // No special expansion.
4840 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4841}
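
The rewrite above relies on the identity pow(10, x) = exp2(x * log2(10)); 0x40549a78 is the f32 bit pattern of log2(10) ~= 3.3219281. A small host-side check of that identity (float precision only, matching the limited-precision assumption):

#include <cmath>
#include <cstdio>

int main() {
  const float Log2Of10 = 3.3219281f; // the value encoded by 0x40549a78
  float x = 2.5f;
  float ViaExp2 = std::exp2(x * Log2Of10);
  float Direct = std::pow(10.0f, x);
  std::printf("%f vs %f\n", ViaExp2, Direct); // agree to float precision
  return 0;
}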
4842
4843/// ExpandPowI - Expand a llvm.powi intrinsic.
4844static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
4845 SelectionDAG &DAG) {
4846 // If RHS is a constant, we can expand this out to a multiplication tree;
4847 // otherwise we end up lowering to a call to __powidf2 (for example). When
4848 // optimizing for size, we only do the expansion if it produces a small
4849 // number of multiplies; when not optimizing for size, we always expand.
4850 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4851 // Get the exponent as a positive value.
4852 unsigned Val = RHSC->getSExtValue();
4853 if ((int)Val < 0) Val = -Val;
4854
4855 // powi(x, 0) -> 1.0
4856 if (Val == 0)
4857 return DAG.getConstantFP(1.0, DL, LHS.getValueType());
4858
4859 const Function &F = DAG.getMachineFunction().getFunction();
4860 if (!F.optForSize() ||
4861 // If optimizing for size, don't insert too many multiplies.
4862 // This inserts up to 5 multiplies.
4863 countPopulation(Val) + Log2_32(Val) < 7) {
4864 // We use the simple binary decomposition method to generate the multiply
4865 // sequence. There are more optimal ways to do this (for example,
4866 // powi(x,15) generates one more multiply than it should), but this has
4867 // the benefit of being both really simple and much better than a libcall.
4868 SDValue Res; // Logically starts equal to 1.0
4869 SDValue CurSquare = LHS;
4870 // TODO: Intrinsics should have fast-math-flags that propagate to these
4871 // nodes.
4872 while (Val) {
4873 if (Val & 1) {
4874 if (Res.getNode())
4875 Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
4876 else
4877 Res = CurSquare; // 1.0*CurSquare.
4878 }
4879
4880 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
4881 CurSquare, CurSquare);
4882 Val >>= 1;
4883 }
4884
4885 // If the original was negative, invert the result, producing 1/(x*x*x).
4886 if (RHSC->getSExtValue() < 0)
4887 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
4888 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
4889 return Res;
4890 }
4891 }
4892
4893 // Otherwise, expand to a libcall.
4894 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
4895}
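
The loop above is plain square-and-multiply over the exponent's bits. A scalar sketch of the same decomposition (my own helper, no DAG nodes) makes the multiply count visible: roughly one multiply per set bit plus one per squaring step, which is what the countPopulation(Val) + Log2_32(Val) check bounds when optimizing for size.

#include <cstdio>

static double powiBySquaring(double X, int Exp) {
  unsigned Val = Exp < 0 ? -Exp : Exp;
  double Res = 1.0;       // the DAG version tracks "no node yet" instead
  double CurSquare = X;
  while (Val) {
    if (Val & 1)
      Res *= CurSquare;   // merge this power-of-two factor into the result
    CurSquare *= CurSquare;
    Val >>= 1;
  }
  return Exp < 0 ? 1.0 / Res : Res;
}

int main() {
  // powi(x, 11): exponent bits 1011 -> x * x^2 * x^8.
  std::printf("%f\n", powiBySquaring(2.0, 11)); // 2048.000000
  return 0;
}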
4896
4897// getUnderlyingArgReg - Find underlying register used for a truncated or
4898// bitcasted argument.
4899static unsigned getUnderlyingArgReg(const SDValue &N) {
4900 switch (N.getOpcode()) {
4901 case ISD::CopyFromReg:
4902 return cast<RegisterSDNode>(N.getOperand(1))->getReg();
4903 case ISD::BITCAST:
4904 case ISD::AssertZext:
4905 case ISD::AssertSext:
4906 case ISD::TRUNCATE:
4907 return getUnderlyingArgReg(N.getOperand(0));
4908 default:
4909 return 0;
4910 }
4911}
4912
4913/// If the DbgValueInst is a dbg_value of a function argument, create the
4914/// corresponding DBG_VALUE machine instruction for it now. At the end of
4915/// instruction selection, they will be inserted into the entry BB.
4916bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
4917 const Value *V, DILocalVariable *Variable, DIExpression *Expr,
4918 DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
4919 const Argument *Arg = dyn_cast<Argument>(V);
4920 if (!Arg)
4921 return false;
4922
4923 MachineFunction &MF = DAG.getMachineFunction();
4924 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
4925
4926 bool IsIndirect = false;
4927 Optional<MachineOperand> Op;
4928 // Some arguments have their frame index recorded during argument lowering.
4929 int FI = FuncInfo.getArgumentFrameIndex(Arg);
4930 if (FI != std::numeric_limits<int>::max())
4931 Op = MachineOperand::CreateFI(FI);
4932
4933 if (!Op && N.getNode()) {
4934 unsigned Reg = getUnderlyingArgReg(N);
4935 if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
4936 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4937 unsigned PR = RegInfo.getLiveInPhysReg(Reg);
4938 if (PR)
4939 Reg = PR;
4940 }
4941 if (Reg) {
4942 Op = MachineOperand::CreateReg(Reg, false);
4943 IsIndirect = IsDbgDeclare;
4944 }
4945 }
4946
4947 if (!Op && N.getNode())
4948 // Check if frame index is available.
4949 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
4950 if (FrameIndexSDNode *FINode =
4951 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4952 Op = MachineOperand::CreateFI(FINode->getIndex());
4953
4954 if (!Op) {
4955 // Check if ValueMap has reg number.
4956 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
4957 if (VMI != FuncInfo.ValueMap.end()) {
4958 const auto &TLI = DAG.getTargetLoweringInfo();
4959 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
4960 V->getType(), getABIRegCopyCC(V));
4961 if (RFV.occupiesMultipleRegs()) {
4962 unsigned Offset = 0;
4963 for (auto RegAndSize : RFV.getRegsAndSizes()) {
4964 Op = MachineOperand::CreateReg(RegAndSize.first, false);
4965 auto FragmentExpr = DIExpression::createFragmentExpression(
4966 Expr, Offset, RegAndSize.second);
4967 if (!FragmentExpr)
4968 continue;
4969 FuncInfo.ArgDbgValues.push_back(
4970 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
4971 Op->getReg(), Variable, *FragmentExpr));
4972 Offset += RegAndSize.second;
4973 }
4974 return true;
4975 }
4976 Op = MachineOperand::CreateReg(VMI->second, false);
4977 IsIndirect = IsDbgDeclare;
4978 }
4979 }
4980
4981 if (!Op)
4982 return false;
4983
4984 assert(Variable->isValidLocationForIntrinsic(DL) &&
4985 "Expected inlined-at fields to agree");
4986 IsIndirect = (Op->isReg()) ? IsIndirect : true;
4987 FuncInfo.ArgDbgValues.push_back(
4988 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
4989 *Op, Variable, Expr));
4990
4991 return true;
4992}
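
In the multi-register branch above, each register describes a contiguous bit range of the variable via a fragment expression, with the offset accumulating register by register. A minimal sketch of how the (offset, size) pairs line up, using a hypothetical i128 argument split across two 64-bit registers (the register numbers are invented for the illustration):

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Pretend RFV.getRegsAndSizes() returned two 64-bit registers.
  std::vector<std::pair<unsigned, unsigned>> RegsAndSizes = {{100, 64},
                                                             {101, 64}};
  unsigned Offset = 0;
  for (const auto &RS : RegsAndSizes) {
    // Each register gets a fragment expression covering these bits.
    std::printf("vreg %u -> fragment(offset=%u, size=%u)\n", RS.first, Offset,
                RS.second);
    Offset += RS.second;
  }
  return 0;
}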
4993
4994/// Return the appropriate SDDbgValue based on N.
4995SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
4996 DILocalVariable *Variable,
4997 DIExpression *Expr,
4998 const DebugLoc &dl,
4999 unsigned DbgSDNodeOrder) {
5000 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5001 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5002 // stack slot locations.
5003 //
5004 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5005 // debug values here after optimization:
5006 //
5007 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
5008 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5009 //
5010 // Both describe the direct values of their associated variables.
5011 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5012 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5013 }
5014 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5015 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5016}
5017
5018// VisualStudio defines setjmp as _setjmp
5019#if defined(_MSC_VER) && defined(setjmp) && \
5020 !defined(setjmp_undefined_for_msvc)
5021# pragma push_macro("setjmp")
5022# undef setjmp
5023# define setjmp_undefined_for_msvc
5024#endif
5025
5026/// Lower the call to the specified intrinsic function. If we want to emit this
5027/// as a call to a named external function, return the name. Otherwise, lower it
5028/// and return null.
5029const char *
5030SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
5031 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5032 SDLoc sdl = getCurSDLoc();
5033 DebugLoc dl = getCurDebugLoc();
5034 SDValue Res;
5035
5036 switch (Intrinsic) {
5037 default:
5038 // By default, turn this into a target intrinsic node.
5039 visitTargetIntrinsic(I, Intrinsic);
5040 return nullptr;
5041 case Intrinsic::vastart: visitVAStart(I); return nullptr;
5042 case Intrinsic::vaend: visitVAEnd(I); return nullptr;
5043 case Intrinsic::vacopy: visitVACopy(I); return nullptr;
5044 case Intrinsic::returnaddress:
5045 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5046 TLI.getPointerTy(DAG.getDataLayout()),
5047 getValue(I.getArgOperand(0))));
5048 return nullptr;
5049 case Intrinsic::addressofreturnaddress:
5050 setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5051 TLI.getPointerTy(DAG.getDataLayout())));
5052 return nullptr;
5053 case Intrinsic::frameaddress:
5054 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5055 TLI.getPointerTy(DAG.getDataLayout()),
5056 getValue(I.getArgOperand(0))));
5057 return nullptr;
5058 case Intrinsic::read_register: {
5059 Value *Reg = I.getArgOperand(0);
5060 SDValue Chain = getRoot();
5061 SDValue RegName =
5062 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5063 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5064 Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5065 DAG.getVTList(VT, MVT::Other), Chain, RegName);
5066 setValue(&I, Res);
5067 DAG.setRoot(Res.getValue(1));
5068 return nullptr;
5069 }
5070 case Intrinsic::write_register: {
5071 Value *Reg = I.getArgOperand(0);
5072 Value *RegValue = I.getArgOperand(1);
5073 SDValue Chain = getRoot();
5074 SDValue RegName =
5075 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5076 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5077 RegName, getValue(RegValue)));
5078 return nullptr;
5079 }
5080 case Intrinsic::setjmp:
5081 return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
5082 case Intrinsic::longjmp:
5083 return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
5084 case Intrinsic::memcpy: {
5085 const auto &MCI = cast<MemCpyInst>(I);
5086 SDValue Op1 = getValue(I.getArgOperand(0));
5087 SDValue Op2 = getValue(I.getArgOperand(1));
5088 SDValue Op3 = getValue(I.getArgOperand(2));
5089 // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5090 unsigned DstAlign = std::max<unsigned>(MCI.getDestAlignment(), 1);
5091 unsigned SrcAlign = std::max<unsigned>(MCI.getSourceAlignment(), 1);
5092 unsigned Align = MinAlign(DstAlign, SrcAlign);
5093 bool isVol = MCI.isVolatile();
5094 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5095 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5096 // node.
5097 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5098 false, isTC,
5099 MachinePointerInfo(I.getArgOperand(0)),
5100 MachinePointerInfo(I.getArgOperand(1)));
5101 updateDAGForMaybeTailCall(MC);
5102 return nullptr;
5103 }
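
The single Align passed to getMemcpy above is the common alignment of the two pointers, MinAlign(DstAlign, SrcAlign). A sketch of that behaviour (my own helper, not the LLVM one; for power-of-two inputs it is simply the smaller of the two):

#include <cstdint>
#include <cstdio>

static uint64_t minAlign(uint64_t A, uint64_t B) {
  return (A | B) & (~(A | B) + 1); // lowest set bit of A|B
}

int main() {
  std::printf("%llu\n", (unsigned long long)minAlign(16, 4)); // 4
  std::printf("%llu\n", (unsigned long long)minAlign(8, 8));  // 8
  return 0;
}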
5104 case Intrinsic::memset: {
5105 const auto &MSI = cast<MemSetInst>(I);
5106 SDValue Op1 = getValue(I.getArgOperand(0));
5107 SDValue Op2 = getValue(I.getArgOperand(1));
5108 SDValue Op3 = getValue(I.getArgOperand(2));
5109 // @llvm.memset defines 0 and 1 to both mean no alignment.
5110 unsigned Align = std::max<unsigned>(MSI.getDestAlignment(), 1);
5111 bool isVol = MSI.isVolatile();
5112 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5113 SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5114 isTC, MachinePointerInfo(I.getArgOperand(0)));
5115 updateDAGForMaybeTailCall(MS);
5116 return nullptr;
5117 }
5118 case Intrinsic::memmove: {
5119 const auto &MMI = cast<MemMoveInst>(I);
5120 SDValue Op1 = getValue(I.getArgOperand(0));
5121 SDValue Op2 = getValue(I.getArgOperand(1));
5122 SDValue Op3 = getValue(I.getArgOperand(2));
5123 // @llvm.memmove defines 0 and 1 to both mean no alignment.
5124 unsigned DstAlign = std::max<unsigned>(MMI.getDestAlignment(), 1);
5125 unsigned SrcAlign = std::max<unsigned>(MMI.getSourceAlignment(), 1);
5126 unsigned Align = MinAlign(DstAlign, SrcAlign);
5127 bool isVol = MMI.isVolatile();
5128 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5129 // FIXME: Support passing different dest/src alignments to the memmove DAG
5130 // node.
5131 SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5132 isTC, MachinePointerInfo(I.getArgOperand(0)),
5133 MachinePointerInfo(I.getArgOperand(1)));
5134 updateDAGForMaybeTailCall(MM);
5135 return nullptr;
5136 }
5137 case Intrinsic::memcpy_element_unordered_atomic: {
5138 const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5139 SDValue Dst = getValue(MI.getRawDest());
5140 SDValue Src = getValue(MI.getRawSource());
5141 SDValue Length = getValue(MI.getLength());
5142
5143 unsigned DstAlign = MI.getDestAlignment();
5144 unsigned SrcAlign = MI.getSourceAlignment();
5145 Type *LengthTy = MI.getLength()->getType();
5146 unsigned ElemSz = MI.getElementSizeInBytes();
5147 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5148 SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5149 SrcAlign, Length, LengthTy, ElemSz, isTC,
5150 MachinePointerInfo(MI.getRawDest()),
5151 MachinePointerInfo(MI.getRawSource()));
5152 updateDAGForMaybeTailCall(MC);
5153 return nullptr;
5154 }
5155 case Intrinsic::memmove_element_unordered_atomic: {
5156 auto &MI = cast<AtomicMemMoveInst>(I);
5157 SDValue Dst = getValue(MI.getRawDest());
5158 SDValue Src = getValue(MI.getRawSource());
5159 SDValue Length = getValue(MI.getLength());
5160
5161 unsigned DstAlign = MI.getDestAlignment();
5162 unsigned SrcAlign = MI.getSourceAlignment();
5163 Type *LengthTy = MI.getLength()->getType();
5164 unsigned ElemSz = MI.getElementSizeInBytes();
5165 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5166 SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5167 SrcAlign, Length, LengthTy, ElemSz, isTC,
5168 MachinePointerInfo(MI.getRawDest()),
5169 MachinePointerInfo(MI.getRawSource()));
5170 updateDAGForMaybeTailCall(MC);
5171 return nullptr;
5172 }
5173 case Intrinsic::memset_element_unordered_atomic: {
5174 auto &MI = cast<AtomicMemSetInst>(I);
5175 SDValue Dst = getValue(MI.getRawDest());
5176 SDValue Val = getValue(MI.getValue());
5177 SDValue Length = getValue(MI.getLength());
5178
5179 unsigned DstAlign = MI.getDestAlignment();
5180 Type *LengthTy = MI.getLength()->getType();
5181 unsigned ElemSz = MI.getElementSizeInBytes();
5182 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5183 SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5184 LengthTy, ElemSz, isTC,
5185 MachinePointerInfo(MI.getRawDest()));
5186 updateDAGForMaybeTailCall(MC);
5187 return nullptr;
5188 }
5189 case Intrinsic::dbg_addr:
5190 case Intrinsic::dbg_declare: {
5191 const auto &DI = cast<DbgVariableIntrinsic>(I);
5192 DILocalVariable *Variable = DI.getVariable();
5193 DIExpression *Expression = DI.getExpression();
5194 dropDanglingDebugInfo(Variable, Expression);
5195 assert(Variable && "Missing variable");
5196
5197 // Check if address has undef value.
5198 const Value *Address = DI.getVariableLocation();
5199 if (!Address || isa<UndefValue>(Address) ||
5200 (Address->use_empty() && !isa<Argument>(Address))) {
5201 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5202 return nullptr;
5203 }
5204
5205 bool isParameter = Variable->isParameter() || isa<Argument>(Address);
5206
5207 // Check if this variable can be described by a frame index, typically
5208 // either as a static alloca or a byval parameter.
5209 int FI = std::numeric_limits<int>::max();
5210 if (const auto *AI =
5211 dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
5212 if (AI->isStaticAlloca()) {
5213 auto I = FuncInfo.StaticAllocaMap.find(AI);
5214 if (I != FuncInfo.StaticAllocaMap.end())
5215 FI = I->second;
5216 }
5217 } else if (const auto *Arg = dyn_cast<Argument>(
5218 Address->stripInBoundsConstantOffsets())) {
5219 FI = FuncInfo.getArgumentFrameIndex(Arg);
5220 }
5221
5222 // llvm.dbg.addr is control dependent and always generates indirect
5223 // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
5224 // the MachineFunction variable table.
5225 if (FI != std::numeric_limits<int>::max()) {
5226 if (Intrinsic == Intrinsic::dbg_addr) {
5227 SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
5228 Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
5229 DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
5230 }
5231 return nullptr;
5232 }
5233
5234 SDValue &N = NodeMap[Address];
5235 if (!N.getNode() && isa<Argument>(Address))
5236 // Check unused arguments map.
5237 N = UnusedArgNodeMap[Address];
5238 SDDbgValue *SDV;
5239 if (N.getNode()) {
5240 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
5241 Address = BCI->getOperand(0);
5242 // Parameters are handled specially.
5243 auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
5244 if (isParameter && FINode) {
5245 // Byval parameter. We have a frame index at this point.
5246 SDV =
5247 DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
5248 /*IsIndirect*/ true, dl, SDNodeOrder);
5249 } else if (isa<Argument>(Address)) {
5250 // Address is an argument, so try to emit its dbg value using
5251 // virtual register info from the FuncInfo.ValueMap.
5252 EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
5253 return nullptr;
5254 } else {
5255 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
5256 true, dl, SDNodeOrder);
5257 }
5258 DAG.AddDbgValue(SDV, N.getNode(), isParameter);
5259 } else {
5260 // If Address is an argument then try to emit its dbg value using
5261 // virtual register info from the FuncInfo.ValueMap.
5262 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
5263 N)) {
5264 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5265 }
5266 }
5267 return nullptr;
5268 }
5269 case Intrinsic::dbg_label: {
5270 const DbgLabelInst &DI = cast<DbgLabelInst>(I);
5271 DILabel *Label = DI.getLabel();
5272 assert(Label && "Missing label");
5273
5274 SDDbgLabel *SDV;
5275 SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
5276 DAG.AddDbgLabel(SDV);
5277 return nullptr;
5278 }
5279 case Intrinsic::dbg_value: {
5280 const DbgValueInst &DI = cast<DbgValueInst>(I);
5281 assert(DI.getVariable() && "Missing variable");
5282
5283 DILocalVariable *Variable = DI.getVariable();
5284 DIExpression *Expression = DI.getExpression();
5285 dropDanglingDebugInfo(Variable, Expression);
5286 const Value *V = DI.getValue();
5287 if (!V)
5288 return nullptr;
5289
5290 SDDbgValue *SDV;
5291 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
5292 SDV = DAG.getConstantDbgValue(Variable, Expression, V, dl, SDNodeOrder);
5293 DAG.AddDbgValue(SDV, nullptr, false);
5294 return nullptr;
5295 }
5296
5297 // Do not use getValue() in here; we don't want to generate code at
5298 // this point if it hasn't been done yet.
5299 SDValue N = NodeMap[V];
5300 if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
5301 N = UnusedArgNodeMap[V];
5302 if (N.getNode()) {
5303 if (EmitFuncArgumentDbgValue(V, Variable, Expression, dl, false, N))
5304 return nullptr;
5305 SDV = getDbgValue(N, Variable, Expression, dl, SDNodeOrder);
5306 DAG.AddDbgValue(SDV, N.getNode(), false);
5307 return nullptr;
5308 }
5309
5310 // PHI nodes have already been selected, so we should already know which
5311 // VReg the value is assigned to.
5312 if (isa<PHINode>(V)) {
5313 auto VMI = FuncInfo.ValueMap.find(V);
5314 if (VMI != FuncInfo.ValueMap.end()) {
5315 unsigned Reg = VMI->second;
5316 // The PHI node may be split up into several MI PHI nodes (in
5317 // FunctionLoweringInfo::set).
5318 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
5319 V->getType(), None);
5320 if (RFV.occupiesMultipleRegs()) {
5321 unsigned Offset = 0;
5322 unsigned BitsToDescribe = 0;
5323 if (auto VarSize = Variable->getSizeInBits())
5324 BitsToDescribe = *VarSize;
5325 if (auto Fragment = Expression->getFragmentInfo())
5326 BitsToDescribe = Fragment->SizeInBits;
5327 for (auto RegAndSize : RFV.getRegsAndSizes()) {
5328 unsigned RegisterSize = RegAndSize.second;
5329 // Bail out if all bits are described already.
5330 if (Offset >= BitsToDescribe)
5331 break;
5332 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
5333 ? BitsToDescribe - Offset
5334 : RegisterSize;
5335 auto FragmentExpr = DIExpression::createFragmentExpression(
5336 Expression, Offset, FragmentSize);
5337 if (!FragmentExpr)
5338 continue;
5339 SDV = DAG.getVRegDbgValue(Variable, *FragmentExpr, RegAndSize.first,
5340 false, dl, SDNodeOrder);
5341 DAG.AddDbgValue(SDV, nullptr, false);
5342 Offset += RegisterSize;
5343 }
5344 } else {
5345 SDV = DAG.getVRegDbgValue(Variable, Expression, Reg, false, dl,
5346 SDNodeOrder);
5347 DAG.AddDbgValue(SDV, nullptr, false);
5348 }
5349 return nullptr;
5350 }
5351 }
5352
5353 // TODO: When we get here we will either drop the dbg.value completely, or
5354 // try to move it forward by letting it dangle for a while. So we should
5355 // probably add an extra DbgValue to the DAG here, with a reference to
5356 // "noreg", to indicate that we have lost the debug location for the
5357 // variable.
5358
5359 if (!V->use_empty()) {
5360 // Do not call getValue(V) yet, as we don't want to generate code.
5361 // Remember it for later.
5362 DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
5363 return nullptr;
5364 }
5365
5366 LLVM_DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
5367 LLVM_DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
5368 return nullptr;
5369 }
5370
5371 case Intrinsic::eh_typeid_for: {
5372 // Find the type id for the given typeinfo.
5373 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5374 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5375 Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5376 setValue(&I, Res);
5377 return nullptr;
5378 }
5379
5380 case Intrinsic::eh_return_i32:
5381 case Intrinsic::eh_return_i64:
5382 DAG.getMachineFunction().setCallsEHReturn(true);
5383 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5384 MVT::Other,
5385 getControlRoot(),
5386 getValue(I.getArgOperand(0)),
5387 getValue(I.getArgOperand(1))));
5388 return nullptr;
5389 case Intrinsic::eh_unwind_init:
5390 DAG.getMachineFunction().setCallsUnwindInit(true);
5391 return nullptr;
5392 case Intrinsic::eh_dwarf_cfa:
5393 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5394 TLI.getPointerTy(DAG.getDataLayout()),
5395 getValue(I.getArgOperand(0))));
5396 return nullptr;
5397 case Intrinsic::eh_sjlj_callsite: {
5398 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5399 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
5400 assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
5401 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
5402
5403 MMI.setCurrentCallSite(CI->getZExtValue());
5404 return nullptr;
5405 }
5406 case Intrinsic::eh_sjlj_functioncontext: {
5407 // Get and store the index of the function context.
5408 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
5409 AllocaInst *FnCtx =
5410 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
5411 int FI = FuncInfo.StaticAllocaMap[FnCtx];
5412 MFI.setFunctionContextIndex(FI);
5413 return nullptr;
5414 }
5415 case Intrinsic::eh_sjlj_setjmp: {
5416 SDValue Ops[2];
5417 Ops[0] = getRoot();
5418 Ops[1] = getValue(I.getArgOperand(0));
5419 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
5420 DAG.getVTList(MVT::i32, MVT::Other), Ops);
5421 setValue(&I, Op.getValue(0));
5422 DAG.setRoot(Op.getValue(1));
5423 return nullptr;
5424 }
5425 case Intrinsic::eh_sjlj_longjmp:
5426 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
5427 getRoot(), getValue(I.getArgOperand(0))));
5428 return nullptr;
5429 case Intrinsic::eh_sjlj_setup_dispatch:
5430 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
5431 getRoot()));
5432 return nullptr;
5433 case Intrinsic::masked_gather:
5434 visitMaskedGather(I);
5435 return nullptr;
5436 case Intrinsic::masked_load:
5437 visitMaskedLoad(I);
5438 return nullptr;
5439 case Intrinsic::masked_scatter:
5440 visitMaskedScatter(I);
5441 return nullptr;
5442 case Intrinsic::masked_store:
5443 visitMaskedStore(I);
5444 return nullptr;
5445 case Intrinsic::masked_expandload:
5446 visitMaskedLoad(I, true /* IsExpanding */);
5447 return nullptr;
5448 case Intrinsic::masked_compressstore:
5449 visitMaskedStore(I, true /* IsCompressing */);
5450 return nullptr;
5451 case Intrinsic::x86_mmx_pslli_w:
5452 case Intrinsic::x86_mmx_pslli_d:
5453 case Intrinsic::x86_mmx_pslli_q:
5454 case Intrinsic::x86_mmx_psrli_w:
5455 case Intrinsic::x86_mmx_psrli_d:
5456 case Intrinsic::x86_mmx_psrli_q:
5457 case Intrinsic::x86_mmx_psrai_w:
5458 case Intrinsic::x86_mmx_psrai_d: {
5459 SDValue ShAmt = getValue(I.getArgOperand(1));
5460 if (isa<ConstantSDNode>(ShAmt)) {
5461 visitTargetIntrinsic(I, Intrinsic);
5462 return nullptr;
5463 }
5464 unsigned NewIntrinsic = 0;
5465 EVT ShAmtVT = MVT::v2i32;
5466 switch (Intrinsic) {
5467 case Intrinsic::x86_mmx_pslli_w:
5468 NewIntrinsic = Intrinsic::x86_mmx_psll_w;
5469 break;
5470 case Intrinsic::x86_mmx_pslli_d:
5471 NewIntrinsic = Intrinsic::x86_mmx_psll_d;
5472 break;
5473 case Intrinsic::x86_mmx_pslli_q:
5474 NewIntrinsic = Intrinsic::x86_mmx_psll_q;
5475 break;
5476 case Intrinsic::x86_mmx_psrli_w:
5477 NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
5478 break;
5479 case Intrinsic::x86_mmx_psrli_d:
5480 NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
5481 break;
5482 case Intrinsic::x86_mmx_psrli_q:
5483 NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
5484 break;
5485 case Intrinsic::x86_mmx_psrai_w:
5486 NewIntrinsic = Intrinsic::x86_mmx_psra_w;
5487 break;
5488 case Intrinsic::x86_mmx_psrai_d:
5489 NewIntrinsic = Intrinsic::x86_mmx_psra_d;
5490 break;
5491 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5492 }
5493
5494 // The vector shift intrinsics with scalar shift amounts use 32-bit values,
5495 // but the SSE2/MMX shift instructions read 64 bits. Set the upper 32 bits
5496 // to zero.
5497 // We must do this early because v2i32 is not a legal type.
5498 SDValue ShOps[2];
5499 ShOps[0] = ShAmt;
5500 ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
5501 ShAmt = DAG.getBuildVector(ShAmtVT, sdl, ShOps);
5502 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5503 ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5504 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5505 DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
5506 getValue(I.getArgOperand(0)), ShAmt);
5507 setValue(&I, Res);
5508 return nullptr;
5509 }
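
The BUILD_VECTOR/BITCAST pair above widens the 32-bit immediate shift amount into a 64-bit operand whose upper half is known to be zero. A host-side sketch of the resulting bit layout (assumes a little-endian host, as on the x86 target this lowering serves):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint32_t Amt = 5;
  uint32_t ShOps[2] = {Amt, 0};            // the BUILD_VECTOR <amt, 0>
  uint64_t As64 = 0;
  std::memcpy(&As64, ShOps, sizeof(As64)); // the bitcast to the 64-bit type
  std::printf("0x%016llx\n", (unsigned long long)As64); // 0x0000000000000005
  return 0;
}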
5510 case Intrinsic::powi:
5511 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5512 getValue(I.getArgOperand(1)), DAG));
5513 return nullptr;
5514 case Intrinsic::log:
5515 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5516 return nullptr;
5517 case Intrinsic::log2:
5518 setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5519 return nullptr;
5520 case Intrinsic::log10:
5521 setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5522 return nullptr;
5523 case Intrinsic::exp:
5524 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5525 return nullptr;
5526 case Intrinsic::exp2:
5527 setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5528 return nullptr;
5529 case Intrinsic::pow:
5530 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5531 getValue(I.getArgOperand(1)), DAG, TLI));
5532 return nullptr;
5533 case Intrinsic::sqrt:
5534 case Intrinsic::fabs:
5535 case Intrinsic::sin:
5536 case Intrinsic::cos:
5537 case Intrinsic::floor:
5538 case Intrinsic::ceil:
5539 case Intrinsic::trunc:
5540 case Intrinsic::rint:
5541 case Intrinsic::nearbyint:
5542 case Intrinsic::round:
5543 case Intrinsic::canonicalize: {
5544 unsigned Opcode;
5545 switch (Intrinsic) {
5546 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5547 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
5548 case Intrinsic::fabs: Opcode = ISD::FABS; break;
5549 case Intrinsic::sin: Opcode = ISD::FSIN; break;
5550 case Intrinsic::cos: Opcode = ISD::FCOS; break;
5551 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
5552 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
5553 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
5554 case Intrinsic::rint: Opcode = ISD::FRINT; break;
5555 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5556 case Intrinsic::round: Opcode = ISD::FROUND; break;
5557 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
5558 }
5559
5560 setValue(&I, DAG.getNode(Opcode, sdl,
5561 getValue(I.getArgOperand(0)).getValueType(),
5562 getValue(I.getArgOperand(0))));
5563 return nullptr;
5564 }
5565 case Intrinsic::minnum: {
5566 auto VT = getValue(I.getArgOperand(0)).getValueType();
5567 unsigned Opc =
5568 I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT)
5569 ? ISD::FMINIMUM
5570 : ISD::FMINNUM;
5571 setValue(&I, DAG.getNode(Opc, sdl, VT,
5572 getValue(I.getArgOperand(0)),
5573 getValue(I.getArgOperand(1))));
5574 return nullptr;
5575 }
5576 case Intrinsic::maxnum: {
5577 auto VT = getValue(I.getArgOperand(0)).getValueType();
5578 unsigned Opc =
5579 I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT)
5580 ? ISD::FMAXIMUM
5581 : ISD::FMAXNUM;
5582 setValue(&I, DAG.getNode(Opc, sdl, VT,
5583 getValue(I.getArgOperand(0)),
5584 getValue(I.getArgOperand(1))));
5585 return nullptr;
5586 }
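
The nnan check in the two cases above matters because FMINNUM/FMAXNUM follow the fmin/fmax NaN-ignoring convention, while FMINIMUM/FMAXIMUM propagate NaNs (and order -0.0 before +0.0); the two families only coincide once NaNs are excluded. A quick reminder of the fmin behaviour:

#include <cmath>
#include <cstdio>

int main() {
  double N = std::nan("");
  std::printf("fmin(NaN, 1.0) = %f\n", std::fmin(N, 1.0)); // prints 1.000000
  // A NaN-propagating FMINIMUM-style operation would yield NaN here instead.
  return 0;
}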
5587 case Intrinsic::minimum:
5588 setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
5589 getValue(I.getArgOperand(0)).getValueType(),
5590 getValue(I.getArgOperand(0)),
5591 getValue(I.getArgOperand(1))));
5592 return nullptr;
5593 case Intrinsic::maximum:
5594 setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
5595 getValue(I.getArgOperand(0)).getValueType(),
5596 getValue(I.getArgOperand(0)),
5597 getValue(I.getArgOperand(1))));
5598 return nullptr;
5599 case Intrinsic::copysign:
5600 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5601 getValue(I.getArgOperand(0)).getValueType(),
5602 getValue(I.getArgOperand(0)),
5603 getValue(I.getArgOperand(1))));
5604 return nullptr;
5605 case Intrinsic::fma:
5606 setValue(&I, DAG.getNode(ISD::FMA, sdl,
5607 getValue(I.getArgOperand(0)).getValueType(),
5608 getValue(I.getArgOperand(0)),
5609 getValue(I.getArgOperand(1)),
5610 getValue(I.getArgOperand(2))));
5611 return nullptr;
5612 case Intrinsic::experimental_constrained_fadd:
5613 case Intrinsic::experimental_constrained_fsub:
5614 case Intrinsic::experimental_constrained_fmul:
5615 case Intrinsic::experimental_constrained_fdiv:
5616 case Intrinsic::experimental_constrained_frem:
5617 case Intrinsic::experimental_constrained_fma:
5618 case Intrinsic::experimental_constrained_sqrt:
5619 case Intrinsic::experimental_constrained_pow:
5620 case Intrinsic::experimental_constrained_powi:
5621 case Intrinsic::experimental_constrained_sin:
5622 case Intrinsic::experimental_constrained_cos:
5623 case Intrinsic::experimental_constrained_exp:
5624 case Intrinsic::experimental_constrained_exp2:
5625 case Intrinsic::experimental_constrained_log:
5626 case Intrinsic::experimental_constrained_log10:
5627 case Intrinsic::experimental_constrained_log2:
5628 case Intrinsic::experimental_constrained_rint:
5629 case Intrinsic::experimental_constrained_nearbyint:
5630 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
5631 return nullptr;
5632 case Intrinsic::fmuladd: {
5633 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5634 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5635 TLI.isFMAFasterThanFMulAndFAdd(VT)) {
5636 setValue(&I, DAG.getNode(ISD::FMA, sdl,
5637 getValue(I.getArgOperand(0)).getValueType(),
5638 getValue(I.getArgOperand(0)),
5639 getValue(I.getArgOperand(1)),
5640 getValue(I.getArgOperand(2))));
5641 } else {
5642 // TODO: Intrinsic calls should have fast-math-flags.
5643 SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5644 getValue(I.getArgOperand(0)).getValueType(),
5645 getValue(I.getArgOperand(0)),
5646 getValue(I.getArgOperand(1)));
5647 SDValue Add = DAG.getNode(ISD::FADD, sdl,
5648 getValue(I.getArgOperand(0)).getValueType(),
5649 Mul,
5650 getValue(I.getArgOperand(2)));
5651 setValue(&I, Add);
5652 }
5653 return nullptr;
5654 }
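
The two lowerings of llvm.fmuladd above differ only in rounding: the ISD::FMA path rounds once, the FMUL + FADD fallback rounds twice. A small demonstration of a case where that is observable (volatile is used only to keep the compiler from contracting the separate multiply and add back into an fma):

#include <cmath>
#include <cstdio>

int main() {
  double eps = std::ldexp(1.0, -27);  // 2^-27
  double a = 1.0 + eps, b = 1.0 - eps, c = -1.0;
  double Fused = std::fma(a, b, c);   // one rounding: exactly -2^-54
  volatile double P = a * b;          // a*b = 1 - 2^-54 rounds to 1.0 ...
  double Unfused = P + c;             // ... so the separate add yields 0.0
  std::printf("fused=%g unfused=%g\n", Fused, Unfused);
  return 0;
}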
5655 case Intrinsic::convert_to_fp16:
5656 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
5657 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
5658 getValue(I.getArgOperand(0)),
5659 DAG.getTargetConstant(0, sdl,
5660 MVT::i32))));
5661 return nullptr;
5662 case Intrinsic::convert_from_fp16:
5663 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
5664 TLI.getValueType(DAG.getDataLayout(), I.getType()),
5665 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
5666 getValue(I.getArgOperand(0)))));
5667 return nullptr;
5668 case Intrinsic::pcmarker: {
5669 SDValue Tmp = getValue(I.getArgOperand(0));
5670 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
5671 return nullptr;
5672 }
5673 case Intrinsic::readcyclecounter: {
5674 SDValue Op = getRoot();
5675 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
5676 DAG.getVTList(MVT::i64, MVT::Other), Op);
5677 setValue(&I, Res);
5678 DAG.setRoot(Res.getValue(1));
5679 return nullptr;
5680 }
5681 case Intrinsic::bitreverse:
5682 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
5683 getValue(I.getArgOperand(0)).getValueType(),
5684 getValue(I.getArgOperand(0))));
5685 return nullptr;
5686 case Intrinsic::bswap:
5687 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
5688 getValue(I.getArgOperand(0)).getValueType(),
5689 getValue(I.getArgOperand(0))));
5690 return nullptr;
5691 case Intrinsic::cttz: {
5692 SDValue Arg = getValue(I.getArgOperand(0));
5693 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5694 EVT Ty = Arg.getValueType();
5695 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
5696 sdl, Ty, Arg));
5697 return nullptr;
5698 }
5699 case Intrinsic::ctlz: {
5700 SDValue Arg = getValue(I.getArgOperand(0));
5701 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5702 EVT Ty = Arg.getValueType();
5703 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
5704 sdl, Ty, Arg));
5705 return nullptr;
5706 }
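
In the cttz/ctlz cases above, the constant second operand selects between the plain node and the *_ZERO_UNDEF node, which is allowed to assume a non-zero input. Reference behaviour for the two cttz flavours (a portable sketch, not the LLVM implementation):

#include <cstdint>
#include <cstdio>

// ZeroIsUndef mirrors the constant operand checked above: when true the
// caller promises the input is non-zero, so any answer is acceptable for 0.
static unsigned cttz32(uint32_t V, bool ZeroIsUndef) {
  if (V == 0)
    return ZeroIsUndef ? 0u /* unspecified */ : 32u;
  unsigned N = 0;
  while (!(V & 1u)) {
    V >>= 1;
    ++N;
  }
  return N;
}

int main() {
  std::printf("%u %u\n", cttz32(40, false), cttz32(0, false)); // 3 32
  return 0;
}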
5707 case Intrinsic::ctpop: {
5708 SDValue Arg = getValue(I.getArgOperand(0));
5709 EVT Ty = Arg.getValueType();
5710 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
5711 return nullptr;
5712 }
5713 case Intrinsic::fshl:
5714 case Intrinsic::fshr: {
5715 bool IsFSHL = Intrinsic == Intrinsic::fshl;
5716 SDValue X = getValue(I.getArgOperand(0));
5717 SDValue Y = getValue(I.getArgOperand(1));
5718 SDValue Z = getValue(I.getArgOperand(2));
5719 EVT VT = X.getValueType();
5720 SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT);
5721 SDValue Zero = DAG.getConstant(0, sdl, VT);
5722 SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC);
5723
5724 // When X == Y, this is a rotate. If the data type has a power-of-2 size, we
5725 // avoid the select that is necessary in the general case to filter out
5726 // the 0-shift possibility that leads to UB.
5727 if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
5728 // TODO: This should also be done if the operation is custom, but we have
5729 // to make sure targets are handling the modulo shift amount as expected.
5730 auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
5731 if (TLI.isOperationLegal(RotateOpcode, VT)) {
5732 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
5733 return nullptr;
5734 }
5735
5736 // Some targets only rotate one way. Try the opposite direction.
5737 RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
5738 if (TLI.isOperationLegal(RotateOpcode, VT)) {
5739 // Negate the shift amount because it is safe to ignore the high bits.
5740 SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
5741 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
5742 return nullptr;
5743 }
5744
5745 // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW))
5746 // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW))
5747 SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
5748 SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC);
5749 SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt);
5750 SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt);
5751 setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY));
5752 return nullptr;
5753 }
5754
5755 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
5756 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
5757 SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt);
5758 SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt);
5759 SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt);
5760 SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY);
5761
5762 // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
5763 // and that is undefined. We must compare and select to avoid UB.
5764 EVT CCVT = MVT::i1;
5765 if (VT.isVector())
5766 CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements());
5767
5768 // For fshl, 0-shift returns the 1st arg (X).
5769 // For fshr, 0-shift returns the 2nd arg (Y).
5770 SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ);
5771 setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
5772 return nullptr;
5773 }
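
For reference, the general expansion above implements the following scalar semantics; the explicit zero-shift select exists because a shift by the full bit width would be undefined. An 8-bit sketch:

#include <cstdint>
#include <cstdio>

// fshl(X, Y, Z) = (X << (Z % BW)) | (Y >> (BW - (Z % BW))), BW = 8 here,
// with the zero-shift case returning X (the DAG select on IsZeroShift).
static uint8_t fshl8(uint8_t X, uint8_t Y, uint8_t Z) {
  unsigned Sh = Z % 8;
  if (Sh == 0)
    return X;
  return (uint8_t)((X << Sh) | (Y >> (8 - Sh)));
}

int main() {
  // fshl(0xAB, 0xCD, 4): the high nibble of Y is shifted in -> 0xBC.
  std::printf("0x%02X\n", fshl8(0xAB, 0xCD, 4));
  // X == Y degenerates to a rotate-left, the special case handled above.
  std::printf("0x%02X\n", fshl8(0x81, 0x81, 1)); // 0x03
  return 0;
}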
5774 case Intrinsic::sadd_sat: {
5775 SDValue Op1 = getValue(I.getArgOperand(0));
5776 SDValue Op2 = getValue(I.getArgOperand(1));
5777 setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
5778 return nullptr;
5779 }
5780 case Intrinsic::uadd_sat: {
5781 SDValue Op1 = getValue(I.getArgOperand(0));
5782 SDValue Op2 = getValue(I.getArgOperand(1));
5783 setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
5784 return nullptr;
5785 }
5786 case Intrinsic::stacksave: {
5787 SDValue Op = getRoot();
5788 Res = DAG.getNode(
5789 ISD::STACKSAVE, sdl,
5790 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
5791 setValue(&I, Res);
5792 DAG.setRoot(Res.getValue(1));
5793 return nullptr;
5794 }
5795 case Intrinsic::stackrestore:
5796 Res = getValue(I.getArgOperand(0));
5797 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
5798 return nullptr;
5799 case Intrinsic::get_dynamic_area_offset: {
5800 SDValue Op = getRoot();
5801 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5802 EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
5803 // Result type for @llvm.get.dynamic.area.offset should match PtrTy for
5804 // the target.
5805 if (PtrTy != ResTy)
5806 report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
5807 " intrinsic!");
5808 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
5809 Op);
5810 DAG.setRoot(Op);
5811 setValue(&I, Res);
5812 return nullptr;
5813 }
5814 case Intrinsic::stackguard: {
5815 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5816 MachineFunction &MF = DAG.getMachineFunction();
5817 const Module &M = *MF.getFunction().getParent();
5818 SDValue Chain = getRoot();
5819 if (TLI.useLoadStackGuardNode()) {
5820 Res = getLoadStackGuard(DAG, sdl, Chain);
5821 } else {
5822 const Value *Global = TLI.getSDagStackGuard(M);
5823 unsigned Align = DL->getPrefTypeAlignment(Global->getType());
5824 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
5825 MachinePointerInfo(Global, 0), Align,
5826 MachineMemOperand::MOVolatile);
5827 }
5828 if (TLI.useStackGuardXorFP())
5829 Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
5830 DAG.setRoot(Chain);
5831 setValue(&I, Res);
5832 return nullptr;
5833 }
5834 case Intrinsic::stackprotector: {
5835 // Emit code into the DAG to store the stack guard onto the stack.
5836 MachineFunction &MF = DAG.getMachineFunction();
5837 MachineFrameInfo &MFI = MF.getFrameInfo();
5838 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5839 SDValue Src, Chain = getRoot();
5840
5841 if (TLI.useLoadStackGuardNode())
5842 Src = getLoadStackGuard(DAG, sdl, Chain);
5843 else
5844 Src = getValue(I.getArgOperand(0)); // The guard's value.
5845
5846 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
5847
5848 int FI = FuncInfo.StaticAllocaMap[Slot];
5849 MFI.setStackProtectorIndex(FI);
5850
5851 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
5852
5853 // Store the stack protector onto the stack.
5854 Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
5855 DAG.getMachineFunction(), FI),
5856 /* Alignment = */ 0, MachineMemOperand::MOVolatile);
5857 setValue(&I, Res);
5858 DAG.setRoot(Res);
5859 return nullptr;
5860 }
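
A C++-level picture of the pattern these two intrinsics lower (a sketch only: GuardValue is an invented stand-in for whatever the target's stack-guard load produces, and the re-check is inserted later by the stack-protector epilogue code, not by this case):

#include <cstdint>
#include <cstdlib>

static uintptr_t GuardValue = 0x595e9fbdUL; // made-up guard value

int protectedFrame() {
  uintptr_t Slot = GuardValue; // the volatile store into the protector slot
  char Buf[32];
  Buf[0] = 0;                  // body that could overflow Buf
  if (Slot != GuardValue)      // check performed before leaving the frame
    std::abort();
  return Buf[0];
}

int main() { return protectedFrame(); }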
5861 case Intrinsic::objectsize: {
5862 // If we don't know by now, we're never going to know.
5863 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
5864
5865 assert(CI && "Non-constant type in __builtin_object_size?");
5866
5867 SDValue Arg = getValue(I.getCalledValue());
5868 EVT Ty = Arg.getValueType();
5869
5870 if (CI->isZero())
5871 Res = DAG.getConstant(-1ULL, sdl, Ty);
5872 else
5873 Res = DAG.getConstant(0, sdl, Ty);
5874
5875 setValue(&I, Res);
5876 return nullptr;
5877 }
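
The -1/0 fallback above matches the documented "unknown size" answers of __builtin_object_size, which is what llvm.objectsize is generated from: max-style queries (type 0/1) get (size_t)-1, min-style queries (type 2/3) get 0 when nothing better is known. A front-end level illustration (argv[0] points to storage whose size the compiler cannot see):

#include <cstdio>

int main(int argc, char **argv) {
  (void)argc;
  std::printf("%zu\n", __builtin_object_size(argv[0], 0)); // max query: -1
  std::printf("%zu\n", __builtin_object_size(argv[0], 2)); // min query: 0
  return 0;
}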
5878 case Intrinsic::annotation:
5879 case Intrinsic::ptr_annotation:
5880 case Intrinsic::launder_invariant_group:
5881 case Intrinsic::strip_invariant_group:
5882 // Drop the intrinsic, but forward the value
5883 setValue(&I, getValue(I.getOperand(0)));
5884 return nullptr;
5885 case Intrinsic::assume:
5886 case Intrinsic::var_annotation:
5887 case Intrinsic::sideeffect:
5888 // Discard annotate attributes, assumptions, and artificial side-effects.
5889 return nullptr;
5890
5891 case Intrinsic::codeview_annotation: {
5892 // Emit a label associated with this metadata.
5893 MachineFunction &MF = DAG.getMachineFunction();
5894 MCSymbol *Label =
5895 MF.getMMI().getContext().createTempSymbol("annotation", true);
5896 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
5897 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
5898 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
5899 DAG.setRoot(Res);
5900 return nullptr;
5901 }
5902
5903 case Intrinsic::init_trampoline: {
5904 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
5905
5906 SDValue Ops[6];
5907 Ops[0] = getRoot();
5908 Ops[1] = getValue(I.getArgOperand(0));
5909 Ops[2] = getValue(I.getArgOperand(1));
5910 Ops[3] = getValue(I.getArgOperand(2));
5911 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
5912 Ops[5] = DAG.getSrcValue(F);
5913
5914 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
5915
5916 DAG.setRoot(Res);
5917 return nullptr;
5918 }
5919 case Intrinsic::adjust_trampoline:
5920 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
5921 TLI.getPointerTy(DAG.getDataLayout()),
5922 getValue(I.getArgOperand(0))));
5923 return nullptr;
5924 case Intrinsic::gcroot: {
5925 assert(DAG.getMachineFunction().getFunction().hasGC() &&
5926 "only valid in functions with gc specified, enforced by Verifier");
5927 assert(GFI && "implied by previous");