//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/Debug.h"
#include <cstddef>
#include <limits>
#include <optional>
#include <tuple>

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<bool>
    InsertAssertAlign("insert-assert-align", cl::init(true),
                      cl::desc("Insert the experimental `assertalign` node."),
                      cl::ReallyHidden);

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will void this "
             "optimization"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue
getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
                 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
                 SDValue InChain,
                 std::optional<CallingConv::ID> CC = std::nullopt,
                 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
  // Let the target assemble the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
                                                   PartVT, ValueVT, CC))
    return Val;

  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  InChain, CC);

  assert(NumParts > 0 && "No parts to assemble!");
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = llvm::bit_floor(NumParts);
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, PartVT, HalfVT, V,
                              InChain);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, RoundParts / 2,
                              PartVT, HalfVT, V, InChain);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, InChain, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                         TLI.getShiftAmountTy(
                                             TotalVT, DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V,
                             InChain, CC);
    }
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extended.
      if (AssertOp)
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType())) {

      SDValue NoChange =
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));

      if (DAG.getMachineFunction().getFunction().getAttributes().hasFnAttr(
              llvm::Attribute::StrictFP)) {
        return DAG.getNode(ISD::STRICT_FP_ROUND, DL,
                           DAG.getVTList(ValueVT, MVT::Other), InChain, Val,
                           NoChange);
      }

      return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val, NoChange);
    }

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

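// Worked example (illustrative): assembling an i64 from two i32 parts on a
// little-endian target produces
//   Lo = BITCAST i32 Parts[0], Hi = BITCAST i32 Parts[1]
//   Val = BUILD_PAIR i64, Lo, Hi
// For a non-power-of-2 count such as 3 x i32 -> i96, the first two parts form
// a 64-bit "round" value as above, and the odd third part is any-extended to
// i96, shifted left by 64, and OR'ed in:
//   Val = OR i96 (ZERO_EXTEND Round), (SHL (ANY_EXTEND Odd), 64)
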
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm()) {
      return Ctx.diagnose(DiagnosticInfoInlineAsm(
          *CI, ErrMsg + ", possible invalid constraint for vector type"));
    }

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent. If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.has_value();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, PartVT, IntermediateVT,
                                  V, InChain, CallConv);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, PartVT,
                                  IntermediateVT, V, InChain, CallConv);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    // If the parts vector has more elements than the value vector, then we
    // have a vector widening case (e.g. <2 x float> -> <4 x float>).
    // Extract the elements we want.
    if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
      assert((PartEVT.getVectorElementCount().getKnownMinValue() >
              ValueVT.getVectorElementCount().getKnownMinValue()) &&
             (PartEVT.getVectorElementCount().isScalable() ==
              ValueVT.getVectorElementCount().isScalable()) &&
             "Cannot narrow, it would be a lossy transformation");
      PartEVT =
          EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
                           ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      if (PartEVT == ValueVT)
        return Val;
      if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

      // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    }

    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. If the vectors
    // are the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.bitsLT(PartEVT)) {
      const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      // Drop the extra bits.
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      return DAG.getBitcast(ValueVT, Val);
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    unsigned ValueSize = ValueSVT.getSizeInBits();
    if (ValueSize == PartEVT.getSizeInBits()) {
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
      // It's possible a scalar floating point type gets softened to integer and
      // then promoted to a larger integer. If PartEVT is the larger integer
      // we need to truncate it and then bitcast to the FP type.
      assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      Val = DAG.getBitcast(ValueSVT, Val);
    } else {
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
    }
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}

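// Worked example (illustrative): if the ABI delivers a <2 x float> value in a
// single <4 x float> register, the widening case above recovers it with
//   Val = EXTRACT_SUBVECTOR <2 x float>, Part, 0
// while a <2 x i32> value arriving as one i64 register takes the same-size
// path and becomes a plain BITCAST to <2 x i32>.
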
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void
getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
               unsigned NumParts, MVT PartVT, const Value *V,
               std::optional<CallingConv::ID> CallConv = std::nullopt,
               ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  // Let the target split the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
                                      CallConv))
    return;
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  unsigned PartBits = PartVT.getSizeInBits();
  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2. Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = llvm::bit_floor(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
        DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2. Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

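// Worked example (illustrative): copying an i96 value into three i32 parts
// first splits off the odd third part,
//   OddVal = SRL i96 Val, 64
// and recurses to copy it, then truncates Val to i64 and bisects that with
// EXTRACT_ELEMENT 1/0 into the two remaining i32 parts. On big-endian targets
// the final std::reverse restores memory order.
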
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
                                     const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  EVT PartEVT = PartVT.getVectorElementType();
  EVT ValueEVT = ValueVT.getVectorElementType();
  ElementCount PartNumElts = PartVT.getVectorElementCount();
  ElementCount ValueNumElts = ValueVT.getVectorElementCount();

  // We only support widening vectors with equivalent element types and
  // fixed/scalable properties. If a target needs to widen a fixed-length type
  // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
  if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
      PartNumElts.isScalable() != ValueNumElts.isScalable())
    return SDValue();

  // Handle bf16 specially, because some targets share its ABI with fp16.
  if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
    assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
           "Cannot widen to illegal type");
    Val = DAG.getNode(ISD::BITCAST, DL,
                      ValueVT.changeVectorElementType(MVT::f16), Val);
  } else if (PartEVT != ValueEVT) {
    return SDValue();
  }

  // Widening a scalable vector to another scalable vector is done by inserting
  // the vector into a larger undef one.
  if (PartNumElts.isScalable())
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                       Val, DAG.getVectorIdxConstant(0, DL));

  // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
  // undef elements.
  SmallVector<SDValue, 16> Ops;
  DAG.ExtractVectorElements(Val, Ops);
  SDValue EltUndef = DAG.getUNDEF(PartEVT);
  Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);

  // FIXME: Use CONCAT for 2x -> 4x.
  return DAG.getBuildVector(PartVT, DL, Ops);
}

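// Worked example (illustrative): widening <2 x float> to a <4 x float> part
// type builds
//   BUILD_VECTOR elt0, elt1, undef, undef
// whereas widening <vscale x 2 x i32> to <vscale x 4 x i32> uses
//   INSERT_SUBVECTOR undef, Val, 0
// because the element count of a scalable vector is unknown at compile time.
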
/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.has_value();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorElementCount() ==
                   ValueVT.getVectorElementCount()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else if (PartEVT.isVector() &&
               PartEVT.getVectorElementType() !=
                   ValueVT.getVectorElementType() &&
               TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
                   TargetLowering::TypeWidenVector) {
      // Combination of widening and promotion.
      EVT WidenVT =
          EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
                           PartVT.getVectorElementCount());
      SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
      Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
    } else {
      // Don't extract an integer from a float vector. This can happen if the
      // FP type gets softened to integer and then promoted. The promotion
      // prevents it from being picked up by the earlier bitcast case.
      if (ValueVT.getVectorElementCount().isScalar() &&
          (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
        // If we reach this condition and PartVT is FP, this means that
        // ValueVT is also FP and both have a different size, otherwise we
        // would have bitcasted them. Producing an EXTRACT_VECTOR_ELT here
        // would be invalid since that would mean the smaller FP type has to
        // be extended to the larger one.
        if (PartVT.isFloatingPoint()) {
          Val = DAG.getBitcast(ValueVT.getScalarType(), Val);
          Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
        } else
          Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                            DAG.getVectorIdxConstant(0, DL));
      } else {
        uint64_t ValueSize = ValueVT.getFixedSizeInBits();
        assert(PartVT.getFixedSizeInBits() > ValueSize &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
        RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
         "Mixing scalable and fixed vectors when copying in parts");

  std::optional<ElementCount> DestEltCnt;

  if (IntermediateVT.isVector())
    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
  else
    DestEltCnt = ElementCount::getFixed(NumIntermediates);

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);

  if (ValueVT == BuiltVectorTy) {
    // Nothing to do.
  } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
    // Bitconvert vector->vector case.
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  } else {
    if (BuiltVectorTy.getVectorElementType().bitsGT(
            ValueVT.getVectorElementType())) {
      // Integer promotion.
      ValueVT = EVT::getVectorVT(*DAG.getContext(),
                                 BuiltVectorTy.getVectorElementType(),
                                 ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
    }

    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
      Val = Widened;
    }
  }

  assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      // This does something sensible for scalable vectors - see the
      // definition of EXTRACT_SUBVECTOR for further details.
      unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<Register, 4> &regs, MVT regvt,
                           EVT valuevt, std::optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, Register Reg, Type *Ty,
                           std::optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg = Reg.id() + NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Glue, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 4> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Glue) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
        *Glue = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
          FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent. For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, Chain, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

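// Illustrative effect of the assertion nodes above: if liveness info proves a
// vreg holding an i32 has 24 known leading zero bits, the copy becomes
//   AssertZext i32 (CopyFromReg ...), ValueType:i8
// which lets later DAG combines treat a subsequent truncate to i8 as lossless.
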
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Glue,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Glue) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
      *Glue = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Glue)
    // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    //   c1, f1 = CopyToReg
    //   c2, f2 = CopyToReg
    //   c3     = TokenFactor c1, c2
    //   ...
    //          = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  InlineAsm::Flag Flag(Code, Regs.size());
  if (HasMatching)
    Flag.setMatchingOp(MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word. That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag.setRegClass(RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind::Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    Register SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT RegisterVT = RegVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
                                           RegisterVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<Register, TypeSize>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<Register, TypeSize>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    TypeSize RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, BatchAAResults *aa,
                               AssumptionCache *ac,
                               const TargetLibraryInfo *li) {
  AA = aa;
  AC = ac;
  GFI = gfi;
  LibInfo = li;
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
  AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
      *DAG.getMachineFunction().getFunction().getParent());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break; // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

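// Illustrative chain shape: with two pending loads L1 and L2 outstanding,
// updateRoot(PendingLoads) sets
//   Root = TokenFactor L1.chain, L2.chain
// so the loads stay unordered relative to each other but both precede any
// node that later uses the new root as its chain operand.
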
SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}

void SelectionDAGBuilder::handleDebugDeclare(Value *Address,
                                             DILocalVariable *Variable,
                                             DIExpression *Expression,
                                             DebugLoc DL) {
  assert(Variable && "Missing variable");

  // Check if address has undef value.
  if (!Address || isa<UndefValue>(Address) ||
      (Address->use_empty() && !isa<Argument>(Address))) {
    LLVM_DEBUG(
        dbgs()
        << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
    return;
  }

  bool IsParameter = Variable->isParameter() || isa<Argument>(Address);

  SDValue &N = NodeMap[Address];
  if (!N.getNode() && isa<Argument>(Address))
    // Check unused arguments map.
    N = UnusedArgNodeMap[Address];
  SDDbgValue *SDV;
  if (N.getNode()) {
    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
      Address = BCI->getOperand(0);
    // Parameters are handled specially.
    auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
    if (IsParameter && FINode) {
      // Byval parameter. We have a frame index at this point.
      SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
                                      /*IsIndirect*/ true, DL, SDNodeOrder);
    } else if (isa<Argument>(Address)) {
      // Address is an argument, so try to emit its dbg value using
      // virtual register info from the FuncInfo.ValueMap.
      EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                               FuncArgumentDbgValueKind::Declare, N);
      return;
    } else {
      SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
                            true, DL, SDNodeOrder);
    }
    DAG.AddDbgValue(SDV, IsParameter);
  } else {
    // If Address is an argument then try to emit its dbg value using
    // virtual register info from the FuncInfo.ValueMap.
    if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                                  FuncArgumentDbgValueKind::Declare, N)) {
      LLVM_DEBUG(dbgs() << "dbg_declare: Dropping debug info"
                        << " (could not emit func-arg dbg_value)\n");
    }
  }
}

void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) {
  // Add SDDbgValue nodes for any var locs here. Do so before updating
  // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
  if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
    // Add SDDbgValue nodes for any var locs here. Do so before updating
    // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
    for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
         It != End; ++It) {
      auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
      dropDanglingDebugInfo(Var, It->Expr);
      if (It->Values.isKillLocation(It->Expr)) {
        handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
        continue;
      }
      SmallVector<Value *> Values(It->Values.location_ops());
      if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
                            It->Values.hasArgList())) {
        SmallVector<Value *, 4> Vals(It->Values.location_ops());
        addDanglingDebugInfo(Vals,
                             FnVarLocs->getDILocalVariable(It->VariableID),
                             It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
      }
    }
  }

  // We must skip DbgVariableRecords if they've already been processed above as
  // we have just emitted the debug values resulting from assignment tracking
  // analysis, making any existing DbgVariableRecords redundant (and probably
  // less correct). We still need to process DbgLabelRecords. This does sink
  // DbgLabelRecords to the bottom of the group of debug records. That shouldn't
  // be important as it does so deterministically and ordering between
  // DbgLabelRecords and DbgVariableRecords is immaterial (other than for MIR/IR
  // printing).
  bool SkipDbgVariableRecords = DAG.getFunctionVarLocs();
  // Is there any debug-info attached to this instruction, in the form of
  // DbgRecord non-instruction debug-info records?
  for (DbgRecord &DR : I.getDbgRecordRange()) {
    if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      assert(DLR->getLabel() && "Missing label");
      SDDbgLabel *SDV =
          DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
      DAG.AddDbgLabel(SDV);
      continue;
    }

    if (SkipDbgVariableRecords)
      continue;
    DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
    DILocalVariable *Variable = DVR.getVariable();
    DIExpression *Expression = DVR.getExpression();
    dropDanglingDebugInfo(Variable, Expression);

    if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
      if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
        continue;
      LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DVR
                        << "\n");
      handleDebugDeclare(DVR.getVariableLocationOp(0), Variable, Expression,
                         DVR.getDebugLoc());
      continue;
    }

    // A DbgVariableRecord with no locations is a kill location.
    SmallVector<Value *, 4> Values(DVR.location_ops());
    if (Values.empty()) {
      handleKillDebugValue(Variable, Expression, DVR.getDebugLoc(),
                           SDNodeOrder);
      continue;
    }

    // A DbgVariableRecord with an undef or absent location is also a kill
    // location.
    if (llvm::any_of(Values,
                     [](Value *V) { return !V || isa<UndefValue>(V); })) {
      handleKillDebugValue(Variable, Expression, DVR.getDebugLoc(),
                           SDNodeOrder);
      continue;
    }

    bool IsVariadic = DVR.hasArgList();
    if (!handleDebugValue(Values, Variable, Expression, DVR.getDebugLoc(),
                          SDNodeOrder, IsVariadic)) {
      addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
                           DVR.getDebugLoc(), SDNodeOrder);
    }
  }
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  visitDbgInfo(I);

  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  // Set inserted listener only if required.
  bool NodeInserted = false;
  std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
  MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
  MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra);
  if (PCSectionsMD || MMRA) {
    InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
        DAG, [&](SDNode *) { NodeInserted = true; });
  }

  visit(I.getOpcode(), I);

  if (!I.isTerminator() && !HasTailCall &&
      !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  // Handle metadata.
  if (PCSectionsMD || MMRA) {
    auto It = NodeMap.find(&I);
    if (It != NodeMap.end()) {
      if (PCSectionsMD)
        DAG.addPCSections(It->second.getNode(), PCSectionsMD);
      if (MMRA)
        DAG.addMMRAMetadata(It->second.getNode(), MMRA);
    } else if (NodeInserted) {
      // This should not happen; if it does, don't let it go unnoticed so we can
      // fix it. Relevant visit*() function is probably missing a setValue().
      errs() << "warning: losing !pcsections and/or !mmra metadata ["
             << I.getModule()->getName() << "]\n";
      LLVM_DEBUG(I.dump());
      assert(false);
    }
  }

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
  // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
                                            DILocalVariable *Variable,
                                            DebugLoc DL, unsigned Order,
                                            SmallVectorImpl<Value *> &Values,
                                            DIExpression *Expression) {
  // For variadic dbg_values we will now insert an undef.
  // FIXME: We can potentially recover these!
  SmallVector<SDDbgOperand> Locs;
  for (const Value *V : Values) {
    auto *Undef = UndefValue::get(V->getType());
    Locs.push_back(SDDbgOperand::fromConst(Undef));
  }
  SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
                                        /*IsIndirect=*/false, DL, Order,
                                        /*IsVariadic=*/true);
  DAG.AddDbgValue(SDV, /*isParameter=*/false);
  return true;
}

void SelectionDAGBuilder::addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
                                               DILocalVariable *Var,
                                               DIExpression *Expr,
                                               bool IsVariadic, DebugLoc DL,
                                               unsigned Order) {
  if (IsVariadic) {
    handleDanglingVariadicDebugInfo(DAG, Var, DL, Order, Values, Expr);
    return;
  }
  // TODO: Dangling debug info will eventually either be resolved or produce
  // an Undef DBG_VALUE. However in the resolution case, a gap may appear
  // between the original dbg.value location and its resolved DBG_VALUE,
  // which we should ideally fill with an extra Undef DBG_VALUE.
  assert(Values.size() == 1);
  DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);
}

void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    DIVariable *DanglingVariable = DDI.getVariable();
    DIExpression *DanglingExpr = DDI.getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for "
                        << printDDI(nullptr, DDI) << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;

    // If debug info is to be dropped, run it through final checks to see
    // whether it can be salvaged.
    for (auto &DDI : DDIV)
      if (isMatchingDbgValue(DDI))
        salvageUnresolvedDbgValue(DDIMI.first, DDI);

    erase_if(DDIV, isMatchingDbgValue);
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    DebugLoc DL = DDI.getDebugLoc();
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DDI.getVariable();
    DIExpression *Expr = DDI.getExpression();
    assert(Variable->isValidLocationForIntrinsic(DL) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
      // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
      // we couldn't resolve it directly when examining the DbgValue intrinsic
      // in the first place we should not be more successful here). Unless we
      // have some test case that prove this to be correct we should avoid
      // calling EmitFuncArgumentDbgValue here.
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
                                    FuncArgumentDbgValueKind::Value, Val)) {
        LLVM_DEBUG(dbgs() << "Resolve dangling debug info for "
                          << printDDI(V, DDI) << "\n");
        LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
        // Increase the SDNodeOrder for the DbgValue here to make sure it is
        // inserted after the definition of Val when emitting the instructions
        // after ISel. An alternative could be to teach
        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                   << ValSDNodeOrder << "\n");
        SDV = getDbgValue(Val, Variable, Expr, DL,
                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
        DAG.AddDbgValue(SDV, false);
      } else
        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
                          << printDDI(V, DDI)
                          << " in EmitFuncArgumentDbgValue\n");
    } else {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
                        << "\n");
      auto Undef = UndefValue::get(V->getType());
      auto SDV =
          DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
      DAG.AddDbgValue(SDV, false);
    }
  }
  DDIV.clear();
}

void SelectionDAGBuilder::salvageUnresolvedDbgValue(const Value *V,
                                                    DanglingDebugInfo &DDI) {
  // TODO: For the variadic implementation, instead of only checking the fail
  // state of `handleDebugValue`, we need to know specifically which values were
  // invalid, so that we attempt to salvage only those values when processing
  // a DIArgList.
  const Value *OrigV = V;
  DILocalVariable *Var = DDI.getVariable();
  DIExpression *Expr = DDI.getExpression();
  DebugLoc DL = DDI.getDebugLoc();
  unsigned SDOrder = DDI.getSDNodeOrder();

  // Currently we consider only dbg.value intrinsics -- we tell the salvager
  // that DW_OP_stack_value is desired.
  bool StackValue = true;

  // Can this Value be encoded without any further work?
  if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
    return;

  // Attempt to salvage back through as many instructions as possible. Bail if
  // a non-instruction is seen, such as a constant expression or global
  // variable. FIXME: Further work could recover those too.
  while (isa<Instruction>(V)) {
    const Instruction &VAsInst = *cast<const Instruction>(V);
    // Temporary "0", awaiting real implementation.
    SmallVector<uint64_t, 16> Ops;
    SmallVector<Value *, 4> AdditionalValues;
    V = salvageDebugInfoImpl(const_cast<Instruction &>(VAsInst),
                             Expr->getNumLocationOperands(), Ops,
                             AdditionalValues);
    // If we cannot salvage any further, and haven't yet found a suitable debug
    // expression, bail out.
    if (!V)
      break;

    // TODO: If AdditionalValues isn't empty, then the salvage can only be
    // represented with a DBG_VALUE_LIST, so we give up. When we have support
    // here for variadic dbg_values, remove that condition.
    if (!AdditionalValues.empty())
      break;

    // New value and expr now represent this debuginfo.
    Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);

    // Some kind of simplification occurred: check whether the operand of the
    // salvaged debug expression can be encoded in this DAG.
    if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
      LLVM_DEBUG(
          dbgs() << "Salvaged debug location info for:\n  " << *Var << "\n"
                 << *OrigV << "\nBy stripping back to:\n  " << *V << "\n");
      return;
    }
  }

  // This was the final opportunity to salvage this debug information, and it
  // couldn't be done. Place an undef DBG_VALUE at this location to terminate
  // any earlier variable location.
  assert(OrigV && "V shouldn't be null");
  auto *Undef = UndefValue::get(OrigV->getType());
  auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
  DAG.AddDbgValue(SDV, false);
  LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  "
                    << printDDI(OrigV, DDI) << "\n");
}

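// Worked example (illustrative): given
//   %add = add i64 %x, 1
//   #dbg_value(i64 %add, !"v", !DIExpression())
// if %add cannot be encoded, salvageDebugInfoImpl rewrites the location to %x
// with the expression (DW_OP_plus_uconst 1, DW_OP_stack_value), and the loop
// retries handleDebugValue with that salvaged operand.
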
void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
                                               DIExpression *Expr,
                                               DebugLoc DbgLoc,
                                               unsigned Order) {
  Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
  DIExpression *NewExpr =
      const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
  handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
                   /*IsVariadic*/ false);
}

bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
                                           DILocalVariable *Var,
                                           DIExpression *Expr, DebugLoc DbgLoc,
                                           unsigned Order, bool IsVariadic) {
  if (Values.empty())
    return true;

  // Filter EntryValue locations out early.
  if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
    return true;

  SmallVector<SDDbgOperand> LocationOps;
  SmallVector<SDNode *> Dependencies;
  for (const Value *V : Values) {
    // Constant value.
    if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
        isa<ConstantPointerNull>(V)) {
      LocationOps.emplace_back(SDDbgOperand::fromConst(V));
      continue;
    }

    // Look through IntToPtr constants.
    if (auto *CE = dyn_cast<ConstantExpr>(V))
      if (CE->getOpcode() == Instruction::IntToPtr) {
        LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
        continue;
      }

    // If the Value is a frame index, we can create a FrameIndex debug value
    // without relying on the DAG at all.
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
        continue;
      }
    }

    // Do not use getValue() in here; we don't want to generate code at
    // this point if it hasn't been done yet.
    SDValue N = NodeMap[V];
    if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
      N = UnusedArgNodeMap[V];

    if (N.getNode()) {
      // Only emit func arg dbg value for non-variadic dbg.values for now.
      if (!IsVariadic &&
          EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
                                   FuncArgumentDbgValueKind::Value, N))
        return true;
      if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
        // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
        // describe stack slot locations.
        //
        // Consider "int x = 0; int *px = &x;". There are two kinds of
        // interesting debug values here after optimization:
        //
        //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
        //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
        //
        // Both describe the direct values of their associated variables.
        Dependencies.push_back(N.getNode());
        LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
        continue;
      }
      LocationOps.emplace_back(
          SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
      continue;
    }
1648
1649 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1650 // Special rules apply for the first dbg.values of parameter variables in a
1651 // function. Identify them by the fact they reference Argument Values, that
1652 // they're parameters, and they are parameters of the current function. We
1653 // need to let them dangle until they get an SDNode.
1654 bool IsParamOfFunc =
1655 isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1656 if (IsParamOfFunc)
1657 return false;
1658
1659 // The value is not used in this block yet (or it would have an SDNode).
1660 // We still want the value to appear for the user if possible -- if it has
1661 // an associated VReg, we can refer to that instead.
1662 auto VMI = FuncInfo.ValueMap.find(V);
1663 if (VMI != FuncInfo.ValueMap.end()) {
1664 unsigned Reg = VMI->second;
1665 // If this is a PHI node, it may be split up into several MI PHI nodes
1666 // (in FunctionLoweringInfo::set).
1667 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1668 V->getType(), std::nullopt);
1669 if (RFV.occupiesMultipleRegs()) {
1670 // FIXME: We could potentially support variadic dbg_values here.
1671 if (IsVariadic)
1672 return false;
1673 unsigned Offset = 0;
1674 unsigned BitsToDescribe = 0;
1675 if (auto VarSize = Var->getSizeInBits())
1676 BitsToDescribe = *VarSize;
1677 if (auto Fragment = Expr->getFragmentInfo())
1678 BitsToDescribe = Fragment->SizeInBits;
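// For example, on a hypothetical target where an i64 variable lives in two
// 32-bit registers, the loop below emits two fragment expressions, one
// covering bits [0, 32) and one covering bits [32, 64) of the variable.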
1679 for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1680 // Bail out if all bits are described already.
1681 if (Offset >= BitsToDescribe)
1682 break;
1683 // TODO: handle scalable vectors.
1684 unsigned RegisterSize = RegAndSize.second;
1685 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1686 ? BitsToDescribe - Offset
1687 : RegisterSize;
1688 auto FragmentExpr = DIExpression::createFragmentExpression(
1689 Expr, Offset, FragmentSize);
1690 if (!FragmentExpr)
1691 continue;
1692 SDDbgValue *SDV = DAG.getVRegDbgValue(
1693 Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, Order);
1694 DAG.AddDbgValue(SDV, false);
1695 Offset += RegisterSize;
1696 }
1697 return true;
1698 }
1699 // We can use simple vreg locations for variadic dbg_values as well.
1700 LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1701 continue;
1702 }
1703 // We failed to create an SDDbgOperand for V.
1704 return false;
1705 }
1706
1707 // We have created an SDDbgOperand for each Value in Values.
1708 assert(!LocationOps.empty());
1709 SDDbgValue *SDV =
1710 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1711 /*IsIndirect=*/false, DbgLoc, Order, IsVariadic);
1712 DAG.AddDbgValue(SDV, /*isParameter=*/false);
1713 return true;
1714}
1715
1716void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1717 // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1718 for (auto &Pair : DanglingDebugInfoMap)
1719 for (auto &DDI : Pair.second)
1720 salvageUnresolvedDbgValue(const_cast<Value *>(Pair.first), DDI);
1721 DanglingDebugInfoMap.clear();
1722}
1723
1724/// getCopyFromRegs - If there was a virtual register allocated for the value V,
1725/// emit CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
1726SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1727 DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1728 SDValue Result;
1729
1730 if (It != FuncInfo.ValueMap.end()) {
1731 Register InReg = It->second;
1732
1734 DAG.getDataLayout(), InReg, Ty,
1735 std::nullopt); // This is not an ABI copy.
1736 SDValue Chain = DAG.getEntryNode();
1737 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1738 V);
1739 resolveDanglingDebugInfo(V, Result);
1740 }
1741
1742 return Result;
1743}
1744
1745/// getValue - Return an SDValue for the given Value.
1746SDValue SelectionDAGBuilder::getValue(const Value *V) {
1747 // If we already have an SDValue for this value, use it. It's important
1748 // to do this first, so that we don't create a CopyFromReg if we already
1749 // have a regular SDValue.
1750 SDValue &N = NodeMap[V];
1751 if (N.getNode()) return N;
1752
1753 // If there's a virtual register allocated and initialized for this
1754 // value, use it.
1755 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1756 return copyFromReg;
1757
1758 // Otherwise create a new SDValue and remember it.
1759 SDValue Val = getValueImpl(V);
1760 NodeMap[V] = Val;
1761 resolveDanglingDebugInfo(V, Val);
1762 return Val;
1763}
1764
1765/// getNonRegisterValue - Return an SDValue for the given Value, but
1766/// don't look in FuncInfo.ValueMap for a virtual register.
1767SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1768 // If we already have an SDValue for this value, use it.
1769 SDValue &N = NodeMap[V];
1770 if (N.getNode()) {
1771 if (isIntOrFPConstant(N)) {
1772 // Remove the debug location from the node as the node is about to be used
1773 // in a location which may differ from the original debug location. This
1774 // is relevant to Constant and ConstantFP nodes because they can appear
1775 // as constant expressions inside PHI nodes.
1776 N->setDebugLoc(DebugLoc());
1777 }
1778 return N;
1779 }
1780
1781 // Otherwise create a new SDValue and remember it.
1782 SDValue Val = getValueImpl(V);
1783 NodeMap[V] = Val;
1784 resolveDanglingDebugInfo(V, Val);
1785 return Val;
1786}
1787
1788/// getValueImpl - Helper function for getValue and getNonRegisterValue.
1789/// Create an SDValue for the given value.
1790SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1791 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1792
1793 if (const Constant *C = dyn_cast<Constant>(V)) {
1794 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1795
1796 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1797 return DAG.getConstant(*CI, getCurSDLoc(), VT);
1798
1799 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1800 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1801
1802 if (const ConstantPtrAuth *CPA = dyn_cast<ConstantPtrAuth>(C)) {
1803 return DAG.getNode(ISD::PtrAuthGlobalAddress, getCurSDLoc(), VT,
1804 getValue(CPA->getPointer()), getValue(CPA->getKey()),
1805 getValue(CPA->getAddrDiscriminator()),
1806 getValue(CPA->getDiscriminator()));
1807 }
1808
1809 if (isa<ConstantPointerNull>(C)) {
1810 unsigned AS = V->getType()->getPointerAddressSpace();
1811 return DAG.getConstant(0, getCurSDLoc(),
1812 TLI.getPointerTy(DAG.getDataLayout(), AS));
1813 }
1814
1815 if (match(C, m_VScale()))
1816 return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1817
1818 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1819 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1820
1821 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1822 return DAG.getUNDEF(VT);
1823
1824 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1825 visit(CE->getOpcode(), *CE);
1826 SDValue N1 = NodeMap[V];
1827 assert(N1.getNode() && "visit didn't populate the NodeMap!");
1828 return N1;
1829 }
1830
1831 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1832 SmallVector<SDValue, 4> Constants;
1833 for (const Use &U : C->operands()) {
1834 SDNode *Val = getValue(U).getNode();
1835 // If the operand is an empty aggregate, there are no values.
1836 if (!Val) continue;
1837 // Add each leaf value from the operand to the Constants list
1838 // to form a flattened list of all the values.
1839 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1840 Constants.push_back(SDValue(Val, i));
1841 }
1842
1843 return DAG.getMergeValues(Constants, getCurSDLoc());
1844 }
1845
1846 if (const ConstantDataSequential *CDS =
1847 dyn_cast<ConstantDataSequential>(C)) {
1848 SmallVector<SDValue, 4> Ops;
1849 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1850 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1851 // Add each leaf value from the operand to the Constants list
1852 // to form a flattened list of all the values.
1853 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1854 Ops.push_back(SDValue(Val, i));
1855 }
1856
1857 if (isa<ArrayType>(CDS->getType()))
1858 return DAG.getMergeValues(Ops, getCurSDLoc());
1859 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1860 }
1861
1862 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1863 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1864 "Unknown struct or array constant!");
1865
1866 SmallVector<EVT, 4> ValueVTs;
1867 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1868 unsigned NumElts = ValueVTs.size();
1869 if (NumElts == 0)
1870 return SDValue(); // empty struct
1871 SmallVector<SDValue, 4> Constants(NumElts);
1872 for (unsigned i = 0; i != NumElts; ++i) {
1873 EVT EltVT = ValueVTs[i];
1874 if (isa<UndefValue>(C))
1875 Constants[i] = DAG.getUNDEF(EltVT);
1876 else if (EltVT.isFloatingPoint())
1877 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1878 else
1879 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1880 }
1881
1882 return DAG.getMergeValues(Constants, getCurSDLoc());
1883 }
1884
1885 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1886 return DAG.getBlockAddress(BA, VT);
1887
1888 if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1889 return getValue(Equiv->getGlobalValue());
1890
1891 if (const auto *NC = dyn_cast<NoCFIValue>(C))
1892 return getValue(NC->getGlobalValue());
1893
1894 if (VT == MVT::aarch64svcount) {
1895 assert(C->isNullValue() && "Can only zero this target type!");
1896 return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT,
1897 DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1));
1898 }
1899
1900 if (VT.isRISCVVectorTuple()) {
1901 assert(C->isNullValue() && "Can only zero this target type!");
1902 return NodeMap[V] = DAG.getNode(
1903 ISD::BITCAST, getCurSDLoc(), VT,
1904 DAG.getNode(
1905 ISD::SPLAT_VECTOR, getCurSDLoc(),
1906 EVT::getVectorVT(*DAG.getContext(), MVT::i8,
1907 VT.getSizeInBits().getKnownMinValue() / 8,
1908 true),
1909 DAG.getConstant(0, getCurSDLoc(), MVT::getIntegerVT(8))));
1910 }
1911
1912 VectorType *VecTy = cast<VectorType>(V->getType());
1913
1914 // Now that we know the number and type of the elements, get that number of
1915 // elements into the Ops array based on what kind of constant it is.
1916 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1917 SmallVector<SDValue, 16> Ops;
1918 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1919 for (unsigned i = 0; i != NumElements; ++i)
1920 Ops.push_back(getValue(CV->getOperand(i)));
1921
1922 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1923 }
1924
1925 if (isa<ConstantAggregateZero>(C)) {
1926 EVT EltVT =
1927 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1928
1929 SDValue Op;
1930 if (EltVT.isFloatingPoint())
1931 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1932 else
1933 Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1934
1935 return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op);
1936 }
1937
1938 llvm_unreachable("Unknown vector constant");
1939 }
1940
1941 // If this is a static alloca, generate it as the frameindex instead of
1942 // computation.
1943 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1944 DenseMap<const AllocaInst*, int>::iterator SI =
1945 FuncInfo.StaticAllocaMap.find(AI);
1946 if (SI != FuncInfo.StaticAllocaMap.end())
1947 return DAG.getFrameIndex(
1948 SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1949 }
1950
1951 // If this is an instruction which fast-isel has deferred, select it now.
1952 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1953 Register InReg = FuncInfo.InitializeRegForValue(Inst);
1954
1955 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1956 Inst->getType(), std::nullopt);
1957 SDValue Chain = DAG.getEntryNode();
1958 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1959 }
1960
1961 if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
1962 return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1963
1964 if (const auto *BB = dyn_cast<BasicBlock>(V))
1965 return DAG.getBasicBlock(FuncInfo.getMBB(BB));
1966
1967 llvm_unreachable("Can't get register for value!");
1968}
1969
1970void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1971 EHPersonality Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1972 bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1973 bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1974 bool IsSEH = isAsynchronousEHPersonality(Pers);
1975 MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1976 if (!IsSEH)
1977 CatchPadMBB->setIsEHScopeEntry();
1978 // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1979 if (IsMSVCCXX || IsCoreCLR)
1980 CatchPadMBB->setIsEHFuncletEntry();
1981}
1982
1983void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1984 // Update machine-CFG edge.
1985 MachineBasicBlock *TargetMBB = FuncInfo.getMBB(I.getSuccessor());
1986 FuncInfo.MBB->addSuccessor(TargetMBB);
1987 TargetMBB->setIsEHCatchretTarget(true);
1989
1990 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1991 bool IsSEH = isAsynchronousEHPersonality(Pers);
1992 if (IsSEH) {
1993 // If this is not a fall-through branch or optimizations are switched off,
1994 // emit the branch.
1995 if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1996 TM.getOptLevel() == CodeGenOptLevel::None)
1997 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1998 getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1999 return;
2000 }
2001
2002 // Figure out the funclet membership for the catchret's successor.
2003 // This will be used by the FuncletLayout pass to determine how to order the
2004 // BB's.
2005 // A 'catchret' returns to the outer scope's color.
2006 Value *ParentPad = I.getCatchSwitchParentPad();
2007 const BasicBlock *SuccessorColor;
2008 if (isa<ConstantTokenNone>(ParentPad))
2009 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
2010 else
2011 SuccessorColor = cast<Instruction>(ParentPad)->getParent();
2012 assert(SuccessorColor && "No parent funclet for catchret!");
2013 MachineBasicBlock *SuccessorColorMBB = FuncInfo.getMBB(SuccessorColor);
2014 assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
2015
2016 // Create the terminator node.
2017 SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
2018 getControlRoot(), DAG.getBasicBlock(TargetMBB),
2019 DAG.getBasicBlock(SuccessorColorMBB));
2020 DAG.setRoot(Ret);
2021}
2022
2023void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
2024 // Don't emit any special code for the cleanuppad instruction. It just marks
2025 // the start of an EH scope/funclet.
2026 FuncInfo.MBB->setIsEHScopeEntry();
2027 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2028 if (Pers != EHPersonality::Wasm_CXX) {
2029 FuncInfo.MBB->setIsEHFuncletEntry();
2030 FuncInfo.MBB->setIsCleanupFuncletEntry();
2031 }
2032}
2033
2034// In wasm EH, even though a catchpad may not catch an exception if a tag does
2035// not match, it is OK to add only the first unwind destination catchpad to the
2036// successors, because there will be at least one invoke instruction within the
2037// catch scope that points to the next unwind destination, if one exists, so
2038// CFGSort cannot mess up the BB sorting order.
2039// (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
2040// call within them, and catchpads only consisting of 'catch (...)' have a
2041// '__cxa_end_catch' call within them, both of which generate invokes in case
2042// the next unwind destination exists, i.e., the next unwind destination is not
2043// the caller.)
2044//
2045// Having at most one EH pad successor is also simpler and helps later
2046// transformations.
2047//
2048// For example,
2049// current:
2050// invoke void @foo to ... unwind label %catch.dispatch
2051// catch.dispatch:
2052// %0 = catchswitch within ... [label %catch.start] unwind label %next
2053// catch.start:
2054// ...
2055// ... in this BB or some other child BB dominated by this BB there will be an
2056// invoke that points to 'next' BB as an unwind destination
2057//
2058// next: ; We don't need to add this to 'current' BB's successor
2059// ...
2060static void findWasmUnwindDestinations(
2061 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2062 BranchProbability Prob,
2063 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2064 &UnwindDests) {
2065 while (EHPadBB) {
2066 const Instruction *Pad = EHPadBB->getFirstNonPHI();
2067 if (isa<CleanupPadInst>(Pad)) {
2068 // Stop on cleanup pads.
2069 UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2070 UnwindDests.back().first->setIsEHScopeEntry();
2071 break;
2072 } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2073 // Add the catchpad handlers to the possible destinations. We don't
2074 // continue to the unwind destination of the catchswitch for wasm.
2075 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2076 UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
2077 UnwindDests.back().first->setIsEHScopeEntry();
2078 }
2079 break;
2080 } else {
2081 continue;
2082 }
2083 }
2084}
2085
2086/// When an invoke or a cleanupret unwinds to the next EH pad, there are
2087/// many places it could ultimately go. In the IR, we have a single unwind
2088/// destination, but in the machine CFG, we enumerate all the possible blocks.
2089/// This function skips over imaginary basic blocks that hold catchswitch
2090/// instructions, and finds all the "real" machine
2091/// basic block destinations. As those destinations may not be successors of
2092/// EHPadBB, here we also calculate the edge probability to those destinations.
2093/// The passed-in Prob is the edge probability to EHPadBB.
2094static void findUnwindDestinations(
2095 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2096 BranchProbability Prob,
2097 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2098 &UnwindDests) {
2099 EHPersonality Personality =
2100 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2101 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2102 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2103 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2104 bool IsSEH = isAsynchronousEHPersonality(Personality);
2105
2106 if (IsWasmCXX) {
2107 findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
2108 assert(UnwindDests.size() <= 1 &&
2109 "There should be at most one unwind destination for wasm");
2110 return;
2111 }
2112
2113 while (EHPadBB) {
2114 const Instruction *Pad = EHPadBB->getFirstNonPHI();
2115 BasicBlock *NewEHPadBB = nullptr;
2116 if (isa<LandingPadInst>(Pad)) {
2117 // Stop on landingpads. They are not funclets.
2118 UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2119 break;
2120 } else if (isa<CleanupPadInst>(Pad)) {
2121 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2122 // personalities.
2123 UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2124 UnwindDests.back().first->setIsEHScopeEntry();
2125 UnwindDests.back().first->setIsEHFuncletEntry();
2126 break;
2127 } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2128 // Add the catchpad handlers to the possible destinations.
2129 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2130 UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
2131 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2132 if (IsMSVCCXX || IsCoreCLR)
2133 UnwindDests.back().first->setIsEHFuncletEntry();
2134 if (!IsSEH)
2135 UnwindDests.back().first->setIsEHScopeEntry();
2136 }
2137 NewEHPadBB = CatchSwitch->getUnwindDest();
2138 } else {
2139 continue;
2140 }
2141
2142 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2143 if (BPI && NewEHPadBB)
2144 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2145 EHPadBB = NewEHPadBB;
2146 }
2147}
2148
2149void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
2150 // Update successor info.
2151 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2152 auto UnwindDest = I.getUnwindDest();
2153 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2154 BranchProbability UnwindDestProb =
2155 (BPI && UnwindDest)
2156 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
2157 : BranchProbability::getZero();
2158 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
2159 for (auto &UnwindDest : UnwindDests) {
2160 UnwindDest.first->setIsEHPad();
2161 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2162 }
2163 FuncInfo.MBB->normalizeSuccProbs();
2164
2165 // Create the terminator node.
2166 MachineBasicBlock *CleanupPadMBB =
2167 FuncInfo.getMBB(I.getCleanupPad()->getParent());
2168 SDValue Ret = DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other,
2169 getControlRoot(), DAG.getBasicBlock(CleanupPadMBB));
2170 DAG.setRoot(Ret);
2171}
2172
2173void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2174 report_fatal_error("visitCatchSwitch not yet implemented!");
2175}
2176
2177void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2178 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2179 auto &DL = DAG.getDataLayout();
2180 SDValue Chain = getControlRoot();
2181 SmallVector<ISD::OutputArg, 8> Outs;
2182 SmallVector<SDValue, 8> OutVals;
2183
2184 // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2185 // lower
2186 //
2187 // %val = call <ty> @llvm.experimental.deoptimize()
2188 // ret <ty> %val
2189 //
2190 // differently.
2191 if (I.getParent()->getTerminatingDeoptimizeCall()) {
2192 LowerDeoptimizingReturn();
2193 return;
2194 }
2195
2196 if (!FuncInfo.CanLowerReturn) {
2197 Register DemoteReg = FuncInfo.DemoteRegister;
2198
2199 // Emit a store of the return value through the virtual register.
2200 // Leave Outs empty so that LowerReturn won't try to load return
2201 // registers the usual way.
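// Sketch of the demotion: the caller provides a pointer (held in DemoteReg),
// so `ret <ty> %val` is emitted here as stores of %val's parts through that
// pointer rather than as values in return registers.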
2202 MVT PtrValueVT = TLI.getPointerTy(DL, DL.getAllocaAddrSpace());
2203 SDValue RetPtr =
2204 DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVT);
2205 SDValue RetOp = getValue(I.getOperand(0));
2206
2207 SmallVector<EVT, 4> ValueVTs, MemVTs;
2208 SmallVector<uint64_t, 4> Offsets;
2209 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2210 &Offsets, 0);
2211 unsigned NumValues = ValueVTs.size();
2212
2213 SmallVector<SDValue, 4> Chains(NumValues);
2214 Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2215 for (unsigned i = 0; i != NumValues; ++i) {
2216 // An aggregate return value cannot wrap around the address space, so
2217 // offsets to its parts don't wrap either.
2218 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2219 TypeSize::getFixed(Offsets[i]));
2220
2221 SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2222 if (MemVTs[i] != ValueVTs[i])
2223 Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
2224 Chains[i] = DAG.getStore(
2225 Chain, getCurSDLoc(), Val,
2226 // FIXME: better loc info would be nice.
2227 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2228 commonAlignment(BaseAlign, Offsets[i]));
2229 }
2230
2232 MVT::Other, Chains);
2233 } else if (I.getNumOperands() != 0) {
2234 SmallVector<EVT, 4> ValueVTs;
2235 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2236 unsigned NumValues = ValueVTs.size();
2237 if (NumValues) {
2238 SDValue RetOp = getValue(I.getOperand(0));
2239
2240 const Function *F = I.getParent()->getParent();
2241
2242 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2243 I.getOperand(0)->getType(), F->getCallingConv(),
2244 /*IsVarArg*/ false, DL);
2245
2246 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2247 if (F->getAttributes().hasRetAttr(Attribute::SExt))
2248 ExtendKind = ISD::SIGN_EXTEND;
2249 else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2250 ExtendKind = ISD::ZERO_EXTEND;
2251
2252 LLVMContext &Context = F->getContext();
2253 bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2254
2255 for (unsigned j = 0; j != NumValues; ++j) {
2256 EVT VT = ValueVTs[j];
2257
2258 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2259 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2260
2261 CallingConv::ID CC = F->getCallingConv();
2262
2263 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2264 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
2265 SmallVector<SDValue, 4> Parts(NumParts);
2266 getCopyToParts(DAG, getCurSDLoc(),
2267 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2268 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
2269
2270 // 'inreg' on function refers to return value
2271 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2272 if (RetInReg)
2273 Flags.setInReg();
2274
2275 if (I.getOperand(0)->getType()->isPointerTy()) {
2276 Flags.setPointer();
2277 Flags.setPointerAddrSpace(
2278 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2279 }
2280
2281 if (NeedsRegBlock) {
2282 Flags.setInConsecutiveRegs();
2283 if (j == NumValues - 1)
2284 Flags.setInConsecutiveRegsLast();
2285 }
2286
2287 // Propagate extension type if any
2288 if (ExtendKind == ISD::SIGN_EXTEND)
2289 Flags.setSExt();
2290 else if (ExtendKind == ISD::ZERO_EXTEND)
2291 Flags.setZExt();
2292 else if (F->getAttributes().hasRetAttr(Attribute::NoExt))
2293 Flags.setNoExt();
2294
2295 for (unsigned i = 0; i < NumParts; ++i) {
2296 Outs.push_back(ISD::OutputArg(Flags,
2297 Parts[i].getValueType().getSimpleVT(),
2298 VT, /*isfixed=*/true, 0, 0));
2299 OutVals.push_back(Parts[i]);
2300 }
2301 }
2302 }
2303 }
2304
2305 // Push in swifterror virtual register as the last element of Outs. This makes
2306 // sure swifterror virtual register will be returned in the swifterror
2307 // physical register.
2308 const Function *F = I.getParent()->getParent();
2309 if (TLI.supportSwiftError() &&
2310 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2311 assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2312 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2313 Flags.setSwiftError();
2314 Outs.push_back(ISD::OutputArg(
2315 Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2316 /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2317 // Create SDNode for the swifterror virtual register.
2318 OutVals.push_back(
2319 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2320 &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2321 EVT(TLI.getPointerTy(DL))));
2322 }
2323
2324 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2325 CallingConv::ID CallConv =
2326 DAG.getMachineFunction().getFunction().getCallingConv();
2327 Chain = DAG.getTargetLoweringInfo().LowerReturn(
2328 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2329
2330 // Verify that the target's LowerReturn behaved as expected.
2331 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2332 "LowerReturn didn't return a valid chain!");
2333
2334 // Update the DAG with the new chain value resulting from return lowering.
2335 DAG.setRoot(Chain);
2336}
2337
2338/// CopyToExportRegsIfNeeded - If the given value has virtual registers
2339/// created for it, emit nodes to copy the value into the virtual
2340/// registers.
2341void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2342 // Skip empty types
2343 if (V->getType()->isEmptyTy())
2344 return;
2345
2346 DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2347 if (VMI != FuncInfo.ValueMap.end()) {
2348 assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2349 "Unused value assigned virtual registers!");
2350 CopyValueToVirtualRegister(V, VMI->second);
2351 }
2352}
2353
2354/// ExportFromCurrentBlock - If this condition isn't known to be exported from
2355/// the current basic block, add it to ValueMap now so that we'll get a
2356/// CopyTo/FromReg.
2357void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2358 // No need to export constants.
2359 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2360
2361 // Already exported?
2362 if (FuncInfo.isExportedInst(V)) return;
2363
2364 Register Reg = FuncInfo.InitializeRegForValue(V);
2365 CopyValueToVirtualRegister(V, Reg);
2366}
2367
2368bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2369 const BasicBlock *FromBB) {
2370 // The operands of the setcc have to be in this block. We don't know
2371 // how to export them from some other block.
2372 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2373 // Can export from current BB.
2374 if (VI->getParent() == FromBB)
2375 return true;
2376
2377 // Is already exported, noop.
2378 return FuncInfo.isExportedInst(V);
2379 }
2380
2381 // If this is an argument, we can export it if the BB is the entry block or
2382 // if it is already exported.
2383 if (isa<Argument>(V)) {
2384 if (FromBB->isEntryBlock())
2385 return true;
2386
2387 // Otherwise, can only export this if it is already exported.
2388 return FuncInfo.isExportedInst(V);
2389 }
2390
2391 // Otherwise, constants can always be exported.
2392 return true;
2393}
2394
2395/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2396BranchProbability
2397SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2398 const MachineBasicBlock *Dst) const {
2399 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2400 const BasicBlock *SrcBB = Src->getBasicBlock();
2401 const BasicBlock *DstBB = Dst->getBasicBlock();
2402 if (!BPI) {
2403 // If BPI is not available, set the default probability as 1 / N, where N is
2404 // the number of successors.
2405 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2406 return BranchProbability(1, SuccSize);
2407 }
2408 return BPI->getEdgeProbability(SrcBB, DstBB);
2409}
2410
2411void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2412 MachineBasicBlock *Dst,
2413 BranchProbability Prob) {
2414 if (!FuncInfo.BPI)
2415 Src->addSuccessorWithoutProb(Dst);
2416 else {
2417 if (Prob.isUnknown())
2418 Prob = getEdgeProbability(Src, Dst);
2419 Src->addSuccessor(Dst, Prob);
2420 }
2421}
2422
2423static bool InBlock(const Value *V, const BasicBlock *BB) {
2424 if (const Instruction *I = dyn_cast<Instruction>(V))
2425 return I->getParent() == BB;
2426 return true;
2427}
2428
2429/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2430/// This function emits a branch and is used at the leaves of an OR or an
2431/// AND operator tree.
2432void
2433SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2434 MachineBasicBlock *TBB,
2435 MachineBasicBlock *FBB,
2436 MachineBasicBlock *CurBB,
2437 MachineBasicBlock *SwitchBB,
2438 BranchProbability TProb,
2439 BranchProbability FProb,
2440 bool InvertCond) {
2441 const BasicBlock *BB = CurBB->getBasicBlock();
2442
2443 // If the leaf of the tree is a comparison, merge the condition into
2444 // the caseblock.
2445 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2446 // The operands of the cmp have to be in this block. We don't know
2447 // how to export them from some other block. If this is the first block
2448 // of the sequence, no exporting is needed.
2449 if (CurBB == SwitchBB ||
2450 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2451 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2452 ISD::CondCode Condition;
2453 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2454 ICmpInst::Predicate Pred =
2455 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2456 Condition = getICmpCondCode(Pred);
2457 } else {
2458 const FCmpInst *FC = cast<FCmpInst>(Cond);
2459 FCmpInst::Predicate Pred =
2460 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2461 Condition = getFCmpCondCode(Pred);
2462 if (TM.Options.NoNaNsFPMath)
2463 Condition = getFCmpCodeWithoutNaN(Condition);
2464 }
2465
2466 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2467 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2468 SL->SwitchCases.push_back(CB);
2469 return;
2470 }
2471 }
2472
2473 // Create a CaseBlock record representing this branch.
2474 ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2475 CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2476 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2477 SL->SwitchCases.push_back(CB);
2478}
2479
2480// Collect dependencies on V recursively. This is used for the cost analysis in
2481// `shouldKeepJumpConditionsTogether`.
2482static bool collectInstructionDeps(
2483 SmallMapVector<const Instruction *, bool, 8> *Deps, const Value *V,
2484 SmallMapVector<const Instruction *, bool, 8> *Necessary = nullptr,
2485 unsigned Depth = 0) {
2486 // Return false if we have an incomplete count.
2487 if (Depth >= SelectionDAG::MaxRecursionDepth)
2488 return false;
2489
2490 auto *I = dyn_cast<Instruction>(V);
2491 if (I == nullptr)
2492 return true;
2493
2494 if (Necessary != nullptr) {
2495 // This instruction is necessary for the other side of the condition so
2496 // don't count it.
2497 if (Necessary->contains(I))
2498 return true;
2499 }
2500
2501 // Already added this dep.
2502 if (!Deps->try_emplace(I, false).second)
2503 return true;
2504
2505 for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)
2506 if (!collectInstructionDeps(Deps, I->getOperand(OpIdx), Necessary,
2507 Depth + 1))
2508 return false;
2509 return true;
2510}
2511
2512bool SelectionDAGBuilder::shouldKeepJumpConditionsTogether(
2513 const FunctionLoweringInfo &FuncInfo, const BranchInst &I,
2514 Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
2515 TargetLoweringBase::CondMergingParams Params) const {
2516 if (I.getNumSuccessors() != 2)
2517 return false;
2518
2519 if (!I.isConditional())
2520 return false;
2521
2522 if (Params.BaseCost < 0)
2523 return false;
2524
2525 // Baseline cost.
2526 InstructionCost CostThresh = Params.BaseCost;
2527
2528 BranchProbabilityInfo *BPI = nullptr;
2529 if (Params.LikelyBias || Params.UnlikelyBias)
2530 BPI = FuncInfo.BPI;
2531 if (BPI != nullptr) {
2532 // See if we are either likely to get an early out or compute both lhs/rhs
2533 // of the condition.
2534 BasicBlock *IfFalse = I.getSuccessor(0);
2535 BasicBlock *IfTrue = I.getSuccessor(1);
2536
2537 std::optional<bool> Likely;
2538 if (BPI->isEdgeHot(I.getParent(), IfTrue))
2539 Likely = true;
2540 else if (BPI->isEdgeHot(I.getParent(), IfFalse))
2541 Likely = false;
2542
2543 if (Likely) {
2544 if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2545 // It's likely we will have to compute both the lhs and rhs of the condition
2546 CostThresh += Params.LikelyBias;
2547 else {
2548 if (Params.UnlikelyBias < 0)
2549 return false;
2550 // It's likely we will get an early out.
2551 CostThresh -= Params.UnlikelyBias;
2552 }
2553 }
2554 }
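// Rough sketch of the bias: for `br (and a, b)`, if the hot edge is the one
// that needs both a and b evaluated, the budget grows by LikelyBias; if the
// early-out edge is hot instead, it shrinks by UnlikelyBias.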
2555
2556 if (CostThresh <= 0)
2557 return false;
2558
2559 // Collect "all" instructions that the lhs condition depends on.
2560 // Use a map for stable iteration (to avoid the non-determinism of iterating
2561 // over a SmallPtrSet). The `bool` value is just a dummy.
2562 SmallMapVector<const Instruction *, bool, 8> LhsDeps, RhsDeps;
2563 collectInstructionDeps(&LhsDeps, Lhs);
2564 // Collect "all" instructions that the rhs condition depends on AND that are
2565 // not already dependencies of the lhs. This gives us an estimate of which
2566 // instructions we stand to save by splitting the condition.
2567 if (!collectInstructionDeps(&RhsDeps, Rhs, &LhsDeps))
2568 return false;
2569 // Add the compare instruction itself unless it's a dependency of the LHS.
2570 if (const auto *RhsI = dyn_cast<Instruction>(Rhs))
2571 if (!LhsDeps.contains(RhsI))
2572 RhsDeps.try_emplace(RhsI, false);
2573
2574 const auto &TLI = DAG.getTargetLoweringInfo();
2575 const auto &TTI =
2576 TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
2577
2578 InstructionCost CostOfIncluding = 0;
2579 // See if this instruction will need to be computed independently of whether
2580 // the RHS is.
2581 Value *BrCond = I.getCondition();
2582 auto ShouldCountInsn = [&RhsDeps, &BrCond](const Instruction *Ins) {
2583 for (const auto *U : Ins->users()) {
2584 // If user is independent of RHS calculation we don't need to count it.
2585 if (auto *UIns = dyn_cast<Instruction>(U))
2586 if (UIns != BrCond && !RhsDeps.contains(UIns))
2587 return false;
2588 }
2589 return true;
2590 };
2591
2592 // Prune instructions from RHS Deps that are dependencies of unrelated
2593 // instructions. The value (SelectionDAG::MaxRecursionDepth) is fairly
2594 // arbitrary and just meant to cap how much time we spend in the pruning
2595 // loop. It's highly unlikely to come into effect.
2596 const unsigned MaxPruneIters = SelectionDAG::MaxRecursionDepth;
2597 // Stop after a certain point. No incorrectness from including too many
2598 // instructions.
2599 for (unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2600 const Instruction *ToDrop = nullptr;
2601 for (const auto &InsPair : RhsDeps) {
2602 if (!ShouldCountInsn(InsPair.first)) {
2603 ToDrop = InsPair.first;
2604 break;
2605 }
2606 }
2607 if (ToDrop == nullptr)
2608 break;
2609 RhsDeps.erase(ToDrop);
2610 }
2611
2612 for (const auto &InsPair : RhsDeps) {
2613 // Finally accumulate latency that we can only attribute to computing the
2614 // RHS condition. Use latency because we are essentially trying to calculate
2615 // the cost of the dependency chain.
2616 // Possible TODO: We could try to estimate ILP and make this more precise.
2617 CostOfIncluding +=
2618 TTI.getInstructionCost(InsPair.first, TargetTransformInfo::TCK_Latency);
2619
2620 if (CostOfIncluding > CostThresh)
2621 return false;
2622 }
2623 return true;
2624}
2625
2626void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2627 MachineBasicBlock *TBB,
2628 MachineBasicBlock *FBB,
2629 MachineBasicBlock *CurBB,
2630 MachineBasicBlock *SwitchBB,
2631 Instruction::BinaryOps Opc,
2632 BranchProbability TProb,
2633 BranchProbability FProb,
2634 bool InvertCond) {
2635 // Skip over a 'not' operator that is not itself part of the tree, and
2636 // remember to invert the op and operands at the next level.
2637 Value *NotCond;
2638 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2639 InBlock(NotCond, CurBB->getBasicBlock())) {
2640 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2641 !InvertCond);
2642 return;
2643 }
2644
2645 const Instruction *BOp = dyn_cast<Instruction>(Cond);
2646 const Value *BOpOp0, *BOpOp1;
2647 // Compute the effective opcode for Cond, taking into account whether it needs
2648 // to be inverted, e.g.
2649 // and (not (or A, B)), C
2650 // gets lowered as
2651 // and (and (not A, not B), C)
2652 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2653 if (BOp) {
2654 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2655 ? Instruction::And
2656 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2657 ? Instruction::Or
2658 : (Instruction::BinaryOps)0);
2659 if (InvertCond) {
2660 if (BOpc == Instruction::And)
2661 BOpc = Instruction::Or;
2662 else if (BOpc == Instruction::Or)
2663 BOpc = Instruction::And;
2664 }
2665 }
2666
2667 // If this node is not part of the or/and tree, emit it as a branch.
2668 // Note that all nodes in the tree should have same opcode.
2669 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2670 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2671 !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2672 !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2673 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2674 TProb, FProb, InvertCond);
2675 return;
2676 }
2677
2678 // Create TmpBB after CurBB.
2679 MachineFunction::iterator BBI(CurBB);
2680 MachineBasicBlock *TmpBB =
2681 CurBB->getParent()->CreateMachineBasicBlock(CurBB->getBasicBlock());
2682 CurBB->getParent()->insert(++BBI, TmpBB);
2683
2684 if (Opc == Instruction::Or) {
2685 // Codegen X | Y as:
2686 // BB1:
2687 // jmp_if_X TBB
2688 // jmp TmpBB
2689 // TmpBB:
2690 // jmp_if_Y TBB
2691 // jmp FBB
2692 //
2693
2694 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2695 // The requirement is that
2696 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2697 // = TrueProb for original BB.
2698 // Assuming the original probabilities are A and B, one choice is to set
2699 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2700 // A/(1+B) and 2B/(1+B). This choice assumes that
2701 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2702 // Another choice is to assume TrueProb for BB1 equals TrueProb for
2703 // TmpBB, but the math is more complicated.
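// Worked example for the first choice: with A = B = 1/2, BB1 gets
// probabilities (1/4, 3/4) and TmpBB gets (1/3, 2/3); reaching TBB then has
// probability 1/4 + 3/4 * 1/3 = 1/2 = A, as required.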
2704
2705 auto NewTrueProb = TProb / 2;
2706 auto NewFalseProb = TProb / 2 + FProb;
2707 // Emit the LHS condition.
2708 FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2709 NewFalseProb, InvertCond);
2710
2711 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2712 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2713 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2714 // Emit the RHS condition into TmpBB.
2715 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2716 Probs[1], InvertCond);
2717 } else {
2718 assert(Opc == Instruction::And && "Unknown merge op!");
2719 // Codegen X & Y as:
2720 // BB1:
2721 // jmp_if_X TmpBB
2722 // jmp FBB
2723 // TmpBB:
2724 // jmp_if_Y TBB
2725 // jmp FBB
2726 //
2727 // This requires creation of TmpBB after CurBB.
2728
2729 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2730 // The requirement is that
2731 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2732 // = FalseProb for original BB.
2733 // Assuming the original probabilities are A and B, one choice is to set
2734 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2735 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2736 // TrueProb for BB1 * FalseProb for TmpBB.
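// Worked example: with A = B = 1/2, BB1 gets probabilities (3/4, 1/4) and
// TmpBB gets (2/3, 1/3); reaching FBB then has probability
// 1/4 + 3/4 * 1/3 = 1/2 = B, as required.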
2737
2738 auto NewTrueProb = TProb + FProb / 2;
2739 auto NewFalseProb = FProb / 2;
2740 // Emit the LHS condition.
2741 FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2742 NewFalseProb, InvertCond);
2743
2744 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2745 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2746 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2747 // Emit the RHS condition into TmpBB.
2748 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2749 Probs[1], InvertCond);
2750 }
2751}
2752
2753/// If the set of cases should be emitted as a series of branches, return true.
2754/// If we should emit this as a bunch of and/or'd together conditions, return
2755/// false.
2756bool
2757SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2758 if (Cases.size() != 2) return true;
2759
2760 // If this is two comparisons of the same values or'd or and'd together, they
2761 // will get folded into a single comparison, so don't emit two blocks.
2762 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2763 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2764 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2765 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2766 return false;
2767 }
2768
2769 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2770 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2771 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2772 Cases[0].CC == Cases[1].CC &&
2773 isa<Constant>(Cases[0].CmpRHS) &&
2774 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2775 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2776 return false;
2777 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2778 return false;
2779 }
2780
2781 return true;
2782}
2783
2784void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2785 MachineBasicBlock *BrMBB = FuncInfo.MBB;
2786
2787 // Update machine-CFG edges.
2788 MachineBasicBlock *Succ0MBB = FuncInfo.getMBB(I.getSuccessor(0));
2789
2790 if (I.isUnconditional()) {
2791 // Update machine-CFG edges.
2792 BrMBB->addSuccessor(Succ0MBB);
2793
2794 // If this is not a fall-through branch or optimizations are switched off,
2795 // emit the branch.
2796 if (Succ0MBB != NextBlock(BrMBB) ||
2797 TM.getOptLevel() == CodeGenOptLevel::None) {
2798 auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2799 getControlRoot(), DAG.getBasicBlock(Succ0MBB));
2800 setValue(&I, Br);
2801 DAG.setRoot(Br);
2802 }
2803
2804 return;
2805 }
2806
2807 // If this condition is one of the special cases we handle, do special stuff
2808 // now.
2809 const Value *CondVal = I.getCondition();
2810 MachineBasicBlock *Succ1MBB = FuncInfo.getMBB(I.getSuccessor(1));
2811
2812 // If this is a series of conditions that are or'd or and'd together, emit
2813 // this as a sequence of branches instead of setcc's with and/or operations.
2814 // As long as jumps are not expensive (exceptions for multi-use logic ops,
2815 // unpredictable branches, and vector extracts because those jumps are likely
2816 // expensive for any target), this should improve performance.
2817 // For example, instead of something like:
2818 // cmp A, B
2819 // C = seteq
2820 // cmp D, E
2821 // F = setle
2822 // or C, F
2823 // jnz foo
2824 // Emit:
2825 // cmp A, B
2826 // je foo
2827 // cmp D, E
2828 // jle foo
2829 bool IsUnpredictable = I.hasMetadata(LLVMContext::MD_unpredictable);
2830 const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2831 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2832 BOp->hasOneUse() && !IsUnpredictable) {
2833 Value *Vec;
2834 const Value *BOp0, *BOp1;
2835 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2836 if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2837 Opcode = Instruction::And;
2838 else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2839 Opcode = Instruction::Or;
2840
2841 if (Opcode &&
2842 !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2843 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value()))) &&
2844 !shouldKeepJumpConditionsTogether(
2845 FuncInfo, I, Opcode, BOp0, BOp1,
2846 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2847 Opcode, BOp0, BOp1))) {
2848 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2849 getEdgeProbability(BrMBB, Succ0MBB),
2850 getEdgeProbability(BrMBB, Succ1MBB),
2851 /*InvertCond=*/false);
2852 // If the compares in later blocks need to use values not currently
2853 // exported from this block, export them now. This block should always
2854 // be the first entry.
2855 assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2856
2857 // Allow some cases to be rejected.
2858 if (ShouldEmitAsBranches(SL->SwitchCases)) {
2859 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2860 ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2861 ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2862 }
2863
2864 // Emit the branch for this block.
2865 visitSwitchCase(SL->SwitchCases[0], BrMBB);
2866 SL->SwitchCases.erase(SL->SwitchCases.begin());
2867 return;
2868 }
2869
2870 // Okay, we decided not to do this, remove any inserted MBB's and clear
2871 // SwitchCases.
2872 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2873 FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2874
2875 SL->SwitchCases.clear();
2876 }
2877 }
2878
2879 // Create a CaseBlock record representing this branch.
2880 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2881 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc(),
2882 BranchProbability::getUnknown(), BranchProbability::getUnknown(),
2883 IsUnpredictable);
2884
2885 // Use visitSwitchCase to actually insert the fast branch sequence for this
2886 // cond branch.
2887 visitSwitchCase(CB, BrMBB);
2888}
2889
2890/// visitSwitchCase - Emits the necessary code to represent a single node in
2891/// the binary search tree resulting from lowering a switch instruction.
2892void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2893 MachineBasicBlock *SwitchBB) {
2894 SDValue Cond;
2895 SDValue CondLHS = getValue(CB.CmpLHS);
2896 SDLoc dl = CB.DL;
2897
2898 if (CB.CC == ISD::SETTRUE) {
2899 // Branch or fall through to TrueBB.
2900 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2901 SwitchBB->normalizeSuccProbs();
2902 if (CB.TrueBB != NextBlock(SwitchBB)) {
2903 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2904 DAG.getBasicBlock(CB.TrueBB)));
2905 }
2906 return;
2907 }
2908
2909 auto &TLI = DAG.getTargetLoweringInfo();
2910 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2911
2912 // Build the setcc now.
2913 if (!CB.CmpMHS) {
2914 // Fold "(X == true)" to X and "(X == false)" to !X to
2915 // handle common cases produced by branch lowering.
2916 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2917 CB.CC == ISD::SETEQ)
2918 Cond = CondLHS;
2919 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2920 CB.CC == ISD::SETEQ) {
2921 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2922 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2923 } else {
2924 SDValue CondRHS = getValue(CB.CmpRHS);
2925
2926 // If a pointer's DAG type is larger than its memory type then the DAG
2927 // values are zero-extended. This breaks signed comparisons so truncate
2928 // back to the underlying type before doing the compare.
2929 if (CondLHS.getValueType() != MemVT) {
2930 CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2931 CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2932 }
2933 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2934 }
2935 } else {
2936 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2937
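// For example, a case range [10, 20] tested against x is emitted below as
// (x - 10) ule 10, unless 10 is the smallest value of the type, in which
// case a single x sle 20 suffices.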
2938 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2939 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2940
2941 SDValue CmpOp = getValue(CB.CmpMHS);
2942 EVT VT = CmpOp.getValueType();
2943
2944 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2945 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2946 ISD::SETLE);
2947 } else {
2948 SDValue SUB = DAG.getNode(ISD::SUB, dl,
2949 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2950 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2951 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2952 }
2953 }
2954
2955 // Update successor info
2956 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2957 // TrueBB and FalseBB are always different unless the incoming IR is
2958 // degenerate. This only happens when running llc on weird IR.
2959 if (CB.TrueBB != CB.FalseBB)
2960 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2961 SwitchBB->normalizeSuccProbs();
2962
2963 // If the lhs block is the next block, invert the condition so that we can
2964 // fall through to the lhs instead of the rhs block.
2965 if (CB.TrueBB == NextBlock(SwitchBB)) {
2966 std::swap(CB.TrueBB, CB.FalseBB);
2967 SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2968 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2969 }
2970
2971 SDNodeFlags Flags;
2972 Flags.setUnpredictable(CB.IsUnpredictable);
2973 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, MVT::Other, getControlRoot(),
2974 Cond, DAG.getBasicBlock(CB.TrueBB), Flags);
2975
2976 setValue(CurInst, BrCond);
2977
2978 // Insert the false branch. Do this even if it's a fall through branch,
2979 // this makes it easier to do DAG optimizations which require inverting
2980 // the branch condition.
2981 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2982 DAG.getBasicBlock(CB.FalseBB));
2983
2984 DAG.setRoot(BrCond);
2985}
2986
2987/// visitJumpTable - Emit the JumpTable node in the current MBB.
2988void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2989 // Emit the code for the jump table
2990 assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2991 assert(JT.Reg && "Should lower JT Header first!");
2992 EVT PTy = DAG.getTargetLoweringInfo().getJumpTableRegTy(DAG.getDataLayout());
2993 SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
2994 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2995 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2996 Index.getValue(1), Table, Index);
2997 DAG.setRoot(BrJumpTable);
2998}
2999
3000/// visitJumpTableHeader - This function emits the necessary code to produce an
3001/// index into the JumpTable from the switch case value.
3002void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
3003 JumpTableHeader &JTH,
3004 MachineBasicBlock *SwitchBB) {
3005 assert(JT.SL && "Should set SDLoc for SelectionDAG!");
3006 const SDLoc &dl = *JT.SL;
3007
3008 // Subtract the lowest switch case value from the value being switched on.
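// For example, for cases 10..13 this emits Sub = x - 10; the range check
// below then branches to the default block when Sub ugt 3 (Last - First).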
3009 SDValue SwitchOp = getValue(JTH.SValue);
3010 EVT VT = SwitchOp.getValueType();
3011 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
3012 DAG.getConstant(JTH.First, dl, VT));
3013
3014 // The SDNode we just created, which holds the value being switched on minus
3015 // the smallest case value, needs to be copied to a virtual register so it
3016 // can be used as an index into the jump table in a subsequent basic block.
3017 // This value may be smaller or larger than the target's pointer type, and
3018 // therefore require extension or truncation.
3019 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3020 SwitchOp =
3021 DAG.getZExtOrTrunc(Sub, dl, TLI.getJumpTableRegTy(DAG.getDataLayout()));
3022
3023 Register JumpTableReg =
3024 FuncInfo.CreateReg(TLI.getJumpTableRegTy(DAG.getDataLayout()));
3025 SDValue CopyTo =
3026 DAG.getCopyToReg(getControlRoot(), dl, JumpTableReg, SwitchOp);
3027 JT.Reg = JumpTableReg;
3028
3029 if (!JTH.FallthroughUnreachable) {
3030 // Emit the range check for the jump table, and branch to the default block
3031 // for the switch statement if the value being switched on exceeds the
3032 // largest case in the switch.
3033 SDValue CMP = DAG.getSetCC(
3034 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
3035 Sub.getValueType()),
3036 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
3037
3038 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
3039 MVT::Other, CopyTo, CMP,
3040 DAG.getBasicBlock(JT.Default));
3041
3042 // Avoid emitting unnecessary branches to the next block.
3043 if (JT.MBB != NextBlock(SwitchBB))
3044 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3045 DAG.getBasicBlock(JT.MBB));
3046
3047 DAG.setRoot(BrCond);
3048 } else {
3049 // Avoid emitting unnecessary branches to the next block.
3050 if (JT.MBB != NextBlock(SwitchBB))
3051 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
3052 DAG.getBasicBlock(JT.MBB)));
3053 else
3054 DAG.setRoot(CopyTo);
3055 }
3056}
3057
3058/// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
3059/// variable if there exists one.
3060static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
3061 SDValue &Chain) {
3062 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3063 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
3064 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
3065 MachineFunction &MF = DAG.getMachineFunction();
3066 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
3067 MachineSDNode *Node =
3068 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
3069 if (Global) {
3070 MachinePointerInfo MPInfo(Global);
3071 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
3072 MachineMemOperand::MODereferenceable;
3073 MachineMemOperand *MemRef = MF.getMachineMemOperand(
3074 MPInfo, Flags, LocationSize::precise(PtrTy.getSizeInBits() / 8),
3075 DAG.getEVTAlign(PtrTy));
3076 DAG.setNodeMemRefs(Node, {MemRef});
3077 }
3078 if (PtrTy != PtrMemTy)
3079 return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
3080 return SDValue(Node, 0);
3081}
3082
3083/// Codegen a new tail for a stack protector check ParentMBB which has had its
3084/// tail spliced into a stack protector check success bb.
3085///
3086/// For a high level explanation of how this fits into the stack protector
3087/// generation see the comment on the declaration of class
3088/// StackProtectorDescriptor.
3089void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
3090 MachineBasicBlock *ParentBB) {
3091
3092 // First create the loads to the guard/stack slot for the comparison.
3093 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3094 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
3095 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
3096
3097 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3098 int FI = MFI.getStackProtectorIndex();
3099
3100 SDValue Guard;
3101 SDLoc dl = getCurSDLoc();
3102 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
3103 const Module &M = *ParentBB->getParent()->getFunction().getParent();
3104 Align Align =
3105 DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0));
3106
3107 // Generate code to load the content of the guard slot.
3108 SDValue GuardVal = DAG.getLoad(
3109 PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
3110 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
3111 MachineMemOperand::MOVolatile);
3112
3113 if (TLI.useStackGuardXorFP())
3114 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
3115
3116 // Retrieve guard check function, nullptr if instrumentation is inlined.
3117 if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
3118 // The target provides a guard check function to validate the guard value.
3119 // Generate a call to that function with the content of the guard slot as
3120 // argument.
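// (E.g., on MSVC environments the guard check function is typically
// __security_check_cookie, which takes the loaded guard value as its single
// argument.)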
3121 FunctionType *FnTy = GuardCheckFn->getFunctionType();
3122 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3123
3124 TargetLowering::ArgListTy Args;
3125 TargetLowering::ArgListEntry Entry;
3126 Entry.Node = GuardVal;
3127 Entry.Ty = FnTy->getParamType(0);
3128 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3129 Entry.IsInReg = true;
3130 Args.push_back(Entry);
3131
3135 .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
3136 getValue(GuardCheckFn), std::move(Args));
3137
3138 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
3139 DAG.setRoot(Result.second);
3140 return;
3141 }
3142
3143 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3144 // Otherwise, emit a volatile load to retrieve the stack guard value.
3145 SDValue Chain = DAG.getEntryNode();
3146 if (TLI.useLoadStackGuardNode(M)) {
3147 Guard = getLoadStackGuard(DAG, dl, Chain);
3148 } else {
3149 const Value *IRGuard = TLI.getSDagStackGuard(M);
3150 SDValue GuardPtr = getValue(IRGuard);
3151
3152 Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3153 MachinePointerInfo(IRGuard, 0), Align,
3154 MachineMemOperand::MOVolatile);
3155 }
3156
3157 // Perform the comparison via a getsetcc.
3158 SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
3159 *DAG.getContext(),
3160 Guard.getValueType()),
3161 Guard, GuardVal, ISD::SETNE);
3162
3163 // If the guard and stack slot values are not equal, branch to the failure MBB.
3164 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
3165 MVT::Other, GuardVal.getOperand(0),
3166 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
3167 // Otherwise branch to success MBB.
3168 SDValue Br = DAG.getNode(ISD::BR, dl,
3169 MVT::Other, BrCond,
3170 DAG.getBasicBlock(SPD.getSuccessMBB()));
3171
3172 DAG.setRoot(Br);
3173}
3174
3175/// Codegen the failure basic block for a stack protector check.
3176///
3177/// A failure stack protector machine basic block consists simply of a call to
3178/// __stack_chk_fail().
3179///
3180/// For a high level explanation of how this fits into the stack protector
3181/// generation see the comment on the declaration of class
3182/// StackProtectorDescriptor.
3183void
3184SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
3185 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3186 TargetLowering::MakeLibCallOptions CallOptions;
3187 CallOptions.setDiscardResult(true);
3188 SDValue Chain = TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL,
3189 MVT::isVoid, {}, CallOptions, getCurSDLoc())
3190 .second;
3191
3192 // Emit a trap instruction if we are required to do so.
3193 const TargetOptions &TargetOpts = DAG.getTarget().Options;
3194 if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
3195 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
3196
3197 DAG.setRoot(Chain);
3198}
3199
3200/// visitBitTestHeader - This function emits the necessary code to produce a
3201/// value suitable for "bit tests".
3202void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
3203 MachineBasicBlock *SwitchBB) {
3204 SDLoc dl = getCurSDLoc();
3205
3206 // Subtract the minimum value.
3207 SDValue SwitchOp = getValue(B.SValue);
3208 EVT VT = SwitchOp.getValueType();
3209 SDValue RangeSub =
3210 DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
3211
3212 // Determine the type of the test operands.
3213 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3214 bool UsePtrType = false;
3215 if (!TLI.isTypeLegal(VT)) {
3216 UsePtrType = true;
3217 } else {
3218 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
3219 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
3220 // Switch table case ranges are encoded into a series of masks.
3221 // Just use pointer type, it's guaranteed to fit.
3222 UsePtrType = true;
3223 break;
3224 }
3225 }
3226 SDValue Sub = RangeSub;
3227 if (UsePtrType) {
3228 VT = TLI.getPointerTy(DAG.getDataLayout());
3229 Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
3230 }
3231
3232 B.RegVT = VT.getSimpleVT();
3233 B.Reg = FuncInfo.CreateReg(B.RegVT);
3234 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
3235
3236 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
3237
3238 if (!B.FallthroughUnreachable)
3239 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
3240 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
3241 SwitchBB->normalizeSuccProbs();
3242
3243 SDValue Root = CopyTo;
3244 if (!B.FallthroughUnreachable) {
3245 // Conditional branch to the default block.
3246 SDValue RangeCmp = DAG.getSetCC(dl,
3247 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
3248 RangeSub.getValueType()),
3249 RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
3250 ISD::SETUGT);
3251
3252 Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3253 DAG.getBasicBlock(B.Default));
3254 }
3255
3256 // Avoid emitting unnecessary branches to the next block.
3257 if (MBB != NextBlock(SwitchBB))
3258 Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
3259
3260 DAG.setRoot(Root);
3261}
3262
3263 /// visitBitTestCase - This function produces one "bit test".
3264 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
3265 MachineBasicBlock *NextMBB,
3266 BranchProbability BranchProbToNext,
3267 Register Reg, BitTestCase &B,
3268 MachineBasicBlock *SwitchBB) {
3269 SDLoc dl = getCurSDLoc();
3270 MVT VT = BB.RegVT;
3271 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
3272 SDValue Cmp;
3273 unsigned PopCount = llvm::popcount(B.Mask);
3274 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3275 if (PopCount == 1) {
3276 // Testing for a single bit; just compare the shift count with what it
3277 // would need to be to shift a 1 bit in that position.
3278 Cmp = DAG.getSetCC(
3279 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3280 ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
3281 ISD::SETEQ);
3282 } else if (PopCount == BB.Range) {
3283 // There is only one zero bit in the range, test for it directly.
3284 Cmp = DAG.getSetCC(
3285 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3286 ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
3287 } else {
3288 // Make desired shift
3289 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
3290 DAG.getConstant(1, dl, VT), ShiftOp);
3291
3292 // Emit bit tests and jumps
3293 SDValue AndOp = DAG.getNode(ISD::AND, dl,
3294 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
3295 Cmp = DAG.getSetCC(
3296 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3297 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
3298 }
3299
3300 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
3301 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
3302 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
3303 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3304 // The sum of B.ExtraProb and BranchProbToNext is not guaranteed to be one,
3305 // since they are relative probabilities (and thus work more like weights),
3306 // so we normalize them so that they sum to one.
3307 SwitchBB->normalizeSuccProbs();
3308
3309 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
3310 MVT::Other, getControlRoot(),
3311 Cmp, DAG.getBasicBlock(B.TargetBB));
3312
3313 // Avoid emitting unnecessary branches to the next block.
3314 if (NextMBB != NextBlock(SwitchBB))
3315 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3316 DAG.getBasicBlock(NextMBB));
3317
3318 DAG.setRoot(BrAnd);
3319}
3320
3321void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
3322 MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
3323
3324 // Retrieve successors, looking through artificial IR-level blocks such as
3325 // catchswitch.
3326 MachineBasicBlock *Return = FuncInfo.getMBB(I.getSuccessor(0));
3327 const BasicBlock *EHPadBB = I.getSuccessor(1);
3328 MachineBasicBlock *EHPadMBB = FuncInfo.getMBB(EHPadBB);
3329
3330 // Deopt and ptrauth bundles are lowered in helper functions, and we don't
3331 // have to do anything here to lower funclet bundles.
3332 assert(!I.hasOperandBundlesOtherThan(
3333 {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3334 LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3335 LLVMContext::OB_cfguardtarget, LLVMContext::OB_ptrauth,
3336 LLVMContext::OB_clang_arc_attachedcall}) &&
3337 "Cannot lower invokes with arbitrary operand bundles yet!");
3338
3339 const Value *Callee(I.getCalledOperand());
3340 const Function *Fn = dyn_cast<Function>(Callee);
3341 if (isa<InlineAsm>(Callee))
3342 visitInlineAsm(I, EHPadBB);
3343 else if (Fn && Fn->isIntrinsic()) {
3344 switch (Fn->getIntrinsicID()) {
3345 default:
3346 llvm_unreachable("Cannot invoke this intrinsic");
3347 case Intrinsic::donothing:
3348 // Ignore invokes to @llvm.donothing: jump directly to the next BB.
3349 case Intrinsic::seh_try_begin:
3350 case Intrinsic::seh_scope_begin:
3351 case Intrinsic::seh_try_end:
3352 case Intrinsic::seh_scope_end:
3353 if (EHPadMBB)
3354 // This is a block referenced by the EH table, so the destructor
3355 // funclet is not removed by optimizations.
3356 EHPadMBB->setMachineBlockAddressTaken();
3357 break;
3358 case Intrinsic::experimental_patchpoint_void:
3359 case Intrinsic::experimental_patchpoint:
3360 visitPatchpoint(I, EHPadBB);
3361 break;
3362 case Intrinsic::experimental_gc_statepoint:
3363 LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
3364 break;
3365 case Intrinsic::wasm_rethrow: {
3366 // This is usually done in visitTargetIntrinsic, but this intrinsic is
3367 // special because it can be invoked, so we manually lower it to a DAG
3368 // node here.
3369 SmallVector<SDValue, 8> Ops;
3370 Ops.push_back(getControlRoot()); // inchain for the terminator node
3371 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3372 Ops.push_back(
3373 DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
3374 TLI.getPointerTy(DAG.getDataLayout())));
3375 SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3376 DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3377 break;
3378 }
3379 }
3380 } else if (I.hasDeoptState()) {
3381 // Currently we do not lower any intrinsic calls with deopt operand bundles.
3382 // Eventually we will support lowering the @llvm.experimental.deoptimize
3383 // intrinsic, and right now there are no plans to support other intrinsics
3384 // with deopt state.
3385 LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
3386 } else if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
3387 LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), EHPadBB);
3388 } else {
3389 LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
3390 }
3391
3392 // If the value of the invoke is used outside of its defining block, make it
3393 // available as a virtual register.
3394 // We already took care of the exported value for the statepoint instruction
3395 // during the call to LowerStatepoint.
3396 if (!isa<GCStatepointInst>(I)) {
3397 CopyToExportRegsIfNeeded(&I);
3398 }
3399
3400 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3401 BranchProbabilityInfo *BPI = FuncInfo.BPI;
3402 BranchProbability EHPadBBProb =
3403 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3404 : BranchProbability::getZero();
3405 findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
3406
3407 // Update successor info.
3408 addSuccessorWithProb(InvokeMBB, Return);
3409 for (auto &UnwindDest : UnwindDests) {
3410 UnwindDest.first->setIsEHPad();
3411 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3412 }
3413 InvokeMBB->normalizeSuccProbs();
3414
3415 // Drop into normal successor.
3416 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
3417 DAG.getBasicBlock(Return)));
3418}
3419
3420void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
3421 MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
3422
3423 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3424 // have to do anything here to lower funclet bundles.
3425 assert(!I.hasOperandBundlesOtherThan(
3426 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3427 "Cannot lower callbrs with arbitrary operand bundles yet!");
3428
3429 assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
3430 visitInlineAsm(I);
3431 CopyToExportRegsIfNeeded(&I);
3432
3433 // Retrieve successors.
3434 SmallPtrSet<BasicBlock *, 8> Dests;
3435 Dests.insert(I.getDefaultDest());
3436 MachineBasicBlock *Return = FuncInfo.getMBB(I.getDefaultDest());
3437
3438 // Update successor info.
3439 addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3440 for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
3441 BasicBlock *Dest = I.getIndirectDest(i);
3442 MachineBasicBlock *Target = FuncInfo.getMBB(Dest);
3443 Target->setIsInlineAsmBrIndirectTarget();
3444 Target->setMachineBlockAddressTaken();
3445 Target->setLabelMustBeEmitted();
3446 // Don't add duplicate machine successors.
3447 if (Dests.insert(Dest).second)
3448 addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
3449 }
3450 CallBrMBB->normalizeSuccProbs();
3451
3452 // Drop into default successor.
3453 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
3454 MVT::Other, getControlRoot(),
3455 DAG.getBasicBlock(Return)));
3456}
3457
3458void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
3459 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3460}
3461
3462 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3463 assert(FuncInfo.MBB->isEHPad() &&
3464 "Call to landingpad not in landing pad!");
3465
3466 // If there aren't registers to copy the values into (e.g., during SjLj
3467 // exceptions), then don't bother to create these DAG nodes.
3468 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3469 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3470 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3471 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3472 return;
3473
3474 // If the landingpad's return type is a token type, we don't create DAG nodes
3475 // for its exception pointer and selector value. The extraction of exception
3476 // pointer or selector value from token type landingpads is not currently
3477 // supported.
3478 if (LP.getType()->isTokenTy())
3479 return;
3480
3481 SmallVector<EVT, 2> ValueVTs;
3482 SDLoc dl = getCurSDLoc();
3483 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3484 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3485
3486 // Get the two live-in registers as SDValues. The physregs have already been
3487 // copied into virtual registers.
3488 SDValue Ops[2];
3489 if (FuncInfo.ExceptionPointerVirtReg) {
3490 Ops[0] = DAG.getZExtOrTrunc(
3491 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3492 FuncInfo.ExceptionPointerVirtReg,
3493 TLI.getPointerTy(DAG.getDataLayout())),
3494 dl, ValueVTs[0]);
3495 } else {
3496 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3497 }
3498 Ops[1] = DAG.getZExtOrTrunc(
3499 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3500 FuncInfo.ExceptionSelectorVirtReg,
3501 TLI.getPointerTy(DAG.getDataLayout())),
3502 dl, ValueVTs[1]);
3503
3504 // Merge into one.
3505 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3506 DAG.getVTList(ValueVTs), Ops);
3507 setValue(&LP, Res);
3508}
3509
3510 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3511 MachineBasicBlock *Last) {
3512 // Update JTCases.
3513 for (JumpTableBlock &JTB : SL->JTCases)
3514 if (JTB.first.HeaderBB == First)
3515 JTB.first.HeaderBB = Last;
3516
3517 // Update BitTestCases.
3518 for (BitTestBlock &BTB : SL->BitTestCases)
3519 if (BTB.Parent == First)
3520 BTB.Parent = Last;
3521}
3522
3523void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3524 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3525
3526 // Update machine-CFG edges with unique successors.
3527 SmallSet<BasicBlock *, 32> Done;
3528 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3529 BasicBlock *BB = I.getSuccessor(i);
3530 bool Inserted = Done.insert(BB).second;
3531 if (!Inserted)
3532 continue;
3533
3534 MachineBasicBlock *Succ = FuncInfo.getMBB(BB);
3535 addSuccessorWithProb(IndirectBrMBB, Succ);
3536 }
3537 IndirectBrMBB->normalizeSuccProbs();
3538
3540 MVT::Other, getControlRoot(),
3541 getValue(I.getAddress())));
3542}
3543
3544 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3545 if (!DAG.getTarget().Options.TrapUnreachable)
3546 return;
3547
3548 // We may be able to ignore unreachable behind a noreturn call.
3549 if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode());
3550 Call && Call->doesNotReturn()) {
3551 if (DAG.getTarget().Options.NoTrapAfterNoreturn)
3552 return;
3553 // Do not emit an additional trap instruction.
3554 if (Call->isNonContinuableTrap())
3555 return;
3556 }
3557
3558 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3559}
3560
3561 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3562 SDNodeFlags Flags;
3563 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3564 Flags.copyFMF(*FPOp);
3565
3566 SDValue Op = getValue(I.getOperand(0));
3567 SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3568 Op, Flags);
3569 setValue(&I, UnNodeValue);
3570}
3571
3572 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3573 SDNodeFlags Flags;
3574 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3575 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3576 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3577 }
3578 if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3579 Flags.setExact(ExactOp->isExact());
3580 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
3581 Flags.setDisjoint(DisjointOp->isDisjoint());
3582 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3583 Flags.copyFMF(*FPOp);
3584
3585 SDValue Op1 = getValue(I.getOperand(0));
3586 SDValue Op2 = getValue(I.getOperand(1));
3587 SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3588 Op1, Op2, Flags);
3589 setValue(&I, BinNodeValue);
3590}
3591
3592void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3593 SDValue Op1 = getValue(I.getOperand(0));
3594 SDValue Op2 = getValue(I.getOperand(1));
3595
3596 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3597 Op1.getValueType(), DAG.getDataLayout());
3598
3599 // Coerce the shift amount to the right type if we can. This exposes the
3600 // truncate or zext to the optimizer early.
3601 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3602 assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
3603 "Unexpected shift type");
3604 Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3605 }
3606
3607 bool nuw = false;
3608 bool nsw = false;
3609 bool exact = false;
3610
3611 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3612
3613 if (const OverflowingBinaryOperator *OFBinOp =
3614 dyn_cast<const OverflowingBinaryOperator>(&I)) {
3615 nuw = OFBinOp->hasNoUnsignedWrap();
3616 nsw = OFBinOp->hasNoSignedWrap();
3617 }
3618 if (const PossiblyExactOperator *ExactOp =
3619 dyn_cast<const PossiblyExactOperator>(&I))
3620 exact = ExactOp->isExact();
3621 }
3622 SDNodeFlags Flags;
3623 Flags.setExact(exact);
3624 Flags.setNoSignedWrap(nsw);
3625 Flags.setNoUnsignedWrap(nuw);
3626 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3627 Flags);
3628 setValue(&I, Res);
3629}
3630
3631void SelectionDAGBuilder::visitSDiv(const User &I) {
3632 SDValue Op1 = getValue(I.getOperand(0));
3633 SDValue Op2 = getValue(I.getOperand(1));
3634
3635 SDNodeFlags Flags;
3636 Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3637 cast<PossiblyExactOperator>(&I)->isExact());
3638 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3639 Op2, Flags));
3640}
3641
3642void SelectionDAGBuilder::visitICmp(const ICmpInst &I) {
3643 ICmpInst::Predicate predicate = I.getPredicate();
3644 SDValue Op1 = getValue(I.getOperand(0));
3645 SDValue Op2 = getValue(I.getOperand(1));
3646 ISD::CondCode Opcode = getICmpCondCode(predicate);
3647
3648 auto &TLI = DAG.getTargetLoweringInfo();
3649 EVT MemVT =
3650 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3651
3652 // If a pointer's DAG type is larger than its memory type then the DAG values
3653 // are zero-extended. This breaks signed comparisons so truncate back to the
3654 // underlying type before doing the compare.
3655 if (Op1.getValueType() != MemVT) {
3656 Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3657 Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3658 }
3659
3660 SDNodeFlags Flags;
3661 Flags.setSameSign(I.hasSameSign());
3662 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3663
3664 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3665 I.getType());
3666 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3667}
3668
3669void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
3670 FCmpInst::Predicate predicate = I.getPredicate();
3671 SDValue Op1 = getValue(I.getOperand(0));
3672 SDValue Op2 = getValue(I.getOperand(1));
3673
3674 ISD::CondCode Condition = getFCmpCondCode(predicate);
3675 auto *FPMO = cast<FPMathOperator>(&I);
3676 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3677 Condition = getFCmpCodeWithoutNaN(Condition);
3678
3679 SDNodeFlags Flags;
3680 Flags.copyFMF(*FPMO);
3681 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3682
3683 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3684 I.getType());
3685 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3686}
3687
3688 // Check whether the condition of the select is used only by select
3689 // instructions, so no other user keeps the comparison alive.
3690static bool hasOnlySelectUsers(const Value *Cond) {
3691 return llvm::all_of(Cond->users(), [](const Value *V) {
3692 return isa<SelectInst>(V);
3693 });
3694}
3695
3696void SelectionDAGBuilder::visitSelect(const User &I) {
3697 SmallVector<EVT, 4> ValueVTs;
3698 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3699 ValueVTs);
3700 unsigned NumValues = ValueVTs.size();
3701 if (NumValues == 0) return;
3702
3703 SmallVector<SDValue, 4> Values(NumValues);
3704 SDValue Cond = getValue(I.getOperand(0));
3705 SDValue LHSVal = getValue(I.getOperand(1));
3706 SDValue RHSVal = getValue(I.getOperand(2));
3707 SmallVector<SDValue, 1> BaseOps(1, Cond);
3708 ISD::NodeType OpCode =
3709 Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3710
3711 bool IsUnaryAbs = false;
3712 bool Negate = false;
3713
3714 SDNodeFlags Flags;
3715 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3716 Flags.copyFMF(*FPOp);
3717
3718 Flags.setUnpredictable(
3719 cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3720
3721 // Min/max matching is only viable if all output VTs are the same.
3722 if (all_equal(ValueVTs)) {
3723 EVT VT = ValueVTs[0];
3724 LLVMContext &Ctx = *DAG.getContext();
3725 auto &TLI = DAG.getTargetLoweringInfo();
3726
3727 // We care about the legality of the operation after it has been type
3728 // legalized.
3729 while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3730 VT = TLI.getTypeToTransformTo(Ctx, VT);
3731
3732 // If the vselect is legal, assume we want to leave this as a vector setcc +
3733 // vselect. Otherwise, if this is going to be scalarized, we want to see if
3734 // min/max is legal on the scalar type.
3735 bool UseScalarMinMax = VT.isVector() &&
3736 !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3737
3738 // ValueTracking's select pattern matching does not account for -0.0,
3739 // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3740 // -0.0 is less than +0.0.
3741 const Value *LHS, *RHS;
3742 auto SPR = matchSelectPattern(&I, LHS, RHS);
3743 ISD::NodeType Opc = ISD::DELETED_NODE;
3744 switch (SPR.Flavor) {
3745 case SPF_UMAX: Opc = ISD::UMAX; break;
3746 case SPF_UMIN: Opc = ISD::UMIN; break;
3747 case SPF_SMAX: Opc = ISD::SMAX; break;
3748 case SPF_SMIN: Opc = ISD::SMIN; break;
3749 case SPF_FMINNUM:
3750 switch (SPR.NaNBehavior) {
3751 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3752 case SPNB_RETURNS_NAN: break;
3753 case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3754 case SPNB_RETURNS_ANY:
3755 if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) ||
3756 (UseScalarMinMax &&
3757 TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType())))
3758 Opc = ISD::FMINNUM;
3759 break;
3760 }
3761 break;
3762 case SPF_FMAXNUM:
3763 switch (SPR.NaNBehavior) {
3764 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3765 case SPNB_RETURNS_NAN: break;
3766 case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3767 case SPNB_RETURNS_ANY:
3768 if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) ||
3769 (UseScalarMinMax &&
3770 TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType())))
3771 Opc = ISD::FMAXNUM;
3772 break;
3773 }
3774 break;
3775 case SPF_NABS:
3776 Negate = true;
3777 [[fallthrough]];
3778 case SPF_ABS:
3779 IsUnaryAbs = true;
3780 Opc = ISD::ABS;
3781 break;
3782 default: break;
3783 }
3784
3785 if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3786 (TLI.isOperationLegalOrCustom(Opc, VT) ||
3787 (UseScalarMinMax &&
3788 TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3789 // If the underlying comparison instruction is used by any other
3790 // instruction, the consumed instructions won't be destroyed, so it is
3791 // not profitable to convert to a min/max.
3792 hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3793 OpCode = Opc;
3794 LHSVal = getValue(LHS);
3795 RHSVal = getValue(RHS);
3796 BaseOps.clear();
3797 }
3798
3799 if (IsUnaryAbs) {
3800 OpCode = Opc;
3801 LHSVal = getValue(LHS);
3802 BaseOps.clear();
3803 }
3804 }
3805
3806 if (IsUnaryAbs) {
3807 for (unsigned i = 0; i != NumValues; ++i) {
3808 SDLoc dl = getCurSDLoc();
3809 EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3810 Values[i] =
3811 DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3812 if (Negate)
3813 Values[i] = DAG.getNegative(Values[i], dl, VT);
3814 }
3815 } else {
3816 for (unsigned i = 0; i != NumValues; ++i) {
3817 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3818 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3819 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3820 Values[i] = DAG.getNode(
3821 OpCode, getCurSDLoc(),
3822 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3823 }
3824 }
3825
3827 DAG.getVTList(ValueVTs), Values));
3828}
3829
3830void SelectionDAGBuilder::visitTrunc(const User &I) {
3831 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3832 SDValue N = getValue(I.getOperand(0));
3833 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3834 I.getType());
3835 SDNodeFlags Flags;
3836 if (auto *Trunc = dyn_cast<TruncInst>(&I)) {
3837 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3838 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3839 }
3840
3841 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N, Flags));
3842}
3843
3844void SelectionDAGBuilder::visitZExt(const User &I) {
3845 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3846 // ZExt also can't be a cast to bool for the same reason, so there is not much to do.
3847 SDValue N = getValue(I.getOperand(0));
3848 auto &TLI = DAG.getTargetLoweringInfo();
3849 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3850
3851 SDNodeFlags Flags;
3852 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3853 Flags.setNonNeg(PNI->hasNonNeg());
3854
3855 // Eagerly use nonneg information to canonicalize towards sign_extend if
3856 // that is the target's preference.
3857 // TODO: Let the target do this later.
3858 if (Flags.hasNonNeg() &&
3859 TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) {
3860 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3861 return;
3862 }
3863
3864 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N, Flags));
3865}
3866
3867void SelectionDAGBuilder::visitSExt(const User &I) {
3868 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3869 // SExt also can't be a cast to bool for the same reason, so there is not much to do.
3870 SDValue N = getValue(I.getOperand(0));
3871 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3872 I.getType());
3873 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3874 }
3875
3876void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3877 // FPTrunc is never a no-op cast, no need to check
3878 SDValue N = getValue(I.getOperand(0));
3879 SDLoc dl = getCurSDLoc();
3880 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3881 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3882 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3883 DAG.getTargetConstant(
3884 0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3885}
3886
3887void SelectionDAGBuilder::visitFPExt(const User &I) {
3888 // FPExt is never a no-op cast, no need to check
3889 SDValue N = getValue(I.getOperand(0));
3890 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3891 I.getType());
3892 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3893 }
3894
3895void SelectionDAGBuilder::visitFPToUI(const User &I) {
3896 // FPToUI is never a no-op cast, no need to check
3897 SDValue N = getValue(I.getOperand(0));
3898 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3899 I.getType());
3900 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3901 }
3902
3903void SelectionDAGBuilder::visitFPToSI(const User &I) {
3904 // FPToSI is never a no-op cast, no need to check
3905 SDValue N = getValue(I.getOperand(0));
3906 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3907 I.getType());
3908 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3909 }
3910
3911void SelectionDAGBuilder::visitUIToFP(const User &I) {
3912 // UIToFP is never a no-op cast, no need to check
3913 SDValue N = getValue(I.getOperand(0));
3914 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3915 I.getType());
3916 SDNodeFlags Flags;
3917 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3918 Flags.setNonNeg(PNI->hasNonNeg());
3919
3920 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N, Flags));
3921}
3922
3923void SelectionDAGBuilder::visitSIToFP(const User &I) {
3924 // SIToFP is never a no-op cast, no need to check
3925 SDValue N = getValue(I.getOperand(0));
3926 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3927 I.getType());
3928 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3929 }
3930
3931void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3932 // What to do depends on the size of the integer and the size of the pointer.
3933 // We can either truncate, zero extend, or no-op, accordingly.
3934 SDValue N = getValue(I.getOperand(0));
3935 auto &TLI = DAG.getTargetLoweringInfo();
3936 EVT DestVT = TLI.getValueType(DAG.getDataLayout(),
3937 I.getType());
3938 EVT PtrMemVT =
3939 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3940 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3941 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3942 setValue(&I, N);
3943}
3944
3945void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3946 // What to do depends on the size of the integer and the size of the pointer.
3947 // We can either truncate, zero extend, or no-op, accordingly.
3948 SDValue N = getValue(I.getOperand(0));
3949 auto &TLI = DAG.getTargetLoweringInfo();
3950 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3951 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3952 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3953 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3954 setValue(&I, N);
3955}
3956
3957void SelectionDAGBuilder::visitBitCast(const User &I) {
3958 SDValue N = getValue(I.getOperand(0));
3959 SDLoc dl = getCurSDLoc();
3960 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3961 I.getType());
3962
3963 // BitCast assures us that source and destination are the same size so this is
3964 // either a BITCAST or a no-op.
3965 if (DestVT != N.getValueType())
3966 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3967 DestVT, N)); // convert types.
3968 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3969 // might fold any kind of constant expression to an integer constant and that
3970 // is not what we are looking for. Only recognize a bitcast of a genuine
3971 // constant integer as an opaque constant.
3972 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3973 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3974 /*isOpaque*/true));
3975 else
3976 setValue(&I, N); // noop cast.
3977}
3978
3979 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3980 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3981 const Value *SV = I.getOperand(0);
3982 SDValue N = getValue(SV);
3983 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3984
3985 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3986 unsigned DestAS = I.getType()->getPointerAddressSpace();
3987
3988 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3989 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3990
3991 setValue(&I, N);
3992}
3993
3994 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3995 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3996 SDValue InVec = getValue(I.getOperand(0));
3997 SDValue InVal = getValue(I.getOperand(1));
3998 SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3999 TLI.getVectorIdxTy(DAG.getDataLayout()));
4000 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
4001 TLI.getValueType(DAG.getDataLayout(), I.getType()),
4002 InVec, InVal, InIdx));
4003}
4004
4005 void SelectionDAGBuilder::visitExtractElement(const User &I) {
4006 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4007 SDValue InVec = getValue(I.getOperand(0));
4008 SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
4009 TLI.getVectorIdxTy(DAG.getDataLayout()));
4010 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
4011 TLI.getValueType(DAG.getDataLayout(), I.getType()),
4012 InVec, InIdx));
4013}
4014
4015void SelectionDAGBuilder::visitShuffleVector(const User &I) {
4016 SDValue Src1 = getValue(I.getOperand(0));
4017 SDValue Src2 = getValue(I.getOperand(1));
4018 ArrayRef<int> Mask;
4019 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
4020 Mask = SVI->getShuffleMask();
4021 else
4022 Mask = cast<ConstantExpr>(I).getShuffleMask();
4023 SDLoc DL = getCurSDLoc();
4024 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4025 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4026 EVT SrcVT = Src1.getValueType();
4027
4028 if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
4029 VT.isScalableVector()) {
4030 // Canonical splat form of first element of first input vector.
4031 SDValue FirstElt =
4032 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
4033 DAG.getVectorIdxConstant(0, DL));
4034 setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
4035 return;
4036 }
4037
4038 // For now, we only handle splats for scalable vectors.
4039 // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
4040 // for targets that support a SPLAT_VECTOR for non-scalable vector types.
4041 assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
4042
4043 unsigned SrcNumElts = SrcVT.getVectorNumElements();
4044 unsigned MaskNumElts = Mask.size();
4045
4046 if (SrcNumElts == MaskNumElts) {
4047 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
4048 return;
4049 }
4050
4051 // Normalize the shuffle vector since mask and vector length don't match.
4052 if (SrcNumElts < MaskNumElts) {
4053 // Mask is longer than the source vectors. We can use concatenate vector to
4054 // make the mask and vectors lengths match.
4055
4056 if (MaskNumElts % SrcNumElts == 0) {
4057 // Mask length is a multiple of the source vector length.
4058 // Check if the shuffle is some kind of concatenation of the input
4059 // vectors.
4060 unsigned NumConcat = MaskNumElts / SrcNumElts;
4061 bool IsConcat = true;
4062 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4063 for (unsigned i = 0; i != MaskNumElts; ++i) {
4064 int Idx = Mask[i];
4065 if (Idx < 0)
4066 continue;
4067 // Ensure the indices in each SrcVT sized piece are sequential and that
4068 // the same source is used for the whole piece.
4069 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4070 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4071 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
4072 IsConcat = false;
4073 break;
4074 }
4075 // Remember which source this index came from.
4076 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4077 }
4078
4079 // The shuffle is concatenating multiple vectors together. Just emit
4080 // a CONCAT_VECTORS operation.
4081 if (IsConcat) {
4082 SmallVector<SDValue, 8> ConcatOps;
4083 for (auto Src : ConcatSrcs) {
4084 if (Src < 0)
4085 ConcatOps.push_back(DAG.getUNDEF(SrcVT));
4086 else if (Src == 0)
4087 ConcatOps.push_back(Src1);
4088 else
4089 ConcatOps.push_back(Src2);
4090 }
4091 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
4092 return;
4093 }
4094 }
4095
4096 unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
4097 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4098 EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
4099 PaddedMaskNumElts);
4100
4101 // Pad both vectors with undefs to make them the same length as the mask.
4102 SDValue UndefVal = DAG.getUNDEF(SrcVT);
4103
4104 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
4105 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
4106 MOps1[0] = Src1;
4107 MOps2[0] = Src2;
4108
4109 Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
4110 Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
4111
4112 // Readjust mask for new input vector length.
4113 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4114 for (unsigned i = 0; i != MaskNumElts; ++i) {
4115 int Idx = Mask[i];
4116 if (Idx >= (int)SrcNumElts)
4117 Idx -= SrcNumElts - PaddedMaskNumElts;
4118 MappedOps[i] = Idx;
4119 }
4120
4121 SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
4122
4123 // If the concatenated vector was padded, extract a subvector with the
4124 // correct number of elements.
4125 if (MaskNumElts != PaddedMaskNumElts)
4126 Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
4127 DAG.getVectorIdxConstant(0, DL));
4128
4129 setValue(&I, Result);
4130 return;
4131 }
4132
4133 assert(SrcNumElts > MaskNumElts);
4134
4135 // Analyze the access pattern of the vector to see if we can extract
4136 // two subvectors and do the shuffle.
4137 int StartIdx[2] = {-1, -1}; // StartIdx to extract from
4138 bool CanExtract = true;
4139 for (int Idx : Mask) {
4140 unsigned Input = 0;
4141 if (Idx < 0)
4142 continue;
4143
4144 if (Idx >= (int)SrcNumElts) {
4145 Input = 1;
4146 Idx -= SrcNumElts;
4147 }
4148
4149 // If all the indices come from the same MaskNumElts sized portion of
4150 // the sources we can use extract. Also make sure the extract wouldn't
4151 // extract past the end of the source.
4152 int NewStartIdx = alignDown(Idx, MaskNumElts);
4153 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4154 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4155 CanExtract = false;
4156 // Make sure we always update StartIdx as we use it to track if all
4157 // elements are undef.
4158 StartIdx[Input] = NewStartIdx;
4159 }
4160
4161 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4162 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
4163 return;
4164 }
4165 if (CanExtract) {
4166 // Extract appropriate subvector and generate a vector shuffle
4167 for (unsigned Input = 0; Input < 2; ++Input) {
4168 SDValue &Src = Input == 0 ? Src1 : Src2;
4169 if (StartIdx[Input] < 0)
4170 Src = DAG.getUNDEF(VT);
4171 else {
4172 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
4173 DAG.getVectorIdxConstant(StartIdx[Input], DL));
4174 }
4175 }
4176
4177 // Calculate new mask.
4178 SmallVector<int, 8> MappedOps(Mask);
4179 for (int &Idx : MappedOps) {
4180 if (Idx >= (int)SrcNumElts)
4181 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4182 else if (Idx >= 0)
4183 Idx -= StartIdx[0];
4184 }
4185
4186 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
4187 return;
4188 }
4189
4190 // We can't use either concat vectors or extract subvectors, so fall back
4191 // to replacing the shuffle with per-element extracts and a build vector.
4193 EVT EltVT = VT.getVectorElementType();
4194 SmallVector<SDValue, 8> Ops;
4195 for (int Idx : Mask) {
4196 SDValue Res;
4197
4198 if (Idx < 0) {
4199 Res = DAG.getUNDEF(EltVT);
4200 } else {
4201 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4202 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
4203
4204 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
4205 DAG.getVectorIdxConstant(Idx, DL));
4206 }
4207
4208 Ops.push_back(Res);
4209 }
4210
4211 setValue(&I, DAG.getBuildVector(VT, DL, Ops));
4212}
4213
4214void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
4215 ArrayRef<unsigned> Indices = I.getIndices();
4216 const Value *Op0 = I.getOperand(0);
4217 const Value *Op1 = I.getOperand(1);
4218 Type *AggTy = I.getType();
4219 Type *ValTy = Op1->getType();
4220 bool IntoUndef = isa<UndefValue>(Op0);
4221 bool FromUndef = isa<UndefValue>(Op1);
4222
4223 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4224
4225 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4226 SmallVector<EVT, 4> AggValueVTs;
4227 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
4228 SmallVector<EVT, 4> ValValueVTs;
4229 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4230
4231 unsigned NumAggValues = AggValueVTs.size();
4232 unsigned NumValValues = ValValueVTs.size();
4233 SmallVector<SDValue, 4> Values(NumAggValues);
4234
4235 // Ignore an insertvalue that produces an empty object
4236 if (!NumAggValues) {
4237 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4238 return;
4239 }
4240
4241 SDValue Agg = getValue(Op0);
4242 unsigned i = 0;
4243 // Copy the beginning value(s) from the original aggregate.
4244 for (; i != LinearIndex; ++i)
4245 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4246 SDValue(Agg.getNode(), Agg.getResNo() + i);
4247 // Copy values from the inserted value(s).
4248 if (NumValValues) {
4249 SDValue Val = getValue(Op1);
4250 for (; i != LinearIndex + NumValValues; ++i)
4251 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4252 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
4253 }
4254 // Copy remaining value(s) from the original aggregate.
4255 for (; i != NumAggValues; ++i)
4256 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4257 SDValue(Agg.getNode(), Agg.getResNo() + i);
4258
4259 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4260 DAG.getVTList(AggValueVTs), Values));
4261}
4262
4263void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
4264 ArrayRef<unsigned> Indices = I.getIndices();
4265 const Value *Op0 = I.getOperand(0);
4266 Type *AggTy = Op0->getType();
4267 Type *ValTy = I.getType();
4268 bool OutOfUndef = isa<UndefValue>(Op0);
4269
4270 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4271
4272 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4273 SmallVector<EVT, 4> ValValueVTs;
4274 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4275
4276 unsigned NumValValues = ValValueVTs.size();
4277
4278 // Ignore an extractvalue that produces an empty object
4279 if (!NumValValues) {
4280 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4281 return;
4282 }
4283
4284 SmallVector<SDValue, 4> Values(NumValValues);
4285
4286 SDValue Agg = getValue(Op0);
4287 // Copy out the selected value(s).
4288 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4289 Values[i - LinearIndex] =
4290 OutOfUndef ?
4291 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
4292 SDValue(Agg.getNode(), Agg.getResNo() + i);
4293
4295 DAG.getVTList(ValValueVTs), Values));
4296}
4297
4298void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
4299 Value *Op0 = I.getOperand(0);
4300 // Note that the pointer operand may be a vector of pointers. Take the scalar
4301 // element which holds a pointer.
4302 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4303 SDValue N = getValue(Op0);
4304 SDLoc dl = getCurSDLoc();
4305 auto &TLI = DAG.getTargetLoweringInfo();
4306 GEPNoWrapFlags NW = cast<GEPOperator>(I).getNoWrapFlags();
4307
4308 // Normalize Vector GEP - all scalar operands should be converted to the
4309 // splat vector.
4310 bool IsVectorGEP = I.getType()->isVectorTy();
4311 ElementCount VectorElementCount =
4312 IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4313 : ElementCount::getFixed(0);
4314
4315 if (IsVectorGEP && !N.getValueType().isVector()) {
4316 LLVMContext &Context = *DAG.getContext();
4317 EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
4318 N = DAG.getSplat(VT, dl, N);
4319 }
4320
4321 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
4322 GTI != E; ++GTI) {
4323 const Value *Idx = GTI.getOperand();
4324 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4325 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4326 if (Field) {
4327 // N = N + Offset
4328 uint64_t Offset =
4329 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4330
4331 // In an inbounds GEP with an offset that is nonnegative even when
4332 // interpreted as signed, assume there is no unsigned overflow.
4333 SDNodeFlags Flags;
4334 if (NW.hasNoUnsignedWrap() ||
4335 (int64_t(Offset) >= 0 && NW.hasNoUnsignedSignedWrap()))
4336 Flags.setNoUnsignedWrap(true);
4337
4338 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
4339 DAG.getConstant(Offset, dl, N.getValueType()), Flags);
4340 }
4341 } else {
4342 // IdxSize is the width of the arithmetic according to IR semantics.
4343 // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4344 // (and fix up the result later).
4345 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4346 MVT IdxTy = MVT::getIntegerVT(IdxSize);
4347 TypeSize ElementSize =
4348 GTI.getSequentialElementStride(DAG.getDataLayout());
4349 // We intentionally mask away the high bits here; ElementSize may not
4350 // fit in IdxTy.
4351 APInt ElementMul(IdxSize, ElementSize.getKnownMinValue(),
4352 /*isSigned=*/false, /*implicitTrunc=*/true);
4353 bool ElementScalable = ElementSize.isScalable();
4354
4355 // If this is a scalar constant or a splat vector of constants,
4356 // handle it quickly.
4357 const auto *C = dyn_cast<Constant>(Idx);
4358 if (C && isa<VectorType>(C->getType()))
4359 C = C->getSplatValue();
4360
4361 const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4362 if (CI && CI->isZero())
4363 continue;
4364 if (CI && !ElementScalable) {
4365 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4366 LLVMContext &Context = *DAG.getContext();
4367 SDValue OffsVal;
4368 if (IsVectorGEP)
4369 OffsVal = DAG.getConstant(
4370 Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4371 else
4372 OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4373
4374 // In an inbounds GEP with an offset that is nonnegative even when
4375 // interpreted as signed, assume there is no unsigned overflow.
4376 SDNodeFlags Flags;
4377 if (NW.hasNoUnsignedWrap() ||
4378 (Offs.isNonNegative() && NW.hasNoUnsignedSignedWrap()))
4379 Flags.setNoUnsignedWrap(true);
4380
4381 OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4382
4383 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
4384 continue;
4385 }
4386
4387 // N = N + Idx * ElementMul;
4388 SDValue IdxN = getValue(Idx);
4389
4390 if (!IdxN.getValueType().isVector() && IsVectorGEP) {
4391 EVT VT = EVT::getVectorVT(*DAG.getContext(), IdxN.getValueType(),
4392 VectorElementCount);
4393 IdxN = DAG.getSplat(VT, dl, IdxN);
4394 }
4395
4396 // If the index is smaller or larger than intptr_t, truncate or extend
4397 // it.
4398 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4399
4400 SDNodeFlags ScaleFlags;
4401 // The multiplication of an index by the type size does not wrap the
4402 // pointer index type in a signed sense (mul nsw).
4403 ScaleFlags.setNoSignedWrap(NW.hasNoUnsignedSignedWrap());
4404
4405 // The multiplication of an index by the type size does not wrap the
4406 // pointer index type in an unsigned sense (mul nuw).
4407 ScaleFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4408
4409 if (ElementScalable) {
4410 EVT VScaleTy = N.getValueType().getScalarType();
4411 SDValue VScale = DAG.getNode(
4412 ISD::VSCALE, dl, VScaleTy,
4413 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4414 if (IsVectorGEP)
4415 VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4416 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale,
4417 ScaleFlags);
4418 } else {
4419 // If this is a multiply by a power of two, turn it into a shl
4420 // immediately. This is a very common case.
4421 if (ElementMul != 1) {
4422 if (ElementMul.isPowerOf2()) {
4423 unsigned Amt = ElementMul.logBase2();
4424 IdxN = DAG.getNode(ISD::SHL, dl, N.getValueType(), IdxN,
4425 DAG.getConstant(Amt, dl, IdxN.getValueType()),
4426 ScaleFlags);
4427 } else {
4428 SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4429 IdxN.getValueType());
4430 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, Scale,
4431 ScaleFlags);
4432 }
4433 }
4434 }
4435
4436 // The successive addition of the current address, truncated to the
4437 // pointer index type and interpreted as an unsigned number, and each
4438 // offset, also interpreted as an unsigned number, does not wrap the
4439 // pointer index type (add nuw).
4440 SDNodeFlags AddFlags;
4441 AddFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4442
4443 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, IdxN, AddFlags);
4444 }
4445 }
4446
4447 MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4448 MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4449 if (IsVectorGEP) {
4450 PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4451 PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4452 }
4453
4454 if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4455 N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4456
4457 setValue(&I, N);
4458}
4459
4460void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4461 // If this is a fixed sized alloca in the entry block of the function,
4462 // allocate it statically on the stack.
4463 if (FuncInfo.StaticAllocaMap.count(&I))
4464 return; // getValue will auto-populate this.
4465
4466 SDLoc dl = getCurSDLoc();
4467 Type *Ty = I.getAllocatedType();
4468 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4469 auto &DL = DAG.getDataLayout();
4470 TypeSize TySize = DL.getTypeAllocSize(Ty);
4471 MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
4472
4473 SDValue AllocSize = getValue(I.getArraySize());
4474
4475 EVT IntPtr = TLI.getPointerTy(DL, I.getAddressSpace());
4476 if (AllocSize.getValueType() != IntPtr)
4477 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4478
4479 if (TySize.isScalable())
4480 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4481 DAG.getVScale(dl, IntPtr,
4482 APInt(IntPtr.getScalarSizeInBits(),
4483 TySize.getKnownMinValue())));
4484 else {
4485 SDValue TySizeValue =
4486 DAG.getConstant(TySize.getFixedValue(), dl, MVT::getIntegerVT(64));
4487 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4488 DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4489 }
4490
4491 // Handle alignment. If the requested alignment is less than or equal to
4492 // the stack alignment, ignore it; if the requested alignment is greater, we
4493 // record it in the DYNAMIC_STACKALLOC node.
4494 Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4495 if (*Alignment <= StackAlign)
4496 Alignment = std::nullopt;
4497
4498 const uint64_t StackAlignMask = StackAlign.value() - 1U;
4499 // Round the size of the allocation up to the stack alignment size
4500 // by adding SA-1 to the size. This doesn't overflow because we're computing
4501 // an address inside an alloca.
4502 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4503 DAG.getConstant(StackAlignMask, dl, IntPtr),
4504 SDNodeFlags::NoUnsignedWrap);
4505
4506 // Mask out the low bits for alignment purposes.
4507 AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4508 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4509
4510 SDValue Ops[] = {
4511 getRoot(), AllocSize,
4512 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4513 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4514 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4515 setValue(&I, DSA);
4516 DAG.setRoot(DSA.getValue(1));
4517
4518 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4519 }
4520
4521static const MDNode *getRangeMetadata(const Instruction &I) {
4522 // If !noundef is not present, then !range violation results in a poison
4523 // value rather than immediate undefined behavior. In theory, transferring
4524 // these annotations to SDAG is fine, but in practice there are key SDAG
4525 // transforms that are known not to be poison-safe, such as folding logical
4526 // and/or to bitwise and/or. For now, only transfer !range if !noundef is
4527 // also present.
4528 if (!I.hasMetadata(LLVMContext::MD_noundef))
4529 return nullptr;
4530 return I.getMetadata(LLVMContext::MD_range);
4531}
4532
4533static std::optional<ConstantRange> getRange(const Instruction &I) {
4534 if (const auto *CB = dyn_cast<CallBase>(&I)) {
4535 // See the comment in getRangeMetadata about this check.
4536 if (CB->hasRetAttr(Attribute::NoUndef))
4537 return CB->getRange();
4538 }
4539 if (const MDNode *Range = getRangeMetadata(I))
4540 return getConstantRangeFromMetadata(*Range);
4541 return std::nullopt;
4542}
4543
4544void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4545 if (I.isAtomic())
4546 return visitAtomicLoad(I);
4547
4549 const Value *SV = I.getOperand(0);
4550 if (TLI.supportSwiftError()) {
4551 // Swifterror values can come from either a function parameter with
4552 // swifterror attribute or an alloca with swifterror attribute.
4553 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4554 if (Arg->hasSwiftErrorAttr())
4555 return visitLoadFromSwiftError(I);
4556 }
4557
4558 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4559 if (Alloca->isSwiftError())
4560 return visitLoadFromSwiftError(I);
4561 }
4562 }
4563
4564 SDValue Ptr = getValue(SV);
4565
4566 Type *Ty = I.getType();
4567 SmallVector<EVT, 4> ValueVTs, MemVTs;
4568 SmallVector<TypeSize, 4> Offsets;
4569 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
4570 unsigned NumValues = ValueVTs.size();
4571 if (NumValues == 0)
4572 return;
4573
4574 Align Alignment = I.getAlign();
4575 AAMDNodes AAInfo = I.getAAMetadata();
4576 const MDNode *Ranges = getRangeMetadata(I);
4577 bool isVolatile = I.isVolatile();
4578 MachineMemOperand::Flags MMOFlags =
4579 TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4580
4581 SDValue Root;
4582 bool ConstantMemory = false;
4583 if (isVolatile)
4584 // Serialize volatile loads with other side effects.
4585 Root = getRoot();
4586 else if (NumValues > MaxParallelChains)
4587 Root = getMemoryRoot();
4588 else if (AA &&
4589 AA->pointsToConstantMemory(MemoryLocation(
4590 SV,
4591 LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4592 AAInfo))) {
4593 // Do not serialize (non-volatile) loads of constant memory with anything.
4594 Root = DAG.getEntryNode();
4595 ConstantMemory = true;
4597 } else {
4598 // Do not serialize non-volatile loads against each other.
4599 Root = DAG.getRoot();
4600 }
4601
4602 SDLoc dl = getCurSDLoc();
4603
4604 if (isVolatile)
4605 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4606
4607 SmallVector<SDValue, 4> Values(NumValues);
4608 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4609
4610 unsigned ChainI = 0;
4611 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4612 // Serializing loads here may result in excessive register pressure, and
4613 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4614 // could recover a bit by hoisting nodes upward in the chain by recognizing
4615 // they are side-effect free or do not alias. The optimizer should really
4616 // avoid this case by converting large object/array copies to llvm.memcpy
4617 // (MaxParallelChains should always remain as a failsafe).
4618 if (ChainI == MaxParallelChains) {
4619 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4620 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4621 ArrayRef(Chains.data(), ChainI));
4622 Root = Chain;
4623 ChainI = 0;
4624 }
4625
4626 // TODO: MachinePointerInfo only supports a fixed length offset.
4627 MachinePointerInfo PtrInfo =
4628 !Offsets[i].isScalable() || Offsets[i].isZero()
4629 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4630 : MachinePointerInfo();
4631
4632 SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4633 SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4634 MMOFlags, AAInfo, Ranges);
4635 Chains[ChainI] = L.getValue(1);
4636
4637 if (MemVTs[i] != ValueVTs[i])
4638 L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4639
4640 Values[i] = L;
4641 }
4642
4643 if (!ConstantMemory) {
4644 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4645 ArrayRef(Chains.data(), ChainI));
4646 if (isVolatile)
4647 DAG.setRoot(Chain);
4648 else
4649 PendingLoads.push_back(Chain);
4650 }
4651
4653 DAG.getVTList(ValueVTs), Values));
4654}
4655
4656 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4657 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4658 "call visitStoreToSwiftError when backend supports swifterror");
4659
4660 SmallVector<EVT, 4> ValueVTs;
4661 SmallVector<uint64_t, 4> Offsets;
4662 const Value *SrcV = I.getOperand(0);
4663 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4664 SrcV->getType(), ValueVTs, &Offsets, 0);
4665 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4666 "expect a single EVT for swifterror");
4667
4668 SDValue Src = getValue(SrcV);
4669 // Create a virtual register, then update the virtual register.
4670 Register VReg =
4671 SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4672 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4673 // Chain can be getRoot or getControlRoot.
4674 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4675 SDValue(Src.getNode(), Src.getResNo()));
4676 DAG.setRoot(CopyNode);
4677}
4678
4679 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4680 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4681 "call visitLoadFromSwiftError when backend supports swifterror");
4682
4683 assert(!I.isVolatile() &&
4684 !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4685 !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4686 "Support volatile, non temporal, invariant for load_from_swift_error");
4687
4688 const Value *SV = I.getOperand(0);
4689 Type *Ty = I.getType();
4690 assert(
4691 (!AA ||
4692 !AA->pointsToConstantMemory(MemoryLocation(
4693 SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4694 I.getAAMetadata()))) &&
4695 "load_from_swift_error should not be constant memory");
4696
4697 SmallVector<EVT, 4> ValueVTs;
4698 SmallVector<uint64_t, 4> Offsets;
4699 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4700 ValueVTs, &Offsets, 0);
4701 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4702 "expect a single EVT for swifterror");
4703
4704 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4705 SDValue L = DAG.getCopyFromReg(
4706 getRoot(), getCurSDLoc(),
4707 SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4708
4709 setValue(&I, L);
4710}
4711
4712void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4713 if (I.isAtomic())
4714 return visitAtomicStore(I);
4715
4716 const Value *SrcV = I.getOperand(0);
4717 const Value *PtrV = I.getOperand(1);
4718
4719 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4720 if (TLI.supportSwiftError()) {
4721 // Swifterror values can come from either a function parameter with
4722 // swifterror attribute or an alloca with swifterror attribute.
4723 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4724 if (Arg->hasSwiftErrorAttr())
4725 return visitStoreToSwiftError(I);
4726 }
4727
4728 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4729 if (Alloca->isSwiftError())
4730 return visitStoreToSwiftError(I);
4731 }
4732 }
4733
4734 SmallVector<EVT, 4> ValueVTs, MemVTs;
4735 SmallVector<TypeSize, 4> Offsets;
4736 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4737 SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4738 unsigned NumValues = ValueVTs.size();
4739 if (NumValues == 0)
4740 return;
4741
4742 // Get the lowered operands. Note that we do this after
4743 // checking if NumValues is zero, because with zero values
4744 // the operands won't have values in the map.
4745 SDValue Src = getValue(SrcV);
4746 SDValue Ptr = getValue(PtrV);
4747
4748 SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4749 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4750 SDLoc dl = getCurSDLoc();
4751 Align Alignment = I.getAlign();
4752 AAMDNodes AAInfo = I.getAAMetadata();
4753
4754 auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4755
4756 unsigned ChainI = 0;
4757 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4758 // See visitLoad comments.
4759 if (ChainI == MaxParallelChains) {
4760 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4761 ArrayRef(Chains.data(), ChainI));
4762 Root = Chain;
4763 ChainI = 0;
4764 }
4765
4766 // TODO: MachinePointerInfo only supports a fixed length offset.
4767 MachinePointerInfo PtrInfo =
4768 !Offsets[i].isScalable() || Offsets[i].isZero()
4769 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4770 : MachinePointerInfo();
4771
4772 SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4773 SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4774 if (MemVTs[i] != ValueVTs[i])
4775 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4776 SDValue St =
4777 DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4778 Chains[ChainI] = St;
4779 }
4780
4781 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4782 ArrayRef(Chains.data(), ChainI));
4783 setValue(&I, StoreNode);
4784 DAG.setRoot(StoreNode);
4785}
4786
4787void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4788 bool IsCompressing) {
4789 SDLoc sdl = getCurSDLoc();
4790
4791 auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4792 Align &Alignment) {
4793 // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4794 Src0 = I.getArgOperand(0);
4795 Ptr = I.getArgOperand(1);
4796 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getAlignValue();
4797 Mask = I.getArgOperand(3);
4798 };
4799 auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4800 Align &Alignment) {
4801 // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4802 Src0 = I.getArgOperand(0);
4803 Ptr = I.getArgOperand(1);
4804 Mask = I.getArgOperand(2);
4805 Alignment = I.getParamAlign(1).valueOrOne();
4806 };
4807
4808 Value *PtrOperand, *MaskOperand, *Src0Operand;
4809 Align Alignment;
4810 if (IsCompressing)
4811 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4812 else
4813 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4814
4815 SDValue Ptr = getValue(PtrOperand);
4816 SDValue Src0 = getValue(Src0Operand);
4817 SDValue Mask = getValue(MaskOperand);
4818 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4819
4820 EVT VT = Src0.getValueType();
4821
4822 auto MMOFlags = MachineMemOperand::MOStore;
4823 if (I.hasMetadata(LLVMContext::MD_nontemporal))
4824 MMOFlags |= MachineMemOperand::MONonTemporal;
4825
4826 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4827 MachinePointerInfo(PtrOperand), MMOFlags,
4828 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
4829
4830 const auto &TLI = DAG.getTargetLoweringInfo();
4831 const auto &TTI =
4832 TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
4833 SDValue StoreNode =
4834 !IsCompressing &&
4835 TTI.hasConditionalLoadStoreForType(I.getArgOperand(0)->getType())
4836 ? TLI.visitMaskedStore(DAG, sdl, getMemoryRoot(), MMO, Ptr, Src0,
4837 Mask)
4838 : DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask,
4839 VT, MMO, ISD::UNINDEXED, /*Truncating=*/false,
4840 IsCompressing);
4841 DAG.setRoot(StoreNode);
4842 setValue(&I, StoreNode);
4843}
4844
4845// Get a uniform base for the Gather/Scatter intrinsic.
4846// The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4847// We try to represent it as a base pointer + vector of indices.
4848// Usually, the vector of pointers comes from a 'getelementptr' instruction.
4849// The first operand of the GEP may be a single pointer or a vector of pointers
4850// Example:
4851// %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4852// or
4853// %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
4854// %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4855//
4856 // When the first GEP operand is a single pointer, it is the uniform base we
4857 // are looking for. If the first operand of the GEP is a splat vector, we
4858 // extract the splat value and use it as the uniform base.
4859// In all other cases the function returns 'false'.
4860static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4861 ISD::MemIndexType &IndexType, SDValue &Scale,
4862 SelectionDAGBuilder *SDB, const BasicBlock *CurBB,
4863 uint64_t ElemSize) {
4864 SelectionDAG& DAG = SDB->DAG;
4865 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4866 const DataLayout &DL = DAG.getDataLayout();
4867
4868 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4869
4870 // Handle splat constant pointer.
4871 if (auto *C = dyn_cast<Constant>(Ptr)) {
4872 C = C->getSplatValue();
4873 if (!C)
4874 return false;
4875
4876 Base = SDB->getValue(C);
4877
4878 ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4879 EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4880 Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4881 IndexType = ISD::SIGNED_SCALED;
4882 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4883 return true;
4884 }
4885
4886 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4887 if (!GEP || GEP->getParent() != CurBB)
4888 return false;
4889
4890 if (GEP->getNumOperands() != 2)
4891 return false;
4892
4893 const Value *BasePtr = GEP->getPointerOperand();
4894 const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4895
4896 // Make sure the base is scalar and the index is a vector.
4897 if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4898 return false;
4899
4900 TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4901 if (ScaleVal.isScalable())
4902 return false;
4903
4904 // Target may not support the required addressing mode.
4905 if (ScaleVal != 1 &&
4906 !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
4907 return false;
4908
4909 Base = SDB->getValue(BasePtr);
4910 Index = SDB->getValue(IndexVal);
4911 IndexType = ISD::SIGNED_SCALED;
4912
4913 Scale =
4914 DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4915 return true;
4916}
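// For illustration (operand names arbitrary): given a scalar base and a
// vector index in the same block,
//
//   %gep = getelementptr i32, ptr %base, <4 x i32> %ind
//
// this returns Base = %base, Index = %ind, Scale = 4 (the alloc size of
// i32) and IndexType = ISD::SIGNED_SCALED, provided the target accepts
// that scale via isLegalScaleForGatherScatter().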
4917
4918void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4919 SDLoc sdl = getCurSDLoc();
4920
4921 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4922 const Value *Ptr = I.getArgOperand(1);
4923 SDValue Src0 = getValue(I.getArgOperand(0));
4924 SDValue Mask = getValue(I.getArgOperand(3));
4925 EVT VT = Src0.getValueType();
4926 Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4927 ->getMaybeAlignValue()
4928 .value_or(DAG.getEVTAlign(VT.getScalarType()));
4929 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4930
4931 SDValue Base;
4932 SDValue Index;
4933 ISD::MemIndexType IndexType;
4934 SDValue Scale;
4935 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4936 I.getParent(), VT.getScalarStoreSize());
4937
4938 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4939 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4940 MachinePointerInfo(AS), MachineMemOperand::MOStore,
4941 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
4942 if (!UniformBase) {
4943 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4944 Index = getValue(Ptr);
4945 IndexType = ISD::SIGNED_SCALED;
4946 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4947 }
4948
4949 EVT IdxVT = Index.getValueType();
4950 EVT EltTy = IdxVT.getVectorElementType();
4951 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4952 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4953 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4954 }
4955
4956 SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4957 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4958 Ops, MMO, IndexType, false);
4959 DAG.setRoot(Scatter);
4960 setValue(&I, Scatter);
4961}
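// For illustration (types arbitrary):
//
//   call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %val,
//       <4 x ptr> %ptrs, i32 4, <4 x i1> %mask)
//
// becomes a single scatter node; when getUniformBase() succeeds, the
// vector-of-pointers operand is folded into base + index * scale form,
// otherwise the pointer vector itself is used as the index with scale 1.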
4962
4963void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4964 SDLoc sdl = getCurSDLoc();
4965
4966 auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4967 Align &Alignment) {
4968 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4969 Ptr = I.getArgOperand(0);
4970 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getAlignValue();
4971 Mask = I.getArgOperand(2);
4972 Src0 = I.getArgOperand(3);
4973 };
4974 auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4975 Align &Alignment) {
4976 // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4977 Ptr = I.getArgOperand(0);
4978 Alignment = I.getParamAlign(0).valueOrOne();
4979 Mask = I.getArgOperand(1);
4980 Src0 = I.getArgOperand(2);
4981 };
4982
4983 Value *PtrOperand, *MaskOperand, *Src0Operand;
4984 Align Alignment;
4985 if (IsExpanding)
4986 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4987 else
4988 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4989
4990 SDValue Ptr = getValue(PtrOperand);
4991 SDValue Src0 = getValue(Src0Operand);
4992 SDValue Mask = getValue(MaskOperand);
4993 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4994
4995 EVT VT = Src0.getValueType();
4996 AAMDNodes AAInfo = I.getAAMetadata();
4997 const MDNode *Ranges = getRangeMetadata(I);
4998
4999 // Do not serialize masked loads of constant memory with anything.
5000 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
5001 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
5002
5003 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
5004
5005 auto MMOFlags = MachineMemOperand::MOLoad;
5006 if (I.hasMetadata(LLVMContext::MD_nontemporal))
5007 MMOFlags |= MachineMemOperand::MONonTemporal;
5008
5009 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5010 MachinePointerInfo(PtrOperand), MMOFlags,
5011 LocationSize::beforeOrAfterPointer(), Alignment, AAInfo, Ranges);
5012
5013 const auto &TLI = DAG.getTargetLoweringInfo();
5014 const auto &TTI =
5015 TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
5016 // The Load/Res may point to different values and both of them are output
5017 // variables.
5018 SDValue Load;
5019 SDValue Res;
5020 if (!IsExpanding &&
5021 TTI.hasConditionalLoadStoreForType(Src0Operand->getType()))
5022 Res = TLI.visitMaskedLoad(DAG, sdl, InChain, MMO, Load, Ptr, Src0, Mask);
5023 else
5024 Res = Load =
5025 DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
5026 ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
5027 if (AddToChain)
5028 PendingLoads.push_back(Load.getValue(1));
5029 setValue(&I, Res);
5030}
5031
5032void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
5033 SDLoc sdl = getCurSDLoc();
5034
5035 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
5036 const Value *Ptr = I.getArgOperand(0);
5037 SDValue Src0 = getValue(I.getArgOperand(3));
5038 SDValue Mask = getValue(I.getArgOperand(2));
5039
5040 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5041 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5042 Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
5043 ->getMaybeAlignValue()
5044 .value_or(DAG.getEVTAlign(VT.getScalarType()));
5045
5046 const MDNode *Ranges = getRangeMetadata(I);
5047
5048 SDValue Root = DAG.getRoot();
5049 SDValue Base;
5050 SDValue Index;
5051 ISD::MemIndexType IndexType;
5052 SDValue Scale;
5053 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
5054 I.getParent(), VT.getScalarStoreSize());
5055 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
5056 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5057 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
5058 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata(),
5059 Ranges);
5060
5061 if (!UniformBase) {
5062 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5063 Index = getValue(Ptr);
5064 IndexType = ISD::SIGNED_SCALED;
5065 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5066 }
5067
5068 EVT IdxVT = Index.getValueType();
5069 EVT EltTy = IdxVT.getVectorElementType();
5070 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
5071 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
5072 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
5073 }
5074
5075 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
5076 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
5077 Ops, MMO, IndexType, ISD::NON_EXTLOAD);
5078
5079 PendingLoads.push_back(Gather.getValue(1));
5080 setValue(&I, Gather);
5081}
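// Note that, unlike the scatter above, a gather produces a value as well
// as a chain, and the chain goes onto PendingLoads rather than becoming
// the new root, so independent gathers are not serialized against each
// other.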
5082
5083void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
5084 SDLoc dl = getCurSDLoc();
5085 AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
5086 AtomicOrdering FailureOrdering = I.getFailureOrdering();
5087 SyncScope::ID SSID = I.getSyncScopeID();
5088
5089 SDValue InChain = getRoot();
5090
5091 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
5092 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5093
5093
5094 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5095 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
5096
5097 MachineFunction &MF = DAG.getMachineFunction();
5098 MachineMemOperand *MMO = MF.getMachineMemOperand(
5099 MachinePointerInfo(I.getPointerOperand()), Flags,
5100 LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT),
5101 AAMDNodes(), nullptr, SSID, SuccessOrdering, FailureOrdering);
5102
5103 SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
5104 dl, MemVT, VTs, InChain,
5105 getValue(I.getPointerOperand()),
5106 getValue(I.getCompareOperand()),
5107 getValue(I.getNewValOperand()), MMO);
5108
5109 SDValue OutChain = L.getValue(2);
5110
5111 setValue(&I, L);
5112 DAG.setRoot(OutChain);
5113}
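// The node built above carries three results: result 0 is the loaded
// value, result 1 is the i1 success flag, and result 2 is the output
// chain. For illustration (operands arbitrary), both extractvalue uses of
//
//   %r = cmpxchg ptr %p, i32 %cmp, i32 %new seq_cst seq_cst
//
// map onto results 0 and 1 of this single DAG node.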
5114
5115void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
5116 SDLoc dl = getCurSDLoc();
5117 ISD::NodeType NT;
5118 switch (I.getOperation()) {
5119 default: llvm_unreachable("Unknown atomicrmw operation");
5120 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
5121 case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
5122 case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
5123 case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
5124 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
5125 case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
5126 case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
5127 case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
5128 case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
5129 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
5130 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
5131 case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
5132 case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
5133 case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break;
5134 case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break;
5135 case AtomicRMWInst::UIncWrap:
5136 NT = ISD::ATOMIC_LOAD_UINC_WRAP;
5137 break;
5138 case AtomicRMWInst::UDecWrap:
5139 NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
5140 break;
5141 case AtomicRMWInst::USubCond:
5142 NT = ISD::ATOMIC_LOAD_USUB_COND;
5143 break;
5144 case AtomicRMWInst::USubSat:
5145 NT = ISD::ATOMIC_LOAD_USUB_SAT;
5146 break;
5147 }
5148 AtomicOrdering Ordering = I.getOrdering();
5149 SyncScope::ID SSID = I.getSyncScopeID();
5150
5151 SDValue InChain = getRoot();
5152
5153 auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
5154 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5155 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
5156
5157 MachineFunction &MF = DAG.getMachineFunction();
5158 MachineMemOperand *MMO = MF.getMachineMemOperand(
5159 MachinePointerInfo(I.getPointerOperand()), Flags,
5160 LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT),
5161 AAMDNodes(), nullptr, SSID, Ordering);
5162
5163 SDValue L =
5164 DAG.getAtomic(NT, dl, MemVT, InChain,
5165 getValue(I.getPointerOperand()), getValue(I.getValOperand()),
5166 MMO);
5167
5168 SDValue OutChain = L.getValue(1);
5169
5170 setValue(&I, L);
5171 DAG.setRoot(OutChain);
5172}
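// For illustration (operands arbitrary):
//
//   %old = atomicrmw add ptr %p, i32 1 seq_cst
//
// becomes an ATOMIC_LOAD_ADD node whose result 0 is the previous memory
// contents (%old) and whose result 1 is the output chain.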
5173
5174void SelectionDAGBuilder::visitFence(const FenceInst &I) {
5175 SDLoc dl = getCurSDLoc();
5176 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5177 SDValue Ops[3];
5178 Ops[0] = getRoot();
5179 Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
5180 TLI.getFenceOperandTy(DAG.getDataLayout()));
5181 Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
5182 TLI.getFenceOperandTy(DAG.getDataLayout()));
5183 SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
5184 setValue(&I, N);
5185 DAG.setRoot(N);
5186}
5187
5188void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
5189 SDLoc dl = getCurSDLoc();
5190 AtomicOrdering Order = I.getOrdering();
5191 SyncScope::ID SSID = I.getSyncScopeID();
5192
5193 SDValue InChain = getRoot();
5194
5195 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5196 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5197 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
5198
5199 if (!TLI.supportsUnalignedAtomics() &&
5200 I.getAlign().value() < MemVT.getSizeInBits() / 8)
5201 report_fatal_error("Cannot generate unaligned atomic load");
5202
5203 auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
5204
5205 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5206 MachinePointerInfo(I.getPointerOperand()), Flags,
5207 LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(),
5208 nullptr, SSID, Order);
5209
5210 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
5211
5212 SDValue Ptr = getValue(I.getPointerOperand());
5213 SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
5214 Ptr, MMO);
5215
5216 SDValue OutChain = L.getValue(1);
5217 if (MemVT != VT)
5218 L = DAG.getPtrExtOrTrunc(L, dl, VT);
5219
5220 setValue(&I, L);
5221 DAG.setRoot(OutChain);
5222}
5223
5224void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
5225 SDLoc dl = getCurSDLoc();
5226
5227 AtomicOrdering Ordering = I.getOrdering();
5228 SyncScope::ID SSID = I.getSyncScopeID();
5229
5230 SDValue InChain = getRoot();
5231
5232 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5233 EVT MemVT =
5234 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
5235
5236 if (!TLI.supportsUnalignedAtomics() &&
5237 I.getAlign().value() < MemVT.getSizeInBits() / 8)
5238 report_fatal_error("Cannot generate unaligned atomic store");
5239
5240 auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
5241
5242 MachineFunction &MF = DAG.getMachineFunction();
5243 MachineMemOperand *MMO = MF.getMachineMemOperand(
5244 MachinePointerInfo(I.getPointerOperand()), Flags,
5245 LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(),
5246 nullptr, SSID, Ordering);
5247
5248 SDValue Val = getValue(I.getValueOperand());
5249 if (Val.getValueType() != MemVT)
5250 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5251 SDValue Ptr = getValue(I.getPointerOperand());
5252
5253 SDValue OutChain =
5254 DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
5255
5256 setValue(&I, OutChain);
5257 DAG.setRoot(OutChain);
5258}
5259
5260/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
5261/// node.
5262void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
5263 unsigned Intrinsic) {
5264 // Ignore the callsite's attributes. A specific call site may be marked with
5265 // readnone, but the lowering code will expect the chain based on the
5266 // definition.
5267 const Function *F = I.getCalledFunction();
5268 bool HasChain = !F->doesNotAccessMemory();
5269 bool OnlyLoad =
5270 HasChain && F->onlyReadsMemory() && F->willReturn() && F->doesNotThrow();
5271
5272 // Build the operand list.
5273 SmallVector<SDValue, 8> Ops;
5274 if (HasChain) { // If this intrinsic has side-effects, chainify it.
5275 if (OnlyLoad) {
5276 // We don't need to serialize loads against other loads.
5277 Ops.push_back(DAG.getRoot());
5278 } else {
5279 Ops.push_back(getRoot());
5280 }
5281 }
5282
5283 // Info is set by getTgtMemIntrinsic
5284 TargetLowering::IntrinsicInfo Info;
5285 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5286 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
5287 DAG.getMachineFunction(),
5288 Intrinsic);
5289
5290 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
5291 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
5292 Info.opc == ISD::INTRINSIC_W_CHAIN)
5293 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
5294 TLI.getPointerTy(DAG.getDataLayout())));
5295
5296 // Add all operands of the call to the operand list.
5297 for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
5298 const Value *Arg = I.getArgOperand(i);
5299 if (!I.paramHasAttr(i, Attribute::ImmArg)) {
5300 Ops.push_back(getValue(Arg));
5301 continue;
5302 }
5303
5304 // Use TargetConstant instead of a regular constant for immarg.
5305 EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
5306 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5307 assert(CI->getBitWidth() <= 64 &&
5308 "large intrinsic immediates not handled");
5309 Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
5310 } else {
5311 Ops.push_back(
5312 DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
5313 }
5314 }
5315
5316 SmallVector<EVT, 4> ValueVTs;
5317 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5318
5319 if (HasChain)
5320 ValueVTs.push_back(MVT::Other);
5321
5322 SDVTList VTs = DAG.getVTList(ValueVTs);
5323
5324 // Propagate fast-math-flags from IR to node(s).
5325 SDNodeFlags Flags;
5326 if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
5327 Flags.copyFMF(*FPMO);
5328 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
5329
5330 // Create the node.
5331 SDValue Result;
5332
5333 if (auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl)) {
5334 auto *Token = Bundle->Inputs[0].get();
5335 SDValue ConvControlToken = getValue(Token);
5336 assert(Ops.back().getValueType() != MVT::Glue &&
5337 "Did not expected another glue node here.");
5338 ConvControlToken =
5339 DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
5340 Ops.push_back(ConvControlToken);
5341 }
5342
5343 // In some cases, custom collection of operands from CallInst I may be needed.
5344 TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
5345 if (IsTgtIntrinsic) {
5346 // This is target intrinsic that touches memory
5347 //
5348 // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
5349 // didn't yield anything useful.
5350 MachinePointerInfo MPI;
5351 if (Info.ptrVal)
5352 MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
5353 else if (Info.fallbackAddressSpace)
5354 MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
5355 Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops,
5356 Info.memVT, MPI, Info.align, Info.flags,
5357 Info.size, I.getAAMetadata());
5358 } else if (!HasChain) {
5359 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
5360 } else if (!I.getType()->isVoidTy()) {
5361 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
5362 } else {
5363 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
5364 }
5365
5366 if (HasChain) {
5367 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
5368 if (OnlyLoad)
5369 PendingLoads.push_back(Chain);
5370 else
5371 DAG.setRoot(Chain);
5372 }
5373
5374 if (!I.getType()->isVoidTy()) {
5375 if (!isa<VectorType>(I.getType()))
5376 Result = lowerRangeToAssertZExt(DAG, I, Result);
5377
5378 MaybeAlign Alignment = I.getRetAlign();
5379
5380 // Insert `assertalign` node if there's an alignment.
5381 if (InsertAssertAlign && Alignment) {
5382 Result =
5383 DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
5384 }
5385 }
5386
5387 setValue(&I, Result);
5388}
5389
5390/// GetSignificand - Get the significand and build it into a floating-point
5391/// number with exponent of 1:
5392///
5393/// Op = (Op & 0x007fffff) | 0x3f800000;
5394///
5395 /// where Op is the bit pattern of the floating-point value, as an i32.
5396 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
5397 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5398 DAG.getConstant(0x007fffff, dl, MVT::i32));
5399 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
5400 DAG.getConstant(0x3f800000, dl, MVT::i32));
5401 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
5402}
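// Worked example (values illustrative), assuming IEEE-754 single
// precision: for Op = 3.14159f (bits 0x40490fdb), the AND keeps the
// mantissa 0x490fdb and the OR forces the exponent field to 127 (2^0),
// giving bits 0x3fc90fdb, i.e. ~1.5708f -- the significand of pi.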
5403
5404/// GetExponent - Get the exponent:
5405///
5406/// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
5407///
5408 /// where Op is the bit pattern of the floating-point value, as an i32.
5409 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
5410 const TargetLowering &TLI, const SDLoc &dl) {
5411 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5412 DAG.getConstant(0x7f800000, dl, MVT::i32));
5413 SDValue t1 = DAG.getNode(
5414 ISD::SRL, dl, MVT::i32, t0,
5415 DAG.getConstant(23, dl,
5416 TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
5417 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
5418 DAG.getConstant(127, dl, MVT::i32));
5419 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
5420}
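// Worked example (values illustrative), assuming IEEE-754 single
// precision: for Op = 3.14159f (bits 0x40490fdb), the biased exponent
// field is 128, so the result is (float)(128 - 127) = 1.0f, matching
// pi = 1.5708 * 2^1.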
5421
5422 /// getF32Constant - Get a 32-bit floating-point constant.
5423 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5424 const SDLoc &dl) {
5425 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5426 MVT::f32);
5427}
5428
5429 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
5430 SelectionDAG &DAG) {
5431 // TODO: What fast-math-flags should be set on the floating-point nodes?
5432
5433 // IntegerPartOfX = (int32_t)t0;
5434 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
5435
5436 // FractionalPartOfX = t0 - (float)IntegerPartOfX;
5437 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
5438 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
5439
5440 // IntegerPartOfX <<= 23;
5441 IntegerPartOfX =
5442 DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
5443 DAG.getConstant(23, dl,
5444 DAG.getTargetLoweringInfo().getShiftAmountTy(
5445 MVT::i32, DAG.getDataLayout())));
5446
5447 SDValue TwoToFractionalPartOfX;
5448 if (LimitFloatPrecision <= 6) {
5449 // For floating-point precision of 6:
5450 //
5451 // TwoToFractionalPartOfX =
5452 // 0.997535578f +
5453 // (0.735607626f + 0.252464424f * x) * x;
5454 //
5455 // error 0.0144103317, which is 6 bits
5456 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5457 getF32Constant(DAG, 0x3e814304, dl));
5458 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5459 getF32Constant(DAG, 0x3f3c50c8, dl));
5460 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5461 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5462 getF32Constant(DAG, 0x3f7f5e7e, dl));
5463 } else if (LimitFloatPrecision <= 12) {
5464 // For floating-point precision of 12:
5465 //
5466 // TwoToFractionalPartOfX =
5467 // 0.999892986f +
5468 // (0.696457318f +
5469 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
5470 //
5471 // error 0.000107046256, which is 13 to 14 bits
5472 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5473 getF32Constant(DAG, 0x3da235e3, dl));
5474 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5475 getF32Constant(DAG, 0x3e65b8f3, dl));
5476 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5477 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5478 getF32Constant(DAG, 0x3f324b07, dl));
5479 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5480 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5481 getF32Constant(DAG, 0x3f7ff8fd, dl));
5482 } else { // LimitFloatPrecision <= 18
5483 // For floating-point precision of 18:
5484 //
5485 // TwoToFractionalPartOfX =
5486 // 0.999999982f +
5487 // (0.693148872f +
5488 // (0.240227044f +
5489 // (0.554906021e-1f +
5490 // (0.961591928e-2f +
5491 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5492 // error 2.47208000*10^(-7), which is better than 18 bits
5493 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5494 getF32Constant(DAG, 0x3924b03e, dl));
5495 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5496 getF32Constant(DAG, 0x3ab24b87, dl));
5497 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5498 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5499 getF32Constant(DAG, 0x3c1d8c17, dl));
5500 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5501 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5502 getF32Constant(DAG, 0x3d634a1d, dl));
5503 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5504 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5505 getF32Constant(DAG, 0x3e75fe14, dl));
5506 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5507 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5508 getF32Constant(DAG, 0x3f317234, dl));
5509 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5510 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5511 getF32Constant(DAG, 0x3f800000, dl));
5512 }
5513
5514 // Add the exponent into the result in integer domain.
5515 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5516 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5517 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5518}
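// The integer-domain ADD above relies on the IEEE-754 layout: adding
// N << 23 to a normal float's bit pattern raises its exponent field by N,
// i.e. multiplies the value by 2^N. For illustration, with t0 = 3.5 the
// polynomial approximates 2^0.5 = ~1.4142f, and adding 3 << 23 to those
// bits scales the result to ~11.3137f = 2^3.5.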
5519
5520/// expandExp - Lower an exp intrinsic. Handles the special sequences for
5521/// limited-precision mode.
5522 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5523 const TargetLowering &TLI, SDNodeFlags Flags) {
5524 if (Op.getValueType() == MVT::f32 &&
5525 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5526
5527 // Put the exponent in the right bit position for later addition to the
5528 // final result:
5529 //
5530 // t0 = Op * log2(e)
5531
5532 // TODO: What fast-math-flags should be set here?
5533 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5534 DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5535 return getLimitedPrecisionExp2(t0, dl, DAG);
5536 }
5537
5538 // No special expansion.
5539 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5540}
5541
5542/// expandLog - Lower a log intrinsic. Handles the special sequences for
5543/// limited-precision mode.
5544 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5545 const TargetLowering &TLI, SDNodeFlags Flags) {
5546 // TODO: What fast-math-flags should be set on the floating-point nodes?
5547
5548 if (Op.getValueType() == MVT::f32 &&
5549 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5550 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5551
5552 // Scale the exponent by log(2).
5553 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5554 SDValue LogOfExponent =
5555 DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5556 DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5557
5558 // Get the significand and build it into a floating-point number with
5559 // exponent of 1.
5560 SDValue X = GetSignificand(DAG, Op1, dl);
5561
5562 SDValue LogOfMantissa;
5563 if (LimitFloatPrecision <= 6) {
5564 // For floating-point precision of 6:
5565 //
5566 // LogofMantissa =
5567 // -1.1609546f +
5568 // (1.4034025f - 0.23903021f * x) * x;
5569 //
5570 // error 0.0034276066, which is better than 8 bits
5571 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5572 getF32Constant(DAG, 0xbe74c456, dl));
5573 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5574 getF32Constant(DAG, 0x3fb3a2b1, dl));
5575 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5576 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5577 getF32Constant(DAG, 0x3f949a29, dl));
5578 } else if (LimitFloatPrecision <= 12) {
5579 // For floating-point precision of 12:
5580 //
5581 // LogOfMantissa =
5582 // -1.7417939f +
5583 // (2.8212026f +
5584 // (-1.4699568f +
5585 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5586 //
5587 // error 0.000061011436, which is 14 bits
5588 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5589 getF32Constant(DAG, 0xbd67b6d6, dl));
5590 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5591 getF32Constant(DAG, 0x3ee4f4b8, dl));
5592 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5593 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5594 getF32Constant(DAG, 0x3fbc278b, dl));
5595 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5596 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5597 getF32Constant(DAG, 0x40348e95, dl));
5598 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5599 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5600 getF32Constant(DAG, 0x3fdef31a, dl));
5601 } else { // LimitFloatPrecision <= 18
5602 // For floating-point precision of 18:
5603 //
5604 // LogOfMantissa =
5605 // -2.1072184f +
5606 // (4.2372794f +
5607 // (-3.7029485f +
5608 // (2.2781945f +
5609 // (-0.87823314f +
5610 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5611 //
5612 // error 0.0000023660568, which is better than 18 bits
5613 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5614 getF32Constant(DAG, 0xbc91e5ac, dl));
5615 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5616 getF32Constant(DAG, 0x3e4350aa, dl));
5617 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5618 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5619 getF32Constant(DAG, 0x3f60d3e3, dl));
5620 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5621 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5622 getF32Constant(DAG, 0x4011cdf0, dl));
5623 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5624 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5625 getF32Constant(DAG, 0x406cfd1c, dl));
5626 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5627 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5628 getF32Constant(DAG, 0x408797cb, dl));
5629 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5630 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5631 getF32Constant(DAG, 0x4006dcab, dl));
5632 }
5633
5634 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5635 }
5636
5637 // No special expansion.
5638 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5639}
5640
5641/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5642/// limited-precision mode.
5643 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5644 const TargetLowering &TLI, SDNodeFlags Flags) {
5645 // TODO: What fast-math-flags should be set on the floating-point nodes?
5646
5647 if (Op.getValueType() == MVT::f32 &&
5648 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5649 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5650
5651 // Get the exponent.
5652 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5653
5654 // Get the significand and build it into a floating-point number with
5655 // exponent of 1.
5656 SDValue X = GetSignificand(DAG, Op1, dl);
5657
5658 // Different possible minimax approximations of significand in
5659 // floating-point for various degrees of accuracy over [1,2].
5660 SDValue Log2ofMantissa;
5661 if (LimitFloatPrecision <= 6) {
5662 // For floating-point precision of 6:
5663 //
5664 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5665 //
5666 // error 0.0049451742, which is more than 7 bits
5667 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5668 getF32Constant(DAG, 0xbeb08fe0, dl));
5669 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5670 getF32Constant(DAG, 0x40019463, dl));
5671 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5672 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5673 getF32Constant(DAG, 0x3fd6633d, dl));
5674 } else if (LimitFloatPrecision <= 12) {
5675 // For floating-point precision of 12:
5676 //
5677 // Log2ofMantissa =
5678 // -2.51285454f +
5679 // (4.07009056f +
5680 // (-2.12067489f +
5681 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5682 //
5683 // error 0.0000876136000, which is better than 13 bits
5684 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5685 getF32Constant(DAG, 0xbda7262e, dl));
5686 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5687 getF32Constant(DAG, 0x3f25280b, dl));
5688 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5689 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5690 getF32Constant(DAG, 0x4007b923, dl));
5691 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5692 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5693 getF32Constant(DAG, 0x40823e2f, dl));
5694 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5695 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5696 getF32Constant(DAG, 0x4020d29c, dl));
5697 } else { // LimitFloatPrecision <= 18
5698 // For floating-point precision of 18:
5699 //
5700 // Log2ofMantissa =
5701 // -3.0400495f +
5702 // (6.1129976f +
5703 // (-5.3420409f +
5704 // (3.2865683f +
5705 // (-1.2669343f +
5706 // (0.27515199f -
5707 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5708 //
5709 // error 0.0000018516, which is better than 18 bits
5710 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5711 getF32Constant(DAG, 0xbcd2769e, dl));
5712 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5713 getF32Constant(DAG, 0x3e8ce0b9, dl));
5714 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5715 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5716 getF32Constant(DAG, 0x3fa22ae7, dl));
5717 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5718 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5719 getF32Constant(DAG, 0x40525723, dl));
5720 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5721 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5722 getF32Constant(DAG, 0x40aaf200, dl));
5723 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5724 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5725 getF32Constant(DAG, 0x40c39dad, dl));
5726 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5727 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5728 getF32Constant(DAG, 0x4042902c, dl));
5729 }
5730
5731 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5732 }
5733
5734 // No special expansion.
5735 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5736}
5737
5738/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5739/// limited-precision mode.
5740 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5741 const TargetLowering &TLI, SDNodeFlags Flags) {
5742 // TODO: What fast-math-flags should be set on the floating-point nodes?
5743
5744 if (Op.getValueType() == MVT::f32 &&
5745 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5746 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5747
5748 // Scale the exponent by log10(2) [0.30102999f].
5749 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5750 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5751 getF32Constant(DAG, 0x3e9a209a, dl));
5752
5753 // Get the significand and build it into a floating-point number with
5754 // exponent of 1.
5755 SDValue X = GetSignificand(DAG, Op1, dl);
5756
5757 SDValue Log10ofMantissa;
5758 if (LimitFloatPrecision <= 6) {
5759 // For floating-point precision of 6:
5760 //
5761 // Log10ofMantissa =
5762 // -0.50419619f +
5763 // (0.60948995f - 0.10380950f * x) * x;
5764 //
5765 // error 0.0014886165, which is 6 bits
5766 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5767 getF32Constant(DAG, 0xbdd49a13, dl));
5768 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5769 getF32Constant(DAG, 0x3f1c0789, dl));
5770 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5771 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5772 getF32Constant(DAG, 0x3f011300, dl));
5773 } else if (LimitFloatPrecision <= 12) {
5774 // For floating-point precision of 12:
5775 //
5776 // Log10ofMantissa =
5777 // -0.64831180f +
5778 // (0.91751397f +
5779 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5780 //
5781 // error 0.00019228036, which is better than 12 bits
5782 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5783 getF32Constant(DAG, 0x3d431f31, dl));
5784 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5785 getF32Constant(DAG, 0x3ea21fb2, dl));
5786 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5787 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5788 getF32Constant(DAG, 0x3f6ae232, dl));
5789 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5790 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5791 getF32Constant(DAG, 0x3f25f7c3, dl));
5792 } else { // LimitFloatPrecision <= 18
5793 // For floating-point precision of 18:
5794 //
5795 // Log10ofMantissa =
5796 // -0.84299375f +
5797 // (1.5327582f +
5798 // (-1.0688956f +
5799 // (0.49102474f +
5800 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5801 //
5802 // error 0.0000037995730, which is better than 18 bits
5803 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5804 getF32Constant(DAG, 0x3c5d51ce, dl));
5805 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5806 getF32Constant(DAG, 0x3e00685a, dl));
5807 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5808 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5809 getF32Constant(DAG, 0x3efb6798, dl));
5810 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5811 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5812 getF32Constant(DAG, 0x3f88d192, dl));
5813 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5814 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5815 getF32Constant(DAG, 0x3fc4316c, dl));
5816 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5817 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5818 getF32Constant(DAG, 0x3f57ce70, dl));
5819 }
5820
5821 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5822 }
5823
5824 // No special expansion.
5825 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5826}
5827
5828/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5829/// limited-precision mode.
5830 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5831 const TargetLowering &TLI, SDNodeFlags Flags) {
5832 if (Op.getValueType() == MVT::f32 &&
5833 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5834 return getLimitedPrecisionExp2(Op, dl, DAG);
5835
5836 // No special expansion.
5837 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5838}
5839
5840 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
5841 /// limited-precision mode with x == 10.0f.
5842static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5843 SelectionDAG &DAG, const TargetLowering &TLI,
5844 SDNodeFlags Flags) {
5845 bool IsExp10 = false;
5846 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5847 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5848 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5849 APFloat Ten(10.0f);
5850 IsExp10 = LHSC->isExactlyValue(Ten);
5851 }
5852 }
5853
5854 // TODO: What fast-math-flags should be set on the FMUL node?
5855 if (IsExp10) {
5856 // Put the exponent in the right bit position for later addition to the
5857 // final result:
5858 //
5859 // #define LOG2OF10 3.3219281f
5860 // t0 = Op * LOG2OF10;
5861 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5862 getF32Constant(DAG, 0x40549a78, dl));
5863 return getLimitedPrecisionExp2(t0, dl, DAG);
5864 }
5865
5866 // No special expansion.
5867 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5868}
5869
5870/// ExpandPowI - Expand a llvm.powi intrinsic.
5871static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5872 SelectionDAG &DAG) {
5873 // If RHS is a constant, we can expand this out to a multiplication tree if
5874 // it's beneficial on the target, otherwise we end up lowering to a call to
5875 // __powidf2 (for example).
5876 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5877 unsigned Val = RHSC->getSExtValue();
5878
5879 // powi(x, 0) -> 1.0
5880 if (Val == 0)
5881 return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5882
5883 if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI(
5884 Val, DAG.shouldOptForSize())) {
5885 // Get the exponent as a positive value.
5886 if ((int)Val < 0)
5887 Val = -Val;
5888 // We use the simple binary decomposition method to generate the multiply
5889 // sequence. There are more optimal ways to do this (for example,
5890 // powi(x,15) generates one more multiply than it should), but this has
5891 // the benefit of being both really simple and much better than a libcall.
5892 SDValue Res; // Logically starts equal to 1.0
5893 SDValue CurSquare = LHS;
5894 // TODO: Intrinsics should have fast-math-flags that propagate to these
5895 // nodes.
5896 while (Val) {
5897 if (Val & 1) {
5898 if (Res.getNode())
5899 Res =
5900 DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5901 else
5902 Res = CurSquare; // 1.0*CurSquare.
5903 }
5904
5905 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5906 CurSquare, CurSquare);
5907 Val >>= 1;
5908 }
5909
5910 // If the original was negative, invert the result, producing 1/(x*x*x).
5911 if (RHSC->getSExtValue() < 0)
5912 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5913 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5914 return Res;
5915 }
5916 }
5917
5918 // Otherwise, expand to a libcall.
5919 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5920}
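// Worked example of the decomposition (exponent chosen for illustration):
// for powi(x, 13), Val = 0b1101 and the loop visits bits LSB-first:
//
//   bit 0 set:   Res = x;            CurSquare = x^2
//   bit 1 clear:                     CurSquare = x^4
//   bit 2 set:   Res = x * x^4;      CurSquare = x^8
//   bit 3 set:   Res = x^5 * x^8 = x^13
//
// for six FMULs in total; the final squaring to x^16 is the kind of
// wasted multiply the comment above alludes to.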
5921
5922static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5923 SDValue LHS, SDValue RHS, SDValue Scale,
5924 SelectionDAG &DAG, const TargetLowering &TLI) {
5925 EVT VT = LHS.getValueType();
5926 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5927 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5928 LLVMContext &Ctx = *DAG.getContext();
5929
5930 // If the type is legal but the operation isn't, this node might survive all
5931 // the way to operation legalization. If we end up there and we do not have
5932 // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5933 // node.
5934
5935 // Coax the legalizer into expanding the node during type legalization instead
5936 // by bumping the size by one bit. This will force it to Promote, enabling the
5937 // early expansion and avoiding the need to expand later.
5938
5939 // We don't have to do this if Scale is 0; that can always be expanded, unless
5940 // it's a saturating signed operation. Those can experience true integer
5941 // division overflow, a case which we must avoid.
5942
5943 // FIXME: We wouldn't have to do this (or any of the early
5944 // expansion/promotion) if it was possible to expand a libcall of an
5945 // illegal type during operation legalization. But it's not, so things
5946 // get a bit hacky.
5947 unsigned ScaleInt = Scale->getAsZExtVal();
5948 if ((ScaleInt > 0 || (Saturating && Signed)) &&
5949 (TLI.isTypeLegal(VT) ||
5950 (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5951 TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5952 Opcode, VT, ScaleInt);
5953 if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5954 EVT PromVT;
5955 if (VT.isScalarInteger())
5956 PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5957 else if (VT.isVector()) {
5958 PromVT = VT.getVectorElementType();
5959 PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5960 PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5961 } else
5962 llvm_unreachable("Wrong VT for DIVFIX?");
5963 LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
5964 RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
5965 EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5966 // For saturating operations, we need to shift up the LHS to get the
5967 // proper saturation width, and then shift down again afterwards.
5968 if (Saturating)
5969 LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5970 DAG.getConstant(1, DL, ShiftTy));
5971 SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5972 if (Saturating)
5973 Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5974 DAG.getConstant(1, DL, ShiftTy));
5975 return DAG.getZExtOrTrunc(Res, DL, VT);
5976 }
5977 }
5978
5979 return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5980}
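// For illustration (types arbitrary): an sdiv.fix.sat on i32 with a
// nonzero scale, on a target without native DIVFIX support, is promoted
// here to i33, shifted left one bit so saturation happens at the original
// width, divided, shifted back down, and finally truncated to i32.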
5981
5982// getUnderlyingArgRegs - Find underlying registers used for a truncated,
5983 // bitcasted, or split argument. Returns a list of <Register, size-in-bits> pairs.
5984static void
5985getUnderlyingArgRegs(SmallVectorImpl<std::pair<Register, TypeSize>> &Regs,
5986 const SDValue &N) {
5987 switch (N.getOpcode()) {
5988 case ISD::CopyFromReg: {
5989 SDValue Op = N.getOperand(1);
5990 Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5991 Op.getValueType().getSizeInBits());
5992 return;
5993 }
5994 case ISD::BITCAST:
5995 case ISD::AssertZext:
5996 case ISD::AssertSext:
5997 case ISD::TRUNCATE:
5998 getUnderlyingArgRegs(Regs, N.getOperand(0));
5999 return;
6000 case ISD::BUILD_PAIR:
6001 case ISD::BUILD_VECTOR:
6002 case ISD::CONCAT_VECTORS:
6003 for (SDValue Op : N->op_values())
6004 getUnderlyingArgRegs(Regs, Op);
6005 return;
6006 default:
6007 return;
6008 }
6009}
6010
6011/// If the DbgValueInst is a dbg_value of a function argument, create the
6012/// corresponding DBG_VALUE machine instruction for it now. At the end of
6013 /// instruction selection, they will be inserted into the entry BB.
6014/// We don't currently support this for variadic dbg_values, as they shouldn't
6015/// appear for function arguments or in the prologue.
6016bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6017 const Value *V, DILocalVariable *Variable, DIExpression *Expr,
6018 DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
6019 const Argument *Arg = dyn_cast<Argument>(V);
6020 if (!Arg)
6021 return false;
6022
6023 MachineFunction &MF = DAG.getMachineFunction();
6024 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6025
6026 // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
6027 // we've been asked to pursue.
6028 auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
6029 bool Indirect) {
6030 if (Reg.isVirtual() && MF.useDebugInstrRef()) {
6031 // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
6032 // pointing at the VReg, which will be patched up later.
6033 auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
6034 SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
6035 /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
6036 /* isKill */ false, /* isDead */ false,
6037 /* isUndef */ false, /* isEarlyClobber */ false,
6038 /* SubReg */ 0, /* isDebug */ true)});
6039
6040 auto *NewDIExpr = FragExpr;
6041 // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
6042 // the DIExpression.
6043 if (Indirect)
6044 NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
6045 SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
6046 NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
6047 return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
6048 } else {
6049 // Create a completely standard DBG_VALUE.
6050 auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
6051 return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
6052 }
6053 };
6054
6055 if (Kind == FuncArgumentDbgValueKind::Value) {
6056 // ArgDbgValues are hoisted to the beginning of the entry block. So we
6057 // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
6058 // the entry block.
6059 bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
6060 if (!IsInEntryBlock)
6061 return false;
6062
6063 // ArgDbgValues are hoisted to the beginning of the entry block. So we
6064 // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
6065 // variable that also is a param.
6066 //
6067 // Although, if we are at the top of the entry block already, we can still
6068 // emit using ArgDbgValue. This might catch some situations when the
6069 // dbg.value refers to an argument that isn't used in the entry block, so
6070 // any CopyToReg node would be optimized out and the only way to express
6071 // this DBG_VALUE is by using the physical reg (or FI) as done in this
6072 // method. ArgDbgValues are hoisted to the beginning of the entry block. So
6073 // we should only emit as ArgDbgValue if the Variable is an argument to the
6074 // current function, and the dbg.value intrinsic is found in the entry
6075 // block.
6076 bool VariableIsFunctionInputArg = Variable->isParameter() &&
6077 !DL->getInlinedAt();
6078 bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
6079 if (!IsInPrologue && !VariableIsFunctionInputArg)
6080 return false;
6081
6082 // Here we assume that an IR-level function argument can only be used to
6083 // describe one source-level input parameter. If we, for example, have
6084 // source code like this
6085 //
6086 // struct A { long x, y; };
6087 // void foo(struct A a, long b) {
6088 // ...
6089 // b = a.x;
6090 // ...
6091 // }
6092 //
6093 // and IR like this
6094 //
6095 // define void @foo(i32 %a1, i32 %a2, i32 %b) {
6096 // entry:
6097 // call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
6098 // call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
6099 // call void @llvm.dbg.value(metadata i32 %b, "b",
6100 // ...
6101 // call void @llvm.dbg.value(metadata i32 %a1, "b"
6102 // ...
6103 //
6104 // then the last dbg.value is describing the parameter "b" using a value
6105 // that is an argument. But since we have already used %a1 to describe a
6106 // parameter, we should not handle that last dbg.value here (doing so would
6107 // result in an incorrect hoisting of the DBG_VALUE to the function entry).
6108 // Notice that we allow one dbg.value per IR-level argument, to accommodate
6109 // the fragment situation described above.
6110 // If there is no node for the value being handled, we return true to skip
6111 // the normal generation of debug info, as it would kill existing debug
6112 // info for the parameter in case of duplicates.
6113 if (VariableIsFunctionInputArg) {
6114 unsigned ArgNo = Arg->getArgNo();
6115 if (ArgNo >= FuncInfo.DescribedArgs.size())
6116 FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
6117 else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
6118 return !NodeMap[V].getNode();
6119 FuncInfo.DescribedArgs.set(ArgNo);
6120 }
6121 }
6122
6123 bool IsIndirect = false;
6124 std::optional<MachineOperand> Op;
6125 // Some arguments' frame index is recorded during argument lowering.
6126 int FI = FuncInfo.getArgumentFrameIndex(Arg);
6127 if (FI != std::numeric_limits<int>::max())
6128 Op = MachineOperand::CreateFI(FI);
6129
6130 SmallVector<std::pair<Register, TypeSize>, 8> ArgRegsAndSizes;
6131 if (!Op && N.getNode()) {
6132 getUnderlyingArgRegs(ArgRegsAndSizes, N);
6133 Register Reg;
6134 if (ArgRegsAndSizes.size() == 1)
6135 Reg = ArgRegsAndSizes.front().first;
6136
6137 if (Reg && Reg.isVirtual()) {
6138 MachineRegisterInfo &RegInfo = MF.getRegInfo();
6139 Register PR = RegInfo.getLiveInPhysReg(Reg);
6140 if (PR)
6141 Reg = PR;
6142 }
6143 if (Reg) {
6144 Op = MachineOperand::CreateReg(Reg, false);
6145 IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
6146 }
6147 }
6148
6149 if (!Op && N.getNode()) {
6150 // Check if frame index is available.
6151 SDValue LCandidate = peekThroughBitcasts(N);
6152 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
6153 if (FrameIndexSDNode *FINode =
6154 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
6155 Op = MachineOperand::CreateFI(FINode->getIndex());
6156 }
6157
6158 if (!Op) {
6159 // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
6160 auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<Register, TypeSize>>
6161 SplitRegs) {
6162 unsigned Offset = 0;
6163 for (const auto &RegAndSize : SplitRegs) {
6164 // If the expression is already a fragment, the current register
6165 // offset+size might extend beyond the fragment. In this case, only
6166 // the register bits that are inside the fragment are relevant.
6167 int RegFragmentSizeInBits = RegAndSize.second;
6168 if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
6169 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6170 // The register is entirely outside the expression fragment,
6171 // so is irrelevant for debug info.
6172 if (Offset >= ExprFragmentSizeInBits)
6173 break;
6174 // The register is partially outside the expression fragment, only
6175 // the low bits within the fragment are relevant for debug info.
6176 if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6177 RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
6178 }
6179 }
6180
6181 auto FragmentExpr = DIExpression::createFragmentExpression(
6182 Expr, Offset, RegFragmentSizeInBits);
6183 Offset += RegAndSize.second;
6184 // If a valid fragment expression cannot be created, the variable's
6185 // correct value cannot be determined and so it is set as Undef.
6186 if (!FragmentExpr) {
6187 SDDbgValue *SDV = DAG.getConstantDbgValue(
6188 Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
6189 DAG.AddDbgValue(SDV, false);
6190 continue;
6191 }
6192 MachineInstr *NewMI =
6193 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6194 Kind != FuncArgumentDbgValueKind::Value);
6195 FuncInfo.ArgDbgValues.push_back(NewMI);
6196 }
6197 };
6198
6199 // Check if ValueMap has reg number.
6200 DenseMap<const Value *, Register>::const_iterator
6201 VMI = FuncInfo.ValueMap.find(V);
6202 if (VMI != FuncInfo.ValueMap.end()) {
6203 const auto &TLI = DAG.getTargetLoweringInfo();
6204 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
6205 V->getType(), std::nullopt);
6206 if (RFV.occupiesMultipleRegs()) {
6207 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6208 return true;
6209 }
6210
6211 Op = MachineOperand::CreateReg(VMI->second, false);
6212 IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
6213 } else if (ArgRegsAndSizes.size() > 1) {
6214 // This was split due to the calling convention, and no virtual register
6215 // mapping exists for the value.
6216 splitMultiRegDbgValue(ArgRegsAndSizes);
6217 return true;
6218 }
6219 }
6220
6221 if (!Op)
6222 return false;
6223
6225 "Expected inlined-at fields to agree");
6226 MachineInstr *NewMI = nullptr;
6227
6228 if (Op->isReg())
6229 NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
6230 else
6231 NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
6232 Variable, Expr);
6233
6234 // Otherwise, use ArgDbgValues.
6235 FuncInfo.ArgDbgValues.push_back(NewMI);
6236 return true;
6237}
6238
6239/// Return the appropriate SDDbgValue based on N.
6240SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
6241 DILocalVariable *Variable,
6242 DIExpression *Expr,
6243 const DebugLoc &dl,
6244 unsigned DbgSDNodeOrder) {
6245 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
6246 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
6247 // stack slot locations.
6248 //
6249 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
6250 // debug values here after optimization:
6251 //
6252 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
6253 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
6254 //
6255 // Both describe the direct values of their associated variables.
6256 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6257 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
6258 }
6259 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
6260 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
6261}
6262
6263static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
6264 switch (Intrinsic) {
6265 case Intrinsic::smul_fix:
6266 return ISD::SMULFIX;
6267 case Intrinsic::umul_fix:
6268 return ISD::UMULFIX;
6269 case Intrinsic::smul_fix_sat:
6270 return ISD::SMULFIXSAT;
6271 case Intrinsic::umul_fix_sat:
6272 return ISD::UMULFIXSAT;
6273 case Intrinsic::sdiv_fix:
6274 return ISD::SDIVFIX;
6275 case Intrinsic::udiv_fix:
6276 return ISD::UDIVFIX;
6277 case Intrinsic::sdiv_fix_sat:
6278 return ISD::SDIVFIXSAT;
6279 case Intrinsic::udiv_fix_sat:
6280 return ISD::UDIVFIXSAT;
6281 default:
6282 llvm_unreachable("Unhandled fixed point intrinsic");
6283 }
6284}
6285
6286void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
6287 const char *FunctionName) {
6288 assert(FunctionName && "FunctionName must not be nullptr");
6289 SDValue Callee = DAG.getExternalSymbol(
6290 FunctionName,
6291 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
6292 LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
6293}
6294
6295/// Given a @llvm.call.preallocated.setup, return the corresponding
6296/// preallocated call.
6297static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
6298 assert(cast<CallBase>(PreallocatedSetup)
6299 ->getCalledFunction()
6300 ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
6301 "expected call_preallocated_setup Value");
6302 for (const auto *U : PreallocatedSetup->users()) {
6303 auto *UseCall = cast<CallBase>(U);
6304 const Function *Fn = UseCall->getCalledFunction();
6305 if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6306 return UseCall;
6307 }
6308 }
6309 llvm_unreachable("expected corresponding call to preallocated setup/arg");
6310}
6311
6312/// If DI is a debug value with an EntryValue expression, lower it using the
6313/// corresponding physical register of the associated Argument value
6314/// (guaranteed to exist by the verifier).
6315bool SelectionDAGBuilder::visitEntryValueDbgValue(
6316 ArrayRef<const Value *> Values, DILocalVariable *Variable,
6317 DIExpression *Expr, DebugLoc DbgLoc) {
6318 if (!Expr->isEntryValue() || !hasSingleElement(Values))
6319 return false;
6320
6321 // These properties are guaranteed by the verifier.
6322 const Argument *Arg = cast<Argument>(Values[0]);
6323 assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
6324
6325 auto ArgIt = FuncInfo.ValueMap.find(Arg);
6326 if (ArgIt == FuncInfo.ValueMap.end()) {
6327 LLVM_DEBUG(
6328 dbgs() << "Dropping dbg.value: expression is entry_value but "
6329 "couldn't find an associated register for the Argument\n");
6330 return true;
6331 }
6332 Register ArgVReg = ArgIt->getSecond();
6333
6334 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
6335 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6336 SDDbgValue *SDV = DAG.getVRegDbgValue(
6337 Variable, Expr, PhysReg, false /*IsIndirect*/, DbgLoc, SDNodeOrder);
6338 DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
6339 return true;
6340 }
6341 LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
6342 "couldn't find a physical register\n");
6343 return true;
6344}
6345
6346 /// Lower a call to one of the experimental convergence control intrinsics.
6347void SelectionDAGBuilder::visitConvergenceControl(const CallInst &I,
6348 unsigned Intrinsic) {
6349 SDLoc sdl = getCurSDLoc();
6350 switch (Intrinsic) {
6351 case Intrinsic::experimental_convergence_anchor:
6352 setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6353 break;
6354 case Intrinsic::experimental_convergence_entry:
6355 setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6356 break;
6357 case Intrinsic::experimental_convergence_loop: {
6358 auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl);
6359 auto *Token = Bundle->Inputs[0].get();
6360 setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6361 getValue(Token)));
6362 break;
6363 }
6364 }
6365}
6366
6367void SelectionDAGBuilder::visitVectorHistogram(const CallInst &I,
6368 unsigned IntrinsicID) {
6369 // For now, we're only lowering an 'add' histogram.
6370 // We can add others later, e.g. saturating adds, min/max.
6371 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6372 "Tried to lower unsupported histogram type");
6373 SDLoc sdl = getCurSDLoc();
6374 Value *Ptr = I.getOperand(0);
6375 SDValue Inc = getValue(I.getOperand(1));
6376 SDValue Mask = getValue(I.getOperand(2));
6377
6378 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6379 DataLayout TargetDL = DAG.getDataLayout();
6380 EVT VT = Inc.getValueType();
6381 Align Alignment = DAG.getEVTAlign(VT);
6382
6383 const MDNode *Ranges = getRangeMetadata(I);
6384
6385 SDValue Root = DAG.getRoot();
6386 SDValue Base;
6387 SDValue Index;
6388 ISD::MemIndexType IndexType;
6389 SDValue Scale;
6390 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
6391 I.getParent(), VT.getScalarStoreSize());
6392
6393 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
6394
6395 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6396 MachinePointerInfo(AS),
6397 MachineMemOperand::MOLoad | MachineMemOperand::MOStore,
6398 MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
6399
6400 if (!UniformBase) {
6401 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
6402 Index = getValue(Ptr);
6403 IndexType = ISD::SIGNED_SCALED;
6404 Scale =
6405 DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
6406 }
6407
6408 EVT IdxVT = Index.getValueType();
6409 EVT EltTy = IdxVT.getVectorElementType();
6410 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
6411 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
6412 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
6413 }
6414
6415 SDValue ID = DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6416
6417 SDValue Ops[] = {Root, Inc, Mask, Base, Index, Scale, ID};
6418 SDValue Histogram = DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), VT, sdl,
6419 Ops, MMO, IndexType);
6420
6421 setValue(&I, Histogram);
6422 DAG.setRoot(Histogram);
6423}
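// For illustration (types arbitrary):
//
//   call void @llvm.experimental.vector.histogram.add.v8p0.i32(
//       <8 x ptr> %buckets, i32 1, <8 x i1> %mask)
//
// adds the increment to each bucket once per active lane addressing it;
// the MOLoad | MOStore flags above reflect that read-modify-write.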
6424
6425void SelectionDAGBuilder::visitVectorExtractLastActive(const CallInst &I,
6426 unsigned Intrinsic) {
6427 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6428 "Tried lowering invalid vector extract last");
6429 SDLoc sdl = getCurSDLoc();
6430 SDValue Data = getValue(I.getOperand(0));
6431 SDValue Mask = getValue(I.getOperand(1));
6432 SDValue PassThru = getValue(I.getOperand(2));
6433
6434 EVT DataVT = Data.getValueType();
6435 EVT ScalarVT = PassThru.getValueType();
6436 EVT BoolVT = Mask.getValueType().getScalarType();
6437
6438 // Find a suitable type for a stepvector.
6439 ConstantRange VScaleRange(1, /*isFullSet=*/true); // Dummy value.
6440 if (DataVT.isScalableVector())
6441 VScaleRange = getVScaleRange(I.getCaller(), 64);
6442 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6443 unsigned EltWidth = TLI.getBitWidthForCttzElements(
6444 I.getType(), DataVT.getVectorElementCount(), /*ZeroIsPoison=*/true,
6445 &VScaleRange);
6446 MVT StepVT = MVT::getIntegerVT(EltWidth);
6447 EVT StepVecVT = DataVT.changeVectorElementType(StepVT);
6448
6449 // Zero out lanes with inactive elements, then find the highest remaining
6450 // value from the stepvector.
6451 SDValue Zeroes = DAG.getConstant(0, sdl, StepVecVT);
6452 SDValue StepVec = DAG.getStepVector(sdl, StepVecVT);
6453 SDValue ActiveElts = DAG.getSelect(sdl, StepVecVT, Mask, StepVec, Zeroes);
6454 SDValue HighestIdx =
6455 DAG.getNode(ISD::VECREDUCE_UMAX, sdl, StepVT, ActiveElts);
6456
6457 // Extract the corresponding lane from the data vector
6458 EVT ExtVT = TLI.getVectorIdxTy(DAG.getDataLayout());
6459 SDValue Idx = DAG.getZExtOrTrunc(HighestIdx, sdl, ExtVT);
6460 SDValue Extract =
6461 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, sdl, ScalarVT, Data, Idx);
6462
6463 // If all mask lanes were inactive, choose the passthru value instead.
6464 SDValue AnyActive = DAG.getNode(ISD::VECREDUCE_OR, sdl, BoolVT, Mask);
6465 SDValue Result = DAG.getSelect(sdl, ScalarVT, AnyActive, Extract, PassThru);
6466 setValue(&I, Result);
6467}
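// Worked example (lane values illustrative): for Data = <a, b, c, d> and
// Mask = <1, 1, 0, 1>, the step vector is <0, 1, 2, 3>; the select yields
// <0, 1, 0, 3>, VECREDUCE_UMAX picks 3, and lane 3 ("d") is extracted.
// The trailing select guards the all-false mask, where the reduction
// would otherwise misreport lane 0 as active.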
6468
6469/// Lower the call to the specified intrinsic function.
6470void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
6471 unsigned Intrinsic) {
6472 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6473 SDLoc sdl = getCurSDLoc();
6474 DebugLoc dl = getCurDebugLoc();
6475 SDValue Res;
6476
6477 SDNodeFlags Flags;
6478 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
6479 Flags.copyFMF(*FPOp);
6480
6481 switch (Intrinsic) {
6482 default:
6483 // By default, turn this into a target intrinsic node.
6484 visitTargetIntrinsic(I, Intrinsic);
6485 return;
6486 case Intrinsic::vscale: {
6487 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6488 setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
6489 return;
6490 }
6491 case Intrinsic::vastart: visitVAStart(I); return;
6492 case Intrinsic::vaend: visitVAEnd(I); return;
6493 case Intrinsic::vacopy: visitVACopy(I); return;
6494 case Intrinsic::returnaddress:
6495 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
6496 TLI.getValueType(DAG.getDataLayout(), I.getType()),
6497 getValue(I.getArgOperand(0))));
6498 return;
6499 case Intrinsic::addressofreturnaddress:
6500 setValue(&I,
6501 DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
6502 TLI.getValueType(DAG.getDataLayout(), I.getType())));
6503 return;
6504 case Intrinsic::sponentry:
6505 setValue(&I,
6506 DAG.getNode(ISD::SPONENTRY, sdl,
6507 TLI.getValueType(DAG.getDataLayout(), I.getType())));
6508 return;
6509 case Intrinsic::frameaddress:
6510 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
6511 TLI.getFrameIndexTy(DAG.getDataLayout()),
6512 getValue(I.getArgOperand(0))));
6513 return;
6514 case Intrinsic::read_volatile_register:
6515 case Intrinsic::read_register: {
6516 Value *Reg = I.getArgOperand(0);
6517 SDValue Chain = getRoot();
6518 SDValue RegName =
6519 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6520 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6521 Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6522 DAG.getVTList(VT, MVT::Other), Chain, RegName);
6523 setValue(&I, Res);
6524 DAG.setRoot(Res.getValue(1));
6525 return;
6526 }
6527 case Intrinsic::write_register: {
6528 Value *Reg = I.getArgOperand(0);
6529 Value *RegValue = I.getArgOperand(1);
6530 SDValue Chain = getRoot();
6531 SDValue RegName =
6532 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6533 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6534 RegName, getValue(RegValue)));
6535 return;
6536 }
6537 case Intrinsic::memcpy: {
6538 const auto &MCI = cast<MemCpyInst>(I);
6539 SDValue Op1 = getValue(I.getArgOperand(0));
6540 SDValue Op2 = getValue(I.getArgOperand(1));
6541 SDValue Op3 = getValue(I.getArgOperand(2));
6542 // @llvm.memcpy defines 0 and 1 to both mean no alignment.
6543 Align DstAlign = MCI.getDestAlign().valueOrOne();
6544 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6545 Align Alignment = std::min(DstAlign, SrcAlign);
6546 bool isVol = MCI.isVolatile();
6547 // FIXME: Support passing different dest/src alignments to the memcpy DAG
6548 // node.
6549 SDValue Root = isVol ? getRoot() : getMemoryRoot();
6550 SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6551 /* AlwaysInline */ false, &I, std::nullopt,
6552 MachinePointerInfo(I.getArgOperand(0)),
6553 MachinePointerInfo(I.getArgOperand(1)),
6554 I.getAAMetadata(), AA);
6555 updateDAGForMaybeTailCall(MC);
6556 return;
6557 }
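// Illustrative example: for
//   call void @llvm.memcpy.p0.p0.i64(ptr align 16 %d, ptr align 8 %s,
//                                    i64 %n, i1 false)
// DstAlign is 16 and SrcAlign is 8, so until the FIXME above is resolved the
// single memcpy node conservatively uses Alignment = min(16, 8) = 8 for both
// sides.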
6558 case Intrinsic::memcpy_inline: {
6559 const auto &MCI = cast<MemCpyInlineInst>(I);
6560 SDValue Dst = getValue(I.getArgOperand(0));
6561 SDValue Src = getValue(I.getArgOperand(1));
6562 SDValue Size = getValue(I.getArgOperand(2));
6563 assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
6564 // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
6565 Align DstAlign = MCI.getDestAlign().valueOrOne();
6566 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6567 Align Alignment = std::min(DstAlign, SrcAlign);
6568 bool isVol = MCI.isVolatile();
6569 // FIXME: Support passing different dest/src alignments to the memcpy DAG
6570 // node.
6571 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
6572 /* AlwaysInline */ true, &I, std::nullopt,
6573 MachinePointerInfo(I.getArgOperand(0)),
6574 MachinePointerInfo(I.getArgOperand(1)),
6575 I.getAAMetadata(), AA);
6576 updateDAGForMaybeTailCall(MC);
6577 return;
6578 }
6579 case Intrinsic::memset: {
6580 const auto &MSI = cast<MemSetInst>(I);
6581 SDValue Op1 = getValue(I.getArgOperand(0));
6582 SDValue Op2 = getValue(I.getArgOperand(1));
6583 SDValue Op3 = getValue(I.getArgOperand(2));
6584 // @llvm.memset defines 0 and 1 to both mean no alignment.
6585 Align Alignment = MSI.getDestAlign().valueOrOne();
6586 bool isVol = MSI.isVolatile();
6587 SDValue Root = isVol ? getRoot() : getMemoryRoot();
6588 SDValue MS = DAG.getMemset(
6589 Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
6590 &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6591 updateDAGForMaybeTailCall(MS);
6592 return;
6593 }
6594 case Intrinsic::memset_inline: {
6595 const auto &MSII = cast<MemSetInlineInst>(I);
6596 SDValue Dst = getValue(I.getArgOperand(0));
6597 SDValue Value = getValue(I.getArgOperand(1));
6598 SDValue Size = getValue(I.getArgOperand(2));
6599 assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
6600 // @llvm.memset defines 0 and 1 to both mean no alignment.
6601 Align DstAlign = MSII.getDestAlign().valueOrOne();
6602 bool isVol = MSII.isVolatile();
6603 SDValue Root = isVol ? getRoot() : getMemoryRoot();
6604 SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
6605 /* AlwaysInline */ true, &I,
6606 MachinePointerInfo(I.getArgOperand(0)),
6607 I.getAAMetadata());
6608 updateDAGForMaybeTailCall(MC);
6609 return;
6610 }
6611 case Intrinsic::memmove: {
6612 const auto &MMI = cast<MemMoveInst>(I);
6613 SDValue Op1 = getValue(I.getArgOperand(0));
6614 SDValue Op2 = getValue(I.getArgOperand(1));
6615 SDValue Op3 = getValue(I.getArgOperand(2));
6616 // @llvm.memmove defines 0 and 1 to both mean no alignment.
6617 Align DstAlign = MMI.getDestAlign().valueOrOne();
6618 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6619 Align Alignment = std::min(DstAlign, SrcAlign);
6620 bool isVol = MMI.isVolatile();
6621 // FIXME: Support passing different dest/src alignments to the memmove DAG
6622 // node.
6623 SDValue Root = isVol ? getRoot() : getMemoryRoot();
6624 SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &I,
6625 /* OverrideTailCall */ std::nullopt,
6626 MachinePointerInfo(I.getArgOperand(0)),
6627 MachinePointerInfo(I.getArgOperand(1)),
6628 I.getAAMetadata(), AA);
6629 updateDAGForMaybeTailCall(MM);
6630 return;
6631 }
6632 case Intrinsic::memcpy_element_unordered_atomic: {
6633 const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
6634 SDValue Dst = getValue(MI.getRawDest());
6635 SDValue Src = getValue(MI.getRawSource());
6636 SDValue Length = getValue(MI.getLength());
6637
6638 Type *LengthTy = MI.getLength()->getType();
6639 unsigned ElemSz = MI.getElementSizeInBytes();
6640 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6641 SDValue MC =
6642 DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6643 isTC, MachinePointerInfo(MI.getRawDest()),
6644 MachinePointerInfo(MI.getRawSource()));
6645 updateDAGForMaybeTailCall(MC);
6646 return;
6647 }
6648 case Intrinsic::memmove_element_unordered_atomic: {
6649 auto &MI = cast<AtomicMemMoveInst>(I);
6650 SDValue Dst = getValue(MI.getRawDest());
6651 SDValue Src = getValue(MI.getRawSource());
6652 SDValue Length = getValue(MI.getLength());
6653
6654 Type *LengthTy = MI.getLength()->getType();
6655 unsigned ElemSz = MI.getElementSizeInBytes();
6656 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6657 SDValue MC =
6658 DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6659 isTC, MachinePointerInfo(MI.getRawDest()),
6660 MachinePointerInfo(MI.getRawSource()));
6661 updateDAGForMaybeTailCall(MC);
6662 return;
6663 }
6664 case Intrinsic::memset_element_unordered_atomic: {
6665 auto &MI = cast<AtomicMemSetInst>(I);
6666 SDValue Dst = getValue(MI.getRawDest());
6667 SDValue Val = getValue(MI.getValue());
6668 SDValue Length = getValue(MI.getLength());
6669
6670 Type *LengthTy = MI.getLength()->getType();
6671 unsigned ElemSz = MI.getElementSizeInBytes();
6672 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6673 SDValue MC =
6674 DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6675 isTC, MachinePointerInfo(MI.getRawDest()));
6676 updateDAGForMaybeTailCall(MC);
6677 return;
6678 }
6679 case Intrinsic::call_preallocated_setup: {
6680 const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6681 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6682 SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6683 getRoot(), SrcValue);
6684 setValue(&I, Res);
6685 DAG.setRoot(Res);
6686 return;
6687 }
6688 case Intrinsic::call_preallocated_arg: {
6689 const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6690 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6691 SDValue Ops[3];
6692 Ops[0] = getRoot();
6693 Ops[1] = SrcValue;
6694 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6695 MVT::i32); // arg index
6696 SDValue Res = DAG.getNode(
6697 ISD::PREALLOCATED_ARG, sdl,
6698 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6699 setValue(&I, Res);
6700 DAG.setRoot(Res.getValue(1));
6701 return;
6702 }
6703 case Intrinsic::dbg_declare: {
6704 const auto &DI = cast<DbgDeclareInst>(I);
6705 // Debug intrinsics are handled separately in assignment tracking mode.
6706 // Some intrinsics are handled right after Argument lowering.
6707 if (AssignmentTrackingEnabled ||
6708 FuncInfo.PreprocessedDbgDeclares.count(&DI))
6709 return;
6710 LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DI << "\n");
6711 DILocalVariable *Variable = DI.getVariable();
6712 DIExpression *Expression = DI.getExpression();
6713 dropDanglingDebugInfo(Variable, Expression);
6714 // Assume dbg.declare can not currently use DIArgList, i.e.
6715 // it is non-variadic.
6716 assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
6717 handleDebugDeclare(DI.getVariableLocationOp(0), Variable, Expression,
6718 DI.getDebugLoc());
6719 return;
6720 }
6721 case Intrinsic::dbg_label: {
6722 const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6723 DILabel *Label = DI.getLabel();
6724 assert(Label && "Missing label");
6725
6726 SDDbgLabel *SDV;
6727 SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
6728 DAG.AddDbgLabel(SDV);
6729 return;
6730 }
6731 case Intrinsic::dbg_assign: {
6732 // Debug intrinsics are handled separately in assignment tracking mode.
6733 if (AssignmentTrackingEnabled)
6734 return;
6735 // If assignment tracking hasn't been enabled then fall through and treat
6736 // the dbg.assign as a dbg.value.
6737 [[fallthrough]];
6738 }
6739 case Intrinsic::dbg_value: {
6740 // Debug intrinsics are handled separately in assignment tracking mode.
6741 if (AssignmentTrackingEnabled)
6742 return;
6743 const DbgValueInst &DI = cast<DbgValueInst>(I);
6744 assert(DI.getVariable() && "Missing variable");
6745
6746 DILocalVariable *Variable = DI.getVariable();
6747 DIExpression *Expression = DI.getExpression();
6748 dropDanglingDebugInfo(Variable, Expression);
6749
6750 if (DI.isKillLocation()) {
6751 handleKillDebugValue(Variable, Expression, DI.getDebugLoc(), SDNodeOrder);
6752 return;
6753 }
6754
6755 SmallVector<Value *, 4> Values(DI.getValues());
6756 if (Values.empty())
6757 return;
6758
6759 bool IsVariadic = DI.hasArgList();
6760 if (!handleDebugValue(Values, Variable, Expression, DI.getDebugLoc(),
6761 SDNodeOrder, IsVariadic))
6762 addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
6763 DI.getDebugLoc(), SDNodeOrder);
6764 return;
6765 }
6766
6767 case Intrinsic::eh_typeid_for: {
6768 // Find the type id for the given typeinfo.
6769 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6770 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6771 Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6772 setValue(&I, Res);
6773 return;
6774 }
6775
6776 case Intrinsic::eh_return_i32:
6777 case Intrinsic::eh_return_i64:
6778 DAG.getMachineFunction().setCallsEHReturn(true);
6779 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6780 MVT::Other,
6781 getControlRoot(),
6782 getValue(I.getArgOperand(0)),
6783 getValue(I.getArgOperand(1))));
6784 return;
6785 case Intrinsic::eh_unwind_init:
6786 DAG.getMachineFunction().setCallsUnwindInit(true);
6787 return;
6788 case Intrinsic::eh_dwarf_cfa:
6789 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6790 TLI.getPointerTy(DAG.getDataLayout()),
6791 getValue(I.getArgOperand(0))));
6792 return;
6793 case Intrinsic::eh_sjlj_callsite: {
6794 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6795 assert(FuncInfo.getCurrentCallSite() == 0 && "Overlapping call sites!");
6796
6797 FuncInfo.setCurrentCallSite(CI->getZExtValue());
6798 return;
6799 }
6800 case Intrinsic::eh_sjlj_functioncontext: {
6801 // Get and store the index of the function context.
6802 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6803 AllocaInst *FnCtx =
6804 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6805 int FI = FuncInfo.StaticAllocaMap[FnCtx];
6806 MFI.setFunctionContextIndex(FI);
6807 return;
6808 }
6809 case Intrinsic::eh_sjlj_setjmp: {
6810 SDValue Ops[2];
6811 Ops[0] = getRoot();
6812 Ops[1] = getValue(I.getArgOperand(0));
6813 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6814 DAG.getVTList(MVT::i32, MVT::Other), Ops);
6815 setValue(&I, Op.getValue(0));
6816 DAG.setRoot(Op.getValue(1));
6817 return;
6818 }
6819 case Intrinsic::eh_sjlj_longjmp:
6820 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6821 getRoot(), getValue(I.getArgOperand(0))));
6822 return;
6823 case Intrinsic::eh_sjlj_setup_dispatch:
6824 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6825 getRoot()));
6826 return;
6827 case Intrinsic::masked_gather:
6828 visitMaskedGather(I);
6829 return;
6830 case Intrinsic::masked_load:
6831 visitMaskedLoad(I);
6832 return;
6833 case Intrinsic::masked_scatter:
6834 visitMaskedScatter(I);
6835 return;
6836 case Intrinsic::masked_store:
6837 visitMaskedStore(I);
6838 return;
6839 case Intrinsic::masked_expandload:
6840 visitMaskedLoad(I, true /* IsExpanding */);
6841 return;
6842 case Intrinsic::masked_compressstore:
6843 visitMaskedStore(I, true /* IsCompressing */);
6844 return;
6845 case Intrinsic::powi:
6846 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6847 getValue(I.getArgOperand(1)), DAG));
6848 return;
6849 case Intrinsic::log:
6850 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6851 return;
6852 case Intrinsic::log2:
6853 setValue(&I,
6854 expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6855 return;
6856 case Intrinsic::log10:
6857 setValue(&I,
6858 expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6859 return;
6860 case Intrinsic::exp:
6861 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6862 return;
6863 case Intrinsic::exp2:
6864 setValue(&I,
6865 expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6866 return;
6867 case Intrinsic::pow:
6868 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6869 getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6870 return;
6871 case Intrinsic::sqrt:
6872 case Intrinsic::fabs:
6873 case Intrinsic::sin:
6874 case Intrinsic::cos:
6875 case Intrinsic::tan:
6876 case Intrinsic::asin:
6877 case Intrinsic::acos:
6878 case Intrinsic::atan:
6879 case Intrinsic::sinh:
6880 case Intrinsic::cosh:
6881 case Intrinsic::tanh:
6882 case Intrinsic::exp10:
6883 case Intrinsic::floor:
6884 case Intrinsic::ceil:
6885 case Intrinsic::trunc:
6886 case Intrinsic::rint:
6887 case Intrinsic::nearbyint:
6888 case Intrinsic::round:
6889 case Intrinsic::roundeven:
6890 case Intrinsic::canonicalize: {
6891 unsigned Opcode;
6892 // clang-format off
6893 switch (Intrinsic) {
6894 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6895 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
6896 case Intrinsic::fabs: Opcode = ISD::FABS; break;
6897 case Intrinsic::sin: Opcode = ISD::FSIN; break;
6898 case Intrinsic::cos: Opcode = ISD::FCOS; break;
6899 case Intrinsic::tan: Opcode = ISD::FTAN; break;
6900 case Intrinsic::asin: Opcode = ISD::FASIN; break;
6901 case Intrinsic::acos: Opcode = ISD::FACOS; break;
6902 case Intrinsic::atan: Opcode = ISD::FATAN; break;
6903 case Intrinsic::sinh: Opcode = ISD::FSINH; break;
6904 case Intrinsic::cosh: Opcode = ISD::FCOSH; break;
6905 case Intrinsic::tanh: Opcode = ISD::FTANH; break;
6906 case Intrinsic::exp10: Opcode = ISD::FEXP10; break;
6907 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
6908 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
6909 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
6910 case Intrinsic::rint: Opcode = ISD::FRINT; break;
6911 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6912 case Intrinsic::round: Opcode = ISD::FROUND; break;
6913 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6914 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6915 }
6916 // clang-format on
6917
6918 setValue(&I, DAG.getNode(Opcode, sdl,
6919 getValue(I.getArgOperand(0)).getValueType(),
6920 getValue(I.getArgOperand(0)), Flags));
6921 return;
6922 }
6923 case Intrinsic::atan2:
6924 setValue(&I, DAG.getNode(ISD::FATAN2, sdl,
6925 getValue(I.getArgOperand(0)).getValueType(),
6926 getValue(I.getArgOperand(0)),
6927 getValue(I.getArgOperand(1)), Flags));
6928 return;
6929 case Intrinsic::lround:
6930 case Intrinsic::llround:
6931 case Intrinsic::lrint:
6932 case Intrinsic::llrint: {
6933 unsigned Opcode;
6934 // clang-format off
6935 switch (Intrinsic) {
6936 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6937 case Intrinsic::lround: Opcode = ISD::LROUND; break;
6938 case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6939 case Intrinsic::lrint: Opcode = ISD::LRINT; break;
6940 case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
6941 }
6942 // clang-format on
6943
6944 EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6945 setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6946 getValue(I.getArgOperand(0))));
6947 return;
6948 }
6949 case Intrinsic::minnum:
6950 setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6951 getValue(I.getArgOperand(0)).getValueType(),
6952 getValue(I.getArgOperand(0)),
6953 getValue(I.getArgOperand(1)), Flags));
6954 return;
6955 case Intrinsic::maxnum:
6956 setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6957 getValue(I.getArgOperand(0)).getValueType(),
6958 getValue(I.getArgOperand(0)),
6959 getValue(I.getArgOperand(1)), Flags));
6960 return;
6961 case Intrinsic::minimum:
6962 setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6963 getValue(I.getArgOperand(0)).getValueType(),
6964 getValue(I.getArgOperand(0)),
6965 getValue(I.getArgOperand(1)), Flags));
6966 return;
6967 case Intrinsic::maximum:
6968 setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6969 getValue(I.getArgOperand(0)).getValueType(),
6970 getValue(I.getArgOperand(0)),
6971 getValue(I.getArgOperand(1)), Flags));
6972 return;
6973 case Intrinsic::minimumnum:
6974 setValue(&I, DAG.getNode(ISD::FMINIMUMNUM, sdl,
6975 getValue(I.getArgOperand(0)).getValueType(),
6976 getValue(I.getArgOperand(0)),
6977 getValue(I.getArgOperand(1)), Flags));
6978 return;
6979 case Intrinsic::maximumnum:
6980 setValue(&I, DAG.getNode(ISD::FMAXIMUMNUM, sdl,
6981 getValue(I.getArgOperand(0)).getValueType(),
6982 getValue(I.getArgOperand(0)),
6983 getValue(I.getArgOperand(1)), Flags));
6984 return;
6985 case Intrinsic::copysign:
6986 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6987 getValue(I.getArgOperand(0)).getValueType(),
6988 getValue(I.getArgOperand(0)),
6989 getValue(I.getArgOperand(1)), Flags));
6990 return;
6991 case Intrinsic::ldexp:
6992 setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
6993 getValue(I.getArgOperand(0)).getValueType(),
6994 getValue(I.getArgOperand(0)),
6995 getValue(I.getArgOperand(1)), Flags));
6996 return;
6997 case Intrinsic::sincos:
6998 case Intrinsic::frexp: {
6999 unsigned Opcode;
7000 switch (Intrinsic) {
7001 default:
7002 llvm_unreachable("unexpected intrinsic");
7003 case Intrinsic::sincos:
7004 Opcode = ISD::FSINCOS;
7005 break;
7006 case Intrinsic::frexp:
7007 Opcode = ISD::FFREXP;
7008 break;
7009 }
7010 SmallVector<EVT, 2> ValueVTs;
7011 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
7012 SDVTList VTs = DAG.getVTList(ValueVTs);
7013 setValue(
7014 &I, DAG.getNode(Opcode, sdl, VTs, getValue(I.getArgOperand(0)), Flags));
7015 return;
7016 }
7017 case Intrinsic::arithmetic_fence: {
7018 setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
7019 getValue(I.getArgOperand(0)).getValueType(),
7020 getValue(I.getArgOperand(0)), Flags));
7021 return;
7022 }
7023 case Intrinsic::fma:
7024 setValue(&I, DAG.getNode(
7025 ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
7026 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
7027 getValue(I.getArgOperand(2)), Flags));
7028 return;
7029#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
7030 case Intrinsic::INTRINSIC:
7031#include "llvm/IR/ConstrainedOps.def"
7032 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
7033 return;
7034#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7035#include "llvm/IR/VPIntrinsics.def"
7036 visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
7037 return;
7038 case Intrinsic::fptrunc_round: {
7039 // Get the last argument, the metadata and convert it to an integer in the
7040 // call
7041 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
7042 std::optional<RoundingMode> RoundMode =
7043 convertStrToRoundingMode(cast<MDString>(MD)->getString());
7044
7045 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7046
7047 // Propagate fast-math-flags from IR to node(s).
7048 SDNodeFlags Flags;
7049 Flags.copyFMF(*cast<FPMathOperator>(&I));
7050 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
7051
7052 SDValue Result;
7053 Result = DAG.getNode(
7054 ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
7055 DAG.getTargetConstant((int)*RoundMode, sdl, MVT::i32));
7056 setValue(&I, Result);
7057
7058 return;
7059 }
7060 case Intrinsic::fmuladd: {
7061 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7062 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
7063 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7064 setValue(&I, DAG.getNode(ISD::FMA, sdl,
7065 getValue(I.getArgOperand(0)).getValueType(),
7066 getValue(I.getArgOperand(0)),
7067 getValue(I.getArgOperand(1)),
7068 getValue(I.getArgOperand(2)), Flags));
7069 } else {
7070 // TODO: Intrinsic calls should have fast-math-flags.
7071 SDValue Mul = DAG.getNode(
7072 ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
7073 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
7074 SDValue Add = DAG.getNode(ISD::FADD, sdl,
7075 getValue(I.getArgOperand(0)).getValueType(),
7076 Mul, getValue(I.getArgOperand(2)), Flags);
7077 setValue(&I, Add);
7078 }
7079 return;
7080 }
7081 case Intrinsic::convert_to_fp16:
7082 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
7083 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
7084 getValue(I.getArgOperand(0)),
7085 DAG.getTargetConstant(0, sdl,
7086 MVT::i32))));
7087 return;
7088 case Intrinsic::convert_from_fp16:
7089 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
7090 TLI.getValueType(DAG.getDataLayout(), I.getType()),
7091 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
7092 getValue(I.getArgOperand(0)))));
7093 return;
7094 case Intrinsic::fptosi_sat: {
7095 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7096 setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
7097 getValue(I.getArgOperand(0)),
7098 DAG.getValueType(VT.getScalarType())));
7099 return;
7100 }
7101 case Intrinsic::fptoui_sat: {
7102 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7103 setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
7104 getValue(I.getArgOperand(0)),
7105 DAG.getValueType(VT.getScalarType())));
7106 return;
7107 }
7108 case Intrinsic::set_rounding:
7109 Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7110 {getRoot(), getValue(I.getArgOperand(0))});
7111 setValue(&I, Res);
7112 DAG.setRoot(Res.getValue(0));
7113 return;
7114 case Intrinsic::is_fpclass: {
7115 const DataLayout DLayout = DAG.getDataLayout();
7116 EVT DestVT = TLI.getValueType(DLayout, I.getType());
7117 EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
7118 FPClassTest Test = static_cast<FPClassTest>(
7119 cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
7120 MachineFunction &MF = DAG.getMachineFunction();
7121 const Function &F = MF.getFunction();
7122 SDValue Op = getValue(I.getArgOperand(0));
7123 SDNodeFlags Flags;
7124 Flags.setNoFPExcept(
7125 !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7126 // If ISD::IS_FPCLASS should be expanded, do it right now, because the
7127 // expansion can use illegal types. Making expansion early allows
7128 // legalizing these types prior to selection.
7129 if (!TLI.isOperationLegal(ISD::IS_FPCLASS, ArgVT) &&
7130 !TLI.isOperationCustom(ISD::IS_FPCLASS, ArgVT)) {
7131 SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
7132 setValue(&I, Result);
7133 return;
7134 }
7135
7136 SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
7137 SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
7138 setValue(&I, V);
7139 return;
7140 }
7141 case Intrinsic::get_fpenv: {
7142 const DataLayout DLayout = DAG.getDataLayout();
7143 EVT EnvVT = TLI.getValueType(DLayout, I.getType());
7144 Align TempAlign = DAG.getEVTAlign(EnvVT);
7145 SDValue Chain = getRoot();
7146 // Use GET_FPENV if it is legal or custom. Otherwise use memory-based node
7147 // and temporary storage in stack.
7148 if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
7149 Res = DAG.getNode(
7150 ISD::GET_FPENV, sdl,
7151 DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7152 MVT::Other),
7153 Chain);
7154 } else {
7155 SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
7156 int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7157 auto MPI =
7158 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
7159 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7160 MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
7161 TempAlign);
7162 Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7163 Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7164 }
7165 setValue(&I, Res);
7166 DAG.setRoot(Res.getValue(1));
7167 return;
7168 }
7169 case Intrinsic::set_fpenv: {
7170 const DataLayout DLayout = DAG.getDataLayout();
7171 SDValue Env = getValue(I.getArgOperand(0));
7172 EVT EnvVT = Env.getValueType();
7173 Align TempAlign = DAG.getEVTAlign(EnvVT);
7174 SDValue Chain = getRoot();
7175 // If SET_FPENV is custom or legal, use it. Otherwise use loading
7176 // environment from memory.
7177 if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
7178 Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7179 } else {
7180 // Allocate space in stack, copy environment bits into it and use this
7181 // memory in SET_FPENV_MEM.
7182 SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
7183 int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7184 auto MPI =
7185 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
7186 Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7187 MachineMemOperand::MOStore);
7188 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7189 MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
7190 TempAlign);
7191 Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7192 }
7193 DAG.setRoot(Chain);
7194 return;
7195 }
7196 case Intrinsic::reset_fpenv:
7197 DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
7198 return;
7199 case Intrinsic::get_fpmode:
7200 Res = DAG.getNode(
7201 ISD::GET_FPMODE, sdl,
7202 DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7203 MVT::Other),
7204 DAG.getRoot());
7205 setValue(&I, Res);
7206 DAG.setRoot(Res.getValue(1));
7207 return;
7208 case Intrinsic::set_fpmode:
7209 Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
7210 getValue(I.getArgOperand(0)));
7211 DAG.setRoot(Res);
7212 return;
7213 case Intrinsic::reset_fpmode: {
7214 Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
7215 DAG.setRoot(Res);
7216 return;
7217 }
7218 case Intrinsic::pcmarker: {
7219 SDValue Tmp = getValue(I.getArgOperand(0));
7220 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
7221 return;
7222 }
7223 case Intrinsic::readcyclecounter: {
7224 SDValue Op = getRoot();
7225 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7226 DAG.getVTList(MVT::i64, MVT::Other), Op);
7227 setValue(&I, Res);
7228 DAG.setRoot(Res.getValue(1));
7229 return;
7230 }
7231 case Intrinsic::readsteadycounter: {
7232 SDValue Op = getRoot();
7233 Res = DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7234 DAG.getVTList(MVT::i64, MVT::Other), Op);
7235 setValue(&I, Res);
7236 DAG.setRoot(Res.getValue(1));
7237 return;
7238 }
7239 case Intrinsic::bitreverse:
7240 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
7241 getValue(I.getArgOperand(0)).getValueType(),
7242 getValue(I.getArgOperand(0))));
7243 return;
7244 case Intrinsic::bswap:
7245 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
7246 getValue(I.getArgOperand(0)).getValueType(),
7247 getValue(I.getArgOperand(0))));
7248 return;
7249 case Intrinsic::cttz: {
7250 SDValue Arg = getValue(I.getArgOperand(0));
7251 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
7252 EVT Ty = Arg.getValueType();
7253 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
7254 sdl, Ty, Arg));
7255 return;
7256 }
7257 case Intrinsic::ctlz: {
7258 SDValue Arg = getValue(I.getArgOperand(0));
7259 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
7260 EVT Ty = Arg.getValueType();
7261 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
7262 sdl, Ty, Arg));
7263 return;
7264 }
7265 case Intrinsic::ctpop: {
7266 SDValue Arg = getValue(I.getArgOperand(0));
7267 EVT Ty = Arg.getValueType();
7268 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
7269 return;
7270 }
7271 case Intrinsic::fshl:
7272 case Intrinsic::fshr: {
7273 bool IsFSHL = Intrinsic == Intrinsic::fshl;
7274 SDValue X = getValue(I.getArgOperand(0));
7275 SDValue Y = getValue(I.getArgOperand(1));
7276 SDValue Z = getValue(I.getArgOperand(2));
7277 EVT VT = X.getValueType();
7278
7279 if (X == Y) {
7280 auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
7281 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
7282 } else {
7283 auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
7284 setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
7285 }
7286 return;
7287 }
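// Worked example (illustrative): for i8 operands, fshl(x, y, 3) produces
// (x << 3) | (y >> 5), the top byte of the concatenation x:y shifted left by
// 3; when both inputs are the same value, fshl(x, x, 3) is exactly
// rotl(x, 3), which is why the X == Y test above emits ROTL/ROTR directly.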
7288 case Intrinsic::sadd_sat: {
7289 SDValue Op1 = getValue(I.getArgOperand(0));
7290 SDValue Op2 = getValue(I.getArgOperand(1));
7291 setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
7292 return;
7293 }
7294 case Intrinsic::uadd_sat: {
7295 SDValue Op1 = getValue(I.getArgOperand(0));
7296 SDValue Op2 = getValue(I.getArgOperand(1));
7297 setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
7298 return;
7299 }
7300 case Intrinsic::ssub_sat: {
7301 SDValue Op1 = getValue(I.getArgOperand(0));
7302 SDValue Op2 = getValue(I.getArgOperand(1));
7303 setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
7304 return;
7305 }
7306 case Intrinsic::usub_sat: {
7307 SDValue Op1 = getValue(I.getArgOperand(0));
7308 SDValue Op2 = getValue(I.getArgOperand(1));
7309 setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
7310 return;
7311 }
7312 case Intrinsic::sshl_sat: {
7313 SDValue Op1 = getValue(I.getArgOperand(0));
7314 SDValue Op2 = getValue(I.getArgOperand(1));
7315 setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
7316 return;
7317 }
7318 case Intrinsic::ushl_sat: {
7319 SDValue Op1 = getValue(I.getArgOperand(0));
7320 SDValue Op2 = getValue(I.getArgOperand(1));
7321 setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
7322 return;
7323 }
7324 case Intrinsic::smul_fix:
7325 case Intrinsic::umul_fix:
7326 case Intrinsic::smul_fix_sat:
7327 case Intrinsic::umul_fix_sat: {
7328 SDValue Op1 = getValue(I.getArgOperand(0));
7329 SDValue Op2 = getValue(I.getArgOperand(1));
7330 SDValue Op3 = getValue(I.getArgOperand(2));
7331 setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
7332 Op1.getValueType(), Op1, Op2, Op3));
7333 return;
7334 }
7335 case Intrinsic::sdiv_fix:
7336 case Intrinsic::udiv_fix:
7337 case Intrinsic::sdiv_fix_sat:
7338 case Intrinsic::udiv_fix_sat: {
7339 SDValue Op1 = getValue(I.getArgOperand(0));
7340 SDValue Op2 = getValue(I.getArgOperand(1));
7341 SDValue Op3 = getValue(I.getArgOperand(2));
7342 setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
7343 Op1, Op2, Op3, DAG, TLI));
7344 return;
7345 }
7346 case Intrinsic::smax: {
7347 SDValue Op1 = getValue(I.getArgOperand(0));
7348 SDValue Op2 = getValue(I.getArgOperand(1));
7349 setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
7350 return;
7351 }
7352 case Intrinsic::smin: {
7353 SDValue Op1 = getValue(I.getArgOperand(0));
7354 SDValue Op2 = getValue(I.getArgOperand(1));
7355 setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
7356 return;
7357 }
7358 case Intrinsic::umax: {
7359 SDValue Op1 = getValue(I.getArgOperand(0));
7360 SDValue Op2 = getValue(I.getArgOperand(1));
7361 setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
7362 return;
7363 }
7364 case Intrinsic::umin: {
7365 SDValue Op1 = getValue(I.getArgOperand(0));
7366 SDValue Op2 = getValue(I.getArgOperand(1));
7367 setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
7368 return;
7369 }
7370 case Intrinsic::abs: {
7371 // TODO: Preserve "int min is poison" arg in SDAG?
7372 SDValue Op1 = getValue(I.getArgOperand(0));
7373 setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
7374 return;
7375 }
7376 case Intrinsic::scmp: {
7377 SDValue Op1 = getValue(I.getArgOperand(0));
7378 SDValue Op2 = getValue(I.getArgOperand(1));
7379 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7380 setValue(&I, DAG.getNode(ISD::SCMP, sdl, DestVT, Op1, Op2));
7381 break;
7382 }
7383 case Intrinsic::ucmp: {
7384 SDValue Op1 = getValue(I.getArgOperand(0));
7385 SDValue Op2 = getValue(I.getArgOperand(1));
7386 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7387 setValue(&I, DAG.getNode(ISD::UCMP, sdl, DestVT, Op1, Op2));
7388 break;
7389 }
7390 case Intrinsic::stacksave: {
7391 SDValue Op = getRoot();
7392 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7393 Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
7394 setValue(&I, Res);
7395 DAG.setRoot(Res.getValue(1));
7396 return;
7397 }
7398 case Intrinsic::stackrestore:
7399 Res = getValue(I.getArgOperand(0));
7400 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
7401 return;
7402 case Intrinsic::get_dynamic_area_offset: {
7403 SDValue Op = getRoot();
7404 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
7405 EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7406 // Result type for @llvm.get.dynamic.area.offset should match PtrTy for
7407 // target.
7408 if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
7409 report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
7410 " intrinsic!");
7411 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
7412 Op);
7413 DAG.setRoot(Op);
7414 setValue(&I, Res);
7415 return;
7416 }
7417 case Intrinsic::stackguard: {
7418 MachineFunction &MF = DAG.getMachineFunction();
7419 const Module &M = *MF.getFunction().getParent();
7420 EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7421 SDValue Chain = getRoot();
7422 if (TLI.useLoadStackGuardNode(M)) {
7423 Res = getLoadStackGuard(DAG, sdl, Chain);
7424 Res = DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7425 } else {
7426 const Value *Global = TLI.getSDagStackGuard(M);
7427 Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
7428 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
7429 MachinePointerInfo(Global, 0), Align,
7430 MachineMemOperand::MOVolatile);
7431 }
7432 if (TLI.useStackGuardXorFP())
7433 Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
7434 DAG.setRoot(Chain);
7435 setValue(&I, Res);
7436 return;
7437 }
7438 case Intrinsic::stackprotector: {
7439 // Emit code into the DAG to store the stack guard onto the stack.
7440 MachineFunction &MF = DAG.getMachineFunction();
7441 MachineFrameInfo &MFI = MF.getFrameInfo();
7442 const Module &M = *MF.getFunction().getParent();
7443 SDValue Src, Chain = getRoot();
7444
7445 if (TLI.useLoadStackGuardNode(M))
7446 Src = getLoadStackGuard(DAG, sdl, Chain);
7447 else
7448 Src = getValue(I.getArgOperand(0)); // The guard's value.
7449
7450 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
7451
7452 int FI = FuncInfo.StaticAllocaMap[Slot];
7453 MFI.setStackProtectorIndex(FI);
7454 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
7455
7456 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
7457
7458 // Store the stack protector onto the stack.
7459 Res = DAG.getStore(
7460 Chain, sdl, Src, FIN,
7461 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
7462 MaybeAlign(), MachineMemOperand::MOVolatile);
7463 setValue(&I, Res);
7464 DAG.setRoot(Res);
7465 return;
7466 }
7467 case Intrinsic::objectsize:
7468 llvm_unreachable("llvm.objectsize.* should have been lowered already");
7469
7470 case Intrinsic::is_constant:
7471 llvm_unreachable("llvm.is.constant.* should have been lowered already");
7472
7473 case Intrinsic::annotation:
7474 case Intrinsic::ptr_annotation:
7475 case Intrinsic::launder_invariant_group:
7476 case Intrinsic::strip_invariant_group:
7477 // Drop the intrinsic, but forward the value
7478 setValue(&I, getValue(I.getOperand(0)));
7479 return;
7480
7481 case Intrinsic::assume:
7482 case Intrinsic::experimental_noalias_scope_decl:
7483 case Intrinsic::var_annotation:
7484 case Intrinsic::sideeffect:
7485 // Discard annotate attributes, noalias scope declarations, assumptions, and
7486 // artificial side-effects.
7487 return;
7488
7489 case Intrinsic::codeview_annotation: {
7490 // Emit a label associated with this metadata.
7491 MachineFunction &MF = DAG.getMachineFunction();
7492 MCSymbol *Label = MF.getContext().createTempSymbol("annotation", true);
7493 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7494 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
7495 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
7496 DAG.setRoot(Res);
7497 return;
7498 }
7499
7500 case Intrinsic::init_trampoline: {
7501 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
7502
7503 SDValue Ops[6];
7504 Ops[0] = getRoot();
7505 Ops[1] = getValue(I.getArgOperand(0));
7506 Ops[2] = getValue(I.getArgOperand(1));
7507 Ops[3] = getValue(I.getArgOperand(2));
7508 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
7509 Ops[5] = DAG.getSrcValue(F);
7510
7511 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
7512
7513 DAG.setRoot(Res);
7514 return;
7515 }
7516 case Intrinsic::adjust_trampoline:
7517 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
7518 TLI.getPointerTy(DAG.getDataLayout()),
7519 getValue(I.getArgOperand(0))));
7520 return;
7521 case Intrinsic::gcroot: {
7522 assert(DAG.getMachineFunction().getFunction().hasGC() &&
7523 "only valid in functions with gc specified, enforced by Verifier");
7524 assert(GFI && "implied by previous");
7525 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7526 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
7527
7528 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
7529 GFI->addStackRoot(FI->getIndex(), TypeMap);
7530 return;
7531 }
7532 case Intrinsic::gcread:
7533 case Intrinsic::gcwrite:
7534 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7535 case Intrinsic::get_rounding:
7536 Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7537 setValue(&I, Res);
7538 DAG.setRoot(Res.getValue(1));
7539 return;
7540
7541 case Intrinsic::expect:
7542 case Intrinsic::expect_with_probability:
7543 // Just replace __builtin_expect(exp, c) and
7544 // __builtin_expect_with_probability(exp, c, p) with EXP.
7545 setValue(&I, getValue(I.getArgOperand(0)));
7546 return;
7547
7548 case Intrinsic::ubsantrap:
7549 case Intrinsic::debugtrap:
7550 case Intrinsic::trap: {
7551 StringRef TrapFuncName =
7552 I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7553 if (TrapFuncName.empty()) {
7554 switch (Intrinsic) {
7555 case Intrinsic::trap:
7556 DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7557 break;
7558 case Intrinsic::debugtrap:
7559 DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7560 break;
7561 case Intrinsic::ubsantrap:
7562 DAG.setRoot(DAG.getNode(
7563 ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7564 DAG.getTargetConstant(
7565 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7566 MVT::i32)));
7567 break;
7568 default: llvm_unreachable("unknown trap intrinsic");
7569 }
7570 DAG.addNoMergeSiteInfo(DAG.getRoot().getNode(),
7571 I.hasFnAttr(Attribute::NoMerge));
7572 return;
7573 }
7574 TargetLowering::ArgListTy Args;
7575 if (Intrinsic == Intrinsic::ubsantrap) {
7576 Args.push_back(TargetLoweringBase::ArgListEntry());
7577 Args[0].Val = I.getArgOperand(0);
7578 Args[0].Node = getValue(Args[0].Val);
7579 Args[0].Ty = Args[0].Val->getType();
7580 }
7581
7582 TargetLowering::CallLoweringInfo CLI(DAG);
7583 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7584 CallingConv::C, I.getType(),
7585 DAG.getExternalSymbol(TrapFuncName.data(),
7586 TLI.getPointerTy(DAG.getDataLayout())),
7587 std::move(Args));
7588 CLI.NoMerge = I.hasFnAttr(Attribute::NoMerge);
7589 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7590 DAG.setRoot(Result.second);
7591 return;
7592 }
7593
7594 case Intrinsic::allow_runtime_check:
7595 case Intrinsic::allow_ubsan_check:
7596 setValue(&I, getValue(ConstantInt::getTrue(I.getType())));
7597 return;
7598
7599 case Intrinsic::uadd_with_overflow:
7600 case Intrinsic::sadd_with_overflow:
7601 case Intrinsic::usub_with_overflow:
7602 case Intrinsic::ssub_with_overflow:
7603 case Intrinsic::umul_with_overflow:
7604 case Intrinsic::smul_with_overflow: {
7605 ISD::NodeType Op;
7606 switch (Intrinsic) {
7607 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
7608 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7609 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7610 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7611 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7612 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7613 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7614 }
7615 SDValue Op1 = getValue(I.getArgOperand(0));
7616 SDValue Op2 = getValue(I.getArgOperand(1));
7617
7618 EVT ResultVT = Op1.getValueType();
7619 EVT OverflowVT = MVT::i1;
7620 if (ResultVT.isVector())
7621 OverflowVT = EVT::getVectorVT(
7622 *Context, OverflowVT, ResultVT.getVectorElementCount());
7623
7624 SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7625 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7626 return;
7627 }
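// Illustrative example: @llvm.uadd.with.overflow.i32 gets the VT list
// { i32, i1 } here, so uadd.with.overflow(0xFFFFFFFF, 1) yields {0, true};
// for a <4 x i32> operand the second result becomes <4 x i1> instead.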
7628 case Intrinsic::prefetch: {
7629 SDValue Ops[5];
7630 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7631 auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
7632 Ops[0] = DAG.getRoot();
7633 Ops[1] = getValue(I.getArgOperand(0));
7634 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
7635 MVT::i32);
7636 Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl,
7637 MVT::i32);
7638 Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl,
7639 MVT::i32);
7640 SDValue Result = DAG.getMemIntrinsicNode(
7641 ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7642 EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7643 /* align */ std::nullopt, Flags);
7644
7645 // Chain the prefetch in parallel with any pending loads, to stay out of
7646 // the way of later optimizations.
7647 PendingLoads.push_back(Result);
7648 Result = getRoot();
7649 DAG.setRoot(Result);
7650 return;
7651 }
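// Note (illustrative): pushing Result onto PendingLoads token-factors the
// PREFETCH with any loads already pending in this block instead of making it
// the sole root, so those loads and the prefetch stay unordered with respect
// to each other and DAG combines can schedule around the hint.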
7652 case Intrinsic::lifetime_start:
7653 case Intrinsic::lifetime_end: {
7654 bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7655 // Stack coloring is not enabled in O0, discard region information.
7656 if (TM.getOptLevel() == CodeGenOptLevel::None)
7657 return;
7658
7659 const int64_t ObjectSize =
7660 cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7661 Value *const ObjectPtr = I.getArgOperand(1);
7662 SmallVector<const Value *, 4> Allocas;
7663 getUnderlyingObjects(ObjectPtr, Allocas);
7664
7665 for (const Value *Alloca : Allocas) {
7666 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7667
7668 // Could not find an Alloca.
7669 if (!LifetimeObject)
7670 continue;
7671
7672 // First check that the Alloca is static, otherwise it won't have a
7673 // valid frame index.
7674 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7675 if (SI == FuncInfo.StaticAllocaMap.end())
7676 return;
7677
7678 const int FrameIndex = SI->second;
7679 int64_t Offset;
7680 if (GetPointerBaseWithConstantOffset(
7681 ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
7682 Offset = -1; // Cannot determine offset from alloca to lifetime object.
7683 Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
7684 Offset);
7685 DAG.setRoot(Res);
7686 }
7687 return;
7688 }
7689 case Intrinsic::pseudoprobe: {
7690 auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7691 auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7692 auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7693 Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7694 DAG.setRoot(Res);
7695 return;
7696 }
7697 case Intrinsic::invariant_start:
7698 // Discard region information.
7699 setValue(&I,
7700 DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7701 return;
7702 case Intrinsic::invariant_end:
7703 // Discard region information.
7704 return;
7705 case Intrinsic::clear_cache: {
7706 SDValue InputChain = DAG.getRoot();
7707 SDValue StartVal = getValue(I.getArgOperand(0));
7708 SDValue EndVal = getValue(I.getArgOperand(1));
7709 Res = DAG.getNode(ISD::CLEAR_CACHE, sdl, DAG.getVTList(MVT::Other),
7710 {InputChain, StartVal, EndVal});
7711 setValue(&I, Res);
7712 DAG.setRoot(Res);
7713 return;
7714 }
7715 case Intrinsic::donothing:
7716 case Intrinsic::seh_try_begin:
7717 case Intrinsic::seh_scope_begin:
7718 case Intrinsic::seh_try_end:
7719 case Intrinsic::seh_scope_end:
7720 // ignore
7721 return;
7722 case Intrinsic::experimental_stackmap:
7723 visitStackmap(I);
7724 return;
7725 case Intrinsic::experimental_patchpoint_void:
7726 case Intrinsic::experimental_patchpoint:
7727 visitPatchpoint(I);
7728 return;
7729 case Intrinsic::experimental_gc_statepoint:
7730 LowerStatepoint(cast<GCStatepointInst>(I));
7731 return;
7732 case Intrinsic::experimental_gc_result:
7733 visitGCResult(cast<GCResultInst>(I));
7734 return;
7735 case Intrinsic::experimental_gc_relocate:
7736 visitGCRelocate(cast<GCRelocateInst>(I));
7737 return;
7738 case Intrinsic::instrprof_cover:
7739 llvm_unreachable("instrprof failed to lower a cover");
7740 case Intrinsic::instrprof_increment:
7741 llvm_unreachable("instrprof failed to lower an increment");
7742 case Intrinsic::instrprof_timestamp:
7743 llvm_unreachable("instrprof failed to lower a timestamp");
7744 case Intrinsic::instrprof_value_profile:
7745 llvm_unreachable("instrprof failed to lower a value profiling call");
7746 case Intrinsic::instrprof_mcdc_parameters:
7747 llvm_unreachable("instrprof failed to lower mcdc parameters");
7748 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7749 llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update");
7750 case Intrinsic::localescape: {
7751 MachineFunction &MF = DAG.getMachineFunction();
7752 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7753
7754 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7755 // is the same on all targets.
7756 for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7757 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7758 if (isa<ConstantPointerNull>(Arg))
7759 continue; // Skip null pointers. They represent a hole in index space.
7760 AllocaInst *Slot = cast<AllocaInst>(Arg);
7761 assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7762 "can only escape static allocas");
7763 int FI = FuncInfo.StaticAllocaMap[Slot];
7764 MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol(
7765 GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
7766 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7767 TII->get(TargetOpcode::LOCAL_ESCAPE))
7768 .addSym(FrameAllocSym)
7769 .addFrameIndex(FI);
7770 }
7771
7772 return;
7773 }
7774
7775 case Intrinsic::localrecover: {
7776 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7777 MachineFunction &MF = DAG.getMachineFunction();
7778
7779 // Get the symbol that defines the frame offset.
7780 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7781 auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7782 unsigned IdxVal =
7783 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7784 MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol(
7785 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7786
7787 Value *FP = I.getArgOperand(1);
7788 SDValue FPVal = getValue(FP);
7789 EVT PtrVT = FPVal.getValueType();
7790
7791 // Create a MCSymbol for the label to avoid any target lowering
7792 // that would make this PC relative.
7793 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7794 SDValue OffsetVal =
7795 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7796
7797 // Add the offset to the FP.
7798 SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7799 setValue(&I, Add);
7800
7801 return;
7802 }
7803
7804 case Intrinsic::fake_use: {
7805 Value *V = I.getArgOperand(0);
7806 SDValue Ops[2];
7807 // For Values not declared or previously used in this basic block, the
7808 // NodeMap will not have an entry, and `getValue` will assert if V has no
7809 // valid register value.
7810 auto FakeUseValue = [&]() -> SDValue {
7811 SDValue &N = NodeMap[V];
7812 if (N.getNode())
7813 return N;
7814
7815 // If there's a virtual register allocated and initialized for this
7816 // value, use it.
7817 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
7818 return copyFromReg;
7819 // FIXME: Do we want to preserve constants? It seems pointless.
7820 if (isa<Constant>(V))
7821 return getValue(V);
7822 return SDValue();
7823 }();
7824 if (!FakeUseValue || FakeUseValue.isUndef())
7825 return;
7826 Ops[0] = getRoot();
7827 Ops[1] = FakeUseValue;
7828 // Also, do not translate a fake use with an undef operand, or any other
7829 // empty SDValues.
7830 if (!Ops[1] || Ops[1].isUndef())
7831 return;
7832 DAG.setRoot(DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other, Ops));
7833 return;
7834 }
7835
7836 case Intrinsic::eh_exceptionpointer:
7837 case Intrinsic::eh_exceptioncode: {
7838 // Get the exception pointer vreg, copy from it, and resize it to fit.
7839 const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7840 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7841 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7842 unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7843 SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7844 if (Intrinsic == Intrinsic::eh_exceptioncode)
7845 N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7846 setValue(&I, N);
7847 return;
7848 }
7849 case Intrinsic::xray_customevent: {
7850 // Here we want to make sure that the intrinsic behaves as if it has a
7851 // specific calling convention.
7852 const auto &Triple = DAG.getTarget().getTargetTriple();
7853 if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7854 return;
7855
7856 SmallVector<SDValue, 8> Ops;
7857
7858 // We want to say that we always want the arguments in registers.
7859 SDValue LogEntryVal = getValue(I.getArgOperand(0));
7860 SDValue StrSizeVal = getValue(I.getArgOperand(1));
7861 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7862 SDValue Chain = getRoot();
7863 Ops.push_back(LogEntryVal);
7864 Ops.push_back(StrSizeVal);
7865 Ops.push_back(Chain);
7866
7867 // We need to enforce the calling convention for the callsite, so that
7868 // argument ordering is enforced correctly, and that register allocation can
7869 // see that some registers may be assumed clobbered and have to preserve
7870 // them across calls to the intrinsic.
7871 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7872 sdl, NodeTys, Ops);
7873 SDValue patchableNode = SDValue(MN, 0);
7874 DAG.setRoot(patchableNode);
7875 setValue(&I, patchableNode);
7876 return;
7877 }
7878 case Intrinsic::xray_typedevent: {
7879 // Here we want to make sure that the intrinsic behaves as if it has a
7880 // specific calling convention.
7881 const auto &Triple = DAG.getTarget().getTargetTriple();
7882 if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7883 return;
7884
7885 SmallVector<SDValue, 8> Ops;
7886
7887 // We want to say that we always want the arguments in registers.
7888 // It's unclear to me how manipulating the selection DAG here forces callers
7889 // to provide arguments in registers instead of on the stack.
7890 SDValue LogTypeId = getValue(I.getArgOperand(0));
7891 SDValue LogEntryVal = getValue(I.getArgOperand(1));
7892 SDValue StrSizeVal = getValue(I.getArgOperand(2));
7893 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7894 SDValue Chain = getRoot();
7895 Ops.push_back(LogTypeId);
7896 Ops.push_back(LogEntryVal);
7897 Ops.push_back(StrSizeVal);
7898 Ops.push_back(Chain);
7899
7900 // We need to enforce the calling convention for the callsite, so that
7901 // argument ordering is enforced correctly, and that register allocation can
7902 // see that some registers may be assumed clobbered and have to preserve
7903 // them across calls to the intrinsic.
7904 MachineSDNode *MN = DAG.getMachineNode(
7905 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7906 SDValue patchableNode = SDValue(MN, 0);
7907 DAG.setRoot(patchableNode);
7908 setValue(&I, patchableNode);
7909 return;
7910 }
7911 case Intrinsic::experimental_deoptimize:
7912 LowerDeoptimizeCall(&I);
7913 return;
7914 case Intrinsic::stepvector:
7915 visitStepVector(I);
7916 return;
7917 case Intrinsic::vector_reduce_fadd:
7918 case Intrinsic::vector_reduce_fmul:
7919 case Intrinsic::vector_reduce_add:
7920 case Intrinsic::vector_reduce_mul:
7921 case Intrinsic::vector_reduce_and:
7922 case Intrinsic::vector_reduce_or:
7923 case Intrinsic::vector_reduce_xor:
7924 case Intrinsic::vector_reduce_smax:
7925 case Intrinsic::vector_reduce_smin:
7926 case Intrinsic::vector_reduce_umax:
7927 case Intrinsic::vector_reduce_umin:
7928 case Intrinsic::vector_reduce_fmax:
7929 case Intrinsic::vector_reduce_fmin:
7930 case Intrinsic::vector_reduce_fmaximum:
7931 case Intrinsic::vector_reduce_fminimum:
7932 visitVectorReduce(I, Intrinsic);
7933 return;
7934
7935 case Intrinsic::icall_branch_funnel: {
7936 SmallVector<SDValue, 16> Ops;
7937 Ops.push_back(getValue(I.getArgOperand(0)));
7938
7939 int64_t Offset;
7940 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7941 I.getArgOperand(1), Offset, DAG.getDataLayout()));
7942 if (!Base)
7943 report_fatal_error(
7944 "llvm.icall.branch.funnel operand must be a GlobalValue");
7945 Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
7946
7947 struct BranchFunnelTarget {
7948 int64_t Offset;
7949 SDValue Target;
7950 };
7951 SmallVector<BranchFunnelTarget, 8> Targets;
7952
7953 for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
7954 auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7955 I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7956 if (ElemBase != Base)
7957 report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7958 "to the same GlobalValue");
7959
7960 SDValue Val = getValue(I.getArgOperand(Op + 1));
7961 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7962 if (!GA)
7963 report_fatal_error(
7964 "llvm.icall.branch.funnel operand must be a GlobalValue");
7965 Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7966 GA->getGlobal(), sdl, Val.getValueType(),
7967 GA->getOffset())});
7968 }
7969 llvm::sort(Targets,
7970 [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7971 return T1.Offset < T2.Offset;
7972 });
7973
7974 for (auto &T : Targets) {
7975 Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
7976 Ops.push_back(T.Target);
7977 }
7978
7979 Ops.push_back(DAG.getRoot()); // Chain
7980 SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7981 MVT::Other, Ops),
7982 0);
7983 DAG.setRoot(N);
7984 setValue(&I, N);
7985 HasTailCall = true;
7986 return;
7987 }
7988
7989 case Intrinsic::wasm_landingpad_index:
7990 // Information this intrinsic contained has been transferred to
7991 // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
7992 // delete it now.
7993 return;
7994
7995 case Intrinsic::aarch64_settag:
7996 case Intrinsic::aarch64_settag_zero: {
7997 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7998 bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
7999 SDValue Val = TSI.EmitTargetCodeForSetTag(
8000 DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
8001 getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
8002 ZeroMemory);
8003 DAG.setRoot(Val);
8004 setValue(&I, Val);
8005 return;
8006 }
8007 case Intrinsic::amdgcn_cs_chain: {
8008 assert(I.arg_size() == 5 && "Additional args not supported yet");
8009 assert(cast<ConstantInt>(I.getOperand(4))->isZero() &&
8010 "Non-zero flags not supported yet");
8011
8012 // At this point we don't care if it's amdgpu_cs_chain or
8013 // amdgpu_cs_chain_preserve.
8014 CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain;
8015
8016 Type *RetTy = I.getType();
8017 assert(RetTy->isVoidTy() && "Should not return");
8018
8019 SDValue Callee = getValue(I.getOperand(0));
8020
8021 // We only have 2 actual args: one for the SGPRs and one for the VGPRs.
8022 // We'll also tack the value of the EXEC mask at the end.
8023 TargetLowering::ArgListTy Args;
8024 Args.reserve(3);
8025
8026 for (unsigned Idx : {2, 3, 1}) {
8027 TargetLowering::ArgListEntry Arg;
8028 Arg.Node = getValue(I.getOperand(Idx));
8029 Arg.Ty = I.getOperand(Idx)->getType();
8030 Arg.setAttributes(&I, Idx);
8031 Args.push_back(Arg);
8032 }
8033
8034 assert(Args[0].IsInReg && "SGPR args should be marked inreg");
8035 assert(!Args[1].IsInReg && "VGPR args should not be marked inreg");
8036 Args[2].IsInReg = true; // EXEC should be inreg
8037
8038 TargetLowering::CallLoweringInfo CLI(DAG);
8039 CLI.setDebugLoc(getCurSDLoc())
8040 .setChain(getRoot())
8041 .setCallee(CC, RetTy, Callee, std::move(Args))
8042 .setNoReturn(true)
8043 .setTailCall(true)
8044 .setConvergent(I.isConvergent());
8045 CLI.CB = &I;
8046 std::pair<SDValue, SDValue> Result =
8047 lowerInvokable(CLI, /*EHPadBB*/ nullptr);
8048 (void)Result;
8049 assert(!Result.first.getNode() && !Result.second.getNode() &&
8050 "Should've lowered as tail call");
8051
8052 HasTailCall = true;
8053 return;
8054 }
8055 case Intrinsic::ptrmask: {
8056 SDValue Ptr = getValue(I.getOperand(0));
8057 SDValue Mask = getValue(I.getOperand(1));
8058
8059 // On arm64_32, pointers are 32 bits when stored in memory, but
8060 // zero-extended to 64 bits when in registers. Thus the mask is 32 bits to
8061 // match the index type, but the pointer is 64 bits, so the the mask must be
8062 // zero-extended up to 64 bits to match the pointer.
8063 EVT PtrVT =
8064 TLI.getValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
8065 EVT MemVT =
8066 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
8067 assert(PtrVT == Ptr.getValueType());
8068 assert(MemVT == Mask.getValueType());
8069 if (MemVT != PtrVT)
8070 Mask = DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8071
8072 setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask));
8073 return;
8074 }
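// Illustrative example: on a target where PtrVT == MemVT this is a plain
// AND, e.g. ptrmask(%p, -16) clears the low four bits of %p. The extension
// path only triggers on targets such as arm64_32, where the i32 mask is
// widened to the 64-bit register width of the pointer before the AND.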
8075 case Intrinsic::threadlocal_address: {
8076 setValue(&I, getValue(I.getOperand(0)));
8077 return;
8078 }
8079 case Intrinsic::get_active_lane_mask: {
8080 EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8081 SDValue Index = getValue(I.getOperand(0));
8082 EVT ElementVT = Index.getValueType();
8083
8084 if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
8085 visitTargetIntrinsic(I, Intrinsic);
8086 return;
8087 }
8088
8089 SDValue TripCount = getValue(I.getOperand(1));
8090 EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
8091 CCVT.getVectorElementCount());
8092
8093 SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
8094 SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
8095 SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
8096 SDValue VectorInduction = DAG.getNode(
8097 ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
8098 SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
8099 VectorTripCount, ISD::CondCode::SETULT);
8100 setValue(&I, SetCC);
8101 return;
8102 }
8103 case Intrinsic::experimental_get_vector_length: {
8104 assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
8105 "Expected positive VF");
8106 unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
8107 bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
8108
8109 SDValue Count = getValue(I.getOperand(0));
8110 EVT CountVT = Count.getValueType();
8111
8112 if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
8113 visitTargetIntrinsic(I, Intrinsic);
8114 return;
8115 }
8116
8117 // Expand to a umin between the trip count and the maximum elements the type
8118 // can hold.
8119 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8120
8121 // Extend the trip count to at least the result VT.
8122 if (CountVT.bitsLT(VT)) {
8123 Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
8124 CountVT = VT;
8125 }
8126
8127 SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
8128 ElementCount::get(VF, IsScalable));
8129
8130 SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
8131 // Clip to the result type if needed.
8132 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
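// For example, with VF = 4, IsScalable = false and Count = 10, MaxEVL is 4
// and the result is umin(10, 4) = 4; with Count = 3 it is umin(3, 4) = 3.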
8133
8134 setValue(&I, Trunc);
8135 return;
8136 }
8137 case Intrinsic::experimental_vector_partial_reduce_add: {
8138
8139 if (!TLI.shouldExpandPartialReductionIntrinsic(cast<IntrinsicInst>(&I))) {
8140 visitTargetIntrinsic(I, Intrinsic);
8141 return;
8142 }
8143
8144 setValue(&I, DAG.getPartialReduceAdd(sdl, EVT::getEVT(I.getType()),
8145 getValue(I.getOperand(0)),
8146 getValue(I.getOperand(1))));
8147 return;
8148 }
8149 case Intrinsic::experimental_cttz_elts: {
8150 auto DL = getCurSDLoc();
8151 SDValue Op = getValue(I.getOperand(0));
8152 EVT OpVT = Op.getValueType();
8153
8154 if (!TLI.shouldExpandCttzElements(OpVT)) {
8155 visitTargetIntrinsic(I, Intrinsic);
8156 return;
8157 }
8158
8159 if (OpVT.getScalarType() != MVT::i1) {
8160 // Compare the input vector elements to zero & use the result to count trailing zeros
8161 SDValue AllZero = DAG.getConstant(0, DL, OpVT);
8162 OpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
8163 OpVT.getVectorElementCount());
8164 Op = DAG.getSetCC(DL, OpVT, Op, AllZero, ISD::SETNE);
8165 }
8166
8167 // If the zero-is-poison flag is set, we can assume the upper limit
8168 // of the result is VF-1.
8169 bool ZeroIsPoison =
8170 !cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero();
8171 ConstantRange VScaleRange(1, true); // Dummy value.
8172 if (isa<ScalableVectorType>(I.getOperand(0)->getType()))
8173 VScaleRange = getVScaleRange(I.getCaller(), 64);
8174 unsigned EltWidth = TLI.getBitWidthForCttzElements(
8175 I.getType(), OpVT.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
8176
8177 MVT NewEltTy = MVT::getIntegerVT(EltWidth);
8178
8179 // Create the new vector type & get the vector length
8180 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltTy,
8181 OpVT.getVectorElementCount());
8182
8183 SDValue VL =
8184 DAG.getElementCount(DL, NewEltTy, OpVT.getVectorElementCount());
8185
8186 SDValue StepVec = DAG.getStepVector(DL, NewVT);
8187 SDValue SplatVL = DAG.getSplat(NewVT, DL, VL);
8188 SDValue StepVL = DAG.getNode(ISD::SUB, DL, NewVT, SplatVL, StepVec);
8189 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, Op);
8190 SDValue And = DAG.getNode(ISD::AND, DL, NewVT, StepVL, Ext);
8191 SDValue Max = DAG.getNode(ISD::VECREDUCE_UMAX, DL, NewEltTy, And);
8192 SDValue Sub = DAG.getNode(ISD::SUB, DL, NewEltTy, VL, Max);
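// Worked example of the expansion above, for Op = <0, 0, 1, 0> and VL = 4:
//   StepVL = <4, 3, 2, 1>, Ext = <0, 0, -1, 0>, And = <0, 0, 2, 0>,
//   Max = 2, Sub = 4 - 2 = 2, i.e. the index of the first set element.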
8193
8194 EVT RetTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
8195 SDValue Ret = DAG.getZExtOrTrunc(Sub, DL, RetTy);
8196
8197 setValue(&I, Ret);
8198 return;
8199 }
8200 case Intrinsic::vector_insert: {
8201 SDValue Vec = getValue(I.getOperand(0));
8202 SDValue SubVec = getValue(I.getOperand(1));
8203 SDValue Index = getValue(I.getOperand(2));
8204
8205 // The intrinsic's index type is i64, but the SDNode requires an index type
8206 // suitable for the target. Convert the index as required.
8207 MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
8208 if (Index.getValueType() != VectorIdxTy)
8209 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8210
8211 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8212 setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
8213 Index));
8214 return;
8215 }
8216 case Intrinsic::vector_extract: {
8217 SDValue Vec = getValue(I.getOperand(0));
8218 SDValue Index = getValue(I.getOperand(1));
8219 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8220
8221 // The intrinsic's index type is i64, but the SDNode requires an index type
8222 // suitable for the target. Convert the index as required.
8223 MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
8224 if (Index.getValueType() != VectorIdxTy)
8225 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8226
8227 setValue(&I,
8228 DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
8229 return;
8230 }
8231 case Intrinsic::experimental_vector_match: {
8232 SDValue Op1 = getValue(I.getOperand(0));
8233 SDValue Op2 = getValue(I.getOperand(1));
8234 SDValue Mask = getValue(I.getOperand(2));
8235 EVT Op1VT = Op1.getValueType();
8236 EVT Op2VT = Op2.getValueType();
8237 EVT ResVT = Mask.getValueType();
8238 unsigned SearchSize = Op2VT.getVectorNumElements();
8239
8240 // If the target has native support for this vector match operation, lower
8241 // the intrinsic untouched; otherwise, expand it below.
8242 if (!TLI.shouldExpandVectorMatch(Op1VT, SearchSize)) {
8243 visitTargetIntrinsic(I, Intrinsic);
8244 return;
8245 }
8246
8247 SDValue Ret = DAG.getConstant(0, sdl, ResVT);
8248
8249 for (unsigned i = 0; i < SearchSize; ++i) {
8250 SDValue Op2Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, sdl,
8251 Op2VT.getVectorElementType(), Op2,
8252 DAG.getVectorIdxConstant(i, sdl));
8253 SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, sdl, Op1VT, Op2Elem);
8254 SDValue Cmp = DAG.getSetCC(sdl, ResVT, Op1, Splat, ISD::SETEQ);
8255 Ret = DAG.getNode(ISD::OR, sdl, ResVT, Ret, Cmp);
8256 }
8257
8258 setValue(&I, DAG.getNode(ISD::AND, sdl, ResVT, Ret, Mask));
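// In effect, for Op2 = <3, 7> the loop above computes
//   Ret = (Op1 == splat(3)) | (Op1 == splat(7)),
// and the final AND clears lanes that are inactive in Mask.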
8259 return;
8260 }
8261 case Intrinsic::vector_reverse:
8262 visitVectorReverse(I);
8263 return;
8264 case Intrinsic::vector_splice:
8265 visitVectorSplice(I);
8266 return;
8267 case Intrinsic::callbr_landingpad:
8268 visitCallBrLandingPad(I);
8269 return;
8270 case Intrinsic::vector_interleave2:
8271 visitVectorInterleave(I);
8272 return;
8273 case Intrinsic::vector_deinterleave2:
8274 visitVectorDeinterleave(I);
8275 return;
8276 case Intrinsic::experimental_vector_compress:
8277 setValue(&I, DAG.getNode(ISD::VECTOR_COMPRESS, sdl,
8278 getValue(I.getArgOperand(0)).getValueType(),
8279 getValue(I.getArgOperand(0)),
8280 getValue(I.getArgOperand(1)),
8281 getValue(I.getArgOperand(2)), Flags));
8282 return;
8283 case Intrinsic::experimental_convergence_anchor:
8284 case Intrinsic::experimental_convergence_entry:
8285 case Intrinsic::experimental_convergence_loop:
8286 visitConvergenceControl(I, Intrinsic);
8287 return;
8288 case Intrinsic::experimental_vector_histogram_add: {
8289 visitVectorHistogram(I, Intrinsic);
8290 return;
8291 }
8292 case Intrinsic::experimental_vector_extract_last_active: {
8293 visitVectorExtractLastActive(I, Intrinsic);
8294 return;
8295 }
8296 }
8297}
8298
8299void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8300 const ConstrainedFPIntrinsic &FPI) {
8301 SDLoc sdl = getCurSDLoc();
8302
8303 // We do not need to serialize constrained FP intrinsics against
8304 // each other or against (nonvolatile) loads, so they can be
8305 // chained like loads.
8306 SDValue Chain = DAG.getRoot();
8307 SmallVector<SDValue, 4> Opers;
8308 Opers.push_back(Chain);
8309 for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
8310 Opers.push_back(getValue(FPI.getArgOperand(I)));
8311
8312 auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
8313 assert(Result.getNode()->getNumValues() == 2);
8314
8315 // Push node to the appropriate list so that future instructions can be
8316 // chained up correctly.
8317 SDValue OutChain = Result.getValue(1);
8318 switch (EB) {
8319 case fp::ExceptionBehavior::ebIgnore:
8320 // The only reason why ebIgnore nodes still need to be chained is that
8321 // they might depend on the current rounding mode, and therefore must
8322 // not be moved across instruction that may change that mode.
8323 [[fallthrough]];
8324 case fp::ExceptionBehavior::ebMayTrap:
8325 // These must not be moved across calls or instructions that may change
8326 // floating-point exception masks.
8327 PendingConstrainedFP.push_back(OutChain);
8328 break;
8329 case fp::ExceptionBehavior::ebStrict:
8330 // These must not be moved across calls or instructions that may change
8331 // floating-point exception masks or read floating-point exception flags.
8332 // In addition, they cannot be optimized out even if unused.
8333 PendingConstrainedFPStrict.push_back(OutChain);
8334 break;
8335 }
8336 };
8337
8338 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8339 EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
8340 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
8341 fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
8342
8343 SDNodeFlags Flags;
8344 if (EB == fp::ExceptionBehavior::ebIgnore)
8345 Flags.setNoFPExcept(true);
8346
8347 if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
8348 Flags.copyFMF(*FPOp);
8349
8350 unsigned Opcode;
8351 switch (FPI.getIntrinsicID()) {
8352 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
8353#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8354 case Intrinsic::INTRINSIC: \
8355 Opcode = ISD::STRICT_##DAGN; \
8356 break;
8357#include "llvm/IR/ConstrainedOps.def"
8358 case Intrinsic::experimental_constrained_fmuladd: {
8359 Opcode = ISD::STRICT_FMA;
8360 // Break fmuladd into fmul and fadd.
8361 if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
8362 !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
8363 Opers.pop_back();
8364 SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
8365 pushOutChain(Mul, EB);
8366 Opcode = ISD::STRICT_FADD;
8367 Opers.clear();
8368 Opers.push_back(Mul.getValue(1));
8369 Opers.push_back(Mul.getValue(0));
8370 Opers.push_back(getValue(FPI.getArgOperand(2)));
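// In effect, fmuladd(a, b, c) is emitted as the unfused pair
//   t = STRICT_FMUL(a, b); r = STRICT_FADD(t, c)
// with the chain threaded from the FMUL into the FADD via Mul.getValue(1).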
8371 }
8372 break;
8373 }
8374 }
8375
8376 // A few strict DAG nodes carry additional operands that are not
8377 // set up by the default code above.
8378 switch (Opcode) {
8379 default: break;
8380 case ISD::STRICT_FP_ROUND:
8381 Opers.push_back(
8382 DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
8383 break;
8384 case ISD::STRICT_FSETCC:
8385 case ISD::STRICT_FSETCCS: {
8386 auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
8387 ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
8388 if (TM.Options.NoNaNsFPMath)
8389 Condition = getFCmpCodeWithoutNaN(Condition);
8390 Opers.push_back(DAG.getCondCode(Condition));
8391 break;
8392 }
8393 }
8394
8395 SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
8396 pushOutChain(Result, EB);
8397
8398 SDValue FPResult = Result.getValue(0);
8399 setValue(&FPI, FPResult);
8400}
8401
8402static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
8403 std::optional<unsigned> ResOPC;
8404 switch (VPIntrin.getIntrinsicID()) {
8405 case Intrinsic::vp_ctlz: {
8406 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8407 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8408 break;
8409 }
8410 case Intrinsic::vp_cttz: {
8411 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8412 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8413 break;
8414 }
8415 case Intrinsic::vp_cttz_elts: {
8416 bool IsZeroPoison = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8417 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8418 break;
8419 }
8420#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8421 case Intrinsic::VPID: \
8422 ResOPC = ISD::VPSD; \
8423 break;
8424#include "llvm/IR/VPIntrinsics.def"
8425 }
8426
8427 if (!ResOPC)
8429 "Inconsistency: no SDNode available for this VPIntrinsic!");
8430
8431 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8432 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8433 if (VPIntrin.getFastMathFlags().allowReassoc())
8434 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8435 : ISD::VP_REDUCE_FMUL;
8436 }
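// For example, a vp.reduce.fadd marked 'reassoc' is lowered to the
// unordered VP_REDUCE_FADD, since reassociation makes the sequential
// evaluation order unobservable.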
8437
8438 return *ResOPC;
8439}
8440
8441void SelectionDAGBuilder::visitVPLoad(
8442 const VPIntrinsic &VPIntrin, EVT VT,
8443 const SmallVectorImpl<SDValue> &OpValues) {
8444 SDLoc DL = getCurSDLoc();
8445 Value *PtrOperand = VPIntrin.getArgOperand(0);
8446 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8447 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8448 const MDNode *Ranges = getRangeMetadata(VPIntrin);
8449 SDValue LD;
8450 // Do not serialize variable-length loads of constant memory with
8451 // anything.
8452 if (!Alignment)
8453 Alignment = DAG.getEVTAlign(VT);
8454 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8455 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
8456 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8457 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8458 MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
8459 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8460 LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8461 MMO, false /*IsExpanding */);
8462 if (AddToChain)
8463 PendingLoads.push_back(LD.getValue(1));
8464 setValue(&VPIntrin, LD);
8465}
8466
8467void SelectionDAGBuilder::visitVPGather(
8468 const VPIntrinsic &VPIntrin, EVT VT,
8469 const SmallVectorImpl<SDValue> &OpValues) {
8470 SDLoc DL = getCurSDLoc();
8471 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8472 Value *PtrOperand = VPIntrin.getArgOperand(0);
8473 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8474 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8475 const MDNode *Ranges = getRangeMetadata(VPIntrin);
8476 SDValue LD;
8477 if (!Alignment)
8478 Alignment = DAG.getEVTAlign(VT.getScalarType());
8479 unsigned AS =
8480 PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8481 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8482 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
8483 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8484 SDValue Base, Index, Scale;
8485 ISD::MemIndexType IndexType;
8486 bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
8487 this, VPIntrin.getParent(),
8488 VT.getScalarStoreSize());
8489 if (!UniformBase) {
8490 Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8491 Index = getValue(PtrOperand);
8492 IndexType = ISD::SIGNED_SCALED;
8493 Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8494 }
8495 EVT IdxVT = Index.getValueType();
8496 EVT EltTy = IdxVT.getVectorElementType();
8497 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8498 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
8499 Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8500 }
8501 LD = DAG.getGatherVP(
8502 DAG.getVTList(VT, MVT::Other), VT, DL,
8503 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8504 IndexType);
8505 PendingLoads.push_back(LD.getValue(1));
8506 setValue(&VPIntrin, LD);
8507}
8508
8509void SelectionDAGBuilder::visitVPStore(
8510 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8511 SDLoc DL = getCurSDLoc();
8512 Value *PtrOperand = VPIntrin.getArgOperand(1);
8513 EVT VT = OpValues[0].getValueType();
8514 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8515 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8516 SDValue ST;
8517 if (!Alignment)
8518 Alignment = DAG.getEVTAlign(VT);
8519 SDValue Ptr = OpValues[1];
8520 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
8521 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8522 MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
8523 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8524 ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
8525 OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
8526 /* IsTruncating */ false, /*IsCompressing*/ false);
8527 DAG.setRoot(ST);
8528 setValue(&VPIntrin, ST);
8529}
8530
8531void SelectionDAGBuilder::visitVPScatter(
8532 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8533 SDLoc DL = getCurSDLoc();
8534 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8535 Value *PtrOperand = VPIntrin.getArgOperand(1);
8536 EVT VT = OpValues[0].getValueType();
8537 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8538 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8539 SDValue ST;
8540 if (!Alignment)
8541 Alignment = DAG.getEVTAlign(VT.getScalarType());
8542 unsigned AS =
8543 PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8544 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8545 MachinePointerInfo(AS), MachineMemOperand::MOStore,
8546 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8547 SDValue Base, Index, Scale;
8548 ISD::MemIndexType IndexType;
8549 bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
8550 this, VPIntrin.getParent(),
8551 VT.getScalarStoreSize());
8552 if (!UniformBase) {
8553 Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8554 Index = getValue(PtrOperand);
8555 IndexType = ISD::SIGNED_SCALED;
8556 Scale =
8557 DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8558 }
8559 EVT IdxVT = Index.getValueType();
8560 EVT EltTy = IdxVT.getVectorElementType();
8561 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8562 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
8563 Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8564 }
8565 ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
8566 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8567 OpValues[2], OpValues[3]},
8568 MMO, IndexType);
8569 DAG.setRoot(ST);
8570 setValue(&VPIntrin, ST);
8571}
8572
8573void SelectionDAGBuilder::visitVPStridedLoad(
8574 const VPIntrinsic &VPIntrin, EVT VT,
8575 const SmallVectorImpl<SDValue> &OpValues) {
8576 SDLoc DL = getCurSDLoc();
8577 Value *PtrOperand = VPIntrin.getArgOperand(0);
8578 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8579 if (!Alignment)
8580 Alignment = DAG.getEVTAlign(VT.getScalarType());
8581 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8582 const MDNode *Ranges = getRangeMetadata(VPIntrin);
8583 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8584 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
8585 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8586 unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8587 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8588 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
8589 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8590
8591 SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
8592 OpValues[2], OpValues[3], MMO,
8593 false /*IsExpanding*/);
8594
8595 if (AddToChain)
8596 PendingLoads.push_back(LD.getValue(1));
8597 setValue(&VPIntrin, LD);
8598}
8599
8600void SelectionDAGBuilder::visitVPStridedStore(
8601 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8602 SDLoc DL = getCurSDLoc();
8603 Value *PtrOperand = VPIntrin.getArgOperand(1);
8604 EVT VT = OpValues[0].getValueType();
8605 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8606 if (!Alignment)
8607 Alignment = DAG.getEVTAlign(VT.getScalarType());
8608 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8609 unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8610 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8611 MachinePointerInfo(AS), MachineMemOperand::MOStore,
8612 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8613
8614 SDValue ST = DAG.getStridedStoreVP(
8615 getMemoryRoot(), DL, OpValues[0], OpValues[1],
8616 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8617 OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
8618 /*IsCompressing*/ false);
8619
8620 DAG.setRoot(ST);
8621 setValue(&VPIntrin, ST);
8622}
8623
8624void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
8625 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8626 SDLoc DL = getCurSDLoc();
8627
8628 ISD::CondCode Condition;
8629 CmpInst::Predicate CondCode = VPIntrin.getPredicate();
8630 bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
8631 if (IsFP) {
8632 // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan)
8633 // flags, but calls that don't return floating-point types can't be
8634 // FPMathOperators, like vp.fcmp. This affects constrained fcmp too.
8635 Condition = getFCmpCondCode(CondCode);
8636 if (TM.Options.NoNaNsFPMath)
8637 Condition = getFCmpCodeWithoutNaN(Condition);
8638 } else {
8639 Condition = getICmpCondCode(CondCode);
8640 }
8641
8642 SDValue Op1 = getValue(VPIntrin.getOperand(0));
8643 SDValue Op2 = getValue(VPIntrin.getOperand(1));
8644 // #2 is the condition code
8645 SDValue MaskOp = getValue(VPIntrin.getOperand(3));
8646 SDValue EVL = getValue(VPIntrin.getOperand(4));
8647 MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8648 assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8649 "Unexpected target EVL type");
8650 EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
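// The IR-level EVL operand is i32; it is zero-extended here when the target
// prefers a wider explicit vector length type (the assert above only
// requires a scalar integer of at least 32 bits).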
8651
8652 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8653 VPIntrin.getType());
8654 setValue(&VPIntrin,
8655 DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8656}
8657
8658void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8659 const VPIntrinsic &VPIntrin) {
8660 SDLoc DL = getCurSDLoc();
8661 unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
8662
8663 auto IID = VPIntrin.getIntrinsicID();
8664
8665 if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8666 return visitVPCmp(*CmpI);
8667
8668 SmallVector<EVT, 4> ValueVTs;
8669 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8670 ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
8671 SDVTList VTs = DAG.getVTList(ValueVTs);
8672
8673 auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
8674
8675 MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8676 assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8677 "Unexpected target EVL type");
8678
8679 // Request operands.
8680 SmallVector<SDValue, 7> OpValues;
8681 for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
8682 auto Op = getValue(VPIntrin.getArgOperand(I));
8683 if (I == EVLParamPos)
8684 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
8685 OpValues.push_back(Op);
8686 }
8687
8688 switch (Opcode) {
8689 default: {
8690 SDNodeFlags SDFlags;
8691 if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8692 SDFlags.copyFMF(*FPMO);
8693 SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
8694 setValue(&VPIntrin, Result);
8695 break;
8696 }
8697 case ISD::VP_LOAD:
8698 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8699 break;
8700 case ISD::VP_GATHER:
8701 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8702 break;
8703 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8704 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8705 break;
8706 case ISD::VP_STORE:
8707 visitVPStore(VPIntrin, OpValues);
8708 break;
8709 case ISD::VP_SCATTER:
8710 visitVPScatter(VPIntrin, OpValues);
8711 break;
8712 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8713 visitVPStridedStore(VPIntrin, OpValues);
8714 break;
8715 case ISD::VP_FMULADD: {
8716 assert(OpValues.size() == 5 && "Unexpected number of operands");
8717 SDNodeFlags SDFlags;
8718 if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8719 SDFlags.copyFMF(*FPMO);
8720 if (TM.Options.AllowFPOpFusion == FPOpFusion::Fast ||
8721 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
8722 setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
8723 } else {
8724 SDValue Mul = DAG.getNode(
8725 ISD::VP_FMUL, DL, VTs,
8726 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8727 SDValue Add =
8728 DAG.getNode(ISD::VP_FADD, DL, VTs,
8729 {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8730 setValue(&VPIntrin, Add);
8731 }
8732 break;
8733 }
8734 case ISD::VP_IS_FPCLASS: {
8735 const DataLayout DLayout = DAG.getDataLayout();
8736 EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
8737 auto Constant = OpValues[1]->getAsZExtVal();
8739 SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
8740 {OpValues[0], Check, OpValues[2], OpValues[3]});
8741 setValue(&VPIntrin, V);
8742 return;
8743 }
8744 case ISD::VP_INTTOPTR: {
8745 SDValue N = OpValues[0];
8746 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
8747 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
8748 N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8749 OpValues[2]);
8750 N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8751 OpValues[2]);
8752 setValue(&VPIntrin, N);
8753 break;
8754 }
8755 case ISD::VP_PTRTOINT: {
8756 SDValue N = OpValues[0];
8757 EVT DestVT = TLI.getValueType(DAG.getDataLayout(),
8758 VPIntrin.getType());
8759 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
8760 VPIntrin.getOperand(0)->getType());
8761 N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8762 OpValues[2]);
8763 N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8764 OpValues[2]);
8765 setValue(&VPIntrin, N);
8766 break;
8767 }
8768 case ISD::VP_ABS:
8769 case ISD::VP_CTLZ:
8770 case ISD::VP_CTLZ_ZERO_UNDEF:
8771 case ISD::VP_CTTZ:
8772 case ISD::VP_CTTZ_ZERO_UNDEF:
8773 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8774 case ISD::VP_CTTZ_ELTS: {
8775 SDValue Result =
8776 DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8777 setValue(&VPIntrin, Result);
8778 break;
8779 }
8780 }
8781}
8782
8783SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
8784 const BasicBlock *EHPadBB,
8785 MCSymbol *&BeginLabel) {
8786 MachineFunction &MF = DAG.getMachineFunction();
8787
8788 // Insert a label before the invoke call to mark the try range. This can be
8789 // used to detect deletion of the invoke via the MachineModuleInfo.
8790 BeginLabel = MF.getContext().createTempSymbol();
8791
8792 // For SjLj, keep track of which landing pads go with which invokes
8793 // so as to maintain the ordering of pads in the LSDA.
8794 unsigned CallSiteIndex = FuncInfo.getCurrentCallSite();
8795 if (CallSiteIndex) {
8796 MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
8797 LPadToCallSiteMap[FuncInfo.getMBB(EHPadBB)].push_back(CallSiteIndex);
8798
8799 // Now that the call site is handled, stop tracking it.
8800 FuncInfo.setCurrentCallSite(0);
8801 }
8802
8803 return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
8804}
8805
8806SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
8807 const BasicBlock *EHPadBB,
8808 MCSymbol *BeginLabel) {
8809 assert(BeginLabel && "BeginLabel should've been set");
8810
8811 MachineFunction &MF = DAG.getMachineFunction();
8812
8813 // Insert a label at the end of the invoke call to mark the try range. This
8814 // can be used to detect deletion of the invoke via the MachineModuleInfo.
8815 MCSymbol *EndLabel = MF.getContext().createTempSymbol();
8816 Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
8817
8818 // Inform MachineModuleInfo of range.
8819 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8820 // There is a platform (e.g. wasm) that uses funclet style IR but does not
8821 // actually use outlined funclets and their LSDA info style.
8822 if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
8823 assert(II && "II should've been set");
8824 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
8825 EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8826 } else if (!isScopedEHPersonality(Pers)) {
8827 assert(EHPadBB);
8828 MF.addInvoke(FuncInfo.getMBB(EHPadBB), BeginLabel, EndLabel);
8829 }
8830
8831 return Chain;
8832}
8833
8834std::pair<SDValue, SDValue>
8835SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
8836 const BasicBlock *EHPadBB) {
8837 MCSymbol *BeginLabel = nullptr;
8838
8839 if (EHPadBB) {
8840 // Both PendingLoads and PendingExports must be flushed here;
8841 // this call might not return.
8842 (void)getRoot();
8843 DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
8844 CLI.setChain(getRoot());
8845 }
8846
8847 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8848 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
8849
8850 assert((CLI.IsTailCall || Result.second.getNode()) &&
8851 "Non-null chain expected with non-tail call!");
8852 assert((Result.second.getNode() || !Result.first.getNode()) &&
8853 "Null value expected with tail call!");
8854
8855 if (!Result.second.getNode()) {
8856 // As a special case, a null chain means that a tail call has been emitted
8857 // and the DAG root is already updated.
8858 HasTailCall = true;
8859
8860 // Since there's no actual continuation from this block, nothing can
8861 // rely on us setting vregs for them.
8862 PendingExports.clear();
8863 } else {
8864 DAG.setRoot(Result.second);
8865 }
8866
8867 if (EHPadBB) {
8868 DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
8869 BeginLabel));
8870 Result.second = getRoot();
8871 }
8872
8873 return Result;
8874}
8875
8876void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
8877 bool isTailCall, bool isMustTailCall,
8878 const BasicBlock *EHPadBB,
8879 const TargetLowering::PtrAuthInfo *PAI) {
8880 auto &DL = DAG.getDataLayout();
8881 FunctionType *FTy = CB.getFunctionType();
8882 Type *RetTy = CB.getType();
8883
8884 TargetLowering::ArgListTy Args;
8885 Args.reserve(CB.arg_size());
8886
8887 const Value *SwiftErrorVal = nullptr;
8888 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8889
8890 if (isTailCall) {
8891 // Avoid emitting tail calls in functions with the disable-tail-calls
8892 // attribute.
8893 auto *Caller = CB.getParent()->getParent();
8894 if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8895 "true" && !isMustTailCall)
8896 isTailCall = false;
8897
8898 // We can't tail call inside a function with a swifterror argument. Lowering
8899 // does not support this yet. It would have to move into the swifterror
8900 // register before the call.
8901 if (TLI.supportSwiftError() &&
8902 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8903 isTailCall = false;
8904 }
8905
8906 for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
8907 TargetLowering::ArgListEntry Entry;
8908 const Value *V = *I;
8909
8910 // Skip empty types
8911 if (V->getType()->isEmptyTy())
8912 continue;
8913
8914 SDValue ArgNode = getValue(V);
8915 Entry.Node = ArgNode; Entry.Ty = V->getType();
8916
8917 Entry.setAttributes(&CB, I - CB.arg_begin());
8918
8919 // Use swifterror virtual register as input to the call.
8920 if (Entry.IsSwiftError && TLI.supportSwiftError()) {
8921 SwiftErrorVal = V;
8922 // We find the virtual register for the actual swifterror argument.
8923 // Instead of using the Value, we use the virtual register instead.
8924 Entry.Node =
8925 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
8926 EVT(TLI.getPointerTy(DL)));
8927 }
8928
8929 Args.push_back(Entry);
8930
8931 // If we have an explicit sret argument that is an Instruction, (i.e., it
8932 // might point to function-local memory), we can't meaningfully tail-call.
8933 if (Entry.IsSRet && isa<Instruction>(V))
8934 isTailCall = false;
8935 }
8936
8937 // If call site has a cfguardtarget operand bundle, create and add an
8938 // additional ArgListEntry.
8939 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
8940 TargetLowering::ArgListEntry Entry;
8941 Value *V = Bundle->Inputs[0];
8942 SDValue ArgNode = getValue(V);
8943 Entry.Node = ArgNode;
8944 Entry.Ty = V->getType();
8945 Entry.IsCFGuardTarget = true;
8946 Args.push_back(Entry);
8947 }
8948
8949 // Check if target-independent constraints permit a tail call here.
8950 // Target-dependent constraints are checked within TLI->LowerCallTo.
8951 if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
8952 isTailCall = false;
8953
8954 // Disable tail calls if there is an swifterror argument. Targets have not
8955 // been updated to support tail calls.
8956 if (TLI.supportSwiftError() && SwiftErrorVal)
8957 isTailCall = false;
8958
8959 ConstantInt *CFIType = nullptr;
8960 if (CB.isIndirectCall()) {
8961 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
8962 if (!TLI.supportKCFIBundles())
8964 "Target doesn't support calls with kcfi operand bundles.");
8965 CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8966 assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8967 }
8968 }
8969
8970 SDValue ConvControlToken;
8971 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
8972 auto *Token = Bundle->Inputs[0].get();
8973 ConvControlToken = getValue(Token);
8974 }
8975
8976 TargetLowering::CallLoweringInfo CLI(DAG);
8977 CLI.setDebugLoc(getCurSDLoc())
8978 .setChain(getRoot())
8979 .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
8980 .setTailCall(isTailCall)
8981 .setConvergent(CB.isConvergent())
8982 .setIsPreallocated(
8983 CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8984 .setCFIType(CFIType)
8985 .setConvergenceControlToken(ConvControlToken);
8986
8987 // Set the pointer authentication info if we have it.
8988 if (PAI) {
8989 if (!TLI.supportPtrAuthBundles())
8991 "This target doesn't support calls with ptrauth operand bundles.");
8992 CLI.setPtrAuth(*PAI);
8993 }
8994
8995 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8996
8997 if (Result.first.getNode()) {
8998 Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
8999 setValue(&CB, Result.first);
9000 }
9001
9002 // The last element of CLI.InVals has the SDValue for swifterror return.
9003 // Here we copy it to a virtual register and update SwiftErrorMap for
9004 // book-keeping.
9005 if (SwiftErrorVal && TLI.supportSwiftError()) {
9006 // Get the last element of InVals.
9007 SDValue Src = CLI.InVals.back();
9008 Register VReg =
9009 SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
9010 SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
9011 DAG.setRoot(CopyNode);
9012 }
9013}
9014
9015static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
9016 SelectionDAGBuilder &Builder) {
9017 // Check to see if this load can be trivially constant folded, e.g. if the
9018 // input is from a string literal.
9019 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
9020 // Cast pointer to the type we really want to load.
9021 Type *LoadTy =
9022 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
9023 if (LoadVT.isVector())
9024 LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
9025
9026 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
9027 PointerType::getUnqual(LoadTy));
9028
9029 if (const Constant *LoadCst =
9030 ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
9031 LoadTy, Builder.DAG.getDataLayout()))
9032 return Builder.getValue(LoadCst);
9033 }
9034
9035 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
9036 // still constant memory, the input chain can be the entry node.
9037 SDValue Root;
9038 bool ConstantMemory = false;
9039
9040 // Do not serialize (non-volatile) loads of constant memory with anything.
9041 if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
9042 Root = Builder.DAG.getEntryNode();
9043 ConstantMemory = true;
9044 } else {
9045 // Do not serialize non-volatile loads against each other.
9046 Root = Builder.DAG.getRoot();
9047 }
9048
9049 SDValue Ptr = Builder.getValue(PtrVal);
9050 SDValue LoadVal =
9051 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
9052 MachinePointerInfo(PtrVal), Align(1));
9053
9054 if (!ConstantMemory)
9055 Builder.PendingLoads.push_back(LoadVal.getValue(1));
9056 return LoadVal;
9057}
9058
9059/// Record the value for an instruction that produces an integer result,
9060/// converting the type where necessary.
9061void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
9062 SDValue Value,
9063 bool IsSigned) {
9064 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
9065 I.getType(), true);
9066 Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
9067 setValue(&I, Value);
9068}
9069
9070/// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
9071/// true and lower it. Otherwise return false, and it will be lowered like a
9072/// normal call.
9073/// The caller already checked that \p I calls the appropriate LibFunc with a
9074/// correct prototype.
9075bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
9076 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
9077 const Value *Size = I.getArgOperand(2);
9078 const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
9079 if (CSize && CSize->getZExtValue() == 0) {
9080 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
9081 I.getType(), true);
9082 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
9083 return true;
9084 }
9085
9086 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9087 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
9088 DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
9089 getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
9090 if (Res.first.getNode()) {
9091 processIntegerCallValue(I, Res.first, true);
9092 PendingLoads.push_back(Res.second);
9093 return true;
9094 }
9095
9096 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
9097 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
9098 if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
9099 return false;
9100
9101 // If the target has a fast compare for the given size, it will return a
9102 // preferred load type for that size. Require that the load VT is legal and
9103 // that the target supports unaligned loads of that type. Otherwise, return
9104 // INVALID.
9105 auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
9106 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9107 MVT LVT = TLI.hasFastEqualityCompare(NumBits);
9108 if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
9109 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
9110 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
9111 // TODO: Check alignment of src and dest ptrs.
9112 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
9113 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
9114 if (!TLI.isTypeLegal(LVT) ||
9115 !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
9116 !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
9117 LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
9118 }
9119
9120 return LVT;
9121 };
9122
9123 // This turns into unaligned loads. We only do this if the target natively
9124 // supports the MVT we'll be loading or if it is small enough (<= 4) that
9125 // we'll only produce a small number of byte loads.
9126 MVT LoadVT;
9127 unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
9128 switch (NumBitsToCompare) {
9129 default:
9130 return false;
9131 case 16:
9132 LoadVT = MVT::i16;
9133 break;
9134 case 32:
9135 LoadVT = MVT::i32;
9136 break;
9137 case 64:
9138 case 128:
9139 case 256:
9140 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9141 break;
9142 }
9143
9144 if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
9145 return false;
9146
9147 SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
9148 SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
9149
9150 // Bitcast to a wide integer type if the loads are vectors.
9151 if (LoadVT.isVector()) {
9152 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
9153 LoadL = DAG.getBitcast(CmpVT, LoadL);
9154 LoadR = DAG.getBitcast(CmpVT, LoadR);
9155 }
9156
9157 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
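// For example, memcmp(a, b, 4) == 0 becomes
//   setcc ne (load i32, a), (load i32, b)
// extended to the call's return type, with no libcall emitted.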
9158 processIntegerCallValue(I, Cmp, false);
9159 return true;
9160}
9161
9162/// See if we can lower a memchr call into an optimized form. If so, return
9163/// true and lower it. Otherwise return false, and it will be lowered like a
9164/// normal call.
9165/// The caller already checked that \p I calls the appropriate LibFunc with a
9166/// correct prototype.
9167bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
9168 const Value *Src = I.getArgOperand(0);
9169 const Value *Char = I.getArgOperand(1);
9170 const Value *Length = I.getArgOperand(2);
9171
9172 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9173 std::pair<SDValue, SDValue> Res =
9174 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
9175 getValue(Src), getValue(Char), getValue(Length),
9176 MachinePointerInfo(Src));
9177 if (Res.first.getNode()) {
9178 setValue(&I, Res.first);
9179 PendingLoads.push_back(Res.second);
9180 return true;
9181 }
9182
9183 return false;
9184}
9185
9186/// See if we can lower a mempcpy call into an optimized form. If so, return
9187/// true and lower it. Otherwise return false, and it will be lowered like a
9188/// normal call.
9189/// The caller already checked that \p I calls the appropriate LibFunc with a
9190/// correct prototype.
9191bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
9192 SDValue Dst = getValue(I.getArgOperand(0));
9193 SDValue Src = getValue(I.getArgOperand(1));
9194 SDValue Size = getValue(I.getArgOperand(2));
9195
9196 Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
9197 Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
9198 // DAG::getMemcpy needs Alignment to be defined.
9199 Align Alignment = std::min(DstAlign, SrcAlign);
9200
9201 SDLoc sdl = getCurSDLoc();
9202
9203 // In the mempcpy context we need to pass in a false value for isTailCall
9204 // because the return pointer needs to be adjusted by the size of
9205 // the copied memory.
9206 SDValue Root = getMemoryRoot();
9207 SDValue MC = DAG.getMemcpy(
9208 Root, sdl, Dst, Src, Size, Alignment, false, false, /*CI=*/nullptr,
9209 std::nullopt, MachinePointerInfo(I.getArgOperand(0)),
9210 MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata());
9211 assert(MC.getNode() != nullptr &&
9212 "** memcpy should not be lowered as TailCall in mempcpy context **");
9213 DAG.setRoot(MC);
9214
9215 // Check if Size needs to be truncated or extended.
9216 Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
9217
9218 // Adjust return pointer to point just past the last dst byte.
9219 SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
9220 Dst, Size);
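// In effect, mempcpy(d, s, n) is lowered as a memcpy node plus this ADD
// computing d + n, the value mempcpy is defined to return.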
9221 setValue(&I, DstPlusSize);
9222 return true;
9223}
9224
9225/// See if we can lower a strcpy call into an optimized form. If so, return
9226/// true and lower it, otherwise return false and it will be lowered like a
9227/// normal call.
9228/// The caller already checked that \p I calls the appropriate LibFunc with a
9229/// correct prototype.
9230bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
9231 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9232
9233 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9234 std::pair<SDValue, SDValue> Res =
9235 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
9236 getValue(Arg0), getValue(Arg1),
9237 MachinePointerInfo(Arg0),
9238 MachinePointerInfo(Arg1), isStpcpy);
9239 if (Res.first.getNode()) {
9240 setValue(&I, Res.first);
9241 DAG.setRoot(Res.second);
9242 return true;
9243 }
9244
9245 return false;
9246}
9247
9248/// See if we can lower a strcmp call into an optimized form. If so, return
9249/// true and lower it, otherwise return false and it will be lowered like a
9250/// normal call.
9251/// The caller already checked that \p I calls the appropriate LibFunc with a
9252/// correct prototype.
9253bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
9254 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9255
9256 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9257 std::pair<SDValue, SDValue> Res =
9258 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
9259 getValue(Arg0), getValue(Arg1),
9260 MachinePointerInfo(Arg0),
9261 MachinePointerInfo(Arg1));
9262 if (Res.first.getNode()) {
9263 processIntegerCallValue(I, Res.first, true);
9264 PendingLoads.push_back(Res.second);
9265 return true;
9266 }
9267
9268 return false;
9269}
9270
9271/// See if we can lower a strlen call into an optimized form. If so, return
9272/// true and lower it, otherwise return false and it will be lowered like a
9273/// normal call.
9274/// The caller already checked that \p I calls the appropriate LibFunc with a
9275/// correct prototype.
9276bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
9277 const Value *Arg0 = I.getArgOperand(0);
9278
9279 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9280 std::pair<SDValue, SDValue> Res =
9281 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
9282 getValue(Arg0), MachinePointerInfo(Arg0));
9283 if (Res.first.getNode()) {
9284 processIntegerCallValue(I, Res.first, false);
9285 PendingLoads.push_back(Res.second);
9286 return true;
9287 }
9288
9289 return false;
9290}
9291
9292/// See if we can lower a strnlen call into an optimized form. If so, return
9293/// true and lower it, otherwise return false and it will be lowered like a
9294/// normal call.
9295/// The caller already checked that \p I calls the appropriate LibFunc with a
9296/// correct prototype.
9297bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
9298 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9299
9300 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9301 std::pair<SDValue, SDValue> Res =
9302 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
9303 getValue(Arg0), getValue(Arg1),
9304 MachinePointerInfo(Arg0));
9305 if (Res.first.getNode()) {
9306 processIntegerCallValue(I, Res.first, false);
9307 PendingLoads.push_back(Res.second);
9308 return true;
9309 }
9310
9311 return false;
9312}
9313
9314/// See if we can lower a unary floating-point operation into an SDNode with
9315/// the specified Opcode. If so, return true and lower it, otherwise return
9316/// false and it will be lowered like a normal call.
9317/// The caller already checked that \p I calls the appropriate LibFunc with a
9318/// correct prototype.
9319bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
9320 unsigned Opcode) {
9321 // We already checked this call's prototype; verify it doesn't modify errno.
9322 if (!I.onlyReadsMemory())
9323 return false;
9324
9325 SDNodeFlags Flags;
9326 Flags.copyFMF(cast<FPMathOperator>(I));
9327
9328 SDValue Tmp = getValue(I.getArgOperand(0));
9329 setValue(&I,
9330 DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
9331 return true;
9332}
9333
9334/// See if we can lower a binary floating-point operation into an SDNode with
9335/// the specified Opcode. If so, return true and lower it. Otherwise return
9336/// false, and it will be lowered like a normal call.
9337/// The caller already checked that \p I calls the appropriate LibFunc with a
9338/// correct prototype.
9339bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
9340 unsigned Opcode) {
9341 // We already checked this call's prototype; verify it doesn't modify errno.
9342 if (!I.onlyReadsMemory())
9343 return false;
9344
9345 SDNodeFlags Flags;
9346 Flags.copyFMF(cast<FPMathOperator>(I));
9347
9348 SDValue Tmp0 = getValue(I.getArgOperand(0));
9349 SDValue Tmp1 = getValue(I.getArgOperand(1));
9350 EVT VT = Tmp0.getValueType();
9351 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
9352 return true;
9353}
9354
9355void SelectionDAGBuilder::visitCall(const CallInst &I) {
9356 // Handle inline assembly differently.
9357 if (I.isInlineAsm()) {
9358 visitInlineAsm(I);
9359 return;
9360 }
9361
9362 diagnoseDontCall(I);
9363
9364 if (Function *F = I.getCalledFunction()) {
9365 if (F->isDeclaration()) {
9366 // Is this an LLVM intrinsic or a target-specific intrinsic?
9367 unsigned IID = F->getIntrinsicID();
9368 if (!IID)
9369 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
9370 IID = II->getIntrinsicID(F);
9371
9372 if (IID) {
9373 visitIntrinsicCall(I, IID);
9374 return;
9375 }
9376 }
9377
9378 // Check for well-known libc/libm calls. If the function is internal, it
9379 // can't be a library call. Don't do the check if marked as nobuiltin for
9380 // some reason or the call site requires strict floating point semantics.
9381 LibFunc Func;
9382 if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
9383 F->hasName() && LibInfo->getLibFunc(*F, Func) &&
9384 LibInfo->hasOptimizedCodeGen(Func)) {
9385 switch (Func) {
9386 default: break;
9387 case LibFunc_bcmp:
9388 if (visitMemCmpBCmpCall(I))
9389 return;
9390 break;
9391 case LibFunc_copysign:
9392 case LibFunc_copysignf:
9393 case LibFunc_copysignl:
9394 // We already checked this call's prototype; verify it doesn't modify
9395 // errno.
9396 if (I.onlyReadsMemory()) {
9397 SDValue LHS = getValue(I.getArgOperand(0));
9398 SDValue RHS = getValue(I.getArgOperand(1));
9399 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
9400 LHS.getValueType(), LHS, RHS));
9401 return;
9402 }
9403 break;
9404 case LibFunc_fabs:
9405 case LibFunc_fabsf:
9406 case LibFunc_fabsl:
9407 if (visitUnaryFloatCall(I, ISD::FABS))
9408 return;
9409 break;
9410 case LibFunc_fmin:
9411 case LibFunc_fminf:
9412 case LibFunc_fminl:
9413 if (visitBinaryFloatCall(I, ISD::FMINNUM))
9414 return;
9415 break;
9416 case LibFunc_fmax:
9417 case LibFunc_fmaxf:
9418 case LibFunc_fmaxl:
9419 if (visitBinaryFloatCall(I, ISD::FMAXNUM))
9420 return;
9421 break;
9422 case LibFunc_fminimum_num:
9423 case LibFunc_fminimum_numf:
9424 case LibFunc_fminimum_numl:
9425 if (visitBinaryFloatCall(I, ISD::FMINIMUMNUM))
9426 return;
9427 break;
9428 case LibFunc_fmaximum_num:
9429 case LibFunc_fmaximum_numf:
9430 case LibFunc_fmaximum_numl:
9431 if (visitBinaryFloatCall(I, ISD::FMAXIMUMNUM))
9432 return;
9433 break;
9434 case LibFunc_sin:
9435 case LibFunc_sinf:
9436 case LibFunc_sinl:
9437 if (visitUnaryFloatCall(I, ISD::FSIN))
9438 return;
9439 break;
9440 case LibFunc_cos:
9441 case LibFunc_cosf:
9442 case LibFunc_cosl:
9443 if (visitUnaryFloatCall(I, ISD::FCOS))
9444 return;
9445 break;
9446 case LibFunc_tan:
9447 case LibFunc_tanf:
9448 case LibFunc_tanl:
9449 if (visitUnaryFloatCall(I, ISD::FTAN))
9450 return;
9451 break;
9452 case LibFunc_asin:
9453 case LibFunc_asinf:
9454 case LibFunc_asinl:
9455 if (visitUnaryFloatCall(I, ISD::FASIN))
9456 return;
9457 break;
9458 case LibFunc_acos:
9459 case LibFunc_acosf:
9460 case LibFunc_acosl:
9461 if (visitUnaryFloatCall(I, ISD::FACOS))
9462 return;
9463 break;
9464 case LibFunc_atan:
9465 case LibFunc_atanf:
9466 case LibFunc_atanl:
9467 if (visitUnaryFloatCall(I, ISD::FATAN))
9468 return;
9469 break;
9470 case LibFunc_atan2:
9471 case LibFunc_atan2f:
9472 case LibFunc_atan2l:
9473 if (visitBinaryFloatCall(I, ISD::FATAN2))
9474 return;
9475 break;
9476 case LibFunc_sinh:
9477 case LibFunc_sinhf:
9478 case LibFunc_sinhl:
9479 if (visitUnaryFloatCall(I, ISD::FSINH))
9480 return;
9481 break;
9482 case LibFunc_cosh:
9483 case LibFunc_coshf:
9484 case LibFunc_coshl:
9485 if (visitUnaryFloatCall(I, ISD::FCOSH))
9486 return;
9487 break;
9488 case LibFunc_tanh:
9489 case LibFunc_tanhf:
9490 case LibFunc_tanhl:
9491 if (visitUnaryFloatCall(I, ISD::FTANH))
9492 return;
9493 break;
9494 case LibFunc_sqrt:
9495 case LibFunc_sqrtf:
9496 case LibFunc_sqrtl:
9497 case LibFunc_sqrt_finite:
9498 case LibFunc_sqrtf_finite:
9499 case LibFunc_sqrtl_finite:
9500 if (visitUnaryFloatCall(I, ISD::FSQRT))
9501 return;
9502 break;
9503 case LibFunc_floor:
9504 case LibFunc_floorf:
9505 case LibFunc_floorl:
9506 if (visitUnaryFloatCall(I, ISD::FFLOOR))
9507 return;
9508 break;
9509 case LibFunc_nearbyint:
9510 case LibFunc_nearbyintf:
9511 case LibFunc_nearbyintl:
9512 if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
9513 return;
9514 break;
9515 case LibFunc_ceil:
9516 case LibFunc_ceilf:
9517 case LibFunc_ceill:
9518 if (visitUnaryFloatCall(I, ISD::FCEIL))
9519 return;
9520 break;
9521 case LibFunc_rint:
9522 case LibFunc_rintf:
9523 case LibFunc_rintl:
9524 if (visitUnaryFloatCall(I, ISD::FRINT))
9525 return;
9526 break;
9527 case LibFunc_round:
9528 case LibFunc_roundf:
9529 case LibFunc_roundl:
9530 if (visitUnaryFloatCall(I, ISD::FROUND))
9531 return;
9532 break;
9533 case LibFunc_trunc:
9534 case LibFunc_truncf:
9535 case LibFunc_truncl:
9536 if (visitUnaryFloatCall(I, ISD::FTRUNC))
9537 return;
9538 break;
9539 case LibFunc_log2:
9540 case LibFunc_log2f:
9541 case LibFunc_log2l:
9542 if (visitUnaryFloatCall(I, ISD::FLOG2))
9543 return;
9544 break;
9545 case LibFunc_exp2:
9546 case LibFunc_exp2f:
9547 case LibFunc_exp2l:
9548 if (visitUnaryFloatCall(I, ISD::FEXP2))
9549 return;
9550 break;
9551 case LibFunc_exp10:
9552 case LibFunc_exp10f:
9553 case LibFunc_exp10l:
9554 if (visitUnaryFloatCall(I, ISD::FEXP10))
9555 return;
9556 break;
9557 case LibFunc_ldexp:
9558 case LibFunc_ldexpf:
9559 case LibFunc_ldexpl:
9560 if (visitBinaryFloatCall(I, ISD::FLDEXP))
9561 return;
9562 break;
9563 case LibFunc_memcmp:
9564 if (visitMemCmpBCmpCall(I))
9565 return;
9566 break;
9567 case LibFunc_mempcpy:
9568 if (visitMemPCpyCall(I))
9569 return;
9570 break;
9571 case LibFunc_memchr:
9572 if (visitMemChrCall(I))
9573 return;
9574 break;
9575 case LibFunc_strcpy:
9576 if (visitStrCpyCall(I, false))
9577 return;
9578 break;
9579 case LibFunc_stpcpy:
9580 if (visitStrCpyCall(I, true))
9581 return;
9582 break;
9583 case LibFunc_strcmp:
9584 if (visitStrCmpCall(I))
9585 return;
9586 break;
9587 case LibFunc_strlen:
9588 if (visitStrLenCall(I))
9589 return;
9590 break;
9591 case LibFunc_strnlen:
9592 if (visitStrNLenCall(I))
9593 return;
9594 break;
9595 }
9596 }
9597 }
9598
9599 if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
9600 LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), /*EHPadBB=*/nullptr);
9601 return;
9602 }
9603
9604 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
9605 // have to do anything here to lower funclet bundles.
9606 // CFGuardTarget bundles are lowered in LowerCallTo.
9607 assert(!I.hasOperandBundlesOtherThan(
9608 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
9609 LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
9610 LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
9611 LLVMContext::OB_convergencectrl}) &&
9612 "Cannot lower calls with arbitrary operand bundles!");
9613
9614 SDValue Callee = getValue(I.getCalledOperand());
9615
9616 if (I.hasDeoptState())
9617 LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
9618 else
9619 // Check if we can potentially perform a tail call. More detailed checking
9620 // is done within LowerCallTo, after more information about the call is
9621 // known.
9622 LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
9623}
9624
9625void SelectionDAGBuilder::LowerCallSiteWithPtrAuthBundle(
9626 const CallBase &CB, const BasicBlock *EHPadBB) {
9627 auto PAB = CB.getOperandBundle("ptrauth");
9628 const Value *CalleeV = CB.getCalledOperand();
9629
9630 // Gather the call ptrauth data from the operand bundle:
9631 // [ i32 <key>, i64 <discriminator> ]
9632 const auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
9633 const Value *Discriminator = PAB->Inputs[1];
9634
9635 assert(Key->getType()->isIntegerTy(32) && "Invalid ptrauth key");
9636 assert(Discriminator->getType()->isIntegerTy(64) &&
9637 "Invalid ptrauth discriminator");
9638
9639 // Look through ptrauth constants to find the raw callee.
9640 // Do a direct unauthenticated call if we found it and everything matches.
9641 if (const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CalleeV))
9642 if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator,
9643 DAG.getDataLayout()))
9644 return LowerCallTo(CB, getValue(CalleeCPA->getPointer()), CB.isTailCall(),
9645 CB.isMustTailCall(), EHPadBB);
9646
9647 // Functions should never be ptrauth-called directly.
9648 assert(!isa<Function>(CalleeV) && "invalid direct ptrauth call");
9649
9650 // Otherwise, do an authenticated indirect call.
9651 TargetLowering::PtrAuthInfo PAI = {Key->getZExtValue(),
9652 getValue(Discriminator)};
9653
9654 LowerCallTo(CB, getValue(CalleeV), CB.isTailCall(), CB.isMustTailCall(),
9655 EHPadBB, &PAI);
9656}
9657
9658namespace {
9659
9660/// AsmOperandInfo - This contains information for each constraint that we are
9661/// lowering.
9662class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
9663public:
9664 /// CallOperand - If this is the result output operand or a clobber
9665 /// this is null, otherwise it is the incoming operand to the CallInst.
9666 /// This gets modified as the asm is processed.
9667 SDValue CallOperand;
9668
9669 /// AssignedRegs - If this is a register or register class operand, this
9670 /// contains the set of register corresponding to the operand.
9671 RegsForValue AssignedRegs;
9672
9673 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
9674 : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
9675 }
9676
9677 /// Whether or not this operand accesses memory
9678 bool hasMemory(const TargetLowering &TLI) const {
9679 // Indirect operand accesses access memory.
9680 if (isIndirect)
9681 return true;
9682
9683 for (const auto &Code : Codes)
9684 if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
9685 return true;
9686
9687 return false;
9688 }
9689};
9690
9691
9692} // end anonymous namespace
9693
9694/// Make sure that the output operand \p OpInfo and its corresponding input
9695/// operand \p MatchingOpInfo have compatible constraint types (otherwise error
9696/// out).
9697static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
9698 SDISelAsmOperandInfo &MatchingOpInfo,
9699 SelectionDAG &DAG) {
9700 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9701 return;
9702
9703 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
9704 const auto &TLI = DAG.getTargetLoweringInfo();
9705
9706 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9707 TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
9708 OpInfo.ConstraintVT);
9709 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9710 TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
9711 MatchingOpInfo.ConstraintVT);
9712 const bool OutOpIsIntOrFP =
9713 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9714 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9715 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9716 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9717 // FIXME: error out in a more elegant fashion
9718 report_fatal_error("Unsupported asm: input constraint"
9719 " with a matching output constraint of"
9720 " incompatible type!");
9721 }
9722 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9723}
9724
9725/// Get a direct memory input to behave well as an indirect operand.
9726/// This may introduce stores, hence the need for a \p Chain.
9727/// \return The (possibly updated) chain.
9728static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
9729 SDISelAsmOperandInfo &OpInfo,
9730 SelectionDAG &DAG) {
9731 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9732
9733 // If we don't have an indirect input, put it in the constpool if we can,
9734 // otherwise spill it to a stack slot.
9735 // TODO: This isn't quite right. We need to handle these according to
9736 // the addressing mode that the constraint wants. Also, this may take
9737 // an additional register for the computation and we don't want that
9738 // either.
9739
9740 // If the operand is a float, integer, or vector constant, spill to a
9741 // constant pool entry to get its address.
9742 const Value *OpVal = OpInfo.CallOperandVal;
9743 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9744 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9745 OpInfo.CallOperand = DAG.getConstantPool(
9746 cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
9747 return Chain;
9748 }
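// For example, an integer constant passed to an "m"-constrained operand is
// placed in the constant pool here, and the pool entry's address becomes
// the operand value.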
9749
9750 // Otherwise, create a stack slot and emit a store to it before the asm.
9751 Type *Ty = OpVal->getType();
9752 auto &DL = DAG.getDataLayout();
9753 TypeSize TySize = DL.getTypeAllocSize(Ty);
9754 MachineFunction &MF = DAG.getMachineFunction();
9755 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
9756 int StackID = 0;
9757 if (TySize.isScalable())
9758 StackID = TFI->getStackIDForScalableVectors();
9759 int SSFI = MF.getFrameInfo().CreateStackObject(TySize.getKnownMinValue(),
9760 DL.getPrefTypeAlign(Ty), false,
9761 nullptr, StackID);
9762 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
9763 Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9764 MachinePointerInfo::getFixedStack(MF, SSFI),
9765 TLI.getMemValueType(DL, Ty));
9766 OpInfo.CallOperand = StackSlot;
9767
9768 return Chain;
9769}
9770
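// Editor's note (hypothetical examples): a constraint such as "={ax}" names
// one physical register, which is validated against the required value type
// below, while "=r" obtains fresh virtual registers of a matching class and
// leaves the final assignment to the register allocator.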
9771/// GetRegistersForValue - Assign registers (virtual or physical) for the
9772/// specified operand. We prefer to assign virtual registers, to allow the
9773/// register allocator to handle the assignment process. However, if the asm
9774/// uses features that we can't model on machineinstrs, we have SDISel do the
9775/// allocation. This produces generally horrible, but correct, code.
9776///
9777/// OpInfo describes the operand
9778/// RefOpInfo describes the matching operand if any, the operand otherwise
9779static std::optional<unsigned>
9780getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
9781 SDISelAsmOperandInfo &OpInfo,
9782 SDISelAsmOperandInfo &RefOpInfo) {
9783 LLVMContext &Context = *DAG.getContext();
9784 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9785
9786 MachineFunction &MF = DAG.getMachineFunction();
9787 SmallVector<unsigned, 4> Regs;
9788 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9789
9790 // No work to do for memory/address operands.
9791 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9792 OpInfo.ConstraintType == TargetLowering::C_Address)
9793 return std::nullopt;
9794
9795 // If this is a constraint for a single physreg, or a constraint for a
9796 // register class, find it.
9797 unsigned AssignedReg;
9798 const TargetRegisterClass *RC;
9799 std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
9800 &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9801 // RC is unset only on failure. Return immediately.
9802 if (!RC)
9803 return std::nullopt;
9804
9805 // Get the actual register value type. This is important, because the user
9806 // may have asked for (e.g.) the AX register in i32 type. We need to
9807 // remember that AX is actually i16 to get the right extension.
9808 const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
9809
9810 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9811 // If this is an FP operand in an integer register (or vice versa), or more
9812 // generally if the operand value disagrees with the register class we plan
9813 // to stick it in, fix the operand type.
9814 //
9815 // If this is an input value, the bitcast to the new type is done now.
9816 // Bitcast for output value is done at the end of visitInlineAsm().
9817 if ((OpInfo.Type == InlineAsm::isOutput ||
9818 OpInfo.Type == InlineAsm::isInput) &&
9819 !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9820 // Try to convert to the first EVT that the reg class contains. If the
9821 // types are identical size, use a bitcast to convert (e.g. two differing
9822 // vector types). Note: output bitcast is done at the end of
9823 // visitInlineAsm().
9824 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9825 // Exclude indirect inputs while they are unsupported because the code
9826 // to perform the load is missing and thus OpInfo.CallOperand still
9827 // refers to the input address rather than the pointed-to value.
9828 if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
9829 OpInfo.CallOperand =
9830 DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
9831 OpInfo.ConstraintVT = RegVT;
9832 // If the operand is an FP value and we want it in integer registers,
9833 // use the corresponding integer type. This turns an f64 value into
9834 // i64, which can be passed with two i32 values on a 32-bit machine.
9835 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9836 MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
9837 if (OpInfo.Type == InlineAsm::isInput)
9838 OpInfo.CallOperand =
9839 DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
9840 OpInfo.ConstraintVT = VT;
9841 }
9842 }
9843 }
9844
9845 // No need to allocate a matching input constraint since the constraint it's
9846 // matching to has already been allocated.
9847 if (OpInfo.isMatchingInputConstraint())
9848 return std::nullopt;
9849
9850 EVT ValueVT = OpInfo.ConstraintVT;
9851 if (OpInfo.ConstraintVT == MVT::Other)
9852 ValueVT = RegVT;
9853
9854 // Initialize NumRegs.
9855 unsigned NumRegs = 1;
9856 if (OpInfo.ConstraintVT != MVT::Other)
9857 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
9858
9859 // If this is a constraint for a specific physical register, like {r17},
9860 // assign it now.
9861
9862 // If this is associated with a specific register, initialize the iterator
9863 // to the correct place; if virtual, make sure we have enough registers.
9864
9865 // Initialize iterator if necessary
9866 TargetRegisterClass::iterator I = RC->begin();
9867 MachineRegisterInfo &RegInfo = MF.getRegInfo();
9868
9869 // Do not check for single registers.
9870 if (AssignedReg) {
9871 I = std::find(I, RC->end(), AssignedReg);
9872 if (I == RC->end()) {
9873 // RC does not contain the selected register, which indicates a
9874 // mismatch between the register and the required type/bitwidth.
9875 return {AssignedReg};
9876 }
9877 }
9878
9879 for (; NumRegs; --NumRegs, ++I) {
9880 assert(I != RC->end() && "Ran out of registers to allocate!");
9881 Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
9882 Regs.push_back(R);
9883 }
9884
9885 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
9886 return std::nullopt;
9887}
9888
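// Editor's note: per InlineAsm.h, an INLINEASM node's operands start with
// the chain, asm-string, !srcloc metadata, and extra-info words; after
// those, each operand group is one flag word followed by that group's
// registers, which is the layout the walk below skips through.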
9889static unsigned
9890findMatchingInlineAsmOperand(unsigned OperandNo,
9891 const std::vector<SDValue> &AsmNodeOperands) {
9892 // Scan until we find the definition we already emitted of this operand.
9893 unsigned CurOp = InlineAsm::Op_FirstOperand;
9894 for (; OperandNo; --OperandNo) {
9895 // Advance to the next operand.
9896 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9897 const InlineAsm::Flag F(OpFlag);
9898 assert(
9899 (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
9900 "Skipped past definitions?");
9901 CurOp += F.getNumOperandRegisters() + 1;
9902 }
9903 return CurOp;
9904}
9905
9906namespace {
9907
9908class ExtraFlags {
9909 unsigned Flags = 0;
9910
9911public:
9912 explicit ExtraFlags(const CallBase &Call) {
9913 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9914 if (IA->hasSideEffects())
9915 Flags |= InlineAsm::Extra_HasSideEffects;
9916 if (IA->isAlignStack())
9917 Flags |= InlineAsm::Extra_IsAlignStack;
9918 if (Call.isConvergent())
9919 Flags |= InlineAsm::Extra_IsConvergent;
9920 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9921 }
9922
9923 void update(const TargetLowering::AsmOperandInfo &OpInfo) {
9924 // Ideally, we would only check against memory constraints. However, the
9925 // meaning of an Other constraint can be target-specific and we can't easily
9926 // reason about it. Therefore, be conservative and set MayLoad/MayStore
9927 // for Other constraints as well.
9928 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9929 OpInfo.ConstraintType == TargetLowering::C_Other) {
9930 if (OpInfo.Type == InlineAsm::isInput)
9931 Flags |= InlineAsm::Extra_MayLoad;
9932 else if (OpInfo.Type == InlineAsm::isOutput)
9933 Flags |= InlineAsm::Extra_MayStore;
9934 else if (OpInfo.Type == InlineAsm::isClobber)
9935 Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
9936 }
9937 }
9938
9939 unsigned get() const { return Flags; }
9940};
9941
9942} // end anonymous namespace
9943
9944static bool isFunction(SDValue Op) {
9945 if (Op && Op.getOpcode() == ISD::GlobalAddress) {
9946 if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9947 auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9948
9949 // For a normal "call dllimport func" instruction (not inline asm), the
9950 // call opcode forces indirect access, and the asm printer usually adds an
9951 // indirection marker (e.g. "*") based on that opcode. Inline asm cannot
9952 // do that today (in effect this resembles a data access), so we ignore
9953 // dllimport functions here.
9954 if (Fn && !Fn->hasDLLImportStorageClass())
9955 return true;
9956 }
9957 }
9958 return false;
9959}
9960
9961/// visitInlineAsm - Handle a call to an InlineAsm object.
9962void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
9963 const BasicBlock *EHPadBB) {
9964 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9965
9966 /// ConstraintOperands - Information about all of the constraints.
9967 SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
9968
9969 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9970 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
9971 DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
9972
9973 // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
9974 // AsmDialect, MayLoad, MayStore).
9975 bool HasSideEffect = IA->hasSideEffects();
9976 ExtraFlags ExtraInfo(Call);
9977
9978 for (auto &T : TargetConstraints) {
9979 ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
9980 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
9981
9982 if (OpInfo.CallOperandVal)
9983 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
9984
9985 if (!HasSideEffect)
9986 HasSideEffect = OpInfo.hasMemory(TLI);
9987
9988 // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
9989 // FIXME: Could we compute this on OpInfo rather than T?
9990
9991 // Compute the constraint code and ConstraintType to use.
9992 TLI.ComputeConstraintToUse(T, SDValue());
9993
9994 if (T.ConstraintType == TargetLowering::C_Immediate &&
9995 OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9996 // We've delayed emitting a diagnostic like the "n" constraint because
9997 // inlining could cause an integer constant to show up.
9998 return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
9999 "' expects an integer constant "
10000 "expression");
10001
10002 ExtraInfo.update(T);
10003 }
10004
10005 // We won't need to flush pending loads if this asm doesn't touch
10006 // memory and is nonvolatile.
10007 SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
10008
10009 bool EmitEHLabels = isa<InvokeInst>(Call);
10010 if (EmitEHLabels) {
10011 assert(EHPadBB && "InvokeInst must have an EHPadBB");
10012 }
10013 bool IsCallBr = isa<CallBrInst>(Call);
10014
10015 if (IsCallBr || EmitEHLabels) {
10016 // If this is a callbr or invoke we need to flush pending exports since
10017 // inlineasm_br and invoke are terminators.
10018 // We need to do this before nodes are glued to the inlineasm_br node.
10019 Chain = getControlRoot();
10020 }
10021
10022 MCSymbol *BeginLabel = nullptr;
10023 if (EmitEHLabels) {
10024 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10025 }
10026
10027 int OpNo = -1;
10028 SmallVector<StringRef> AsmStrs;
10029 IA->collectAsmStrs(AsmStrs);
10030
10031 // Second pass over the constraints: compute which constraint option to use.
10032 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10033 if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
10034 OpNo++;
10035
10036 // If this is an output operand with a matching input operand, look up the
10037 // matching input. If their types mismatch, e.g. one is an integer, the
10038 // other is floating point, or their sizes are different, flag it as an
10039 // error.
10040 if (OpInfo.hasMatchingInput()) {
10041 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10042 patchMatchingInput(OpInfo, Input, DAG);
10043 }
10044
10045 // Compute the constraint code and ConstraintType to use.
10046 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
10047
10048 if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
10049 OpInfo.Type == InlineAsm::isClobber) ||
10050 OpInfo.ConstraintType == TargetLowering::C_Address)
10051 continue;
10052
10053 // In Linux PIC model, there are 4 cases about value/label addressing:
10054 //
10055 // 1: Function call or Label jmp inside the module.
10056 // 2: Data access (such as global variable, static variable) inside module.
10057 // 3: Function call or Label jmp outside the module.
10058 // 4: Data access (such as global variable) outside the module.
10059 //
10060 // Because the current LLVM inline asm architecture is designed not to
10061 // "recognize" the asm code, it is hard to treat memory addressing
10062 // differently for the same value/address used in different instructions.
10063 // For example, in the PIC model a call may go through the PLT or be
10064 // directly PC-relative, while lea/mov of a function address may use the GOT.
10065 //
10066 // Here we try to "recognize" function call for the case 1 and case 3 in
10067 // inline asm. And try to adjust the constraint for them.
10068 //
10069 // TODO: Since current inline asm discourages jumping to labels outside
10070 // the module, we don't handle jumps to function labels yet; enhance this
10071 // (especially for the PIC model) if meaningful requirements arise.
10072 if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
10073 TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
10074 OpInfo.ConstraintType == TargetLowering::C_Memory) {
10075 OpInfo.isIndirect = false;
10076 OpInfo.ConstraintType = TargetLowering::C_Address;
10077 }
10078
10079 // If this is a memory input, and if the operand is not indirect, do what we
10080 // need to provide an address for the memory input.
10081 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
10082 !OpInfo.isIndirect) {
10083 assert((OpInfo.isMultipleAlternative ||
10084 (OpInfo.Type == InlineAsm::isInput)) &&
10085 "Can only indirectify direct input operands!");
10086
10087 // Memory operands really want the address of the value.
10088 Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
10089
10090 // There is no longer a Value* corresponding to this operand.
10091 OpInfo.CallOperandVal = nullptr;
10092
10093 // It is now an indirect operand.
10094 OpInfo.isIndirect = true;
10095 }
10096
10097 }
10098
10099 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
10100 std::vector<SDValue> AsmNodeOperands;
10101 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
10102 AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
10103 IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
10104
10105 // If we have a !srcloc metadata node associated with it, we want to attach
10106 // this to the ultimately generated inline asm machineinstr. To do this, we
10107 // pass in the third operand as this (potentially null) inline asm MDNode.
10108 const MDNode *SrcLoc = Call.getMetadata("srcloc");
10109 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
10110
10111 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
10112 // bits as operand 3.
10113 AsmNodeOperands.push_back(DAG.getTargetConstant(
10114 ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
10115
10116 // Third pass: Loop over operands to prepare DAG-level operands. As part of
10117 // this, assign virtual and physical registers for inputs and outputs.
10118 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10119 // Assign Registers.
10120 SDISelAsmOperandInfo &RefOpInfo =
10121 OpInfo.isMatchingInputConstraint()
10122 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10123 : OpInfo;
10124 const auto RegError =
10125 getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
10126 if (RegError) {
10127 const MachineFunction &MF = DAG.getMachineFunction();
10128 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
10129 const char *RegName = TRI.getName(*RegError);
10130 emitInlineAsmError(Call, "register '" + Twine(RegName) +
10131 "' allocated for constraint '" +
10132 Twine(OpInfo.ConstraintCode) +
10133 "' does not match required type");
10134 return;
10135 }
10136
10137 auto DetectWriteToReservedRegister = [&]() {
10138 const MachineFunction &MF = DAG.getMachineFunction();
10139 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
10140 for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
10141 if (Register::isPhysicalRegister(Reg) &&
10142 TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
10143 const char *RegName = TRI.getName(Reg);
10144 emitInlineAsmError(Call, "write to reserved register '" +
10145 Twine(RegName) + "'");
10146 return true;
10147 }
10148 }
10149 return false;
10150 };
10151 assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
10152 (OpInfo.Type == InlineAsm::isInput &&
10153 !OpInfo.isMatchingInputConstraint())) &&
10154 "Only address as input operand is allowed.");
10155
10156 switch (OpInfo.Type) {
10157 case InlineAsm::isOutput:
10158 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
10159 const InlineAsm::ConstraintCode ConstraintID =
10160 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10162 "Failed to convert memory constraint code to constraint id.");
10163
10164 // Add information to the INLINEASM node to know about this output.
10165 InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
10166 OpFlags.setMemConstraint(ConstraintID);
10167 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
10168 MVT::i32));
10169 AsmNodeOperands.push_back(OpInfo.CallOperand);
10170 } else {
10171 // Otherwise, this outputs to a register (directly for C_Register /
10172 // C_RegisterClass, and a target-defined fashion for
10173 // C_Immediate/C_Other). Find a register that we can use.
10174 if (OpInfo.AssignedRegs.Regs.empty()) {
10175 emitInlineAsmError(
10176 Call, "couldn't allocate output register for constraint '" +
10177 Twine(OpInfo.ConstraintCode) + "'");
10178 return;
10179 }
10180
10181 if (DetectWriteToReservedRegister())
10182 return;
10183
10184 // Add information to the INLINEASM node to know that this register is
10185 // set.
10186 OpInfo.AssignedRegs.AddInlineAsmOperands(
10187 OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
10188 : InlineAsm::Kind::RegDef,
10189 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
10190 }
10191 break;
10192
10193 case InlineAsm::isInput:
10194 case InlineAsm::isLabel: {
10195 SDValue InOperandVal = OpInfo.CallOperand;
10196
10197 if (OpInfo.isMatchingInputConstraint()) {
10198 // If this is required to match an output register we have already set,
10199 // just use its register.
10200 auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
10201 AsmNodeOperands);
10202 InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10203 if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
10204 if (OpInfo.isIndirect) {
10205 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
10206 emitInlineAsmError(Call, "inline asm not supported yet: "
10207 "don't know how to handle tied "
10208 "indirect register inputs");
10209 return;
10210 }
10211
10212 SmallVector<unsigned, 4> Regs;
10213 MachineFunction &MF = DAG.getMachineFunction();
10214 MachineRegisterInfo &MRI = MF.getRegInfo();
10215 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
10216 auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
10217 Register TiedReg = R->getReg();
10218 MVT RegVT = R->getSimpleValueType(0);
10219 const TargetRegisterClass *RC =
10220 TiedReg.isVirtual() ? MRI.getRegClass(TiedReg)
10221 : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
10222 : TRI.getMinimalPhysRegClass(TiedReg);
10223 for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
10224 Regs.push_back(MRI.createVirtualRegister(RC));
10225
10226 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
10227
10228 SDLoc dl = getCurSDLoc();
10229 // Use the produced MatchedRegs object to copy the input into the regs.
10230 MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
10231 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
10232 OpInfo.getMatchedOperand(), dl, DAG,
10233 AsmNodeOperands);
10234 break;
10235 }
10236
10237 assert(Flag.isMemKind() && "Unknown matching constraint!");
10238 assert(Flag.getNumOperandRegisters() == 1 &&
10239 "Unexpected number of operands");
10240 // Add information to the INLINEASM node to know about this input.
10241 // See InlineAsm.h isUseOperandTiedToDef.
10242 Flag.clearMemConstraint();
10243 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10244 AsmNodeOperands.push_back(DAG.getTargetConstant(
10245 Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
10246 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10247 break;
10248 }
10249
10250 // Treat indirect 'X' constraint as memory.
10251 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
10252 OpInfo.isIndirect)
10253 OpInfo.ConstraintType = TargetLowering::C_Memory;
10254
10255 if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
10256 OpInfo.ConstraintType == TargetLowering::C_Other) {
10257 std::vector<SDValue> Ops;
10258 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
10259 Ops, DAG);
10260 if (Ops.empty()) {
10261 if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
10262 if (isa<ConstantSDNode>(InOperandVal)) {
10263 emitInlineAsmError(Call, "value out of range for constraint '" +
10264 Twine(OpInfo.ConstraintCode) + "'");
10265 return;
10266 }
10267
10268 emitInlineAsmError(Call,
10269 "invalid operand for inline asm constraint '" +
10270 Twine(OpInfo.ConstraintCode) + "'");
10271 return;
10272 }
10273
10274 // Add information to the INLINEASM node to know about this input.
10275 InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
10276 AsmNodeOperands.push_back(DAG.getTargetConstant(
10277 ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
10278 llvm::append_range(AsmNodeOperands, Ops);
10279 break;
10280 }
10281
10282 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
10283 assert((OpInfo.isIndirect ||
10284 OpInfo.ConstraintType != TargetLowering::C_Memory) &&
10285 "Operand must be indirect to be a mem!");
10286 assert(InOperandVal.getValueType() ==
10288 "Memory operands expect pointer values");
10289
10290 const InlineAsm::ConstraintCode ConstraintID =
10291 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10293 "Failed to convert memory constraint code to constraint id.");
10294
10295 // Add information to the INLINEASM node to know about this input.
10296 InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
10297 ResOpType.setMemConstraint(ConstraintID);
10298 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
10299 getCurSDLoc(),
10300 MVT::i32));
10301 AsmNodeOperands.push_back(InOperandVal);
10302 break;
10303 }
10304
10305 if (OpInfo.ConstraintType == TargetLowering::C_Address) {
10306 const InlineAsm::ConstraintCode ConstraintID =
10307 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10309 "Failed to convert memory constraint code to constraint id.");
10310
10311 InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
10312
10313 SDValue AsmOp = InOperandVal;
10314 if (isFunction(InOperandVal)) {
10315 auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
10316 ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
10317 AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
10318 InOperandVal.getValueType(),
10319 GA->getOffset());
10320 }
10321
10322 // Add information to the INLINEASM node to know about this input.
10323 ResOpType.setMemConstraint(ConstraintID);
10324
10325 AsmNodeOperands.push_back(
10326 DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));
10327
10328 AsmNodeOperands.push_back(AsmOp);
10329 break;
10330 }
10331
10332 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
10333 OpInfo.ConstraintType != TargetLowering::C_Register) {
10334 emitInlineAsmError(Call, "unknown asm constraint '" +
10335 Twine(OpInfo.ConstraintCode) + "'");
10336 return;
10337 }
10338
10339 // TODO: Support this.
10340 if (OpInfo.isIndirect) {
10341 emitInlineAsmError(
10342 Call, "Don't know how to handle indirect register inputs yet "
10343 "for constraint '" +
10344 Twine(OpInfo.ConstraintCode) + "'");
10345 return;
10346 }
10347
10348 // Copy the input into the appropriate registers.
10349 if (OpInfo.AssignedRegs.Regs.empty()) {
10350 emitInlineAsmError(Call,
10351 "couldn't allocate input reg for constraint '" +
10352 Twine(OpInfo.ConstraintCode) + "'");
10353 return;
10354 }
10355
10356 if (DetectWriteToReservedRegister())
10357 return;
10358
10359 SDLoc dl = getCurSDLoc();
10360
10361 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue,
10362 &Call);
10363
10364 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false,
10365 0, dl, DAG, AsmNodeOperands);
10366 break;
10367 }
10368 case InlineAsm::isClobber:
10369 // Add the clobbered value to the operand list, so that the register
10370 // allocator is aware that the physreg got clobbered.
10371 if (!OpInfo.AssignedRegs.Regs.empty())
10372 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
10373 false, 0, getCurSDLoc(), DAG,
10374 AsmNodeOperands);
10375 break;
10376 }
10377 }
10378
10379 // Finish up input operands. Set the input chain and add the flag last.
10380 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
10381 if (Glue.getNode()) AsmNodeOperands.push_back(Glue);
10382
10383 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
10384 Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
10385 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10386 Glue = Chain.getValue(1);
10387
10388 // Do additional work to generate outputs.
10389
10390 SmallVector<EVT, 1> ResultVTs;
10391 SmallVector<SDValue, 1> ResultValues;
10392 SmallVector<SDValue, 8> OutChains;
10393
10394 llvm::Type *CallResultType = Call.getType();
10395 ArrayRef<Type *> ResultTypes;
10396 if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
10397 ResultTypes = StructResult->elements();
10398 else if (!CallResultType->isVoidTy())
10399 ResultTypes = ArrayRef(CallResultType);
10400
10401 auto CurResultType = ResultTypes.begin();
10402 auto handleRegAssign = [&](SDValue V) {
10403 assert(CurResultType != ResultTypes.end() && "Unexpected value");
10404 assert((*CurResultType)->isSized() && "Unexpected unsized type");
10405 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
10406 ++CurResultType;
10407 // If the type of the inline asm call site return value is different but
10408 // has the same size as the type of the asm output, bitcast it. One example
10409 // is for vectors with different width / number of elements. This can
10410 // happen for register classes that can contain multiple different value
10411 // types. The preg or vreg allocated may not have the same VT as was
10412 // expected.
10413 //
10414 // This can also happen for a return value that disagrees with the register
10415 // class it is put in, eg. a double in a general-purpose register on a
10416 // 32-bit machine.
10417 if (ResultVT != V.getValueType() &&
10418 ResultVT.getSizeInBits() == V.getValueSizeInBits())
10419 V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
10420 else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
10421 V.getValueType().isInteger()) {
10422 // If a result value was tied to an input value, the computed result
10423 // may have a wider width than the expected result. Extract the
10424 // relevant portion.
10425 V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
10426 }
10427 assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
10428 ResultVTs.push_back(ResultVT);
10429 ResultValues.push_back(V);
10430 };
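// Editor's note (hypothetical cases for handleRegAssign above): a <4 x i16>
// result produced in a 64-bit register class is bitcast back to the call
// site type, while an i8 result tied to a wider input comes back as i32 and
// is truncated.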
10431
10432 // Deal with output operands.
10433 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10434 if (OpInfo.Type == InlineAsm::isOutput) {
10435 SDValue Val;
10436 // Skip trivial output operands.
10437 if (OpInfo.AssignedRegs.Regs.empty())
10438 continue;
10439
10440 switch (OpInfo.ConstraintType) {
10441 case TargetLowering::C_Register:
10442 case TargetLowering::C_RegisterClass:
10443 Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
10444 Chain, &Glue, &Call);
10445 break;
10446 case TargetLowering::C_Immediate:
10447 case TargetLowering::C_Other:
10448 Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
10449 OpInfo, DAG);
10450 break;
10451 case TargetLowering::C_Memory:
10452 break; // Already handled.
10453 case TargetLowering::C_Address:
10454 break; // Silence warning.
10455 case TargetLowering::C_Unknown:
10456 assert(false && "Unexpected unknown constraint");
10457 }
10458
10459 // Indirect output manifest as stores. Record output chains.
10460 if (OpInfo.isIndirect) {
10461 const Value *Ptr = OpInfo.CallOperandVal;
10462 assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
10463 SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
10464 MachinePointerInfo(Ptr));
10465 OutChains.push_back(Store);
10466 } else {
10467 // generate CopyFromRegs to associated registers.
10468 assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
10469 if (Val.getOpcode() == ISD::MERGE_VALUES) {
10470 for (const SDValue &V : Val->op_values())
10471 handleRegAssign(V);
10472 } else
10473 handleRegAssign(Val);
10474 }
10475 }
10476 }
10477
10478 // Set results.
10479 if (!ResultValues.empty()) {
10480 assert(CurResultType == ResultTypes.end() &&
10481 "Mismatch in number of ResultTypes");
10482 assert(ResultValues.size() == ResultTypes.size() &&
10483 "Mismatch in number of output operands in asm result");
10484
10485 SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
10486 DAG.getVTList(ResultVTs), ResultValues);
10487 setValue(&Call, V);
10488 }
10489
10490 // Collect store chains.
10491 if (!OutChains.empty())
10492 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
10493
10494 if (EmitEHLabels) {
10495 Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
10496 }
10497
10498 // Only Update Root if inline assembly has a memory effect.
10499 if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
10500 EmitEHLabels)
10501 DAG.setRoot(Chain);
10502}
10503
10504void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
10505 const Twine &Message) {
10506 LLVMContext &Ctx = *DAG.getContext();
10507 Ctx.diagnose(DiagnosticInfoInlineAsm(Call, Message));
10508
10509 // Make sure we leave the DAG in a valid state
10510 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10511 SmallVector<EVT, 1> ValueVTs;
10512 ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
10513
10514 if (ValueVTs.empty())
10515 return;
10516
10517 SmallVector<SDValue, 1> Ops;
10518 for (const EVT &VT : ValueVTs)
10519 Ops.push_back(DAG.getUNDEF(VT));
10520
10521 setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
10522}
10523
10524void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
10525 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
10526 MVT::Other, getRoot(),
10527 getValue(I.getArgOperand(0)),
10528 DAG.getSrcValue(I.getArgOperand(0))));
10529}
10530
10531void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
10532 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10533 const DataLayout &DL = DAG.getDataLayout();
10534 SDValue V = DAG.getVAArg(
10535 TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
10536 getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
10537 DL.getABITypeAlign(I.getType()).value());
10538 DAG.setRoot(V.getValue(1));
10539
10540 if (I.getType()->isPointerTy())
10541 V = DAG.getPtrExtOrTrunc(
10542 V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
10543 setValue(&I, V);
10544}
10545
10546void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
10547 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
10548 MVT::Other, getRoot(),
10549 getValue(I.getArgOperand(0)),
10550 DAG.getSrcValue(I.getArgOperand(0))));
10551}
10552
10553void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
10554 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
10555 MVT::Other, getRoot(),
10556 getValue(I.getArgOperand(0)),
10557 getValue(I.getArgOperand(1)),
10558 DAG.getSrcValue(I.getArgOperand(0)),
10559 DAG.getSrcValue(I.getArgOperand(1))));
10560}
10561
10562SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
10563 const Instruction &I,
10564 SDValue Op) {
10565 std::optional<ConstantRange> CR = getRange(I);
10566
10567 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10568 return Op;
10569
10570 APInt Lo = CR->getUnsignedMin();
10571 if (!Lo.isMinValue())
10572 return Op;
10573
10574 APInt Hi = CR->getUnsignedMax();
10575 unsigned Bits = std::max(Hi.getActiveBits(),
10576 static_cast<unsigned>(IntegerType::MIN_INT_BITS));
10577
10578 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
10579
10580 SDLoc SL = getCurSDLoc();
10581
10582 SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
10583 DAG.getValueType(SmallVT));
10584 unsigned NumVals = Op.getNode()->getNumValues();
10585 if (NumVals == 1)
10586 return ZExt;
10587
10588 SmallVector<SDValue, 4> Ops;
10589
10590 Ops.push_back(ZExt);
10591 for (unsigned I = 1; I != NumVals; ++I)
10592 Ops.push_back(Op.getValue(I));
10593
10594 return DAG.getMergeValues(Ops, SL);
10595}
10596
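// Editor's note: visitPatchpoint below is one caller; it passes ArgIdx and
// NumArgs so that only the intrinsic's trailing call arguments, never the
// meta operands (<id>, <numBytes>, <target>, <numArgs>), participate in the
// calling convention.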
10597/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
10598/// the call being lowered.
10599///
10600/// This is a helper for lowering intrinsics that follow a target calling
10601/// convention or require stack pointer adjustment. Only a subset of the
10602/// intrinsic's operands need to participate in the calling convention.
10603void SelectionDAGBuilder::populateCallLoweringInfo(
10604 TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
10605 unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
10606 AttributeSet RetAttrs, bool IsPatchPoint) {
10607 TargetLowering::ArgListTy Args;
10608 Args.reserve(NumArgs);
10609
10610 // Populate the argument list.
10611 // Attributes for args start at offset 1, after the return attribute.
10612 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10613 ArgI != ArgE; ++ArgI) {
10614 const Value *V = Call->getOperand(ArgI);
10615
10616 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
10617
10618 TargetLowering::ArgListEntry Entry;
10619 Entry.Node = getValue(V);
10620 Entry.Ty = V->getType();
10621 Entry.setAttributes(Call, ArgI);
10622 Args.push_back(Entry);
10623 }
10624
10625 CLI.setDebugLoc(getCurSDLoc())
10626 .setChain(getRoot())
10627 .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10628 RetAttrs)
10629 .setDiscardResult(Call->use_empty())
10630 .setIsPatchPoint(IsPatchPoint)
10631 .setIsPreallocated(
10632 Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
10633}
10634
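// Editor's note (hypothetical IR): given
//   call void @llvm.experimental.stackmap(i64 7, i32 0, ptr %slot, i32 %v)
// a frame-index operand such as %slot becomes a TargetFrameIndex below,
// while %v is pushed as an ordinary node and legalised later.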
10635/// Add a stack map intrinsic call's live variable operands to a stackmap
10636/// or patchpoint target node's operand list.
10637///
10638/// Constants are converted to TargetConstants purely as an optimization to
10639/// avoid constant materialization and register allocation.
10640///
10641/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
10642/// generate address computation nodes, and so FinalizeISel can convert the
10643/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
10644/// address materialization and register allocation, but may also be required
10645/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
10646/// alloca in the entry block, then the runtime may assume that the alloca's
10647/// StackMap location can be read immediately after compilation and that the
10648/// location is valid at any point during execution (this is similar to the
10649/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
10650/// only available in a register, then the runtime would need to trap when
10651/// execution reaches the StackMap in order to read the alloca's location.
10652static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
10653 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
10654 SelectionDAGBuilder &Builder) {
10655 SelectionDAG &DAG = Builder.DAG;
10656 for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
10657 SDValue Op = Builder.getValue(Call.getArgOperand(I));
10658
10659 // Things on the stack are pointer-typed, meaning that they are already
10660 // legal and can be emitted directly to target nodes.
10661 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
10662 Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
10663 } else {
10664 // Otherwise emit a target independent node to be legalised.
10665 Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
10666 }
10667 }
10668}
10669
10670/// Lower llvm.experimental.stackmap.
10671void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
10672 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
10673 // [live variables...])
10674
10675 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
10676
10677 SDValue Chain, InGlue, Callee;
10678 SmallVector<SDValue, 32> Ops;
10679
10680 SDLoc DL = getCurSDLoc();
10681 Callee = getValue(CI.getCalledOperand());
10682
10683 // The stackmap intrinsic only records the live variables (the arguments
10684 // passed to it) and emits NOPS (if requested). Unlike the patchpoint
10685 // intrinsic, this won't be lowered to a function call. This means we don't
10686 // have to worry about calling conventions and target specific lowering code.
10687 // Instead we perform the call lowering right here.
10688 //
10689 // chain, flag = CALLSEQ_START(chain, 0, 0)
10690 // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
10691 // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
10692 //
10693 Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
10694 InGlue = Chain.getValue(1);
10695
10696 // Add the STACKMAP operands, starting with DAG house-keeping.
10697 Ops.push_back(Chain);
10698 Ops.push_back(InGlue);
10699
10700 // Add the <id>, <numShadowBytes> operands.
10701 //
10702 // These do not require legalisation, and can be emitted directly to target
10703 // constant nodes.
10704 SDValue ID = getValue(CI.getArgOperand(0));
10705 assert(ID.getValueType() == MVT::i64);
10706 SDValue IDConst =
10707 DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
10708 Ops.push_back(IDConst);
10709
10710 SDValue Shad = getValue(CI.getArgOperand(1));
10711 assert(Shad.getValueType() == MVT::i32);
10712 SDValue ShadConst =
10713 DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
10714 Ops.push_back(ShadConst);
10715
10716 // Add the live variables.
10717 addStackMapLiveVars(CI, 2, DL, Ops, *this);
10718
10719 // Create the STACKMAP node.
10720 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10721 Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
10722 InGlue = Chain.getValue(1);
10723
10724 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
10725
10726 // Stackmaps don't generate values, so nothing goes into the NodeMap.
10727
10728 // Set the root to the target-lowered call chain.
10729 DAG.setRoot(Chain);
10730
10731 // Inform the Frame Information that we have a stackmap in this function.
10732 FuncInfo.MF->getFrameInfo().setHasStackMap(true);
10733}
10734
10735/// Lower llvm.experimental.patchpoint directly to its target opcode.
10736void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
10737 const BasicBlock *EHPadBB) {
10738 // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
10739 // i32 <numBytes>,
10740 // i8* <target>,
10741 // i32 <numArgs>,
10742 // [Args...],
10743 // [live variables...])
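// A hypothetical instance (editor's illustration, not from this file):
//   %r = call i64 @llvm.experimental.patchpoint.i64(i64 42, i32 16,
//                                         ptr @target, i32 2, i64 %a, i64 %b)
// reserves 16 patchable bytes, passes %a and %b according to the calling
// convention, and records any further operands as stack map live variables.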
10744
10745 CallingConv::ID CC = CB.getCallingConv();
10746 bool IsAnyRegCC = CC == CallingConv::AnyReg;
10747 bool HasDef = !CB.getType()->isVoidTy();
10748 SDLoc dl = getCurSDLoc();
10749 SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
10750
10751 // Handle immediate and symbolic callees.
10752 if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10753 Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10754 /*isTarget=*/true);
10755 else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10756 Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10757 SDLoc(SymbolicCallee),
10758 SymbolicCallee->getValueType(0));
10759
10760 // Get the real number of arguments participating in the call <numArgs>
10761 SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
10762 unsigned NumArgs = NArgVal->getAsZExtVal();
10763
10764 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
10765 // Intrinsics include all meta-operands up to but not including CC.
10766 unsigned NumMetaOpers = PatchPointOpers::CCPos;
10767 assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
10768 "Not enough arguments provided to the patchpoint intrinsic");
10769
10770 // For AnyRegCC the arguments are lowered later on manually.
10771 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10772 Type *ReturnTy =
10773 IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
10774
10775 TargetLowering::CallLoweringInfo CLI(DAG);
10776 populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
10777 ReturnTy, CB.getAttributes().getRetAttrs(), true);
10778 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
10779
10780 SDNode *CallEnd = Result.second.getNode();
10781 if (CallEnd->getOpcode() == ISD::EH_LABEL)
10782 CallEnd = CallEnd->getOperand(0).getNode();
10783 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
10784 CallEnd = CallEnd->getOperand(0).getNode();
10785
10786 /// Get a call instruction from the call sequence chain.
10787 /// Tail calls are not allowed.
10788 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
10789 "Expected a callseq node.");
10790 SDNode *Call = CallEnd->getOperand(0).getNode();
10791 bool HasGlue = Call->getGluedNode();
10792
10793 // Replace the target specific call node with the patchable intrinsic.
10794 SmallVector<SDValue, 8> Ops;
10795
10796 // Push the chain.
10797 Ops.push_back(*(Call->op_begin()));
10798
10799 // Optionally, push the glue (if any).
10800 if (HasGlue)
10801 Ops.push_back(*(Call->op_end() - 1));
10802
10803 // Push the register mask info.
10804 if (HasGlue)
10805 Ops.push_back(*(Call->op_end() - 2));
10806 else
10807 Ops.push_back(*(Call->op_end() - 1));
10808
10809 // Add the <id> and <numBytes> constants.
10810 SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
10811 Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
10812 SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
10813 Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));
10814
10815 // Add the callee.
10816 Ops.push_back(Callee);
10817
10818 // Adjust <numArgs> to account for any arguments that have been passed on the
10819 // stack instead.
10820 // Call Node: Chain, Target, {Args}, RegMask, [Glue]
10821 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
10822 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10823 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10824
10825 // Add the calling convention
10826 Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
10827
10828 // Add the arguments we omitted previously. The register allocator should
10829 // place these in any free register.
10830 if (IsAnyRegCC)
10831 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
10832 Ops.push_back(getValue(CB.getArgOperand(i)));
10833
10834 // Push the arguments from the call instruction.
10835 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
10836 Ops.append(Call->op_begin() + 2, e);
10837
10838 // Push live variables for the stack map.
10839 addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
10840
10841 SDVTList NodeTys;
10842 if (IsAnyRegCC && HasDef) {
10843 // Create the return types based on the intrinsic definition
10844 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10845 SmallVector<EVT, 3> ValueVTs;
10846 ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
10847 assert(ValueVTs.size() == 1 && "Expected only one return value type.");
10848
10849 // There is always a chain and a glue type at the end
10850 ValueVTs.push_back(MVT::Other);
10851 ValueVTs.push_back(MVT::Glue);
10852 NodeTys = DAG.getVTList(ValueVTs);
10853 } else
10854 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10855
10856 // Replace the target specific call node with a PATCHPOINT node.
10857 SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);
10858
10859 // Update the NodeMap.
10860 if (HasDef) {
10861 if (IsAnyRegCC)
10862 setValue(&CB, SDValue(PPV.getNode(), 0));
10863 else
10864 setValue(&CB, Result.first);
10865 }
10866
10867 // Fixup the consumers of the intrinsic. The chain and glue may be used in the
10868 // call sequence. Furthermore the location of the chain and glue can change
10869 // when the AnyReg calling convention is used and the intrinsic returns a
10870 // value.
10871 if (IsAnyRegCC && HasDef) {
10872 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
10873 SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
10874 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10875 } else
10876 DAG.ReplaceAllUsesWith(Call, PPV.getNode());
10877 DAG.DeleteNode(Call);
10878
10879 // Inform the Frame Information that we have a patchpoint in this function.
10880 FuncInfo.MF->getFrameInfo().setHasPatchPoint(true);
10881}
10882
10883void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
10884 unsigned Intrinsic) {
10885 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10886 SDValue Op1 = getValue(I.getArgOperand(0));
10887 SDValue Op2;
10888 if (I.arg_size() > 1)
10889 Op2 = getValue(I.getArgOperand(1));
10890 SDLoc dl = getCurSDLoc();
10891 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
10892 SDValue Res;
10893 SDNodeFlags SDFlags;
10894 if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
10895 SDFlags.copyFMF(*FPMO);
10896
10897 switch (Intrinsic) {
10898 case Intrinsic::vector_reduce_fadd:
10899 if (SDFlags.hasAllowReassociation())
10900 Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
10901 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10902 SDFlags);
10903 else
10904 Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10905 break;
10906 case Intrinsic::vector_reduce_fmul:
10907 if (SDFlags.hasAllowReassociation())
10908 Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
10909 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10910 SDFlags);
10911 else
10912 Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10913 break;
10914 case Intrinsic::vector_reduce_add:
10915 Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10916 break;
10917 case Intrinsic::vector_reduce_mul:
10918 Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10919 break;
10920 case Intrinsic::vector_reduce_and:
10921 Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10922 break;
10923 case Intrinsic::vector_reduce_or:
10924 Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10925 break;
10926 case Intrinsic::vector_reduce_xor:
10927 Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10928 break;
10929 case Intrinsic::vector_reduce_smax:
10930 Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10931 break;
10932 case Intrinsic::vector_reduce_smin:
10933 Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10934 break;
10935 case Intrinsic::vector_reduce_umax:
10936 Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10937 break;
10938 case Intrinsic::vector_reduce_umin:
10939 Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10940 break;
10941 case Intrinsic::vector_reduce_fmax:
10942 Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10943 break;
10944 case Intrinsic::vector_reduce_fmin:
10945 Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10946 break;
10947 case Intrinsic::vector_reduce_fmaximum:
10948 Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10949 break;
10950 case Intrinsic::vector_reduce_fminimum:
10951 Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
10952 break;
10953 default:
10954 llvm_unreachable("Unhandled vector reduce intrinsic");
10955 }
10956 setValue(&I, Res);
10957}
10958
10959/// Returns an AttributeList representing the attributes applied to the return
10960/// value of the given call.
10961static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
10962 SmallVector<Attribute::AttrKind, 2> Attrs;
10963 if (CLI.RetSExt)
10964 Attrs.push_back(Attribute::SExt);
10965 if (CLI.RetZExt)
10966 Attrs.push_back(Attribute::ZExt);
10967 if (CLI.IsInReg)
10968 Attrs.push_back(Attribute::InReg);
10969
10970 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
10971 Attrs);
10972}
10973
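// Editor's note: when the target cannot lower the return value directly
// (CanLowerReturn is false below), e.g. for a large aggregate returned by
// value, the call is rewritten to take a hidden sret stack slot and the
// results are loaded back out of that slot after the call.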
10974/// TargetLowering::LowerCallTo - This is the default LowerCallTo
10975/// implementation, which just calls LowerCall.
10976/// FIXME: When all targets are
10977/// migrated to using LowerCall, this hook should be integrated into SDISel.
10978std::pair<SDValue, SDValue>
10979TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
10980 // Handle the incoming return values from the call.
10981 CLI.Ins.clear();
10982 SmallVector<EVT, 4> RetTys;
10983 SmallVector<TypeSize, 4> Offsets;
10984 auto &DL = CLI.DAG.getDataLayout();
10985 ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
10986
10987 if (CLI.IsPostTypeLegalization) {
10988 // If we are lowering a libcall after legalization, split the return type.
10989 SmallVector<EVT, 4> OldRetTys;
10990 SmallVector<TypeSize, 4> OldOffsets;
10991 RetTys.swap(OldRetTys);
10992 Offsets.swap(OldOffsets);
10993
10994 for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
10995 EVT RetVT = OldRetTys[i];
10996 uint64_t Offset = OldOffsets[i];
10997 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
10998 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
10999 unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
11000 RetTys.append(NumRegs, RegisterVT);
11001 for (unsigned j = 0; j != NumRegs; ++j)
11002 Offsets.push_back(TypeSize::getFixed(Offset + j * RegisterVTByteSZ));
11003 }
11004 }
11005
11006 SmallVector<ISD::OutputArg, 4> Outs;
11007 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
11008
11009 bool CanLowerReturn =
11010 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
11011 CLI.IsVarArg, Outs, CLI.RetTy->getContext());
11012
11013 SDValue DemoteStackSlot;
11014 int DemoteStackIdx = -100;
11015 if (!CanLowerReturn) {
11016 // FIXME: equivalent assert?
11017 // assert(!CS.hasInAllocaArgument() &&
11018 // "sret demotion is incompatible with inalloca");
11019 uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
11020 Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
11021 MachineFunction &MF = CLI.DAG.getMachineFunction();
11022 DemoteStackIdx =
11023 MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
11024 Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
11025 DL.getAllocaAddrSpace());
11026
11027 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
11028 ArgListEntry Entry;
11029 Entry.Node = DemoteStackSlot;
11030 Entry.Ty = StackSlotPtrType;
11031 Entry.IsSExt = false;
11032 Entry.IsZExt = false;
11033 Entry.IsInReg = false;
11034 Entry.IsSRet = true;
11035 Entry.IsNest = false;
11036 Entry.IsByVal = false;
11037 Entry.IsByRef = false;
11038 Entry.IsReturned = false;
11039 Entry.IsSwiftSelf = false;
11040 Entry.IsSwiftAsync = false;
11041 Entry.IsSwiftError = false;
11042 Entry.IsCFGuardTarget = false;
11043 Entry.Alignment = Alignment;
11044 CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
11045 CLI.NumFixedArgs += 1;
11046 CLI.getArgs()[0].IndirectType = CLI.RetTy;
11047 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
11048
11049 // sret demotion isn't compatible with tail-calls, since the sret argument
11050 // points into the callers stack frame.
11051 CLI.IsTailCall = false;
11052 } else {
11053 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
11054 CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
11055 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
11056 ISD::ArgFlagsTy Flags;
11057 if (NeedsRegBlock) {
11058 Flags.setInConsecutiveRegs();
11059 if (I == RetTys.size() - 1)
11060 Flags.setInConsecutiveRegsLast();
11061 }
11062 EVT VT = RetTys[I];
11063 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11064 CLI.CallConv, VT);
11065 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11066 CLI.CallConv, VT);
11067 for (unsigned i = 0; i != NumRegs; ++i) {
11068 ISD::InputArg MyFlags;
11069 MyFlags.Flags = Flags;
11070 MyFlags.VT = RegisterVT;
11071 MyFlags.ArgVT = VT;
11072 MyFlags.Used = CLI.IsReturnValueUsed;
11073 if (CLI.RetTy->isPointerTy()) {
11074 MyFlags.Flags.setPointer();
11075 MyFlags.Flags.setPointerAddrSpace(
11076 cast<PointerType>(CLI.RetTy)->getAddressSpace());
11077 }
11078 if (CLI.RetSExt)
11079 MyFlags.Flags.setSExt();
11080 if (CLI.RetZExt)
11081 MyFlags.Flags.setZExt();
11082 if (CLI.IsInReg)
11083 MyFlags.Flags.setInReg();
11084 CLI.Ins.push_back(MyFlags);
11085 }
11086 }
11087 }
11088
11089 // We push in swifterror return as the last element of CLI.Ins.
11090 ArgListTy &Args = CLI.getArgs();
11091 if (supportSwiftError()) {
11092 for (const ArgListEntry &Arg : Args) {
11093 if (Arg.IsSwiftError) {
11094 ISD::InputArg MyFlags;
11095 MyFlags.VT = getPointerTy(DL);
11096 MyFlags.ArgVT = EVT(getPointerTy(DL));
11097 MyFlags.Flags.setSwiftError();
11098 CLI.Ins.push_back(MyFlags);
11099 }
11100 }
11101 }
11102
11103 // Handle all of the outgoing arguments.
11104 CLI.Outs.clear();
11105 CLI.OutVals.clear();
11106 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
11107 SmallVector<EVT, 4> ValueVTs;
11108 ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
11109 // FIXME: Split arguments if CLI.IsPostTypeLegalization
11110 Type *FinalType = Args[i].Ty;
11111 if (Args[i].IsByVal)
11112 FinalType = Args[i].IndirectType;
11113 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
11114 FinalType, CLI.CallConv, CLI.IsVarArg, DL);
11115 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
11116 ++Value) {
11117 EVT VT = ValueVTs[Value];
11118 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
11119 SDValue Op = SDValue(Args[i].Node.getNode(),
11120 Args[i].Node.getResNo() + Value);
11121 ISD::ArgFlagsTy Flags;
11122
11123 // Certain targets (such as MIPS), may have a different ABI alignment
11124 // for a type depending on the context. Give the target a chance to
11125 // specify the alignment it wants.
11126 const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
11127 Flags.setOrigAlign(OriginalAlignment);
11128
11129 if (Args[i].Ty->isPointerTy()) {
11130 Flags.setPointer();
11131 Flags.setPointerAddrSpace(
11132 cast<PointerType>(Args[i].Ty)->getAddressSpace());
11133 }
11134 if (Args[i].IsZExt)
11135 Flags.setZExt();
11136 if (Args[i].IsSExt)
11137 Flags.setSExt();
11138 if (Args[i].IsNoExt)
11139 Flags.setNoExt();
11140 if (Args[i].IsInReg) {
11141 // If we are using vectorcall calling convention, a structure that is
11142 // passed InReg - is surely an HVA
11143 if (CLI.CallConv == CallingConv::X86_VectorCall &&
11144 isa<StructType>(FinalType)) {
11145 // The first value of a structure is marked
11146 if (0 == Value)
11147 Flags.setHvaStart();
11148 Flags.setHva();
11149 }
11150 // Set InReg Flag
11151 Flags.setInReg();
11152 }
11153 if (Args[i].IsSRet)
11154 Flags.setSRet();
11155 if (Args[i].IsSwiftSelf)
11156 Flags.setSwiftSelf();
11157 if (Args[i].IsSwiftAsync)
11158 Flags.setSwiftAsync();
11159 if (Args[i].IsSwiftError)
11160 Flags.setSwiftError();
11161 if (Args[i].IsCFGuardTarget)
11162 Flags.setCFGuardTarget();
11163 if (Args[i].IsByVal)
11164 Flags.setByVal();
11165 if (Args[i].IsByRef)
11166 Flags.setByRef();
11167 if (Args[i].IsPreallocated) {
11168 Flags.setPreallocated();
11169 // Set the byval flag for CCAssignFn callbacks that don't know about
11170 // preallocated. This way we can know how many bytes we should've
11171 // allocated and how many bytes a callee cleanup function will pop. If
11172 // we port preallocated to more targets, we'll have to add custom
11173 // preallocated handling in the various CC lowering callbacks.
11174 Flags.setByVal();
11175 }
11176 if (Args[i].IsInAlloca) {
11177 Flags.setInAlloca();
11178 // Set the byval flag for CCAssignFn callbacks that don't know about
11179 // inalloca. This way we can know how many bytes we should've allocated
11180 // and how many bytes a callee cleanup function will pop. If we port
11181 // inalloca to more targets, we'll have to add custom inalloca handling
11182 // in the various CC lowering callbacks.
11183 Flags.setByVal();
11184 }
11185 Align MemAlign;
11186 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11187 unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
11188 Flags.setByValSize(FrameSize);
11189
11190 // ByVal alignment should come from the FE; the BE's guess below cannot always get it right when that info is not there.
11191 if (auto MA = Args[i].Alignment)
11192 MemAlign = *MA;
11193 else
11194 MemAlign = getByValTypeAlignment(Args[i].IndirectType, DL);
11195 } else if (auto MA = Args[i].Alignment) {
11196 MemAlign = *MA;
11197 } else {
11198 MemAlign = OriginalAlignment;
11199 }
11200 Flags.setMemAlign(MemAlign);
11201 if (Args[i].IsNest)
11202 Flags.setNest();
11203 if (NeedsRegBlock)
11204 Flags.setInConsecutiveRegs();
11205
11206 MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11207 CLI.CallConv, VT);
11208 unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11209 CLI.CallConv, VT);
11210 SmallVector<SDValue, 4> Parts(NumParts);
11211 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
11212
11213 if (Args[i].IsSExt)
11214 ExtendKind = ISD::SIGN_EXTEND;
11215 else if (Args[i].IsZExt)
11216 ExtendKind = ISD::ZERO_EXTEND;
11217
11218 // Conservatively only handle 'returned' on non-vectors that can be lowered,
11219 // for now.
11220 if (Args[i].IsReturned && !Op.getValueType().isVector() &&
11221 CanLowerReturn) {
11222 assert((CLI.RetTy == Args[i].Ty ||
11223 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
11224 CLI.RetTy->getPointerAddressSpace() ==
11225 Args[i].Ty->getPointerAddressSpace())) &&
11226 RetTys.size() == NumValues && "unexpected use of 'returned'");
11227 // Before passing 'returned' to the target lowering code, ensure that
11228 // either the register MVT and the actual EVT are the same size or that
11229 // the return value and argument are extended in the same way; in these
11230 // cases it's safe to pass the argument register value unchanged as the
11231 // return register value (although it's at the target's option whether
11232 // to do so)
11233 // TODO: allow code generation to take advantage of partially preserved
11234 // registers rather than clobbering the entire register when the
11235 // parameter extension method is not compatible with the return
11236 // extension method
11237 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
11238 (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
11239 CLI.RetZExt == Args[i].IsZExt))
11240 Flags.setReturned();
11241 }
11242
11243 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
11244 CLI.CallConv, ExtendKind);
11245
11246 for (unsigned j = 0; j != NumParts; ++j) {
11247 // If it isn't the first piece, the alignment must be 1.
11248 // For scalable vectors the scalable part is currently handled
11249 // by individual targets, so we just use the known minimum size here.
11250 ISD::OutputArg MyFlags(
11251 Flags, Parts[j].getValueType().getSimpleVT(), VT,
11252 i < CLI.NumFixedArgs, i,
11253 j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
11254 if (NumParts > 1 && j == 0)
11255 MyFlags.Flags.setSplit();
11256 else if (j != 0) {
11257 MyFlags.Flags.setOrigAlign(Align(1));
11258 if (j == NumParts - 1)
11259 MyFlags.Flags.setSplitEnd();
11260 }
11261
11262 CLI.Outs.push_back(MyFlags);
11263 CLI.OutVals.push_back(Parts[j]);
11264 }
11265
11266 if (NeedsRegBlock && Value == NumValues - 1)
11267 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11268 }
11269 }
11270
11271 SmallVector<SDValue, 4> InVals;
11272 CLI.Chain = LowerCall(CLI, InVals);
11273
11274 // Update CLI.InVals to use outside of this function.
11275 CLI.InVals = InVals;
11276
11277 // Verify that the target's LowerCall behaved as expected.
11278 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
11279 "LowerCall didn't return a valid chain!");
11280 assert((!CLI.IsTailCall || InVals.empty()) &&
11281 "LowerCall emitted a return value for a tail call!");
11282 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
11283 "LowerCall didn't emit the correct number of values!");
11284
11285 // For a tail call, the return value is merely live-out and there aren't
11286 // any nodes in the DAG representing it. Return a special value to
11287 // indicate that a tail call has been emitted and no more Instructions
11288 // should be processed in the current block.
11289 if (CLI.IsTailCall) {
11290 CLI.DAG.setRoot(CLI.Chain);
11291 return std::make_pair(SDValue(), SDValue());
11292 }
11293
11294#ifndef NDEBUG
11295 for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
11296 assert(InVals[i].getNode() && "LowerCall emitted a null value!");
11297 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
11298 "LowerCall emitted a value with the wrong type!");
11299 }
11300#endif
11301
11302 SmallVector<SDValue, 4> ReturnValues;
11303 if (!CanLowerReturn) {
11304 // The instruction result is the result of loading from the
11305 // hidden sret parameter.
11306 MVT PtrVT = getPointerTy(DL, DL.getAllocaAddrSpace());
11307
11308 unsigned NumValues = RetTys.size();
11309 ReturnValues.resize(NumValues);
11310 SmallVector<SDValue, 4> Chains(NumValues);
11311
11312 // An aggregate return value cannot wrap around the address space, so
11313 // offsets to its parts don't wrap either.
11314 MachineFunction &MF = CLI.DAG.getMachineFunction();
11315 Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
11316 for (unsigned i = 0; i < NumValues; ++i) {
11317 SDValue Add =
11318 CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
11319 CLI.DAG.getConstant(Offsets[i], CLI.DL, PtrVT),
11320 SDNodeFlags::NoUnsignedWrap);
11321 SDValue L = CLI.DAG.getLoad(
11322 RetTys[i], CLI.DL, CLI.Chain, Add,
11323 MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
11324 DemoteStackIdx, Offsets[i]),
11325 HiddenSRetAlign);
11326 ReturnValues[i] = L;
11327 Chains[i] = L.getValue(1);
11328 }
11329
11330 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
11331 } else {
11332 // Collect the legal value parts into potentially illegal values
11333 // that correspond to the original function's return values.
11334 std::optional<ISD::NodeType> AssertOp;
11335 if (CLI.RetSExt)
11336 AssertOp = ISD::AssertSext;
11337 else if (CLI.RetZExt)
11338 AssertOp = ISD::AssertZext;
11339 unsigned CurReg = 0;
11340 for (EVT VT : RetTys) {
11341 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11342 CLI.CallConv, VT);
11343 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11344 CLI.CallConv, VT);
11345
11346 ReturnValues.push_back(getCopyFromParts(
11347 CLI.DAG, CLI.DL, &InVals[CurReg], NumRegs, RegisterVT, VT, nullptr,
11348 CLI.Chain, CLI.CallConv, AssertOp));
11349 CurReg += NumRegs;
11350 }
11351
11352 // For a function returning void, there is no return value. We can't create
11353 // such a node, so we just return a null return value in that case;
11354 // nothing will actually look at the value.
11355 if (ReturnValues.empty())
11356 return std::make_pair(SDValue(), CLI.Chain);
11357 }
11358
11359 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
11360 CLI.DAG.getVTList(RetTys), ReturnValues);
11361 return std::make_pair(Res, CLI.Chain);
11362}
11363
11364/// Places new result values for the node in Results (their number
11365/// and types must exactly match those of the original return values of
11366/// the node), or leaves Results empty, which indicates that the node is not
11367/// to be custom lowered after all.
11368 void TargetLowering::LowerOperationWrapper(SDNode *N,
11369 SmallVectorImpl<SDValue> &Results,
11370 SelectionDAG &DAG) const {
11371 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
11372
11373 if (!Res.getNode())
11374 return;
11375
11376 // If the original node has one result, take the return value from
11377 // LowerOperation as is. It might not be result number 0.
11378 if (N->getNumValues() == 1) {
11379 Results.push_back(Res);
11380 return;
11381 }
11382
11383 // If the original node has multiple results, then the return node should
11384 // have the same number of results.
11385 assert((N->getNumValues() == Res->getNumValues()) &&
11386 "Lowering returned the wrong number of results!");
11387
11388 // Places new result values based on the result number of N.
11389 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
11390 Results.push_back(Res.getValue(I));
11391}
11392
11393 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
11394 llvm_unreachable("LowerOperation not implemented for this target!");
11395}
11396
11397 void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
11398 unsigned Reg,
11399 ISD::NodeType ExtendType) {
11400 SDValue Op = getValue(V);
11401 assert((Op.getOpcode() != ISD::CopyFromReg ||
11402 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
11403 "Copy from a reg to the same reg!");
11404 assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
11405
11406 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11407 // If this is an InlineAsm we have to match the registers required, not the
11408 // notional registers required by the type.
11409
11410 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
11411 std::nullopt); // This is not an ABI copy.
11412 SDValue Chain = DAG.getEntryNode();
11413
11414 if (ExtendType == ISD::ANY_EXTEND) {
11415 auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
11416 if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
11417 ExtendType = PreferredExtendIt->second;
11418 }
11419 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
11420 PendingExports.push_back(Chain);
11421}
11422
11423 #include "llvm/CodeGen/SelectionDAGISel.h"
11424
11425/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
11426/// entry block, return true. This includes arguments used by switches, since
11427/// the switch may expand into multiple basic blocks.
11428static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
11429 // With FastISel active, we may be splitting blocks, so force creation
11430 // of virtual registers for all non-dead arguments.
11431 if (FastISel)
11432 return A->use_empty();
11433
11434 const BasicBlock &Entry = A->getParent()->front();
11435 for (const User *U : A->users())
11436 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
11437 return false; // Use not in entry block.
11438
11439 return true;
11440}
11441
11442 using ArgCopyElisionMapTy =
11443 DenseMap<const Argument *,
11444 std::pair<const AllocaInst *, const StoreInst *>>;
11445
11446/// Scan the entry block of the function in FuncInfo for arguments that look
11447/// like copies into a local alloca. Record any copied arguments in
11448/// ArgCopyElisionCandidates.
11449static void
11450 findArgumentCopyElisionCandidates(const DataLayout &DL,
11451 FunctionLoweringInfo *FuncInfo,
11452 ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
11453 // Record the state of every static alloca used in the entry block. Argument
11454 // allocas are all used in the entry block, so we need approximately as many
11455 // entries as we have arguments.
11456 enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
11457 SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
11458 unsigned NumArgs = FuncInfo->Fn->arg_size();
11459 StaticAllocas.reserve(NumArgs * 2);
11460
11461 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
11462 if (!V)
11463 return nullptr;
11464 V = V->stripPointerCasts();
11465 const auto *AI = dyn_cast<AllocaInst>(V);
11466 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
11467 return nullptr;
11468 auto Iter = StaticAllocas.insert({AI, Unknown});
11469 return &Iter.first->second;
11470 };
11471
11472 // Look for stores of arguments to static allocas. Look through bitcasts and
11473 // GEPs to handle type coercions, as long as the alloca is fully initialized
11474 // by the store. Any non-store use of an alloca escapes it and any subsequent
11475 // unanalyzed store might write it.
11476 // FIXME: Handle structs initialized with multiple stores.
11477 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
11478 // Look for stores, and handle non-store uses conservatively.
11479 const auto *SI = dyn_cast<StoreInst>(&I);
11480 if (!SI) {
11481 // We will look through cast uses, so ignore them completely.
11482 if (I.isCast())
11483 continue;
11484 // Ignore debug info and pseudo op intrinsics, they don't escape or store
11485 // to allocas.
11486 if (I.isDebugOrPseudoInst())
11487 continue;
11488 // This is an unknown instruction. Assume it escapes or writes to all
11489 // static alloca operands.
11490 for (const Use &U : I.operands()) {
11491 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
11492 *Info = StaticAllocaInfo::Clobbered;
11493 }
11494 continue;
11495 }
11496
11497 // If the stored value is a static alloca, mark it as escaped.
11498 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11499 *Info = StaticAllocaInfo::Clobbered;
11500
11501 // Check if the destination is a static alloca.
11502 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11503 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
11504 if (!Info)
11505 continue;
11506 const AllocaInst *AI = cast<AllocaInst>(Dst);
11507
11508 // Skip allocas that have been initialized or clobbered.
11509 if (*Info != StaticAllocaInfo::Unknown)
11510 continue;
11511
11512 // Check if the stored value is an argument, and that this store fully
11513 // initializes the alloca.
11514 // If the argument type has padding bits we can't directly forward a pointer
11515 // as the upper bits may contain garbage.
11516 // Don't elide copies from the same argument twice.
11517 const Value *Val = SI->getValueOperand()->stripPointerCasts();
11518 const auto *Arg = dyn_cast<Argument>(Val);
11519 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11520 Arg->getType()->isEmptyTy() ||
11521 DL.getTypeStoreSize(Arg->getType()) !=
11522 DL.getTypeAllocSize(AI->getAllocatedType()) ||
11523 !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11524 ArgCopyElisionCandidates.count(Arg)) {
11525 *Info = StaticAllocaInfo::Clobbered;
11526 continue;
11527 }
11528
11529 LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
11530 << '\n');
11531
11532 // Mark this alloca and store for argument copy elision.
11533 *Info = StaticAllocaInfo::Elidable;
11534 ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
11535
11536 // Stop scanning if we've seen all arguments. This will happen early in -O0
11537 // builds, which is useful, because -O0 builds have large entry blocks and
11538 // many allocas.
11539 if (ArgCopyElisionCandidates.size() == NumArgs)
11540 break;
11541 }
11542}
11543
11544/// Try to elide argument copies from memory into a local alloca. Succeeds if
11545/// ArgVal is a load from a suitable fixed stack object.
11546 static void tryToElideArgumentCopy(
11547 FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
11548 DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
11549 SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
11550 ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
11551 ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
11552 // Check if this is a load from a fixed stack object.
11553 auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
11554 if (!LNode)
11555 return;
11556 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11557 if (!FINode)
11558 return;
11559
11560 // Check that the fixed stack object is the right size and alignment.
11561 // Look at the alignment that the user wrote on the alloca instead of looking
11562 // at the stack object.
11563 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11564 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11565 const AllocaInst *AI = ArgCopyIter->second.first;
11566 int FixedIndex = FINode->getIndex();
11567 int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
11568 int OldIndex = AllocaIndex;
11569 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
11570 if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
11571 LLVM_DEBUG(
11572 dbgs() << " argument copy elision failed due to bad fixed stack "
11573 "object size\n");
11574 return;
11575 }
11576 Align RequiredAlignment = AI->getAlign();
11577 if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
11578 LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca "
11579 "greater than stack argument alignment ("
11580 << DebugStr(RequiredAlignment) << " vs "
11581 << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
11582 return;
11583 }
11584
11585 // Perform the elision. Delete the old stack object and replace its only use
11586 // in the variable info map. Mark the stack object as mutable and aliased.
11587 LLVM_DEBUG({
11588 dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
11589 << " Replacing frame index " << OldIndex << " with " << FixedIndex
11590 << '\n';
11591 });
11592 MFI.RemoveStackObject(OldIndex);
11593 MFI.setIsImmutableObjectIndex(FixedIndex, false);
11594 MFI.setIsAliasedObjectIndex(FixedIndex, true);
11595 AllocaIndex = FixedIndex;
11596 ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
11597 for (SDValue ArgVal : ArgVals)
11598 Chains.push_back(ArgVal.getValue(1));
11599
11600 // Avoid emitting code for the store implementing the copy.
11601 const StoreInst *SI = ArgCopyIter->second.second;
11602 ElidedArgCopyInstrs.insert(SI);
11603
11604 // Check for uses of the argument again so that we can avoid exporting ArgVal
11605 // if it isn't used by anything other than the store.
11606 for (const Value *U : Arg.users()) {
11607 if (U != SI) {
11608 ArgHasUses = true;
11609 break;
11610 }
11611 }
11612}
11613
11614void SelectionDAGISel::LowerArguments(const Function &F) {
11615 SelectionDAG &DAG = SDB->DAG;
11616 SDLoc dl = SDB->getCurSDLoc();
11617 const DataLayout &DL = DAG.getDataLayout();
11618 SmallVector<ISD::InputArg, 16> Ins;
11619
11620 // In Naked functions we aren't going to save any registers.
11621 if (F.hasFnAttribute(Attribute::Naked))
11622 return;
11623
11624 if (!FuncInfo->CanLowerReturn) {
11625 // Put in an sret pointer parameter before all the other parameters.
11626 MVT ValueVT = TLI->getPointerTy(DL, DL.getAllocaAddrSpace());
11627
11628 ISD::ArgFlagsTy Flags;
11629 Flags.setSRet();
11630 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVT);
11631 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT, true,
11632 ISD::InputArg::NoArgIndex, 0);
11633 Ins.push_back(RetArg);
11634 }
11635
11636 // Look for stores of arguments to static allocas. Mark such arguments with a
11637 // flag to ask the target to give us the memory location of that argument if
11638 // available.
11639 ArgCopyElisionMapTy ArgCopyElisionCandidates;
11640 findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
11641 ArgCopyElisionCandidates);
11642
11643 // Set up the incoming argument description vector.
11644 for (const Argument &Arg : F.args()) {
11645 unsigned ArgNo = Arg.getArgNo();
11646 SmallVector<EVT, 4> ValueVTs;
11647 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11648 bool isArgValueUsed = !Arg.use_empty();
11649 unsigned PartBase = 0;
11650 Type *FinalType = Arg.getType();
11651 if (Arg.hasAttribute(Attribute::ByVal))
11652 FinalType = Arg.getParamByValType();
11653 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
11654 FinalType, F.getCallingConv(), F.isVarArg(), DL);
11655 for (unsigned Value = 0, NumValues = ValueVTs.size();
11656 Value != NumValues; ++Value) {
11657 EVT VT = ValueVTs[Value];
11658 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
11659 ISD::ArgFlagsTy Flags;
11660
11661
11662 if (Arg.getType()->isPointerTy()) {
11663 Flags.setPointer();
11664 Flags.setPointerAddrSpace(
11665 cast<PointerType>(Arg.getType())->getAddressSpace());
11666 }
11667 if (Arg.hasAttribute(Attribute::ZExt))
11668 Flags.setZExt();
11669 if (Arg.hasAttribute(Attribute::SExt))
11670 Flags.setSExt();
11671 if (Arg.hasAttribute(Attribute::InReg)) {
11672 // If we are using vectorcall calling convention, a structure that is
11673 // passed InReg - is surely an HVA
11674 if (F.getCallingConv() == CallingConv::X86_VectorCall &&
11675 isa<StructType>(Arg.getType())) {
11676 // The first value of a structure is marked
11677 if (0 == Value)
11678 Flags.setHvaStart();
11679 Flags.setHva();
11680 }
11681 // Set InReg Flag
11682 Flags.setInReg();
11683 }
11684 if (Arg.hasAttribute(Attribute::StructRet))
11685 Flags.setSRet();
11686 if (Arg.hasAttribute(Attribute::SwiftSelf))
11687 Flags.setSwiftSelf();
11688 if (Arg.hasAttribute(Attribute::SwiftAsync))
11689 Flags.setSwiftAsync();
11690 if (Arg.hasAttribute(Attribute::SwiftError))
11691 Flags.setSwiftError();
11692 if (Arg.hasAttribute(Attribute::ByVal))
11693 Flags.setByVal();
11694 if (Arg.hasAttribute(Attribute::ByRef))
11695 Flags.setByRef();
11696 if (Arg.hasAttribute(Attribute::InAlloca)) {
11697 Flags.setInAlloca();
11698 // Set the byval flag for CCAssignFn callbacks that don't know about
11699 // inalloca. This way we can know how many bytes we should've allocated
11700 // and how many bytes a callee cleanup function will pop. If we port
11701 // inalloca to more targets, we'll have to add custom inalloca handling
11702 // in the various CC lowering callbacks.
11703 Flags.setByVal();
11704 }
11705 if (Arg.hasAttribute(Attribute::Preallocated)) {
11706 Flags.setPreallocated();
11707 // Set the byval flag for CCAssignFn callbacks that don't know about
11708 // preallocated. This way we can know how many bytes we should've
11709 // allocated and how many bytes a callee cleanup function will pop. If
11710 // we port preallocated to more targets, we'll have to add custom
11711 // preallocated handling in the various CC lowering callbacks.
11712 Flags.setByVal();
11713 }
11714
11715 // Certain targets (such as MIPS), may have a different ABI alignment
11716 // for a type depending on the context. Give the target a chance to
11717 // specify the alignment it wants.
11718 const Align OriginalAlignment(
11719 TLI->getABIAlignmentForCallingConv(ArgTy, DL));
11720 Flags.setOrigAlign(OriginalAlignment);
11721
11722 Align MemAlign;
11723 Type *ArgMemTy = nullptr;
11724 if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
11725 Flags.isByRef()) {
11726 if (!ArgMemTy)
11727 ArgMemTy = Arg.getPointeeInMemoryValueType();
11728
11729 uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
11730
11731 // For in-memory arguments, size and alignment should be passed from FE.
11732 // BE will guess if this info is not there but there are cases it cannot
11733 // get right.
11734 if (auto ParamAlign = Arg.getParamStackAlign())
11735 MemAlign = *ParamAlign;
11736 else if ((ParamAlign = Arg.getParamAlign()))
11737 MemAlign = *ParamAlign;
11738 else
11739 MemAlign = TLI->getByValTypeAlignment(ArgMemTy, DL);
11740 if (Flags.isByRef())
11741 Flags.setByRefSize(MemSize);
11742 else
11743 Flags.setByValSize(MemSize);
11744 } else if (auto ParamAlign = Arg.getParamStackAlign()) {
11745 MemAlign = *ParamAlign;
11746 } else {
11747 MemAlign = OriginalAlignment;
11748 }
11749 Flags.setMemAlign(MemAlign);
11750
11751 if (Arg.hasAttribute(Attribute::Nest))
11752 Flags.setNest();
11753 if (NeedsRegBlock)
11754 Flags.setInConsecutiveRegs();
11755 if (ArgCopyElisionCandidates.count(&Arg))
11756 Flags.setCopyElisionCandidate();
11757 if (Arg.hasAttribute(Attribute::Returned))
11758 Flags.setReturned();
11759
11760 MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
11761 *CurDAG->getContext(), F.getCallingConv(), VT);
11762 unsigned NumRegs = TLI->getNumRegistersForCallingConv(
11763 *CurDAG->getContext(), F.getCallingConv(), VT);
11764 for (unsigned i = 0; i != NumRegs; ++i) {
11765 // For scalable vectors, use the minimum size; individual targets
11766 // are responsible for handling scalable vector arguments and
11767 // return values.
11768 ISD::InputArg MyFlags(
11769 Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11770 PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
11771 if (NumRegs > 1 && i == 0)
11772 MyFlags.Flags.setSplit();
11773 // If it isn't the first piece, the alignment must be 1.
11774 else if (i > 0) {
11775 MyFlags.Flags.setOrigAlign(Align(1));
11776 if (i == NumRegs - 1)
11777 MyFlags.Flags.setSplitEnd();
11778 }
11779 Ins.push_back(MyFlags);
11780 }
11781 if (NeedsRegBlock && Value == NumValues - 1)
11782 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11783 PartBase += VT.getStoreSize().getKnownMinValue();
11784 }
11785 }
11786
11787 // Call the target to set up the argument values.
11788 SmallVector<SDValue, 8> InVals;
11789 SDValue NewRoot = TLI->LowerFormalArguments(
11790 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
11791
11792 // Verify that the target's LowerFormalArguments behaved as expected.
11793 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
11794 "LowerFormalArguments didn't return a valid chain!");
11795 assert(InVals.size() == Ins.size() &&
11796 "LowerFormalArguments didn't emit the correct number of values!");
11797 LLVM_DEBUG({
11798 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
11799 assert(InVals[i].getNode() &&
11800 "LowerFormalArguments emitted a null value!");
11801 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11802 "LowerFormalArguments emitted a value with the wrong type!");
11803 }
11804 });
11805
11806 // Update the DAG with the new chain value resulting from argument lowering.
11807 DAG.setRoot(NewRoot);
11808
11809 // Set up the argument values.
11810 unsigned i = 0;
11811 if (!FuncInfo->CanLowerReturn) {
11812 // Create a virtual register for the sret pointer, and put in a copy
11813 // from the sret argument into it.
11814 MVT VT = TLI->getPointerTy(DL, DL.getAllocaAddrSpace());
11815 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
11816 std::optional<ISD::NodeType> AssertOp;
11817 SDValue ArgValue =
11818 getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT, nullptr, NewRoot,
11819 F.getCallingConv(), AssertOp);
11820
11821 MachineFunction& MF = SDB->DAG.getMachineFunction();
11822 MachineRegisterInfo &RegInfo = MF.getRegInfo();
11823 Register SRetReg =
11824 RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
11825 FuncInfo->DemoteRegister = SRetReg;
11826 NewRoot =
11827 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
11828 DAG.setRoot(NewRoot);
11829
11830 // i indexes lowered arguments. Bump it past the hidden sret argument.
11831 ++i;
11832 }
11833
11834 SmallVector<SDValue, 4> Chains;
11835 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11836 for (const Argument &Arg : F.args()) {
11837 SmallVector<SDValue, 4> ArgValues;
11838 SmallVector<EVT, 4> ValueVTs;
11839 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11840 unsigned NumValues = ValueVTs.size();
11841 if (NumValues == 0)
11842 continue;
11843
11844 bool ArgHasUses = !Arg.use_empty();
11845
11846 // Elide the copying store if the target loaded this argument from a
11847 // suitable fixed stack object.
11848 if (Ins[i].Flags.isCopyElisionCandidate()) {
11849 unsigned NumParts = 0;
11850 for (EVT VT : ValueVTs)
11851 NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
11852 F.getCallingConv(), VT);
11853
11854 tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
11855 ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
11856 ArrayRef(&InVals[i], NumParts), ArgHasUses);
11857 }
11858
11859 // If this argument is unused then remember its value. It is used to generate
11860 // debugging information.
11861 bool isSwiftErrorArg =
11862 TLI->supportSwiftError() &&
11863 Arg.hasAttribute(Attribute::SwiftError);
11864 if (!ArgHasUses && !isSwiftErrorArg) {
11865 SDB->setUnusedArgValue(&Arg, InVals[i]);
11866
11867 // Also remember any frame index for use in FastISel.
11868 if (FrameIndexSDNode *FI =
11869 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11870 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11871 }
11872
11873 for (unsigned Val = 0; Val != NumValues; ++Val) {
11874 EVT VT = ValueVTs[Val];
11875 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11876 F.getCallingConv(), VT);
11877 unsigned NumParts = TLI->getNumRegistersForCallingConv(
11878 *CurDAG->getContext(), F.getCallingConv(), VT);
11879
11880 // Even an apparent 'unused' swifterror argument needs to be returned. So
11881 // we do generate a copy for it that can be used on return from the
11882 // function.
11883 if (ArgHasUses || isSwiftErrorArg) {
11884 std::optional<ISD::NodeType> AssertOp;
11885 if (Arg.hasAttribute(Attribute::SExt))
11886 AssertOp = ISD::AssertSext;
11887 else if (Arg.hasAttribute(Attribute::ZExt))
11888 AssertOp = ISD::AssertZext;
11889
11890 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
11891 PartVT, VT, nullptr, NewRoot,
11892 F.getCallingConv(), AssertOp));
11893 }
11894
11895 i += NumParts;
11896 }
11897
11898 // We don't need to do anything else for unused arguments.
11899 if (ArgValues.empty())
11900 continue;
11901
11902 // Note down frame index.
11903 if (FrameIndexSDNode *FI =
11904 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11905 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11906
11907 SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
11908 SDB->getCurSDLoc());
11909
11910 SDB->setValue(&Arg, Res);
11911 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
11912 // We want to associate the argument with the frame index, among
11913 // involved operands, that correspond to the lowest address. The
11914 // getCopyFromParts function, called earlier, is swapping the order of
11915 // the operands to BUILD_PAIR depending on endianness. The result of
11916 // that swapping is that the least significant bits of the argument will
11917 // be in the first operand of the BUILD_PAIR node, and the most
11918 // significant bits will be in the second operand.
11919 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
11920 if (LoadSDNode *LNode =
11921 dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
11922 if (FrameIndexSDNode *FI =
11923 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11924 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11925 }
11926
11927 // Analyses past this point are naive and don't expect an assertion.
11928 if (Res.getOpcode() == ISD::AssertZext)
11929 Res = Res.getOperand(0);
11930
11931 // Update the SwiftErrorVRegDefMap.
11932 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
11933 Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11934 if (Reg.isVirtual())
11935 SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11936 Reg);
11937 }
11938
11939 // If this argument is live outside of the entry block, insert a copy from
11940 // wherever we got it to the vreg that other BB's will reference it as.
11941 if (Res.getOpcode() == ISD::CopyFromReg) {
11942 // If we can, though, try to skip creating an unnecessary vreg.
11943 // FIXME: This isn't very clean... it would be nice to make this more
11944 // general.
11945 Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11946 if (Reg.isVirtual()) {
11947 FuncInfo->ValueMap[&Arg] = Reg;
11948 continue;
11949 }
11950 }
11951 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
11952 FuncInfo->InitializeRegForValue(&Arg);
11953 SDB->CopyToExportRegsIfNeeded(&Arg);
11954 }
11955 }
11956
11957 if (!Chains.empty()) {
11958 Chains.push_back(NewRoot);
11959 NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
11960 }
11961
11962 DAG.setRoot(NewRoot);
11963
11964 assert(i == InVals.size() && "Argument register count mismatch!");
11965
11966 // If any argument copy elisions occurred and we have debug info, update the
11967 // stale frame indices used in the dbg.declare variable info table.
11968 if (!ArgCopyElisionFrameIndexMap.empty()) {
11969 for (MachineFunction::VariableDbgInfo &VI :
11970 MF->getInStackSlotVariableDbgInfo()) {
11971 auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11972 if (I != ArgCopyElisionFrameIndexMap.end())
11973 VI.updateStackSlot(I->second);
11974 }
11975 }
11976
11977 // Finally, if the target has anything special to do, allow it to do so.
11978 emitFunctionEntryCode();
11979}
11980
11981/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
11982/// ensure constants are generated when needed. Remember the virtual registers
11983/// that need to be added to the Machine PHI nodes as input. We cannot just
11984/// directly add them, because expansion might result in multiple MBB's for one
11985/// BB. As such, the start of the BB might correspond to a different MBB than
11986/// the end.
11987void
11988SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
11989 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11990
11991 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
11992
11993 // Check PHI nodes in successors that expect a value to be available from this
11994 // block.
11995 for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11996 if (!isa<PHINode>(SuccBB->begin())) continue;
11997 MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
11998
11999 // If this terminator has multiple identical successors (common for
12000 // switches), only handle each succ once.
12001 if (!SuccsHandled.insert(SuccMBB).second)
12002 continue;
12003
12004 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
12005
12006 // At this point we know that there is a 1-1 correspondence between LLVM PHI
12007 // nodes and Machine PHI nodes, but the incoming operands have not been
12008 // emitted yet.
12009 for (const PHINode &PN : SuccBB->phis()) {
12010 // Ignore dead PHIs.
12011 if (PN.use_empty())
12012 continue;
12013
12014 // Skip empty types
12015 if (PN.getType()->isEmptyTy())
12016 continue;
12017
12018 unsigned Reg;
12019 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12020
12021 if (const auto *C = dyn_cast<Constant>(PHIOp)) {
12022 unsigned &RegOut = ConstantsOut[C];
12023 if (RegOut == 0) {
12024 RegOut = FuncInfo.CreateRegs(C);
12025 // We need to zero/sign extend ConstantInt phi operands to match
12026 // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
12027 ISD::NodeType ExtendType = ISD::ANY_EXTEND;
12028 if (auto *CI = dyn_cast<ConstantInt>(C))
12029 ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
12030 : ISD::ZERO_EXTEND;
12031 CopyValueToVirtualRegister(C, RegOut, ExtendType);
12032 }
12033 Reg = RegOut;
12034 } else {
12035 DenseMap<const Value *, Register>::iterator I =
12036 FuncInfo.ValueMap.find(PHIOp);
12037 if (I != FuncInfo.ValueMap.end())
12038 Reg = I->second;
12039 else {
12040 assert(isa<AllocaInst>(PHIOp) &&
12041 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
12042 "Didn't codegen value into a register!??");
12043 Reg = FuncInfo.CreateRegs(PHIOp);
12044 CopyValueToVirtualRegister(PHIOp, Reg);
12045 }
12046 }
12047
12048 // Remember that this register needs to be added to the machine PHI node as
12049 // the input for this MBB.
12050 SmallVector<EVT, 4> ValueVTs;
12051 ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
12052 for (EVT VT : ValueVTs) {
12053 const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
12054 for (unsigned i = 0; i != NumRegisters; ++i)
12055 FuncInfo.PHINodesToUpdate.push_back(
12056 std::make_pair(&*MBBI++, Reg + i));
12057 Reg += NumRegisters;
12058 }
12059 }
12060 }
12061
12062 ConstantsOut.clear();
12063}
12064
12065MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
12066 MachineFunction::iterator I(MBB);
12067 if (++I == FuncInfo.MF->end())
12068 return nullptr;
12069 return &*I;
12070}
12071
12072/// During lowering new call nodes can be created (such as memset, etc.).
12073/// Those will become new roots of the current DAG, but complications arise
12074/// when they are tail calls. In such cases, the call lowering will update
12075/// the root, but the builder still needs to know that a tail call has been
12076/// lowered in order to avoid generating an additional return.
12077void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
12078 // If the node is null, we do have a tail call.
12079 if (MaybeTC.getNode() != nullptr)
12080 DAG.setRoot(MaybeTC);
12081 else
12082 HasTailCall = true;
12083}
12084
12085void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
12086 MachineBasicBlock *SwitchMBB,
12087 MachineBasicBlock *DefaultMBB) {
12088 MachineFunction *CurMF = FuncInfo.MF;
12089 MachineBasicBlock *NextMBB = nullptr;
12090 MachineFunction::iterator BBI(W.MBB);
12091 if (++BBI != FuncInfo.MF->end())
12092 NextMBB = &*BBI;
12093
12094 unsigned Size = W.LastCluster - W.FirstCluster + 1;
12095
12096 BranchProbabilityInfo *BPI = FuncInfo.BPI;
12097
12098 if (Size == 2 && W.MBB == SwitchMBB) {
12099 // If any two of the cases have the same destination, and if one value
12100 // is the same as the other, but has one bit unset that the other has set,
12101 // use bit manipulation to do two compares at once. For example:
12102 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
12103 // TODO: This could be extended to merge any 2 cases in switches with 3
12104 // cases.
12105 // TODO: Handle cases where W.CaseBB != SwitchBB.
12106 CaseCluster &Small = *W.FirstCluster;
12107 CaseCluster &Big = *W.LastCluster;
12108
12109 if (Small.Low == Small.High && Big.Low == Big.High &&
12110 Small.MBB == Big.MBB) {
12111 const APInt &SmallValue = Small.Low->getValue();
12112 const APInt &BigValue = Big.Low->getValue();
12113
12114 // Check that there is only one bit different.
12115 APInt CommonBit = BigValue ^ SmallValue;
12116 if (CommonBit.isPowerOf2()) {
12117 SDValue CondLHS = getValue(Cond);
12118 EVT VT = CondLHS.getValueType();
12119 SDLoc DL = getCurSDLoc();
12120
12121 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
12122 DAG.getConstant(CommonBit, DL, VT));
12123 SDValue Cond = DAG.getSetCC(
12124 DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
12125 ISD::SETEQ);
12126
12127 // Update successor info.
12128 // Both Small and Big will jump to Small.BB, so we sum up the
12129 // probabilities.
12130 addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
12131 if (BPI)
12132 addSuccessorWithProb(
12133 SwitchMBB, DefaultMBB,
12134 // The default destination is the first successor in IR.
12135 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
12136 else
12137 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12138
12139 // Insert the true branch.
12140 SDValue BrCond =
12141 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
12142 DAG.getBasicBlock(Small.MBB));
12143 // Insert the false branch.
12144 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
12145 DAG.getBasicBlock(DefaultMBB));
12146
12147 DAG.setRoot(BrCond);
12148 return;
12149 }
12150 }
12151 }
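// For illustration, tracing the transformation above: for X == 6 || X == 4,
// SmallValue = 4 and BigValue = 6, so CommonBit = 6 ^ 4 = 2 (a power of two),
// Or = (X | 2), and the single comparison tests (X | 2) == (6 | 4) == 6,
// replacing the two per-case compares with one.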
12152
12153 if (TM.getOptLevel() != CodeGenOptLevel::None) {
12154 // Here, we order cases by probability so the most likely case will be
12155 // checked first. However, two clusters can have the same probability in
12156 // which case their relative ordering is non-deterministic. So we use Low
12157 // as a tie-breaker as clusters are guaranteed to never overlap.
12158 llvm::sort(W.FirstCluster, W.LastCluster + 1,
12159 [](const CaseCluster &a, const CaseCluster &b) {
12160 return a.Prob != b.Prob ?
12161 a.Prob > b.Prob :
12162 a.Low->getValue().slt(b.Low->getValue());
12163 });
12164
12165 // Rearrange the case blocks so that the last one falls through if possible
12166 // without changing the order of probabilities.
12167 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
12168 --I;
12169 if (I->Prob > W.LastCluster->Prob)
12170 break;
12171 if (I->Kind == CC_Range && I->MBB == NextMBB) {
12172 std::swap(*I, *W.LastCluster);
12173 break;
12174 }
12175 }
12176 }
12177
12178 // Compute total probability.
12179 BranchProbability DefaultProb = W.DefaultProb;
12180 BranchProbability UnhandledProbs = DefaultProb;
12181 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
12182 UnhandledProbs += I->Prob;
12183
12184 MachineBasicBlock *CurMBB = W.MBB;
12185 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
12186 bool FallthroughUnreachable = false;
12187 MachineBasicBlock *Fallthrough;
12188 if (I == W.LastCluster) {
12189 // For the last cluster, fall through to the default destination.
12190 Fallthrough = DefaultMBB;
12191 FallthroughUnreachable = isa<UnreachableInst>(
12192 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
12193 } else {
12194 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
12195 CurMF->insert(BBI, Fallthrough);
12196 // Put Cond in a virtual register to make it available from the new blocks.
12197 ExportFromCurrentBlock(Cond);
12198 }
12199 UnhandledProbs -= I->Prob;
12200
12201 switch (I->Kind) {
12202 case CC_JumpTable: {
12203 // FIXME: Optimize away range check based on pivot comparisons.
12204 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
12205 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
12206
12207 // The jump block hasn't been inserted yet; insert it here.
12208 MachineBasicBlock *JumpMBB = JT->MBB;
12209 CurMF->insert(BBI, JumpMBB);
12210
12211 auto JumpProb = I->Prob;
12212 auto FallthroughProb = UnhandledProbs;
12213
12214 // If the default statement is a target of the jump table, we evenly
12215 // distribute the default probability to successors of CurMBB. Also
12216 // update the probability on the edge from JumpMBB to Fallthrough.
12217 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
12218 SE = JumpMBB->succ_end();
12219 SI != SE; ++SI) {
12220 if (*SI == DefaultMBB) {
12221 JumpProb += DefaultProb / 2;
12222 FallthroughProb -= DefaultProb / 2;
12223 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
12224 JumpMBB->normalizeSuccProbs();
12225 break;
12226 }
12227 }
12228
12229 // If the default clause is unreachable, propagate that knowledge into
12230 // JTH->FallthroughUnreachable which will use it to suppress the range
12231 // check.
12232 //
12233 // However, don't do this if we're doing branch target enforcement,
12234 // because a table branch _without_ a range check can be a tempting JOP
12235 // gadget - out-of-bounds inputs that are impossible in correct
12236 // execution become possible again if an attacker can influence the
12237 // control flow. So if an attacker doesn't already have a BTI bypass
12238 // available, we don't want them to be able to get one out of this
12239 // table branch.
12240 if (FallthroughUnreachable) {
12241 Function &CurFunc = CurMF->getFunction();
12242 if (!CurFunc.hasFnAttribute("branch-target-enforcement"))
12243 JTH->FallthroughUnreachable = true;
12244 }
12245
12246 if (!JTH->FallthroughUnreachable)
12247 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12248 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12249 CurMBB->normalizeSuccProbs();
12250
12251 // The jump table header will be inserted in our current block, do the
12252 // range check, and fall through to our fallthrough block.
12253 JTH->HeaderBB = CurMBB;
12254 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
12255
12256 // If we're in the right place, emit the jump table header right now.
12257 if (CurMBB == SwitchMBB) {
12258 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
12259 JTH->Emitted = true;
12260 }
12261 break;
12262 }
12263 case CC_BitTests: {
12264 // FIXME: Optimize away range check based on pivot comparisons.
12265 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
12266
12267 // The bit test blocks haven't been inserted yet; insert them here.
12268 for (BitTestCase &BTC : BTB->Cases)
12269 CurMF->insert(BBI, BTC.ThisBB);
12270
12271 // Fill in fields of the BitTestBlock.
12272 BTB->Parent = CurMBB;
12273 BTB->Default = Fallthrough;
12274
12275 BTB->DefaultProb = UnhandledProbs;
12276 // If the cases in bit test don't form a contiguous range, we evenly
12277 // distribute the probability on the edge to Fallthrough to two
12278 // successors of CurMBB.
12279 if (!BTB->ContiguousRange) {
12280 BTB->Prob += DefaultProb / 2;
12281 BTB->DefaultProb -= DefaultProb / 2;
12282 }
12283
12284 if (FallthroughUnreachable)
12285 BTB->FallthroughUnreachable = true;
12286
12287 // If we're in the right place, emit the bit test header right now.
12288 if (CurMBB == SwitchMBB) {
12289 visitBitTestHeader(*BTB, SwitchMBB);
12290 BTB->Emitted = true;
12291 }
12292 break;
12293 }
12294 case CC_Range: {
12295 const Value *RHS, *LHS, *MHS;
12296 ISD::CondCode CC;
12297 if (I->Low == I->High) {
12298 // Check Cond == I->Low.
12299 CC = ISD::SETEQ;
12300 LHS = Cond;
12301 RHS = I->Low;
12302 MHS = nullptr;
12303 } else {
12304 // Check I->Low <= Cond <= I->High.
12305 CC = ISD::SETLE;
12306 LHS = I->Low;
12307 MHS = Cond;
12308 RHS = I->High;
12309 }
12310
12311 // If Fallthrough is unreachable, fold away the comparison.
12312 if (FallthroughUnreachable)
12313 CC = ISD::SETTRUE;
12314
12315 // The false probability is the sum of all unhandled cases.
12316 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
12317 getCurSDLoc(), I->Prob, UnhandledProbs);
12318
12319 if (CurMBB == SwitchMBB)
12320 visitSwitchCase(CB, SwitchMBB);
12321 else
12322 SL->SwitchCases.push_back(CB);
12323
12324 break;
12325 }
12326 }
12327 CurMBB = Fallthrough;
12328 }
12329}
12330
12331void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
12332 const SwitchWorkListItem &W,
12333 Value *Cond,
12334 MachineBasicBlock *SwitchMBB) {
12335 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
12336 "Clusters not sorted?");
12337 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
12338
12339 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12340 SL->computeSplitWorkItemInfo(W);
12341
12342 // Use the first element on the right as pivot since we will make less-than
12343 // comparisons against it.
12344 CaseClusterIt PivotCluster = FirstRight;
12345 assert(PivotCluster > W.FirstCluster);
12346 assert(PivotCluster <= W.LastCluster);
12347
12348 CaseClusterIt FirstLeft = W.FirstCluster;
12349 CaseClusterIt LastRight = W.LastCluster;
12350
12351 const ConstantInt *Pivot = PivotCluster->Low;
12352
12353 // New blocks will be inserted immediately after the current one.
12354 MachineFunction::iterator BBI(W.MBB);
12355 ++BBI;
12356
12357 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
12358 // we can branch to its destination directly if it's squeezed exactly in
12359 // between the known lower bound and Pivot - 1.
12360 MachineBasicBlock *LeftMBB;
12361 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
12362 FirstLeft->Low == W.GE &&
12363 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
12364 LeftMBB = FirstLeft->MBB;
12365 } else {
12366 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12367 FuncInfo.MF->insert(BBI, LeftMBB);
12368 WorkList.push_back(
12369 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
12370 // Put Cond in a virtual register to make it available from the new blocks.
12371 ExportFromCurrentBlock(Cond);
12372 }
12373
12374 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
12375 // single cluster, RHS.Low == Pivot, and we can branch to its destination
12376 // directly if RHS.High equals the current upper bound.
12377 MachineBasicBlock *RightMBB;
12378 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
12379 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
12380 RightMBB = FirstRight->MBB;
12381 } else {
12382 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12383 FuncInfo.MF->insert(BBI, RightMBB);
12384 WorkList.push_back(
12385 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
12386 // Put Cond in a virtual register to make it available from the new blocks.
12387 ExportFromCurrentBlock(Cond);
12388 }
12389
12390 // Create the CaseBlock record that will be used to lower the branch.
12391 CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
12392 getCurSDLoc(), LeftProb, RightProb);
12393
12394 if (W.MBB == SwitchMBB)
12395 visitSwitchCase(CB, SwitchMBB);
12396 else
12397 SL->SwitchCases.push_back(CB);
12398}
12399
12400 // Scale CaseProb after peeling a case with the probability of PeeledCaseProb
12401 // from the switch statement.
12402 static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
12403 BranchProbability PeeledCaseProb) {
12404 if (PeeledCaseProb == BranchProbability::getOne())
12405 return BranchProbability::getZero();
12406 BranchProbability SwitchProb = PeeledCaseProb.getCompl();
12407
12408 uint32_t Numerator = CaseProb.getNumerator();
12409 uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
12410 return BranchProbability(Numerator, std::max(Numerator, Denominator));
12411}
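// For illustration: the scaling above divides the remaining probability by
// the complement of the peeled case. If the peeled case had probability 3/4,
// a remaining case of probability 1/8 becomes (1/8) / (1/4) = 1/2; the
// std::max clamp only guards against rounding pushing the numerator past the
// denominator.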
12412
12413// Try to peel the top probability case if it exceeds the threshold.
12414// Return current MachineBasicBlock for the switch statement if the peeling
12415// does not occur.
12416// If the peeling is performed, return the newly created MachineBasicBlock
12417// for the peeled switch statement. Also update Clusters to remove the peeled
12418// case. PeeledCaseProb is the BranchProbability for the peeled case.
12419MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
12420 const SwitchInst &SI, CaseClusterVector &Clusters,
12421 BranchProbability &PeeledCaseProb) {
12422 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12423 // Don't perform if there is only one cluster or optimizing for size.
12424 if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
12425 TM.getOptLevel() == CodeGenOptLevel::None ||
12426 SwitchMBB->getParent()->getFunction().hasMinSize())
12427 return SwitchMBB;
12428
12429 BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
12430 unsigned PeeledCaseIndex = 0;
12431 bool SwitchPeeled = false;
12432 for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
12433 CaseCluster &CC = Clusters[Index];
12434 if (CC.Prob < TopCaseProb)
12435 continue;
12436 TopCaseProb = CC.Prob;
12437 PeeledCaseIndex = Index;
12438 SwitchPeeled = true;
12439 }
12440 if (!SwitchPeeled)
12441 return SwitchMBB;
12442
12443 LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
12444 << TopCaseProb << "\n");
12445
12446 // Record the MBB for the peeled switch statement.
12447 MachineFunction::iterator BBI(SwitchMBB);
12448 ++BBI;
12449 MachineBasicBlock *PeeledSwitchMBB =
12450 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
12451 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12452
12453 ExportFromCurrentBlock(SI.getCondition());
12454 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12455 SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12456 nullptr, nullptr, TopCaseProb.getCompl()};
12457 lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12458
12459 Clusters.erase(PeeledCaseIt);
12460 for (CaseCluster &CC : Clusters) {
12461 LLVM_DEBUG(
12462 dbgs() << "Scale the probability for one cluster, before scaling: "
12463 << CC.Prob << "\n");
12464 CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
12465 LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
12466 }
12467 PeeledCaseProb = TopCaseProb;
12468 return PeeledSwitchMBB;
12469}
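// For illustration: with the default switch-peel-threshold of 66, a case
// holding 70% of the probability mass is peeled into its own
// compare-and-branch ahead of the switch, and the cases left in the residual
// switch are rescaled by scaleCaseProbality, e.g. a 20% case becomes
// 20/30 = 2/3.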
12470
12471void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
12472 // Extract cases from the switch.
12473 BranchProbabilityInfo *BPI = FuncInfo.BPI;
12474 CaseClusterVector Clusters;
12475 Clusters.reserve(SI.getNumCases());
12476 for (auto I : SI.cases()) {
12477 MachineBasicBlock *Succ = FuncInfo.getMBB(I.getCaseSuccessor());
12478 const ConstantInt *CaseVal = I.getCaseValue();
12479 BranchProbability Prob =
12480 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
12481 : BranchProbability(1, SI.getNumCases() + 1);
12482 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
12483 }
12484
12485 MachineBasicBlock *DefaultMBB = FuncInfo.getMBB(SI.getDefaultDest());
12486
12487 // Cluster adjacent cases with the same destination. We do this at all
12488 // optimization levels because it's cheap to do and will make codegen faster
12489 // if there are many clusters.
12490 sortAndRangeify(Clusters);
12491
12492 // The branch probability of the peeled case.
12493 BranchProbability PeeledCaseProb = BranchProbability::getZero();
12494 MachineBasicBlock *PeeledSwitchMBB =
12495 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12496
12497 // If there is only the default destination, jump there directly.
12498 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12499 if (Clusters.empty()) {
12500 assert(PeeledSwitchMBB == SwitchMBB);
12501 SwitchMBB->addSuccessor(DefaultMBB);
12502 if (DefaultMBB != NextBlock(SwitchMBB)) {
12503 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
12504 getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
12505 }
12506 return;
12507 }
12508
12509 SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
12510 DAG.getBFI());
12511 SL->findBitTestClusters(Clusters, &SI);
12512
12513 LLVM_DEBUG({
12514 dbgs() << "Case clusters: ";
12515 for (const CaseCluster &C : Clusters) {
12516 if (C.Kind == CC_JumpTable)
12517 dbgs() << "JT:";
12518 if (C.Kind == CC_BitTests)
12519 dbgs() << "BT:";
12520
12521 C.Low->getValue().print(dbgs(), true);
12522 if (C.Low != C.High) {
12523 dbgs() << '-';
12524 C.High->getValue().print(dbgs(), true);
12525 }
12526 dbgs() << ' ';
12527 }
12528 dbgs() << '\n';
12529 });
12530
12531 assert(!Clusters.empty());
12532 SwitchWorkList WorkList;
12533 CaseClusterIt First = Clusters.begin();
12534 CaseClusterIt Last = Clusters.end() - 1;
12535 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12536 // Scale the branch probability for DefaultMBB if the peeling occurs and
12537 // DefaultMBB is not replaced.
12538 if (PeeledCaseProb != BranchProbability::getZero() &&
12539 DefaultMBB == FuncInfo.getMBB(SI.getDefaultDest()))
12540 DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
12541 WorkList.push_back(
12542 {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
12543
12544 while (!WorkList.empty()) {
12545 SwitchWorkListItem W = WorkList.pop_back_val();
12546 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
12547
12548 if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
12549 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
12550 // For optimized builds, lower large range as a balanced binary tree.
12551 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
12552 continue;
12553 }
12554
12555 lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
12556 }
12557}
12558
12559void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
12560 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12561 auto DL = getCurSDLoc();
12562 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12563 setValue(&I, DAG.getStepVector(DL, ResultVT));
12564}
12565
12566void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
12567 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12568 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12569
12570 SDLoc DL = getCurSDLoc();
12571 SDValue V = getValue(I.getOperand(0));
12572 assert(VT == V.getValueType() && "Malformed vector.reverse!");
12573
12574 if (VT.isScalableVector()) {
12575 setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
12576 return;
12577 }
12578
12579 // Use VECTOR_SHUFFLE for the fixed-length vector
12580 // to maintain existing behavior.
12581 SmallVector<int, 8> Mask;
12582 unsigned NumElts = VT.getVectorMinNumElements();
12583 for (unsigned i = 0; i != NumElts; ++i)
12584 Mask.push_back(NumElts - 1 - i);
12585
12586 setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
12587}
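// For illustration: with NumElts = 4 the loop above builds
// Mask = [3, 2, 1, 0], so the VECTOR_SHUFFLE emits the input's lanes in
// reverse order; the UNDEF second operand is never selected by the mask.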
12588
12589void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I) {
12590 auto DL = getCurSDLoc();
12591 SDValue InVec = getValue(I.getOperand(0));
12592 EVT OutVT =
12593 InVec.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
12594
12595 unsigned OutNumElts = OutVT.getVectorMinNumElements();
12596
12597 // ISD Node needs the input vectors split into two equal parts
12598 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
12599 DAG.getVectorIdxConstant(0, DL));
12600 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
12601 DAG.getVectorIdxConstant(OutNumElts, DL));
12602
12603 // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
12604 // legalisation and combines.
12605 if (OutVT.isFixedLengthVector()) {
12606 SDValue Even = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
12607 createStrideMask(0, 2, OutNumElts));
12608 SDValue Odd = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
12609 createStrideMask(1, 2, OutNumElts));
12610 SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
12611 setValue(&I, Res);
12612 return;
12613 }
12614
12615 SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
12616 DAG.getVTList(OutVT, OutVT), Lo, Hi);
12617 setValue(&I, Res);
12618}
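// For illustration: for a fixed-length 8-element input, Lo and Hi hold lanes
// [0..3] and [4..7]; createStrideMask(0, 2, 4) = [0, 2, 4, 6] and
// createStrideMask(1, 2, 4) = [1, 3, 5, 7], so Even and Odd collect the even-
// and odd-indexed lanes of the original vector.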
12619
12620void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I) {
12621 auto DL = getCurSDLoc();
12622 EVT InVT = getValue(I.getOperand(0)).getValueType();
12623 SDValue InVec0 = getValue(I.getOperand(0));
12624 SDValue InVec1 = getValue(I.getOperand(1));
12625 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12626 EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12627
12628 // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
12629 // legalisation and combines.
12630 if (OutVT.isFixedLengthVector()) {
12631 unsigned NumElts = InVT.getVectorMinNumElements();
12632 SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVec0, InVec1);
12633 setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
12634 createInterleaveMask(NumElts, 2)));
12635 return;
12636 }
12637
12638 SDValue Res = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
12639 DAG.getVTList(InVT, InVT), InVec0, InVec1);
12640 Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Res.getValue(0),
12641 Res.getValue(1));
12642 setValue(&I, Res);
12643}
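// For illustration: for two 4-element fixed-length inputs,
// createInterleaveMask(4, 2) = [0, 4, 1, 5, 2, 6, 3, 7], so the shuffle over
// the concatenation of InVec0 and InVec1 alternates lanes from each input.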
12644
12645void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
12646 SmallVector<EVT, 4> ValueVTs;
12647 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
12648 ValueVTs);
12649 unsigned NumValues = ValueVTs.size();
12650 if (NumValues == 0) return;
12651
12652 SmallVector<SDValue, 4> Values(NumValues);
12653 SDValue Op = getValue(I.getOperand(0));
12654
12655 for (unsigned i = 0; i != NumValues; ++i)
12656 Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
12657 SDValue(Op.getNode(), Op.getResNo() + i));
12658
12659 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12660 DAG.getVTList(ValueVTs), Values));
12661}
12662
12663void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
12664 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12665 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12666
12667 SDLoc DL = getCurSDLoc();
12668 SDValue V1 = getValue(I.getOperand(0));
12669 SDValue V2 = getValue(I.getOperand(1));
12670 int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
12671
12672 // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
12673 if (VT.isScalableVector()) {
12674 setValue(
12675 &I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
12676 DAG.getSignedConstant(
12677 Imm, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));
12678 return;
12679 }
12680
12681 unsigned NumElts = VT.getVectorNumElements();
12682
12683 uint64_t Idx = (NumElts + Imm) % NumElts;
12684
12685 // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
12686 SmallVector<int, 8> Mask;
12687 for (unsigned i = 0; i < NumElts; ++i)
12688 Mask.push_back(Idx + i);
12689 setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
12690}
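// For illustration: with NumElts = 4, Imm = 1 yields Idx = 1 and
// Mask = [1, 2, 3, 4] (the last three lanes of V1, then the first lane of
// V2), while Imm = -1 yields Idx = 3 and Mask = [3, 4, 5, 6] (the last lane
// of V1, then the first three lanes of V2), matching vector.splice semantics.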
12691
12692// Consider the following MIR after SelectionDAG, which produces output in
12693// phyregs in the first case or virtregs in the second case.
12694//
12695// INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
12696// %5:gr32 = COPY $ebx
12697// %6:gr32 = COPY $edx
12698// %1:gr32 = COPY %6:gr32
12699// %0:gr32 = COPY %5:gr32
12700//
12701// INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
12702// %1:gr32 = COPY %6:gr32
12703// %0:gr32 = COPY %5:gr32
12704//
12705// Given %0, we'd like to return $ebx in the first case and %5 in the second.
12706// Given %1, we'd like to return $edx in the first case and %6 in the second.
12707//
12708// If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
12709// to a single virtreg (such as %0). The remaining outputs monotonically
12710// increase in virtreg number from there. If a callbr has no outputs, then it
12711// should not have a corresponding callbr landingpad; in fact, the callbr
12712// landingpad would not even be able to refer to such a callbr.
12713 static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) {
12714 MachineInstr *MI = MRI.def_begin(Reg)->getParent();
12715 // There is definitely at least one copy.
12716 assert(MI->getOpcode() == TargetOpcode::COPY &&
12717 "start of copy chain MUST be COPY");
12718 Reg = MI->getOperand(1).getReg();
12719 MI = MRI.def_begin(Reg)->getParent();
12720 // There may be an optional second copy.
12721 if (MI->getOpcode() == TargetOpcode::COPY) {
12722 assert(Reg.isVirtual() && "expected COPY of virtual register");
12723 Reg = MI->getOperand(1).getReg();
12724 assert(Reg.isPhysical() && "expected COPY of physical register");
12725 MI = MRI.def_begin(Reg)->getParent();
12726 }
12727 // The start of the chain must be an INLINEASM_BR.
12728 assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12729 "end of copy chain MUST be INLINEASM_BR");
12730 return Reg;
12731}
12732
12733// We must do this walk rather than the simpler
12734// setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
12735// otherwise we will end up with copies of virtregs only valid along direct
12736// edges.
12737void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
12738 SmallVector<EVT, 8> ResultVTs;
12739 SmallVector<SDValue, 8> ResultValues;
12740 const auto *CBR =
12741 cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
12742
12743 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12744 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
12745 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
12746
12747 unsigned InitialDef = FuncInfo.ValueMap[CBR];
12748 SDValue Chain = DAG.getRoot();
12749
12750 // Re-parse the asm constraints string.
12751 TargetLowering::AsmOperandInfoVector TargetConstraints =
12752 TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
12753 for (auto &T : TargetConstraints) {
12754 SDISelAsmOperandInfo OpInfo(T);
12755 if (OpInfo.Type != InlineAsm::isOutput)
12756 continue;
12757
12758 // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
12759 // individual constraint.
12760 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
12761
12762 switch (OpInfo.ConstraintType) {
12763 case TargetLowering::C_Register:
12764 case TargetLowering::C_RegisterClass: {
12765 // Fill in OpInfo.AssignedRegs.Regs.
12766 getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);
12767
12768 // getRegistersForValue may produce 1 to many registers based on whether
12769 // the OpInfo.ConstraintVT is legal on the target or not.
12770 for (Register &Reg : OpInfo.AssignedRegs.Regs) {
12771 Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
12772 if (Register::isPhysicalRegister(OriginalDef))
12773 FuncInfo.MBB->addLiveIn(OriginalDef);
12774 // Update the assigned registers to use the original defs.
12775 Reg = OriginalDef;
12776 }
12777
12778 SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12779 DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
12780 ResultValues.push_back(V);
12781 ResultVTs.push_back(OpInfo.ConstraintVT);
12782 break;
12783 }
12784 case TargetLowering::C_Other: {
12785 SDValue Flag;
12786 SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
12787 OpInfo, DAG);
12788 ++InitialDef;
12789 ResultValues.push_back(V);
12790 ResultVTs.push_back(OpInfo.ConstraintVT);
12791 break;
12792 }
12793 default:
12794 break;
12795 }
12796 }
12797 SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12798 DAG.getVTList(ResultVTs), ResultValues);
12799 setValue(&I, V);
12800}
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, ISD::MemIndexType &IndexType, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static void findWasmUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
Value * RHS
Value * LHS
support::ulittle16_t & Lo
Definition: aarch32.cpp:204
support::ulittle16_t & Hi
Definition: aarch32.cpp:203
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
Definition: APInt.h:78
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1015
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition: APInt.h:334
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:440
an instruction to allocate memory on the stack
Definition: Instructions.h:63
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:124
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:117
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
Definition: Function.cpp:349
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition: Argument.h:49
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:157
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
iterator begin() const
Definition: ArrayRef.h:156
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:163
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
This class represents the atomic memcpy intrinsic i.e.
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
@ Add
*p = old + v
Definition: Instructions.h:720
@ FAdd
*p = old + v
Definition: Instructions.h:741
@ USubCond
Subtract only if no unsigned overflow.
Definition: Instructions.h:764
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:734
@ Or
*p = old | v
Definition: Instructions.h:728
@ Sub
*p = old - v
Definition: Instructions.h:722
@ And
*p = old & v
Definition: Instructions.h:724
@ Xor
*p = old ^ v
Definition: Instructions.h:730
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
Definition: Instructions.h:768
@ FSub
*p = old - v
Definition: Instructions.h:744
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:756
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:732
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:738
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:752
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:736
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:748
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:760
@ Nand
*p = ~(old & v)
Definition: Instructions.h:726
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:367
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:571
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
Definition: BasicBlock.cpp:386
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
const Instruction & back() const
Definition: BasicBlock.h:473
This class represents a no-op cast from one type to another.
bool test(unsigned Idx) const
Definition: BitVector.h:461
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:341
BitVector & set()
Definition: BitVector.h:351
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
The address of a basic block.
Definition: Constants.h:893
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1120
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2051
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1407
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1269
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Definition: InstrTypes.h:2027
Value * getCalledOperand() const
Definition: InstrTypes.h:1342
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1294
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1275
bool isConvergent() const
Determine if the invoke is convergent.
Definition: InstrTypes.h:1935
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1207
unsigned arg_size() const
Definition: InstrTypes.h:1292
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1425
bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:661
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
Definition: Constants.h:587
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1108
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2321
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:271
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:866
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:208
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:873
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:157
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:148
A signed pointer, in the ptrauth sense.
Definition: Constants.h:1021
This class represents a range of values.
Definition: ConstantRange.h:47
uint64_t getZExtValue() const
Constant Vector Declarations.
Definition: Constants.h:511
This is an important base class in LLVM.
Definition: Constant.h:42
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
unsigned getNonMetadataArgCount() const
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Debug location.
Base class for variables.
std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:709
bool isBigEndian() const
Definition: DataLayout.h:198
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:369
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:421
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Definition: DataLayout.cpp:847
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
iterator_range< location_op_iterator > getValues() const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
A debug info location.
Definition: DebugLoc.h:33
DILocation * getInlinedAt() const
Definition: DebugLoc.cpp:39
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
unsigned size() const
Definition: DenseMap.h:99
bool empty() const
Definition: DenseMap.h:98
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:152
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition: DenseMap.h:103
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:311
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition: TypeSize.h:317
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:322
Class representing an expression and its matching format.
This instruction extracts a struct member or array element value from an aggregate value.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
bool allowReassoc() const
Flag queries.
Definition: FMF.h:65
An instruction for ordering other memory operations.
Definition: Instructions.h:424
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:791
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
Register CreateRegs(const Value *V)
SmallPtrSet< const DbgVariableRecord *, 8 > PreprocessedDVRDeclares
MachineBasicBlock * getMBB(const BasicBlock *BB) const
Register DemoteRegister
DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg allocated to hold a pointer to ...
BitVector DescribedArgs
Bitvector with a bit set if corresponding argument is described in ArgDbgValues.
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
bool isExportedInst(const Value *V) const
isExportedInst - Return true if the specified value is an instruction exported from its block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
Register InitializeRegForValue(const Value *V)
unsigned ExceptionPointerVirtReg
If the current MBB is a landing pad, the exception pointer and exception selector registers are copie...
SmallPtrSet< const DbgDeclareInst *, 8 > PreprocessedDbgDeclares
Collection of dbg.declare instructions handled after argument lowering and before ISel proper.
DenseMap< const Value *, Register > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
MachineBasicBlock * MBB
MBB - The current block.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
SmallVector< MachineInstr *, 8 > ArgDbgValues
ArgDbgValues - A list of DBG_VALUE instructions created during isel for function arguments that are i...
unsigned getCurrentCallSite()
Get the call site currently being processed, if any. Return zero if none.
void setCurrentCallSite(unsigned Site)
Set the call site currently being processed.
MachineRegisterInfo * RegInfo
Register CreateReg(MVT VT, bool isDivergent=false)
CreateReg - Allocate a single virtual register for the given type.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
DenseMap< const Value *, ISD::NodeType > PreferredExtendType
Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND) for a value.
Register getCatchPadExceptionPointerVReg(const Value *CPI, const TargetRegisterClass *RC)
Class to represent function types.
Definition: DerivedTypes.h:105
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:144
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:137
Type * getReturnType() const
Definition: DerivedTypes.h:126
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
Definition: Function.h:809
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:251
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:704
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
Definition: Function.h:345
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:277
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1048
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:353
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:256
size_t arg_size() const
Definition: Function.h:901
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:234
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:731
Garbage collection metadata for a single function.
Definition: GCMetadata.h:78
void addStackRoot(int Num, const Constant *Metadata)
addStackRoot - Registers a root that lives on the stack.
Definition: GCMetadata.h:118
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
Definition: GlobalValue.h:567
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:278
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:471
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
Definition: Metadata.cpp:1750
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Definition: DerivedTypes.h:53
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:55
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
Definition: Instructions.h:176
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
Definition: MCContext.cpp:345
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
Definition: MCContext.cpp:235
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Metadata node.
Definition: Metadata.h:1069
Machine Value Type.
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
bool isEHPad() const
Returns true if the block is a landing pad.
void setIsEHCatchretTarget(bool V=true)
Indicates if this is a target block of a catchret.
void setIsCleanupFuncletEntry(bool V=true)
Indicates if this is the entry block of a cleanup funclet.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setHasPatchPoint(bool s=true)
void setHasStackMap(bool s=true)
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
Description of the location of a variable whose Address is valid and unchanging during function execu...
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
void setCallsUnwindInit(bool b)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void setHasEHCatchret(bool V)
void setCallsEHReturn(bool b)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
auto getInStackSlotVariableDbgInfo()
Returns the collection of variables for which we have debug info and that have been assigned a stack ...
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineBasicBlock & front() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
ArrayRef< std::pair< MCRegister, Register > > liveins() const
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
Definition: MapVector.h:163
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Definition: MapVector.h:118
Representation for a specific memory location.
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
Metadata wrapper in the Value hierarchy.
Definition: Metadata.h:176
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Definition: DerivedTypes.h:686
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1878
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
Definition: Operator.h:155
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:71
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(unsigned VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
void dump() const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
void CopyValueToVirtualRegister(const Value *V, unsigned Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void init(GCFunctionInfo *gfi, AAResults *AA, AssumptionCache *AC, const TargetLibraryInfo *li)
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
DenseMap< const Constant *, unsigned > ConstantsOut
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLowerinInfo (into CLI) based on the properties of the call being lowered.
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineFunction * MF
virtual void emitFunctionEntryCode()
SwiftErrorValueTracking * SwiftError
std::unique_ptr< SelectionDAGBuilder > SDB
Targets can subclass this to parameterize the SelectionDAG lowering and instruction selection process...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, MachinePointerInfo SrcPtrInfo) const
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
Help to insert SDNodeFlags automatically in transforming.
Definition: SelectionDAG.h:369
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:228
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:748
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
Definition: SelectionDAG.h:980
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Definition: SelectionDAG.h:575
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
Definition: SelectionDAG.h:497
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
Definition: SelectionDAG.h:799
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
BlockFrequencyInfo * getBFI() const
Definition: SelectionDAG.h:511
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, unsigned VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
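As a worked illustration of the two condition-code helpers above, here is a minimal sketch (not code from this file; the helper name emitClampToZero is hypothetical) that builds max(X, 0) from a SETCC feeding a SELECT, assuming a live SelectionDAG and SDLoc from the surrounding lowering code:

  // Hypothetical helper: clamp a signed integer value to be non-negative.
  static SDValue emitClampToZero(SelectionDAG &DAG, const SDLoc &DL, SDValue X) {
    EVT VT = X.getValueType();
    SDValue Zero = DAG.getConstant(0, DL, VT);
    // The SETCC result type is target-dependent, so ask TLI instead of
    // hard-coding i1.
    EVT CCVT = DAG.getTargetLoweringInfo().getSetCCResultType(
        DAG.getDataLayout(), *DAG.getContext(), VT);
    SDValue IsNeg = DAG.getSetCC(DL, CCVT, X, Zero, ISD::SETLT);
    return DAG.getSelect(DL, VT, IsNeg, Zero, X);
  }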
void addMMRAMetadata(const SDNode *Node, MDNode *MMRA)
Set MMRAMetadata to be associated with Node.
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getRegister(Register Reg, EVT VT)
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
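To make the "not a normal operator" point concrete, a minimal sketch (hypothetical helper, assuming the usual lowering context) that loads an i32 from a fixed stack slot; the node produces both a value and an output chain:

  static SDValue loadI32FromFI(SelectionDAG &DAG, const SDLoc &DL, int FI) {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    SDValue FIN = DAG.getFrameIndex(FI, TLI.getFrameIndexTy(DAG.getDataLayout()));
    SDValue Ld = DAG.getLoad(MVT::i32, DL, DAG.getRoot(), FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    // Ld is the loaded value; Ld.getValue(1) is the output chain, which a
    // real caller would thread into subsequent chained nodes.
    return Ld;
  }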
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
bool shouldOptForSize() const
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:501
static constexpr unsigned MaxRecursionDepth
Definition: SelectionDAG.h:456
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:854
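A minimal sketch of the entry above (hypothetical helper; the element count is chosen arbitrarily) that materializes the constant vector <4 x i32> <0, 1, 2, 3>:

  static SDValue buildIota4xI32(SelectionDAG &DAG, const SDLoc &DL) {
    EVT VT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, 4);
    SmallVector<SDValue, 4> Elts;
    for (unsigned I = 0; I != 4; ++I)
      Elts.push_back(DAG.getConstant(I, DL, MVT::i32));
    return DAG.getBuildVector(VT, DL, Elts); // SmallVector converts to ArrayRef
  }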
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
Definition: SelectionDAG.h:825
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:495
ProfileSummaryInfo * getPSI() const
Definition: SelectionDAG.h:510
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:753
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
Definition: SelectionDAG.h:503
SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
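One practical distinction worth a two-line sketch (not text from this entry): the plain form yields a constant that legalization and DAG combining may freely rewrite, while the getTargetConstant form (see its entry below) marks an immediate that instruction selection must keep as a direct operand of a target node:

  SDValue C  = DAG.getConstant(42, DL, MVT::i32);       // ordinary ISD::Constant
  SDValue TC = DAG.getTargetConstant(42, DL, MVT::i32); // must stay an immediate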
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
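A minimal sketch combining this helper with CreateStackTemporary and getEVTAlign from other entries in this index (the helper name spillToStackTemp is hypothetical):

  static void spillToStackTemp(SelectionDAG &DAG, const SDLoc &DL, SDValue Val) {
    EVT VT = Val.getValueType();
    Align A = DAG.getEVTAlign(VT);
    SDValue Slot = DAG.CreateStackTemporary(VT.getStoreSize(), A);
    int FI = cast<FrameIndexSDNode>(Slot.getNode())->getIndex();
    SDValue St = DAG.getStore(
        DAG.getRoot(), DL, Val, Slot,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), A);
    DAG.setRoot(St); // a store produces only a chain; make it the new root
  }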
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
Definition: SelectionDAG.h:888
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
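The three *ExtOrTrunc helpers in this index differ only in which extension they pick when VT is wider than Op's type; a one-line sketch of each, assuming an integer SDValue Op is in scope:

  SDValue S = DAG.getSExtOrTrunc(Op, DL, MVT::i64);   // sign-extend if widening
  SDValue Z = DAG.getZExtOrTrunc(Op, DL, MVT::i64);   // zero-extend if widening
  SDValue A = DAG.getAnyExtOrTrunc(Op, DL, MVT::i64); // high bits undefined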
SDValue getPartialReduceAdd(SDLoc DL, EVT ReducedTy, SDValue Op1, SDValue Op2)
Create the DAG equivalent of vector_partial_reduce where Op1 and Op2 are its operands and ReducedTY i...
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:496
SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT)
Definition: SelectionDAG.h:734
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:698
SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:490
SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
const FunctionVarLocs * getFunctionVarLocs() const
Returns the result of the AssignmentTrackingAnalysis pass if it's available, otherwise return nullptr...
Definition: SelectionDAG.h:507
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex, int64_t Size, int64_t Offset=-1)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the por...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
Definition: SelectionDAG.h:508
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
Definition: SelectionDAG.h:584
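To show how getRoot/setRoot interact with getTokenFactor (entries elsewhere in this index), a hypothetical sketch that emits two stores from the same incoming chain and re-joins them, assuming P0 and P1 do not alias:

  static void emitTwoStores(SelectionDAG &DAG, const SDLoc &DL, SDValue V,
                            SDValue P0, SDValue P1, Align A) {
    SDValue In = DAG.getRoot();
    // Both stores start from the same chain, so they are unordered relative
    // to each other; the TokenFactor orders everything after both.
    SDValue S0 = DAG.getStore(In, DL, V, P0, MachinePointerInfo(), A);
    SDValue S1 = DAG.getStore(In, DL, V, P1, MachinePointerInfo(), A);
    SmallVector<SDValue, 2> Chains = {S0, S1};
    DAG.setRoot(DAG.getTokenFactor(DL, Chains));
  }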
void addPCSections(const SDNode *Node, MDNode *MD)
Set PCSections to be associated with Node.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:578
SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
Definition: SelectionDAG.h:904
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:363
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
Definition: SmallPtrSet.h:519
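A minimal usage sketch for the insert() contract documented above (the worklist shape is hypothetical):

  #include "llvm/ADT/SmallPtrSet.h"

  static void visitOnce(llvm::ArrayRef<const llvm::Value *> Worklist) {
    llvm::SmallPtrSet<const llvm::Value *, 8> Visited; // 8 inline slots
    for (const llvm::Value *V : Worklist)
      if (Visited.insert(V).second) { // .second is false if already present
        // ... process V exactly once ...
      }
  }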
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:132
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void swap(SmallVectorImpl &RHS)
Definition: SmallVector.h:968
void resize(size_type N)
Definition: SmallVector.h:638
void push_back(const T &Elt)
Definition: SmallVector.h:413
pointer data()
Return a pointer to the vector's buffer, even if empty().
Definition: SmallVector.h:286
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
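A short sketch of the members listed above; with inline size 4 there is no heap allocation until a fifth element arrives:

  #include "llvm/ADT/SmallVector.h"
  #include <iterator>

  llvm::SmallVector<int, 4> Xs;
  Xs.push_back(1);
  Xs.emplace_back(2);                          // constructs in place
  int Tail[] = {3, 4, 5};
  Xs.append(std::begin(Tail), std::end(Tail)); // bulk-add a range
  // Pass as SmallVectorImpl<int>& so callees stay independent of the inline size.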
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
void clear()
Clear the memory usage of this object.
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:147
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:144
TypeSize getElementOffset(unsigned Idx) const
Definition: DataLayout.h:596
Class to represent struct types.
Definition: DerivedTypes.h:218
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)
Set the swifterror virtual register in the VRegDefMap for this basic block.
Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a use of a swifterror by an instruction.
Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction.
const Value * getFunctionArg() const
Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swi...
Multiway switch.
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
TargetIntrinsicInfo - Interface to description of target-specific intrinsic information.
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
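A sketch pairing the two TargetLibraryInfo queries above into one hypothetical predicate (LibFunc_sqrt is the enum value for sqrt):

  static bool isFastSqrtCall(const llvm::TargetLibraryInfo &TLI,
                             llvm::StringRef Name) {
    llvm::LibFunc F;
    return TLI.getLibFunc(Name, F) && F == llvm::LibFunc_sqrt &&
           TLI.hasOptimizedCodeGen(F);
  }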
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps, const Value *, const Value *) const
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const
Return true if the @llvm.experimental.vector.partial.reduce.
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, const SDLoc &, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array,...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const
Return a TargetTransformInfo for a given function.
TargetOptions Options
CodeModel::Model getCodeModel() const
Returns the code model.
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
unsigned getID() const
Return the register class ID number.
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
@ TCK_Latency
The latency of an instruction.
InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
bool hasConditionalLoadStoreForType(Type *Ty=nullptr) const
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:383
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition: Triple.h:928
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:345
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:264
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
TypeID
Definitions of all of the base types for the Type system.
Definition: Type.h:54
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:234
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:355
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1859
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:228
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
iterator_range< user_iterator > users()
Definition: Value.h:421
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
Base class of all SIMD vector types.
Definition: DerivedTypes.h:427
Type * getElementType() const
Definition: DerivedTypes.h:460
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
const ParentTy * getParent() const
Definition: ilist_node.h:32
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: Lint.cpp:87
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:125
@ AnyReg
OBSOLETED - Used for stack-based JavaScript calls.
Definition: CallingConv.h:60
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:245
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
Definition: CallingConv.h:163
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition: CallingConv.h:24
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:40
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition: ISDOpcodes.h:243
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1197
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1193
@ CTLZ_ZERO_UNDEF
Definition: ISDOpcodes.h:753
@ CONVERGENCECTRL_ANCHOR
Definition: ISDOpcodes.h:1470
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
Definition: ISDOpcodes.h:491
@ ATOMIC_LOAD_FMAX
Definition: ISDOpcodes.h:1347
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition: ISDOpcodes.h:44
@ SET_FPENV
Sets the current floating-point environment.
Definition: ISDOpcodes.h:1069
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
Definition: ISDOpcodes.h:1417
@ VECREDUCE_SMIN
Definition: ISDOpcodes.h:1450
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
Definition: ISDOpcodes.h:153
@ ATOMIC_LOAD_NAND
Definition: ISDOpcodes.h:1340
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition: ISDOpcodes.h:574
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:744
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
Definition: ISDOpcodes.h:374
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1226
@ ATOMIC_LOAD_MAX
Definition: ISDOpcodes.h:1342
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:1312
@ ATOMIC_LOAD_UMIN
Definition: ISDOpcodes.h:1343
@ RESET_FPENV
Set floating-point environment to default state.
Definition: ISDOpcodes.h:1073
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition: ISDOpcodes.h:380
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
Definition: ISDOpcodes.h:1092
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:814
@ ATOMIC_LOAD_USUB_COND
Definition: ISDOpcodes.h:1351
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:498
@ FATAN2
FATAN2 - atan2, inspired by libm.
Definition: ISDOpcodes.h:999
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:205
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
Definition: ISDOpcodes.h:157
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
Definition: ISDOpcodes.h:1325
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:841
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition: ISDOpcodes.h:558
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
Definition: ISDOpcodes.h:1435
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:397
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
Definition: ISDOpcodes.h:1439
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition: ISDOpcodes.h:717
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1304
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
Definition: ISDOpcodes.h:1096
@ VECREDUCE_SMAX
Definition: ISDOpcodes.h:1449
@ STRICT_FSETCCS
Definition: ISDOpcodes.h:492
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
Definition: ISDOpcodes.h:495
@ FAKE_USE
FAKE_USE represents a use of the operand but does not do anything.
Definition: ISDOpcodes.h:1383
@ ATOMIC_LOAD_OR
Definition: ISDOpcodes.h:1338
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:954
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:236
@ ATOMIC_LOAD_XOR
Definition: ISDOpcodes.h:1339
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
Definition: ISDOpcodes.h:1270
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
Definition: ISDOpcodes.h:997
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
Definition: ISDOpcodes.h:387
@ ATOMIC_LOAD_FADD
Definition: ISDOpcodes.h:1345
@ FrameIndex
Definition: ISDOpcodes.h:80
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:1173
@ ATOMIC_LOAD_USUB_SAT
Definition: ISDOpcodes.h:1352
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
Definition: ISDOpcodes.h:141
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
Definition: ISDOpcodes.h:1179
@ SET_ROUNDING
Set rounding mode.
Definition: ISDOpcodes.h:936
@ CONVERGENCECTRL_GLUE
Definition: ISDOpcodes.h:1476
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:805
@ PREALLOCATED_SETUP
PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE with the preallocated call Va...
Definition: ISDOpcodes.h:1231
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
Definition: ISDOpcodes.h:1259
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
Definition: ISDOpcodes.h:107
@ CONVERGENCECTRL_ENTRY
Definition: ISDOpcodes.h:1471
@ BR
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:1118
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
Definition: ISDOpcodes.h:1432
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition: ISDOpcodes.h:752
@ WRITE_REGISTER
Definition: ISDOpcodes.h:125
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
Definition: ISDOpcodes.h:1292
@ VECREDUCE_FMIN
Definition: ISDOpcodes.h:1436
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:1059
@ ATOMIC_LOAD_FSUB
Definition: ISDOpcodes.h:1346
@ SSUBO
Same for subtraction.
Definition: ISDOpcodes.h:334
@ ATOMIC_LOAD_MIN
Definition: ISDOpcodes.h:1341
@ PREALLOCATED_ARG
PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE with the preallocated call Value,...
Definition: ISDOpcodes.h:1234
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1123
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1127
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
Definition: ISDOpcodes.h:601
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition: ISDOpcodes.h:515
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
Definition: ISDOpcodes.h:522
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition: ISDOpcodes.h:356
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:757
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Definition: ISDOpcodes.h:1308
@ VECREDUCE_UMAX
Definition: ISDOpcodes.h:1451
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition: ISDOpcodes.h:229
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:642
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1222
@ ATOMIC_LOAD_FMIN
Definition: ISDOpcodes.h:1348
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:215
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition: ISDOpcodes.h:330
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
Definition: ISDOpcodes.h:1296
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
Definition: ISDOpcodes.h:1444
@ GET_ROUNDING
Returns the current rounding mode: -1 = Undefined, 0 = Round to 0, 1 = Round to nearest (ties to even), 2 = Round to ...
Definition: ISDOpcodes.h:931
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
Definition: ISDOpcodes.h:1188
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
Definition: ISDOpcodes.h:1087
@ GET_FPENV
Gets the current floating-point environment.
Definition: ISDOpcodes.h:1064
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:735
@ PtrAuthGlobalAddress
A ptrauth constant.
Definition: ISDOpcodes.h:90
@ ATOMIC_LOAD_AND
Definition: ISDOpcodes.h:1336
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:588
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
Definition: ISDOpcodes.h:47
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
Definition: ISDOpcodes.h:124
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:550
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:811
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
Definition: ISDOpcodes.h:1282
@ FP_TO_UINT_SAT
Definition: ISDOpcodes.h:907
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
Definition: ISDOpcodes.h:1407
@ ATOMIC_LOAD_UMAX
Definition: ISDOpcodes.h:1344
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
Definition: ISDOpcodes.h:120
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:1031
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
Definition: ISDOpcodes.h:1286
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition: ISDOpcodes.h:366
@ SMULO
Same for multiplication.
Definition: ISDOpcodes.h:338
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1112
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:697
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
Definition: ISDOpcodes.h:606
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition: ISDOpcodes.h:393
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:939
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:766
@ VECREDUCE_UMIN
Definition: ISDOpcodes.h:1452
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
Definition: ISDOpcodes.h:1245
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:1168
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
Definition: ISDOpcodes.h:135
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:100
@ ATOMIC_LOAD_UDEC_WRAP
Definition: ISDOpcodes.h:1350
@ ATOMIC_LOAD_ADD
Definition: ISDOpcodes.h:1334
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
Definition: ISDOpcodes.h:480
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
Definition: ISDOpcodes.h:1050
@ ATOMIC_LOAD_SUB
Definition: ISDOpcodes.h:1335
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:887
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
Definition: ISDOpcodes.h:1253
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:709
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1279
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:190
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
Definition: ISDOpcodes.h:705
@ VECREDUCE_FMUL
Definition: ISDOpcodes.h:1433
@ STRICT_FADD
Constrained versions of the binary floating point operators.
Definition: ISDOpcodes.h:407
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition: ISDOpcodes.h:223
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:539
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
Definition: ISDOpcodes.h:627
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
Definition: ISDOpcodes.h:1333
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
Definition: ISDOpcodes.h:1004
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:920
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
Definition: ISDOpcodes.h:669
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
Definition: ISDOpcodes.h:112
@ CONVERGENCECTRL_LOOP
Definition: ISDOpcodes.h:1472
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1165
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition: ISDOpcodes.h:906
@ VECREDUCE_FMINIMUM
Definition: ISDOpcodes.h:1440
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
Definition: ISDOpcodes.h:147
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:817
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1141
@ VECREDUCE_SEQ_FMUL
Definition: ISDOpcodes.h:1418
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
Definition: ISDOpcodes.h:1184
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:61
@ ATOMIC_LOAD_UINC_WRAP
Definition: ISDOpcodes.h:1349
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:508
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition: ISDOpcodes.h:347
@ AssertZext
Definition: ISDOpcodes.h:62
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
Definition: ISDOpcodes.h:595
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
Definition: ISDOpcodes.h:1398
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
Definition: ISDOpcodes.h:1055
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
Definition: ISDOpcodes.h:1276
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:198
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition: ISDOpcodes.h:530
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
Definition: ISDOpcodes.h:1575
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1613
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:148
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:885
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
VScaleVal_match m_VScale()
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
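These matchers compose into declarative checks over IR. A minimal sketch, assuming V and X are Value pointers already in hand (the helper name is hypothetical):

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Returns true if V computes the bitwise negation of X and has a single
// user, i.e. V matches `xor X, -1` (or the commuted form) with one use.
static bool isSingleUseNotOf(Value *V, Value *X) {
  return match(V, m_OneUse(m_Not(m_Specific(X))));
}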
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1600
std::vector< CaseCluster > CaseClusterVector
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
CaseClusterVector::iterator CaseClusterIt
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
Reg
All possible values of the reg field in the ModR/M byte.
@ ReallyHidden
Definition: CommandLine.h:138
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition: Dwarf.h:147
ExceptionBehavior
Exception behavior used for floating point operations.
Definition: FPEnv.h:38
@ ebStrict
This corresponds to "fpexcept.strict".
Definition: FPEnv.h:41
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
Definition: FPEnv.h:40
@ ebIgnore
This corresponds to "fpexcept.ignore".
Definition: FPEnv.h:39
constexpr float log2ef
Definition: MathExtras.h:66
constexpr double e
Definition: MathExtras.h:47
constexpr float ln2f
Definition: MathExtras.h:64
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:353
@ Offset
Definition: DWP.cpp:480
@ Length
Definition: DWP.cpp:480
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
Definition: Analysis.cpp:233
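As an illustrative sketch, the signed less-than IR predicate maps to ISD::SETLT (the wrapper function is hypothetical):

#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/IR/Instructions.h"

// ICmpInst::ICMP_SLT lowers to the signed less-than DAG condition code.
static bool demoICmpMapping() {
  return llvm::getICmpCondCode(llvm::ICmpInst::ICMP_SLT) == llvm::ISD::SETLT;
}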
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition: bit.h:385
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1697
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:255
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
@ Done
Definition: Threading.h:61
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition: bit.h:307
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either an integer or FP constant.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2115
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
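A hedged sketch of the decomposition; the wrapper and the GEP in the comment are illustrative only:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// If Ptr is e.g. `getelementptr i8, ptr %p, i64 12`, Base becomes %p and
// Offset becomes 12; otherwise Base == Ptr and Offset == 0.
static Value *stripConstantOffset(Value *Ptr, const DataLayout &DL,
                                  int64_t &Offset) {
  Offset = 0;
  return GetPointerBaseWithConstantOffset(Ptr, Offset, DL);
}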
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value that is congruent to Skew modulo Align.
Definition: MathExtras.h:555
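A small sketch exercising Log2_32_Ceil, isUIntN, and alignDown from above; the values are arbitrary:

#include "llvm/Support/MathExtras.h"
#include <cassert>

using namespace llvm;

static void mathExtrasDemo() {
  assert(Log2_32_Ceil(17) == 5);               // ceil(log2(17)) == 5
  assert(isUIntN(8, 255) && !isUIntN(8, 256)); // does the value fit in 8 bits?
  assert(alignDown(19, 8) == 16);              // largest multiple of 8 <= 19
}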
gep_type_iterator gep_type_end(const User *GEP)
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count the number of 0s from the least significant bit to the most significant, stopping at the first 1.
Definition: bit.h:215
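A sketch exercising the bit-counting helpers listed above (popcount, countr_one, countr_zero); the constants are arbitrary:

#include "llvm/ADT/bit.h"
#include <cassert>

static void bitCountingDemo() {
  assert(llvm::popcount(0b1011u) == 3);    // three bits set
  assert(llvm::countr_zero(0b1000u) == 3); // three trailing zeros
  assert(llvm::countr_one(0b0111u) == 3);  // three trailing ones
}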
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
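A minimal sketch of the range-based predicates all_of and any_of from above; the container and lambdas are illustrative:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

static bool rangePredicateDemo() {
  llvm::SmallVector<int, 4> Vals = {2, 4, 6};
  bool AllEven = llvm::all_of(Vals, [](int V) { return V % 2 == 0; }); // true
  bool AnyBig = llvm::any_of(Vals, [](int V) { return V > 5; });       // true
  return AllEven && AnyBig;
}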
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
Definition: STLExtras.h:877
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1664
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
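A sketch of how the SPF_* flavors above are consumed; the helper is hypothetical and Sel is assumed to implement a signed-max idiom such as select (icmp sgt a, b), a, b:

#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

static bool isSignedMaxIdiom(Value *Sel) {
  Value *LHS = nullptr, *RHS = nullptr;
  SelectPatternResult SPR = matchSelectPattern(Sel, LHS, RHS);
  return SPR.Flavor == SPF_SMAX; // LHS/RHS receive the compared operands
}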
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
Definition: STLExtras.h:322
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
Definition: Analysis.cpp:199
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2614
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
Definition: DebugInfo.cpp:2298
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
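A sketch of the masks these two helpers produce; the parameter values are arbitrary:

#include "llvm/Analysis/VectorUtils.h"

using namespace llvm;

static void shuffleMaskDemo() {
  // Every Stride-th lane starting at Start: {0, 2, 4, 6} for VF == 4.
  SmallVector<int, 16> Strided =
      createStrideMask(/*Start=*/0, /*Stride=*/2, /*VF=*/4);
  // Lanes of two 4-element vectors interleaved: {0, 4, 1, 5, 2, 6, 3, 7}.
  SmallVector<int, 16> Interleaved =
      createInterleaveMask(/*VF=*/4, /*NumVecs=*/2);
  (void)Strided;
  (void)Interleaved;
}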
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or both operands are known non-NaN).
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:535
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
Definition: Analysis.cpp:221
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:79
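A hedged sketch of the decomposition for a small aggregate; TLI and DL are assumed to come from the current target:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// Splits {i64, float} into one EVT per scalar leaf: {MVT::i64, MVT::f32}.
static void splitAggregate(const TargetLowering &TLI, const DataLayout &DL,
                           LLVMContext &Ctx) {
  Type *STy = StructType::get(Type::getInt64Ty(Ctx), Type::getFloatTy(Ctx));
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, STy, ValueVTs);
}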
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition: FPEnv.cpp:24
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2099
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Definition: Analysis.cpp:177
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:212
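A small sketch exercising alignTo and commonAlignment from above; the values are arbitrary:

#include "llvm/Support/Alignment.h"
#include <cassert>

using namespace llvm;

static void alignmentDemo() {
  assert(alignTo(13, Align(8)) == 16);               // next multiple of 8
  assert(commonAlignment(Align(16), 8) == Align(8)); // base+8 is only 8-aligned
}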
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition: STLExtras.h:2087
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition: Analysis.cpp:33
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition: bit.h:327
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:265
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Extended Value Type.
Definition: ValueTypes.h:35
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: ValueTypes.h:390
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition: ValueTypes.h:74
uint64_t getScalarStoreSize() const
Definition: ValueTypes.h:397
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:279
bool bitsLT(EVT VT) const
Return true if this has fewer bits than VT.
Definition: ValueTypes.h:295
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:147
ElementCount getVectorElementCount() const
Definition: ValueTypes.h:345
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
Definition: ValueTypes.h:354
uint64_t getScalarSizeInBits() const
Definition: ValueTypes.h:380
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:289
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:311
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:65
bool isRISCVVectorTuple() const
Return true if this is a RISCV vector tuple type.
Definition: ValueTypes.h:179
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition: ValueTypes.h:376
bool isFixedLengthVector() const
Definition: ValueTypes.h:181
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition: ValueTypes.h:318
bool bitsGE(EVT VT) const
Return true if this has no fewer bits than VT.
Definition: ValueTypes.h:287
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:210
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition: ValueTypes.h:174
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:323
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:157
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
Definition: ValueTypes.h:102
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:331
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
Definition: ValueTypes.h:448
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:152
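A sketch tying several of the EVT queries above together; the types chosen are arbitrary:

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

using namespace llvm;

static void evtDemo(LLVMContext &Ctx) {
  EVT I32 = EVT::getIntegerVT(Ctx, 32);
  EVT V4I32 = EVT::getVectorVT(Ctx, I32, 4);
  assert(V4I32.isVector() && !V4I32.isScalableVector());
  assert(V4I32.getVectorNumElements() == 4);
  assert(V4I32.getScalarType() == I32);
  assert(V4I32.getSizeInBits().getFixedValue() == 128);
  assert(V4I32.getStoreSize().getFixedValue() == 16); // bytes
}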
void setPointerAddrSpace(unsigned AS)
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
static const unsigned NoArgIndex
Sentinel value for implicit machine-level input arguments.
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
Definition: InlineAsm.h:126
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition: KnownBits.h:240
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
RegsForValue()=default
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need to be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI-dependent manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
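A sketch of the usual flag-transfer idiom, assuming I is the IR instruction being visited (the helper is hypothetical):

#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

static SDNodeFlags flagsFor(const Instruction &I) {
  SDNodeFlags Flags;
  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPOp); // carry fast-math flags onto the DAG node
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I)) {
    Flags.setNoUnsignedWrap(OBO->hasNoUnsignedWrap());
    Flags.setNoSignedWrap(OBO->hasNoSignedWrap());
  }
  return Flags;
}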
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
A cluster of case labels.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
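A hedged sketch of the chained-setter style in which this structure is populated; all parameters are assumed to be supplied by the caller:

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

static std::pair<SDValue, SDValue>
lowerSimpleCall(SelectionDAG &DAG, const SDLoc &dl, Type *RetTy,
                SDValue Callee, TargetLowering::ArgListTy Args) {
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(DAG.getRoot())
      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args))
      .setTailCall(false)
      .setDiscardResult(true);
  // Returns the call's result value and the updated chain.
  return DAG.getTargetLoweringInfo().LowerCallTo(CLI);
}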
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)