// LLVM 23.0.0git
// LegalizeVectorOps.cpp
// (doxygen export: "Go to the documentation of this file.")
1//===- LegalizeVectorOps.cpp - Implement SelectionDAG::LegalizeVectors ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the SelectionDAG::LegalizeVectors method.
10//
11// The vector legalizer looks for vector operations which might need to be
12// scalarized and legalizes them. This is a separate step from Legalize because
13// scalarizing can introduce illegal types. For example, suppose we have an
14// ISD::SDIV of type v2i64 on x86-32. The type is legal (for example, addition
15// on a v2i64 is legal), but ISD::SDIV isn't legal, so we have to unroll the
16// operation, which introduces nodes with the illegal type i64 which must be
17// expanded. Similarly, suppose we have an ISD::SRA of type v16i8 on PowerPC;
18// the operation must be unrolled, which introduces nodes with the illegal
19// type i8 which must be promoted.
20//
21// This does not legalize vector manipulations like ISD::BUILD_VECTOR,
22// or operations that happen to take a vector which are custom-lowered;
23// the legalization for such operations never produces nodes
24// with illegal types, so it's okay to put off legalizing them until
25// SelectionDAG::Legalize runs.
26//
27//===----------------------------------------------------------------------===//
28
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
48
49using namespace llvm;
50
51#define DEBUG_TYPE "legalizevectorops"
52
53namespace {
54
55class VectorLegalizer {
56 SelectionDAG& DAG;
57 const TargetLowering &TLI;
58 bool Changed = false; // Keep track of whether anything changed
59
60 /// For nodes that are of legal width, and that have more than one use, this
61 /// map indicates what regularized operand to use. This allows us to avoid
62 /// legalizing the same thing more than once.
64
65 /// Adds a node to the translation cache.
66 void AddLegalizedOperand(SDValue From, SDValue To) {
67 LegalizedNodes.insert(std::make_pair(From, To));
68 // If someone requests legalization of the new node, return itself.
69 if (From != To)
70 LegalizedNodes.insert(std::make_pair(To, To));
71 }
72
73 /// Legalizes the given node.
74 SDValue LegalizeOp(SDValue Op);
75
76 /// Assuming the node is legal, "legalize" the results.
77 SDValue TranslateLegalizeResults(SDValue Op, SDNode *Result);
78
79 /// Make sure Results are legal and update the translation cache.
80 SDValue RecursivelyLegalizeResults(SDValue Op,
82
83 /// Wrapper to interface LowerOperation with a vector of Results.
84 /// Returns false if the target wants to use default expansion. Otherwise
85 /// returns true. If return is true and the Results are empty, then the
86 /// target wants to keep the input node as is.
87 bool LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results);
88
89 /// Implements unrolling a VSETCC.
90 SDValue UnrollVSETCC(SDNode *Node);
91
92 /// Implement expand-based legalization of vector operations.
93 ///
94 /// This is just a high-level routine to dispatch to specific code paths for
95 /// operations to legalize them.
97
98 /// Implements expansion for FP_TO_UINT; falls back to UnrollVectorOp if
99 /// FP_TO_SINT isn't legal.
100 void ExpandFP_TO_UINT(SDNode *Node, SmallVectorImpl<SDValue> &Results);
101
102 /// Implements expansion for UINT_TO_FLOAT; falls back to UnrollVectorOp if
103 /// SINT_TO_FLOAT and SHR on vectors isn't legal.
104 void ExpandUINT_TO_FLOAT(SDNode *Node, SmallVectorImpl<SDValue> &Results);
105
106 /// Implement expansion for SIGN_EXTEND_INREG using SRL and SRA.
107 SDValue ExpandSEXTINREG(SDNode *Node);
108
109 /// Implement expansion for ANY_EXTEND_VECTOR_INREG.
110 ///
111 /// Shuffles the low lanes of the operand into place and bitcasts to the proper
112 /// type. The contents of the bits in the extended part of each element are
113 /// undef.
114 SDValue ExpandANY_EXTEND_VECTOR_INREG(SDNode *Node);
115
116 /// Implement expansion for SIGN_EXTEND_VECTOR_INREG.
117 ///
118 /// Shuffles the low lanes of the operand into place, bitcasts to the proper
119 /// type, then shifts left and arithmetic shifts right to introduce a sign
120 /// extension.
121 SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDNode *Node);
122
123 /// Implement expansion for ZERO_EXTEND_VECTOR_INREG.
124 ///
125 /// Shuffles the low lanes of the operand into place and blends zeros into
126 /// the remaining lanes, finally bitcasting to the proper type.
127 SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDNode *Node);
128
129 /// Expand bswap of vectors into a shuffle if legal.
130 SDValue ExpandBSWAP(SDNode *Node);
131
132 /// Implement vselect in terms of XOR, AND, OR when blend is not
133 /// supported by the target.
134 SDValue ExpandVSELECT(SDNode *Node);
135 SDValue ExpandVP_SELECT(SDNode *Node);
136 SDValue ExpandVP_MERGE(SDNode *Node);
137 SDValue ExpandVP_REM(SDNode *Node);
138 SDValue ExpandVP_FNEG(SDNode *Node);
139 SDValue ExpandVP_FABS(SDNode *Node);
140 SDValue ExpandVP_FCOPYSIGN(SDNode *Node);
141 SDValue ExpandLOOP_DEPENDENCE_MASK(SDNode *N);
142 SDValue ExpandMaskedBinOp(SDNode *N);
143 SDValue ExpandSELECT(SDNode *Node);
144 std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
145 SDValue ExpandStore(SDNode *N);
146 SDValue ExpandFNEG(SDNode *Node);
147 SDValue ExpandFABS(SDNode *Node);
148 SDValue ExpandFCOPYSIGN(SDNode *Node);
149 void ExpandFSUB(SDNode *Node, SmallVectorImpl<SDValue> &Results);
150 void ExpandSETCC(SDNode *Node, SmallVectorImpl<SDValue> &Results);
151 SDValue ExpandBITREVERSE(SDNode *Node);
152 void ExpandUADDSUBO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
153 void ExpandSADDSUBO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
154 void ExpandMULO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
155 void ExpandFixedPointDiv(SDNode *Node, SmallVectorImpl<SDValue> &Results);
156 void ExpandStrictFPOp(SDNode *Node, SmallVectorImpl<SDValue> &Results);
157 void ExpandREM(SDNode *Node, SmallVectorImpl<SDValue> &Results);
158
159 bool tryExpandVecMathCall(SDNode *Node, RTLIB::Libcall LC,
161
162 void UnrollStrictFPOp(SDNode *Node, SmallVectorImpl<SDValue> &Results);
163
164 /// Implements vector promotion.
165 ///
166 /// This is essentially just bitcasting the operands to a different type and
167 /// bitcasting the result back to the original type.
169
170 /// Implements [SU]INT_TO_FP vector promotion.
171 ///
172 /// This is a [zs]ext of the input operand to a larger integer type.
173 void PromoteINT_TO_FP(SDNode *Node, SmallVectorImpl<SDValue> &Results);
174
175 /// Implements FP_TO_[SU]INT vector promotion of the result type.
176 ///
177 /// It is promoted to a larger integer type. The result is then
178 /// truncated back to the original type.
179 void PromoteFP_TO_INT(SDNode *Node, SmallVectorImpl<SDValue> &Results);
180
181 /// Implements vector setcc operation promotion.
182 ///
183 /// All vector operands are promoted to a vector type with larger element
184 /// type.
185 void PromoteSETCC(SDNode *Node, SmallVectorImpl<SDValue> &Results);
186
187 void PromoteSTRICT(SDNode *Node, SmallVectorImpl<SDValue> &Results);
188
189 /// Calculate the reduction using a type of higher precision and round the
190 /// result to match the original type. Setting NonArithmetic signifies the
191 /// rounding of the result does not affect its value.
192 void PromoteFloatVECREDUCE(SDNode *Node, SmallVectorImpl<SDValue> &Results,
193 bool NonArithmetic);
194
195 void PromoteVECTOR_COMPRESS(SDNode *Node, SmallVectorImpl<SDValue> &Results);
196
197public:
198 VectorLegalizer(SelectionDAG& dag) :
199 DAG(dag), TLI(dag.getTargetLoweringInfo()) {}
200
201 /// Begin legalizer the vector operations in the DAG.
202 bool Run();
203};
204
205} // end anonymous namespace
206
207bool VectorLegalizer::Run() {
208 // Before we start legalizing vector nodes, check if there are any vectors.
209 bool HasVectors = false;
211 E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) {
212 // Check if the values of the nodes contain vectors. We don't need to check
213 // the operands because we are going to check their values at some point.
214 HasVectors = llvm::any_of(I->values(), [](EVT T) { return T.isVector(); });
215
216 // If we found a vector node we can start the legalization.
217 if (HasVectors)
218 break;
219 }
220
221 // If this basic block has no vectors then no need to legalize vectors.
222 if (!HasVectors)
223 return false;
224
225 // The legalize process is inherently a bottom-up recursive process (users
226 // legalize their uses before themselves). Given infinite stack space, we
227 // could just start legalizing on the root and traverse the whole graph. In
228 // practice however, this causes us to run out of stack space on large basic
229 // blocks. To avoid this problem, compute an ordering of the nodes where each
230 // node is only legalized after all of its operands are legalized.
233 E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I)
234 LegalizeOp(SDValue(&*I, 0));
235
236 // Finally, it's possible the root changed. Get the new root.
237 SDValue OldRoot = DAG.getRoot();
238 assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
239 DAG.setRoot(LegalizedNodes[OldRoot]);
240
241 LegalizedNodes.clear();
242
243 // Remove dead nodes now.
244 DAG.RemoveDeadNodes();
245
246 return Changed;
247}
248
249SDValue VectorLegalizer::TranslateLegalizeResults(SDValue Op, SDNode *Result) {
250 assert(Op->getNumValues() == Result->getNumValues() &&
251 "Unexpected number of results");
252 // Generic legalization: just pass the operand through.
253 for (unsigned i = 0, e = Op->getNumValues(); i != e; ++i)
254 AddLegalizedOperand(Op.getValue(i), SDValue(Result, i));
255 return SDValue(Result, Op.getResNo());
256}
257
259VectorLegalizer::RecursivelyLegalizeResults(SDValue Op,
261 assert(Results.size() == Op->getNumValues() &&
262 "Unexpected number of results");
263 // Make sure that the generated code is itself legal.
264 for (unsigned i = 0, e = Results.size(); i != e; ++i) {
265 Results[i] = LegalizeOp(Results[i]);
266 AddLegalizedOperand(Op.getValue(i), Results[i]);
267 }
268
269 return Results[Op.getResNo()];
270}
271
// Legalize a single node: legalize its operands first, pick the target's
// LegalizeAction for it, then dispatch to Promote/Expand/Custom handling.
SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  // Legalize the operands
  // NOTE(review): the declaration of `Ops` (upstream: SmallVector<SDValue, 8>)
  // appears to have been dropped from this extraction — verify against
  // upstream before compiling.
  for (const SDValue &Oper : Op->op_values())
    Ops.push_back(LegalizeOp(Oper));

  SDNode *Node = DAG.UpdateNodeOperands(Op.getNode(), Ops);

  bool HasVectorValueOrOp =
      llvm::any_of(Node->values(), [](EVT T) { return T.isVector(); }) ||
      llvm::any_of(Node->op_values(),
                   [](SDValue O) { return O.getValueType().isVector(); });
  if (!HasVectorValueOrOp)
    return TranslateLegalizeResults(Op, Node);

  TargetLowering::LegalizeAction Action = TargetLowering::Legal;
  EVT ValVT;
  switch (Op.getOpcode()) {
  default:
    return TranslateLegalizeResults(Op, Node);
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Node);
    ISD::LoadExtType ExtType = LD->getExtensionType();
    EVT LoadedVT = LD->getMemoryVT();
    // Only extending vector loads need a legality check here.
    if (LoadedVT.isVector() && ExtType != ISD::NON_EXTLOAD)
      Action = TLI.getLoadAction(LD->getValueType(0), LoadedVT, LD->getAlign(),
                                 LD->getAddressSpace(), ExtType, false);
    break;
  }
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(Node);
    EVT StVT = ST->getMemoryVT();
    // NOTE(review): this MVT ValVT shadows the EVT ValVT declared above the
    // switch; both exist upstream as well.
    MVT ValVT = ST->getValue().getSimpleValueType();
    // Only truncating vector stores need a legality check here.
    if (StVT.isVector() && ST->isTruncatingStore())
      Action = TLI.getTruncStoreAction(ValVT, StVT, ST->getAlign(),
                                       ST->getAddressSpace());
    break;
  }
  // NOTE(review): a case label (upstream: ISD::MERGE_VALUES) was dropped here
  // by the extraction; the statements below belong to it.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    // This operation lies about being legal: when it claims to be legal,
    // it should actually be expanded.
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    ValVT = Node->getValueType(0);
    // For strict int-to-fp conversions the legality check is keyed on the
    // input (integer) type, not the result type.
    if (Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
        Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
      ValVT = Node->getOperand(1).getValueType();
    if (Op.getOpcode() == ISD::STRICT_FSETCC ||
        Op.getOpcode() == ISD::STRICT_FSETCCS) {
      MVT OpVT = Node->getOperand(1).getSimpleValueType();
      ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(3))->get();
      Action = TLI.getCondCodeAction(CCCode, OpVT);
      if (Action == TargetLowering::Legal)
        Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
    } else {
      Action = TLI.getOperationAction(Node->getOpcode(), ValVT);
    }
    // If we're asked to expand a strict vector floating-point operation,
    // by default we're going to simply unroll it. That is usually the
    // best approach, except in the case where the resulting strict (scalar)
    // operations would themselves use the fallback mutation to non-strict.
    // In that specific case, just do the fallback on the vector op.
    if (Action == TargetLowering::Expand && !TLI.isStrictFPEnabled() &&
        TLI.getStrictFPOperationAction(Node->getOpcode(), ValVT) ==
            TargetLowering::Legal) {
      EVT EltVT = ValVT.getVectorElementType();
      if (TLI.getOperationAction(Node->getOpcode(), EltVT)
          == TargetLowering::Expand &&
          TLI.getStrictFPOperationAction(Node->getOpcode(), EltVT)
          == TargetLowering::Legal)
        Action = TargetLowering::Legal;
    }
    break;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::MULHS:
  case ISD::MULHU:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SDIVREM:
  case ISD::UDIVREM:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::FSHL:
  case ISD::FSHR:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::ABS:
  case ISD::ABDS:
  case ISD::ABDU:
  case ISD::AVGCEILS:
  case ISD::AVGCEILU:
  case ISD::AVGFLOORS:
  case ISD::AVGFLOORU:
  case ISD::BSWAP:
  case ISD::BITREVERSE:
  case ISD::CTLZ:
  case ISD::CTTZ:
  // NOTE(review): case labels dropped here by extraction (likely the
  // CTLZ/CTTZ _ZERO_UNDEF variants) — verify against upstream.
  case ISD::CTPOP:
  case ISD::CLMUL:
  case ISD::CLMULH:
  case ISD::CLMULR:
  case ISD::SELECT:
  case ISD::VSELECT:
  case ISD::SELECT_CC:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FNEG:
  case ISD::FABS:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  // NOTE(review): case labels dropped here by extraction (likely the
  // FMINNUM_IEEE/FMAXNUM_IEEE variants) — verify against upstream.
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM:
  case ISD::FMINIMUMNUM:
  case ISD::FMAXIMUMNUM:
  case ISD::FCOPYSIGN:
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FTAN:
  case ISD::FASIN:
  case ISD::FACOS:
  case ISD::FATAN:
  case ISD::FATAN2:
  case ISD::FSINH:
  case ISD::FCOSH:
  case ISD::FTANH:
  case ISD::FLDEXP:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FCBRT:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FEXP10:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FROUNDEVEN:
  case ISD::FFLOOR:
  case ISD::FP_ROUND:
  case ISD::FP_EXTEND:
  // NOTE(review): one or more case labels dropped here by extraction.
  case ISD::FMA:
  // NOTE(review): several case labels dropped here by extraction.
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
  // NOTE(review): case labels dropped here by extraction.
  case ISD::FFREXP:
  case ISD::FMODF:
  case ISD::FSINCOS:
  case ISD::FSINCOSPI:
  case ISD::SADDSAT:
  case ISD::UADDSAT:
  case ISD::SSUBSAT:
  case ISD::USUBSAT:
  case ISD::SSHLSAT:
  case ISD::USHLSAT:
  // NOTE(review): case labels dropped here by extraction.
  case ISD::MGATHER:
  // NOTE(review): a case label dropped here by extraction.
  case ISD::SCMP:
  case ISD::UCMP:
  // NOTE(review): case labels dropped here by extraction.
  case ISD::MASKED_UDIV:
  case ISD::MASKED_SDIV:
  case ISD::MASKED_UREM:
  case ISD::MASKED_SREM:
    // Common case: legality keyed on the result type.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    break;
  case ISD::SMULFIX:
  case ISD::SMULFIXSAT:
  case ISD::UMULFIX:
  case ISD::UMULFIXSAT:
  case ISD::SDIVFIX:
  case ISD::SDIVFIXSAT:
  case ISD::UDIVFIX:
  case ISD::UDIVFIXSAT: {
    // Fixed-point ops: legality also depends on the scale operand.
    unsigned Scale = Node->getConstantOperandVal(2);
    Action = TLI.getFixedPointOperationAction(Node->getOpcode(),
                                              Node->getValueType(0), Scale);
    break;
  }
  case ISD::LROUND:
  case ISD::LLROUND:
  case ISD::LRINT:
  case ISD::LLRINT:
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  // NOTE(review): a long run of case labels (upstream: the VECREDUCE_*
  // opcodes) was dropped here by extraction — verify against upstream.
  case ISD::CTTZ_ELTS:
    // Legality keyed on the first operand's type.
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  // NOTE(review): case labels dropped here by extraction (upstream: the
  // sequential VECREDUCE_SEQ_* opcodes, keyed on operand 1).
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(1).getValueType());
    break;
  case ISD::SETCC: {
    MVT OpVT = Node->getOperand(0).getSimpleValueType();
    ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(2))->get();
    // The condition code must be legal before the operation itself is checked.
    Action = TLI.getCondCodeAction(CCCode, OpVT);
    if (Action == TargetLowering::Legal)
      Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
    break;
  }
  // NOTE(review): case labels dropped here by extraction (upstream: the
  // PARTIAL_REDUCE_*MLA opcodes).
    Action =
        TLI.getPartialReduceMLAAction(Op.getOpcode(), Node->getValueType(0),
                                      Node->getOperand(1).getValueType());
    break;

#define BEGIN_REGISTER_VP_SDNODE(VPID, LEGALPOS, ...)                          \
  case ISD::VPID: {                                                            \
    EVT LegalizeVT = LEGALPOS < 0 ? Node->getValueType(-(1 + LEGALPOS))        \
                                  : Node->getOperand(LEGALPOS).getValueType(); \
    if (ISD::VPID == ISD::VP_SETCC) {                                          \
      ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(2))->get(); \
      Action = TLI.getCondCodeAction(CCCode, LegalizeVT.getSimpleVT());        \
      if (Action != TargetLowering::Legal)                                     \
        break;                                                                 \
    }                                                                          \
    /* Defer non-vector results to LegalizeDAG. */                             \
    if (!Node->getValueType(0).isVector() &&                                   \
        Node->getValueType(0) != MVT::Other) {                                 \
      Action = TargetLowering::Legal;                                          \
      break;                                                                   \
    }                                                                          \
    Action = TLI.getOperationAction(Node->getOpcode(), LegalizeVT);            \
  } break;
#include "llvm/IR/VPIntrinsics.def"
  }

  LLVM_DEBUG(dbgs() << "\nLegalizing vector op: "; Node->dump(&DAG));

  SmallVector<SDValue, 8> ResultVals;
  switch (Action) {
  default: llvm_unreachable("This action is not supported yet!");
  case TargetLowering::Promote:
    assert((Op.getOpcode() != ISD::LOAD && Op.getOpcode() != ISD::STORE) &&
           "This action is not supported yet!");
    LLVM_DEBUG(dbgs() << "Promoting\n");
    Promote(Node, ResultVals);
    assert(!ResultVals.empty() && "No results for promotion?");
    break;
  case TargetLowering::Legal:
    LLVM_DEBUG(dbgs() << "Legal node: nothing to do\n");
    break;
  case TargetLowering::Custom:
    LLVM_DEBUG(dbgs() << "Trying custom legalization\n");
    if (LowerOperationWrapper(Node, ResultVals))
      break;
    LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
    [[fallthrough]];
  case TargetLowering::Expand:
    LLVM_DEBUG(dbgs() << "Expanding\n");
    Expand(Node, ResultVals);
    break;
  }

  if (ResultVals.empty())
    return TranslateLegalizeResults(Op, Node);

  Changed = true;
  return RecursivelyLegalizeResults(Op, ResultVals);
}
608
609// FIXME: This is very similar to TargetLowering::LowerOperationWrapper. Can we
610// merge them somehow?
611bool VectorLegalizer::LowerOperationWrapper(SDNode *Node,
612 SmallVectorImpl<SDValue> &Results) {
613 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
614
615 if (!Res.getNode())
616 return false;
617
618 if (Res == SDValue(Node, 0))
619 return true;
620
621 // If the original node has one result, take the return value from
622 // LowerOperation as is. It might not be result number 0.
623 if (Node->getNumValues() == 1) {
624 Results.push_back(Res);
625 return true;
626 }
627
628 // If the original node has multiple results, then the return node should
629 // have the same number of results.
630 assert((Node->getNumValues() == Res->getNumValues()) &&
631 "Lowering returned the wrong number of results!");
632
633 // Places new result values base on N result number.
634 for (unsigned I = 0, E = Node->getNumValues(); I != E; ++I)
635 Results.push_back(Res.getValue(I));
636
637 return true;
638}
639
640void VectorLegalizer::PromoteSETCC(SDNode *Node,
641 SmallVectorImpl<SDValue> &Results) {
642 MVT VecVT = Node->getOperand(0).getSimpleValueType();
643 MVT NewVecVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VecVT);
644
645 unsigned ExtOp = VecVT.isFloatingPoint() ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
646
647 SDLoc DL(Node);
648 SmallVector<SDValue, 5> Operands(Node->getNumOperands());
649
650 Operands[0] = DAG.getNode(ExtOp, DL, NewVecVT, Node->getOperand(0));
651 Operands[1] = DAG.getNode(ExtOp, DL, NewVecVT, Node->getOperand(1));
652 Operands[2] = Node->getOperand(2);
653
654 if (Node->getOpcode() == ISD::VP_SETCC) {
655 Operands[3] = Node->getOperand(3); // mask
656 Operands[4] = Node->getOperand(4); // evl
657 }
658
659 SDValue Res = DAG.getNode(Node->getOpcode(), DL, Node->getSimpleValueType(0),
660 Operands, Node->getFlags());
661
662 Results.push_back(Res);
663}
664
665void VectorLegalizer::PromoteSTRICT(SDNode *Node,
666 SmallVectorImpl<SDValue> &Results) {
667 MVT VecVT = Node->getOperand(1).getSimpleValueType();
668 MVT NewVecVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VecVT);
669
670 assert(VecVT.isFloatingPoint());
671
672 SDLoc DL(Node);
673 SmallVector<SDValue, 5> Operands(Node->getNumOperands());
675
676 for (unsigned j = 1; j != Node->getNumOperands(); ++j)
677 if (Node->getOperand(j).getValueType().isVector() &&
678 !(ISD::isVPOpcode(Node->getOpcode()) &&
679 ISD::getVPMaskIdx(Node->getOpcode()) == j)) // Skip mask operand.
680 {
681 // promote the vector operand.
682 SDValue Ext =
683 DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {NewVecVT, MVT::Other},
684 {Node->getOperand(0), Node->getOperand(j)});
685 Operands[j] = Ext.getValue(0);
686 Chains.push_back(Ext.getValue(1));
687 } else
688 Operands[j] = Node->getOperand(j); // Skip no vector operand.
689
690 SDVTList VTs = DAG.getVTList(NewVecVT, Node->getValueType(1));
691
692 Operands[0] = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
693
694 SDValue Res =
695 DAG.getNode(Node->getOpcode(), DL, VTs, Operands, Node->getFlags());
696
697 SDValue Round =
698 DAG.getNode(ISD::STRICT_FP_ROUND, DL, {VecVT, MVT::Other},
699 {Res.getValue(1), Res.getValue(0),
700 DAG.getIntPtrConstant(0, DL, /*isTarget=*/true)});
701
702 Results.push_back(Round.getValue(0));
703 Results.push_back(Round.getValue(1));
704}
705
706void VectorLegalizer::PromoteFloatVECREDUCE(SDNode *Node,
707 SmallVectorImpl<SDValue> &Results,
708 bool NonArithmetic) {
709 MVT OpVT = Node->getOperand(0).getSimpleValueType();
710 assert(OpVT.isFloatingPoint() && "Expected floating point reduction!");
711 MVT NewOpVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OpVT);
712
713 SDLoc DL(Node);
714 SDValue NewOp = DAG.getNode(ISD::FP_EXTEND, DL, NewOpVT, Node->getOperand(0));
715 SDValue Rdx =
716 DAG.getNode(Node->getOpcode(), DL, NewOpVT.getVectorElementType(), NewOp,
717 Node->getFlags());
718 SDValue Res =
719 DAG.getNode(ISD::FP_ROUND, DL, Node->getValueType(0), Rdx,
720 DAG.getIntPtrConstant(NonArithmetic, DL, /*isTarget=*/true));
721 Results.push_back(Res);
722}
723
724void VectorLegalizer::PromoteVECTOR_COMPRESS(
725 SDNode *Node, SmallVectorImpl<SDValue> &Results) {
726 SDLoc DL(Node);
727 EVT VT = Node->getValueType(0);
728 MVT PromotedVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT.getSimpleVT());
729 assert((VT.isInteger() || VT.getSizeInBits() == PromotedVT.getSizeInBits()) &&
730 "Only integer promotion or bitcasts between types is supported");
731
732 SDValue Vec = Node->getOperand(0);
733 SDValue Mask = Node->getOperand(1);
734 SDValue Passthru = Node->getOperand(2);
735 if (VT.isInteger()) {
736 Vec = DAG.getNode(ISD::ANY_EXTEND, DL, PromotedVT, Vec);
737 Mask = TLI.promoteTargetBoolean(DAG, Mask, PromotedVT);
738 Passthru = DAG.getNode(ISD::ANY_EXTEND, DL, PromotedVT, Passthru);
739 } else {
740 Vec = DAG.getBitcast(PromotedVT, Vec);
741 Passthru = DAG.getBitcast(PromotedVT, Passthru);
742 }
743
745 DAG.getNode(ISD::VECTOR_COMPRESS, DL, PromotedVT, Vec, Mask, Passthru);
746 Result = VT.isInteger() ? DAG.getNode(ISD::TRUNCATE, DL, VT, Result)
747 : DAG.getBitcast(VT, Result);
748 Results.push_back(Result);
749}
750
751void VectorLegalizer::Promote(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
752 // For a few operations there is a specific concept for promotion based on
753 // the operand's type.
754 switch (Node->getOpcode()) {
755 case ISD::SINT_TO_FP:
756 case ISD::UINT_TO_FP:
759 // "Promote" the operation by extending the operand.
760 PromoteINT_TO_FP(Node, Results);
761 return;
762 case ISD::FP_TO_UINT:
763 case ISD::FP_TO_SINT:
766 // Promote the operation by extending the operand.
767 PromoteFP_TO_INT(Node, Results);
768 return;
769 case ISD::VP_SETCC:
770 case ISD::SETCC:
771 // Promote the operation by extending the operand.
772 PromoteSETCC(Node, Results);
773 return;
774 case ISD::STRICT_FADD:
775 case ISD::STRICT_FSUB:
776 case ISD::STRICT_FMUL:
777 case ISD::STRICT_FDIV:
779 case ISD::STRICT_FMA:
780 PromoteSTRICT(Node, Results);
781 return;
784 PromoteFloatVECREDUCE(Node, Results, /*NonArithmetic=*/false);
785 return;
790 PromoteFloatVECREDUCE(Node, Results, /*NonArithmetic=*/true);
791 return;
793 PromoteVECTOR_COMPRESS(Node, Results);
794 return;
795
796 case ISD::FP_ROUND:
797 case ISD::FP_EXTEND:
798 // These operations are used to do promotion so they can't be promoted
799 // themselves.
800 llvm_unreachable("Don't know how to promote this operation!");
801 case ISD::VP_FABS:
802 case ISD::VP_FCOPYSIGN:
803 case ISD::VP_FNEG:
804 // Promoting fabs, fneg, and fcopysign changes their semantics.
805 llvm_unreachable("These operations should not be promoted");
806 }
807
808 // There are currently two cases of vector promotion:
809 // 1) Bitcasting a vector of integers to a different type to a vector of the
810 // same overall length. For example, x86 promotes ISD::AND v2i32 to v1i64.
811 // 2) Extending a vector of floats to a vector of the same number of larger
812 // floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
813 assert(Node->getNumValues() == 1 &&
814 "Can't promote a vector with multiple results!");
815 MVT VT = Node->getSimpleValueType(0);
816 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
817 SDLoc dl(Node);
818 SmallVector<SDValue, 4> Operands(Node->getNumOperands());
819
820 for (unsigned j = 0; j != Node->getNumOperands(); ++j) {
821 // Do not promote the mask operand of a VP OP.
822 bool SkipPromote = ISD::isVPOpcode(Node->getOpcode()) &&
823 ISD::getVPMaskIdx(Node->getOpcode()) == j;
824 if (Node->getOperand(j).getValueType().isVector() && !SkipPromote)
825 if (Node->getOperand(j)
826 .getValueType()
827 .getVectorElementType()
828 .isFloatingPoint() &&
830 if (ISD::isVPOpcode(Node->getOpcode())) {
831 unsigned EVLIdx =
833 unsigned MaskIdx = *ISD::getVPMaskIdx(Node->getOpcode());
834 Operands[j] =
835 DAG.getNode(ISD::VP_FP_EXTEND, dl, NVT, Node->getOperand(j),
836 Node->getOperand(MaskIdx), Node->getOperand(EVLIdx));
837 } else {
838 Operands[j] =
839 DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(j));
840 }
841 else
842 Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(j));
843 else
844 Operands[j] = Node->getOperand(j);
845 }
846
847 SDValue Res =
848 DAG.getNode(Node->getOpcode(), dl, NVT, Operands, Node->getFlags());
849
850 if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) ||
853 if (ISD::isVPOpcode(Node->getOpcode())) {
854 unsigned EVLIdx = *ISD::getVPExplicitVectorLengthIdx(Node->getOpcode());
855 unsigned MaskIdx = *ISD::getVPMaskIdx(Node->getOpcode());
856 Res = DAG.getNode(ISD::VP_FP_ROUND, dl, VT, Res,
857 Node->getOperand(MaskIdx), Node->getOperand(EVLIdx));
858 } else {
859 Res = DAG.getNode(ISD::FP_ROUND, dl, VT, Res,
860 DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
861 }
862 else
863 Res = DAG.getNode(ISD::BITCAST, dl, VT, Res);
864
865 Results.push_back(Res);
866}
867
868void VectorLegalizer::PromoteINT_TO_FP(SDNode *Node,
869 SmallVectorImpl<SDValue> &Results) {
870 // INT_TO_FP operations may require the input operand be promoted even
871 // when the type is otherwise legal.
872 bool IsStrict = Node->isStrictFPOpcode();
873 MVT VT = Node->getOperand(IsStrict ? 1 : 0).getSimpleValueType();
874 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
876 "Vectors have different number of elements!");
877
878 SDLoc dl(Node);
879 SmallVector<SDValue, 4> Operands(Node->getNumOperands());
880
881 unsigned Opc = (Node->getOpcode() == ISD::UINT_TO_FP ||
882 Node->getOpcode() == ISD::STRICT_UINT_TO_FP)
885 for (unsigned j = 0; j != Node->getNumOperands(); ++j) {
886 if (Node->getOperand(j).getValueType().isVector())
887 Operands[j] = DAG.getNode(Opc, dl, NVT, Node->getOperand(j));
888 else
889 Operands[j] = Node->getOperand(j);
890 }
891
892 if (IsStrict) {
893 SDValue Res = DAG.getNode(Node->getOpcode(), dl,
894 {Node->getValueType(0), MVT::Other}, Operands);
895 Results.push_back(Res);
896 Results.push_back(Res.getValue(1));
897 return;
898 }
899
900 SDValue Res =
901 DAG.getNode(Node->getOpcode(), dl, Node->getValueType(0), Operands);
902 Results.push_back(Res);
903}
904
905// For FP_TO_INT we promote the result type to a vector type with wider
906// elements and then truncate the result. This is different from the default
907// PromoteVector which uses bitcast to promote thus assumning that the
908// promoted vector type has the same overall size.
909void VectorLegalizer::PromoteFP_TO_INT(SDNode *Node,
910 SmallVectorImpl<SDValue> &Results) {
911 MVT VT = Node->getSimpleValueType(0);
912 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
913 bool IsStrict = Node->isStrictFPOpcode();
915 "Vectors have different number of elements!");
916
917 unsigned NewOpc = Node->getOpcode();
918 // Change FP_TO_UINT to FP_TO_SINT if possible.
919 // TODO: Should we only do this if FP_TO_UINT itself isn't legal?
920 if (NewOpc == ISD::FP_TO_UINT &&
922 NewOpc = ISD::FP_TO_SINT;
923
924 if (NewOpc == ISD::STRICT_FP_TO_UINT &&
926 NewOpc = ISD::STRICT_FP_TO_SINT;
927
928 SDLoc dl(Node);
929 SDValue Promoted, Chain;
930 if (IsStrict) {
931 Promoted = DAG.getNode(NewOpc, dl, {NVT, MVT::Other},
932 {Node->getOperand(0), Node->getOperand(1)});
933 Chain = Promoted.getValue(1);
934 } else
935 Promoted = DAG.getNode(NewOpc, dl, NVT, Node->getOperand(0));
936
937 // Assert that the converted value fits in the original type. If it doesn't
938 // (eg: because the value being converted is too big), then the result of the
939 // original operation was undefined anyway, so the assert is still correct.
940 if (Node->getOpcode() == ISD::FP_TO_UINT ||
941 Node->getOpcode() == ISD::STRICT_FP_TO_UINT)
942 NewOpc = ISD::AssertZext;
943 else
944 NewOpc = ISD::AssertSext;
945
946 Promoted = DAG.getNode(NewOpc, dl, NVT, Promoted,
947 DAG.getValueType(VT.getScalarType()));
948 Promoted = DAG.getNode(ISD::TRUNCATE, dl, VT, Promoted);
949 Results.push_back(Promoted);
950 if (IsStrict)
951 Results.push_back(Chain);
952}
953
954std::pair<SDValue, SDValue> VectorLegalizer::ExpandLoad(SDNode *N) {
955 LoadSDNode *LD = cast<LoadSDNode>(N);
956 return TLI.scalarizeVectorLoad(LD, DAG);
957}
958
959SDValue VectorLegalizer::ExpandStore(SDNode *N) {
960 StoreSDNode *ST = cast<StoreSDNode>(N);
961 SDValue TF = TLI.scalarizeVectorStore(ST, DAG);
962 return TF;
963}
964
// Expand a vector operation the target marked Expand into simpler operations,
// pushing the replacement value(s) onto Results. Cases that find a cheaper
// form 'return'; cases that 'break' fall through to the generic
// unroll-to-scalars fallback at the bottom of the function.
void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
  switch (Node->getOpcode()) {
  case ISD::LOAD: {
    // Scalarized load: first value is the merged result, second the chain.
    std::pair<SDValue, SDValue> Tmp = ExpandLoad(Node);
    Results.push_back(Tmp.first);
    Results.push_back(Tmp.second);
    return;
  }
  case ISD::STORE:
    Results.push_back(ExpandStore(Node));
    return;
    // Pass each operand through as the corresponding result.
    for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
      Results.push_back(Node->getOperand(i));
    return;
    if (SDValue Expanded = ExpandSEXTINREG(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
    Results.push_back(ExpandANY_EXTEND_VECTOR_INREG(Node));
    return;
    Results.push_back(ExpandSIGN_EXTEND_VECTOR_INREG(Node));
    return;
    Results.push_back(ExpandZERO_EXTEND_VECTOR_INREG(Node));
    return;
  case ISD::BSWAP:
    if (SDValue Expanded = ExpandBSWAP(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::VP_BSWAP:
    Results.push_back(TLI.expandVPBSWAP(Node, DAG));
    return;
  case ISD::VSELECT:
    if (SDValue Expanded = ExpandVSELECT(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::VP_SELECT:
    if (SDValue Expanded = ExpandVP_SELECT(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::VP_SREM:
  case ISD::VP_UREM:
    if (SDValue Expanded = ExpandVP_REM(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::VP_FNEG:
    if (SDValue Expanded = ExpandVP_FNEG(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::VP_FABS:
    if (SDValue Expanded = ExpandVP_FABS(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::VP_FCOPYSIGN:
    if (SDValue Expanded = ExpandVP_FCOPYSIGN(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::SELECT:
    if (SDValue Expanded = ExpandSELECT(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::SELECT_CC: {
    // Scalable vectors cannot be unrolled, so split SELECT_CC into an
    // explicit SETCC followed by a select on the resulting mask.
    if (Node->getValueType(0).isScalableVector()) {
      EVT CondVT = TLI.getSetCCResultType(
          DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
      SDValue SetCC =
          DAG.getNode(ISD::SETCC, SDLoc(Node), CondVT, Node->getOperand(0),
                      Node->getOperand(1), Node->getOperand(4));
      Results.push_back(DAG.getSelect(SDLoc(Node), Node->getValueType(0), SetCC,
                                      Node->getOperand(2),
                                      Node->getOperand(3)));
      return;
    }
    break;
  }
  case ISD::FP_TO_UINT:
    ExpandFP_TO_UINT(Node, Results);
    return;
  case ISD::UINT_TO_FP:
    ExpandUINT_TO_FLOAT(Node, Results);
    return;
  case ISD::FNEG:
    if (SDValue Expanded = ExpandFNEG(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::FABS:
    if (SDValue Expanded = ExpandFABS(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::FCOPYSIGN:
    if (SDValue Expanded = ExpandFCOPYSIGN(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::FCANONICALIZE: {
    // If the scalar element type has a
    // Legal/Custom FCANONICALIZE, don't
    // mess with the vector, fall back.
    EVT VT = Node->getValueType(0);
    EVT EltVT = VT.getVectorElementType();
        TargetLowering::Expand)
      break;
    // Otherwise canonicalize the whole vector.
    SDValue Mul = TLI.expandFCANONICALIZE(Node, DAG);
    Results.push_back(Mul);
    return;
  }
  case ISD::FSUB:
    ExpandFSUB(Node, Results);
    return;
  case ISD::SETCC:
  case ISD::VP_SETCC:
    ExpandSETCC(Node, Results);
    return;
  case ISD::ABS:
    if (SDValue Expanded = TLI.expandABS(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::ABDS:
  case ISD::ABDU:
    if (SDValue Expanded = TLI.expandABD(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::AVGCEILS:
  case ISD::AVGCEILU:
  case ISD::AVGFLOORS:
  case ISD::AVGFLOORU:
    if (SDValue Expanded = TLI.expandAVG(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::BITREVERSE:
    if (SDValue Expanded = ExpandBITREVERSE(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::VP_BITREVERSE:
    if (SDValue Expanded = TLI.expandVPBITREVERSE(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::CTPOP:
    if (SDValue Expanded = TLI.expandCTPOP(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::VP_CTPOP:
    if (SDValue Expanded = TLI.expandVPCTPOP(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::CTLZ:
    if (SDValue Expanded = TLI.expandCTLZ(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::VP_CTLZ:
  case ISD::VP_CTLZ_ZERO_UNDEF:
    if (SDValue Expanded = TLI.expandVPCTLZ(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::CTTZ:
    if (SDValue Expanded = TLI.expandCTTZ(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::VP_CTTZ:
  case ISD::VP_CTTZ_ZERO_UNDEF:
    if (SDValue Expanded = TLI.expandVPCTTZ(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::FSHL:
  case ISD::VP_FSHL:
  case ISD::FSHR:
  case ISD::VP_FSHR:
    if (SDValue Expanded = TLI.expandFunnelShift(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::CLMUL:
  case ISD::CLMULR:
  case ISD::CLMULH:
    if (SDValue Expanded = TLI.expandCLMUL(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (SDValue Expanded = TLI.expandROT(Node, false /*AllowVectorOps*/, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
    if (SDValue Expanded = TLI.expandFMINNUM_FMAXNUM(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM:
    Results.push_back(TLI.expandFMINIMUM_FMAXIMUM(Node, DAG));
    return;
  case ISD::FMINIMUMNUM:
  case ISD::FMAXIMUMNUM:
    Results.push_back(TLI.expandFMINIMUMNUM_FMAXIMUMNUM(Node, DAG));
    return;
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
    if (SDValue Expanded = TLI.expandIntMINMAX(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::UADDO:
  case ISD::USUBO:
    ExpandUADDSUBO(Node, Results);
    return;
  case ISD::SADDO:
  case ISD::SSUBO:
    ExpandSADDSUBO(Node, Results);
    return;
  case ISD::UMULO:
  case ISD::SMULO:
    ExpandMULO(Node, Results);
    return;
  case ISD::USUBSAT:
  case ISD::SSUBSAT:
  case ISD::UADDSAT:
  case ISD::SADDSAT:
    if (SDValue Expanded = TLI.expandAddSubSat(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::USHLSAT:
  case ISD::SSHLSAT:
    if (SDValue Expanded = TLI.expandShlSat(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
    // Expand the fp-to-int saturation if it is scalable to prevent it from
    // unrolling below.
    if (Node->getValueType(0).isScalableVector()) {
      if (SDValue Expanded = TLI.expandFP_TO_INT_SAT(Node, DAG)) {
        Results.push_back(Expanded);
        return;
      }
    }
    break;
  case ISD::SMULFIX:
  case ISD::UMULFIX:
    if (SDValue Expanded = TLI.expandFixedPointMul(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::SMULFIXSAT:
  case ISD::UMULFIXSAT:
    // FIXME: We do not expand SMULFIXSAT/UMULFIXSAT here yet, not sure exactly
    // why. Maybe it results in worse codegen compared to the unroll for some
    // targets? This should probably be investigated. And if we still prefer to
    // unroll an explanation could be helpful.
    break;
  case ISD::SDIVFIX:
  case ISD::UDIVFIX:
    ExpandFixedPointDiv(Node, Results);
    return;
  case ISD::SDIVFIXSAT:
  case ISD::UDIVFIXSAT:
    break;
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    ExpandStrictFPOp(Node, Results);
    return;
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
    Results.push_back(TLI.expandVecReduce(Node, DAG));
    return;
    Results.push_back(TLI.expandPartialReduceMLA(Node, DAG));
    return;
    Results.push_back(TLI.expandVecReduceSeq(Node, DAG));
    return;
  case ISD::SREM:
  case ISD::UREM:
    ExpandREM(Node, Results);
    return;
  case ISD::VP_MERGE:
    if (SDValue Expanded = ExpandVP_MERGE(Node)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::FREM: {
    // Try a vector-math library call before falling back to unrolling.
    RTLIB::Libcall LC = RTLIB::getREM(Node->getValueType(0));
    if (tryExpandVecMathCall(Node, LC, Results))
      return;

    break;
  }
  case ISD::FSINCOS:
  case ISD::FSINCOSPI: {
    EVT VT = Node->getValueType(0);
    RTLIB::Libcall LC = Node->getOpcode() == ISD::FSINCOS
                            ? RTLIB::getSINCOS(VT)
                            : RTLIB::getSINCOSPI(VT);
    if (LC != RTLIB::UNKNOWN_LIBCALL &&
        TLI.expandMultipleResultFPLibCall(DAG, LC, Node, Results))
      return;

    // TODO: Try to see if there's a narrower call available to use before
    // scalarizing.
    break;
  }
  case ISD::FPOW: {
    RTLIB::Libcall LC = RTLIB::getPOW(Node->getValueType(0));
    if (tryExpandVecMathCall(Node, LC, Results))
      return;

    // TODO: Try to see if there's a narrower call available to use before
    // scalarizing.
    break;
  }
  case ISD::FCBRT: {
    RTLIB::Libcall LC = RTLIB::getCBRT(Node->getValueType(0));
    if (tryExpandVecMathCall(Node, LC, Results))
      return;

    // TODO: Try to see if there's a narrower call available to use before
    // scalarizing.
    break;
  }
  case ISD::FMODF: {
    EVT VT = Node->getValueType(0);
    RTLIB::Libcall LC = RTLIB::getMODF(VT);
    if (LC != RTLIB::UNKNOWN_LIBCALL &&
        TLI.expandMultipleResultFPLibCall(DAG, LC, Node, Results,
                                          /*CallRetResNo=*/0))
      return;
    break;
  }
    Results.push_back(TLI.expandVECTOR_COMPRESS(Node, DAG));
    return;
  case ISD::CTTZ_ELTS:
    Results.push_back(TLI.expandCttzElts(Node, DAG));
    return;
    Results.push_back(TLI.expandVectorFindLastActive(Node, DAG));
    return;
  case ISD::SCMP:
  case ISD::UCMP:
    Results.push_back(TLI.expandCMP(Node, DAG));
    return;
    Results.push_back(ExpandLOOP_DEPENDENCE_MASK(Node));
    return;

  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FMA:
  case ISD::FDIV:
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FNEARBYINT:
  case ISD::FRINT:
  case ISD::FROUND:
  case ISD::FROUNDEVEN:
  case ISD::FTRUNC:
  case ISD::FSQRT:
    // Prefer splitting the whole op in half over scalarizing it outright.
    if (SDValue Expanded = TLI.expandVectorNaryOpBySplitting(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::MASKED_UDIV:
  case ISD::MASKED_SDIV:
  case ISD::MASKED_UREM:
  case ISD::MASKED_SREM:
    Results.push_back(ExpandMaskedBinOp(Node));
    return;
  }

  // Nothing above produced a cheaper form: fall back to element-by-element
  // unrolling, forwarding every result value of the unrolled node.
  SDValue Unrolled = DAG.UnrollVectorOp(Node);
  if (Node->getNumValues() == 1) {
    Results.push_back(Unrolled);
  } else {
    assert(Node->getNumValues() == Unrolled->getNumValues() &&
           "VectorLegalizer Expand returned wrong number of results!");
    for (unsigned I = 0, E = Unrolled->getNumValues(); I != E; ++I)
      Results.push_back(Unrolled.getValue(I));
  }
}
1431
SDValue VectorLegalizer::ExpandSELECT(SDNode *Node) {
  // Lower a select instruction where the condition is a scalar and the
  // operands are vectors. Lower this select to VSELECT and implement it
  // using XOR AND OR. The selector bit is broadcasted.
  EVT VT = Node->getValueType(0);
  SDLoc DL(Node);

  SDValue Mask = Node->getOperand(0);
  SDValue Op1 = Node->getOperand(1);
  SDValue Op2 = Node->getOperand(2);

  assert(VT.isVector() && !Mask.getValueType().isVector()
         && Op1.getValueType() == Op2.getValueType() && "Invalid type");

  // If we can't even use the basic vector operations of
  // AND,OR,XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted' which means that it is
  // 'bitcasted' to another type which is handled.
  // Also, we need to be able to construct a splat vector using either
  // BUILD_VECTOR or SPLAT_VECTOR.
  // FIXME: Should we also permit fixed-length SPLAT_VECTOR as a fallback to
  // BUILD_VECTOR?
  if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
                             VT) == TargetLowering::Expand)
    return SDValue();

  // Generate a mask operand.
  EVT MaskTy = VT.changeVectorElementTypeToInteger();

  // What is the size of each element in the vector mask.
  EVT BitTy = MaskTy.getScalarType();

  // Turn the scalar condition into all-ones / all-zeros of the mask element
  // type.
  Mask = DAG.getSelect(DL, BitTy, Mask, DAG.getAllOnesConstant(DL, BitTy),
                       DAG.getConstant(0, DL, BitTy));

  // Broadcast the mask so that the entire vector is all one or all zero.
  Mask = DAG.getSplat(MaskTy, DL, Mask);

  // Bitcast the operands to be the same type as the mask.
  // This is needed when we select between FP types because
  // the mask is a vector of integers.
  Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
  Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);

  SDValue NotMask = DAG.getNOT(DL, Mask, MaskTy);

  // (Op1 & Mask) | (Op2 & ~Mask) picks one operand per bit, then cast back to
  // the original (possibly FP) result type.
  Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
  Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask);
  SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, DL, Node->getValueType(0), Val);
}
1487
1488SDValue VectorLegalizer::ExpandSEXTINREG(SDNode *Node) {
1489 EVT VT = Node->getValueType(0);
1490
1491 // Make sure that the SRA and SHL instructions are available.
1492 if (TLI.getOperationAction(ISD::SRA, VT) == TargetLowering::Expand ||
1493 TLI.getOperationAction(ISD::SHL, VT) == TargetLowering::Expand)
1494 return SDValue();
1495
1496 SDLoc DL(Node);
1497 EVT OrigTy = cast<VTSDNode>(Node->getOperand(1))->getVT();
1498
1499 unsigned BW = VT.getScalarSizeInBits();
1500 unsigned OrigBW = OrigTy.getScalarSizeInBits();
1501 SDValue ShiftSz = DAG.getConstant(BW - OrigBW, DL, VT);
1502
1503 SDValue Op = DAG.getNode(ISD::SHL, DL, VT, Node->getOperand(0), ShiftSz);
1504 return DAG.getNode(ISD::SRA, DL, VT, Op, ShiftSz);
1505}
1506
1507// Generically expand a vector anyext in register to a shuffle of the relevant
1508// lanes into the appropriate locations, with other lanes left undef.
1509SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDNode *Node) {
1510 SDLoc DL(Node);
1511 EVT VT = Node->getValueType(0);
1512 int NumElements = VT.getVectorNumElements();
1513 SDValue Src = Node->getOperand(0);
1514 EVT SrcVT = Src.getValueType();
1515 int NumSrcElements = SrcVT.getVectorNumElements();
1516
1517 // *_EXTEND_VECTOR_INREG SrcVT can be smaller than VT - so insert the vector
1518 // into a larger vector type.
1519 if (SrcVT.bitsLE(VT)) {
1520 assert((VT.getSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
1521 "ANY_EXTEND_VECTOR_INREG vector size mismatch");
1522 NumSrcElements = VT.getSizeInBits() / SrcVT.getScalarSizeInBits();
1523 SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
1524 NumSrcElements);
1525 Src = DAG.getInsertSubvector(DL, DAG.getUNDEF(SrcVT), Src, 0);
1526 }
1527
1528 // Build a base mask of undef shuffles.
1529 SmallVector<int, 16> ShuffleMask;
1530 ShuffleMask.resize(NumSrcElements, -1);
1531
1532 // Place the extended lanes into the correct locations.
1533 int ExtLaneScale = NumSrcElements / NumElements;
1534 int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
1535 for (int i = 0; i < NumElements; ++i)
1536 ShuffleMask[i * ExtLaneScale + EndianOffset] = i;
1537
1538 return DAG.getNode(
1539 ISD::BITCAST, DL, VT,
1540 DAG.getVectorShuffle(SrcVT, DL, Src, DAG.getPOISON(SrcVT), ShuffleMask));
1541}
1542
// Expand SIGN_EXTEND_VECTOR_INREG via an any-extend followed by a shl/sra
// pair that replicates the sign bit of each narrow element.
SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDNode *Node) {
  SDLoc DL(Node);
  EVT VT = Node->getValueType(0);
  SDValue Src = Node->getOperand(0);
  EVT SrcVT = Src.getValueType();

  // First build an any-extend node which can be legalized above when we
  // recurse through it.

  // Now we need sign extend. Do this by shifting the elements. Even if these
  // aren't legal operations, they have a better chance of being legalized
  // without full scalarization than the sign extension does.
  unsigned EltWidth = VT.getScalarSizeInBits();
  unsigned SrcEltWidth = SrcVT.getScalarSizeInBits();
  SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
  // SHL moves the narrow value into the top bits; SRA then smears the sign
  // bit back down over the vacated low bits.
  return DAG.getNode(ISD::SRA, DL, VT,
                     DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
                     ShiftAmount);
}
1563
1564// Generically expand a vector zext in register to a shuffle of the relevant
1565// lanes into the appropriate locations, a blend of zero into the high bits,
1566// and a bitcast to the wider element type.
1567SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDNode *Node) {
1568 SDLoc DL(Node);
1569 EVT VT = Node->getValueType(0);
1570 int NumElements = VT.getVectorNumElements();
1571 SDValue Src = Node->getOperand(0);
1572 EVT SrcVT = Src.getValueType();
1573 int NumSrcElements = SrcVT.getVectorNumElements();
1574
1575 // *_EXTEND_VECTOR_INREG SrcVT can be smaller than VT - so insert the vector
1576 // into a larger vector type.
1577 if (SrcVT.bitsLE(VT)) {
1578 assert((VT.getSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
1579 "ZERO_EXTEND_VECTOR_INREG vector size mismatch");
1580 NumSrcElements = VT.getSizeInBits() / SrcVT.getScalarSizeInBits();
1581 SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
1582 NumSrcElements);
1583 Src = DAG.getInsertSubvector(DL, DAG.getUNDEF(SrcVT), Src, 0);
1584 }
1585
1586 // Build up a zero vector to blend into this one.
1587 SDValue Zero = DAG.getConstant(0, DL, SrcVT);
1588
1589 // Shuffle the incoming lanes into the correct position, and pull all other
1590 // lanes from the zero vector.
1591 auto ShuffleMask = llvm::to_vector<16>(llvm::seq<int>(0, NumSrcElements));
1592
1593 int ExtLaneScale = NumSrcElements / NumElements;
1594 int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
1595 for (int i = 0; i < NumElements; ++i)
1596 ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;
1597
1598 return DAG.getNode(ISD::BITCAST, DL, VT,
1599 DAG.getVectorShuffle(SrcVT, DL, Zero, Src, ShuffleMask));
1600}
1601
1602static void createBSWAPShuffleMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
1603 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
1604 for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I)
1605 for (int J = ScalarSizeInBytes - 1; J >= 0; --J)
1606 ShuffleMask.push_back((I * ScalarSizeInBytes) + J);
1607}
1608
// Expand BSWAP, preferring a byte shuffle, then a shift-based expansion,
// and finally (by returning SDValue()) scalar unrolling by the caller.
SDValue VectorLegalizer::ExpandBSWAP(SDNode *Node) {
  EVT VT = Node->getValueType(0);

  // Scalable vectors can't use shuffle expansion.
  if (VT.isScalableVector())
    return TLI.expandBSWAP(Node, DAG);

  // Generate a byte wise shuffle mask for the BSWAP.
  SmallVector<int, 16> ShuffleMask;
  createBSWAPShuffleMask(VT, ShuffleMask);
  EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, ShuffleMask.size());

  // Only emit a shuffle if the mask is legal.
  if (TLI.isShuffleMaskLegal(ShuffleMask, ByteVT)) {
    SDLoc DL(Node);
    // Reinterpret as bytes, permute them, then cast back to the original type.
    SDValue Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Node->getOperand(0));
    Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getPOISON(ByteVT),
                              ShuffleMask);
    return DAG.getNode(ISD::BITCAST, DL, VT, Op);
  }

  // If we have the appropriate vector bit operations, it is better to use them
  // than unrolling and expanding each component.
  if (TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
    return TLI.expandBSWAP(Node, DAG);

  // Otherwise let the caller unroll.
  return SDValue();
}
1641
// Expand BITREVERSE, preferring a BSWAP byte shuffle + per-byte reverse,
// then a shift-based expansion, and finally (by returning SDValue())
// scalar unrolling by the caller.
SDValue VectorLegalizer::ExpandBITREVERSE(SDNode *Node) {
  EVT VT = Node->getValueType(0);

  // We can't unroll or use shuffles for scalable vectors.
  if (VT.isScalableVector())
    return TLI.expandBITREVERSE(Node, DAG);

  // If we have the scalar operation, it's probably cheaper to unroll it.
    return SDValue();

  // If the vector element width is a whole number of bytes, test if its legal
  // to BSWAP shuffle the bytes and then perform the BITREVERSE on the byte
  // vector. This greatly reduces the number of bit shifts necessary.
  unsigned ScalarSizeInBits = VT.getScalarSizeInBits();
  if (ScalarSizeInBits > 8 && (ScalarSizeInBits % 8) == 0) {
    SmallVector<int, 16> BSWAPMask;
    createBSWAPShuffleMask(VT, BSWAPMask);

    EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, BSWAPMask.size());
    if (TLI.isShuffleMaskLegal(BSWAPMask, ByteVT) &&
        (TLI.isOperationLegalOrCustom(ISD::SHL, ByteVT) &&
         TLI.isOperationLegalOrCustom(ISD::SRL, ByteVT) &&
      SDLoc DL(Node);
      // BSWAP-shuffle the bytes into reverse order, then reverse the bits
      // inside each byte.
      SDValue Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Node->getOperand(0));
      Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getPOISON(ByteVT),
                                BSWAPMask);
      Op = DAG.getNode(ISD::BITREVERSE, DL, ByteVT, Op);
      Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
      return Op;
    }
  }

  // If we have the appropriate vector bit operations, it is better to use them
  // than unrolling and expanding each component.
  if (TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
    return TLI.expandBITREVERSE(Node, DAG);

  // Otherwise unroll.
  return SDValue();
}
1689
1690SDValue VectorLegalizer::ExpandVSELECT(SDNode *Node) {
1691 // Implement VSELECT in terms of XOR, AND, OR
1692 // on platforms which do not support blend natively.
1693 SDLoc DL(Node);
1694
1695 SDValue Mask = Node->getOperand(0);
1696 SDValue Op1 = Node->getOperand(1);
1697 SDValue Op2 = Node->getOperand(2);
1698
1699 EVT VT = Mask.getValueType();
1700
1701 // If we can't even use the basic vector operations of
1702 // AND,OR,XOR, we will have to scalarize the op.
1703 // Notice that the operation may be 'promoted' which means that it is
1704 // 'bitcasted' to another type which is handled.
1705 if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
1706 TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
1707 TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand)
1708 return SDValue();
1709
1710 // This operation also isn't safe with AND, OR, XOR when the boolean type is
1711 // 0/1 and the select operands aren't also booleans, as we need an all-ones
1712 // vector constant to mask with.
1713 // FIXME: Sign extend 1 to all ones if that's legal on the target.
1714 auto BoolContents = TLI.getBooleanContents(Op1.getValueType());
1715 if (BoolContents != TargetLowering::ZeroOrNegativeOneBooleanContent &&
1716 !(BoolContents == TargetLowering::ZeroOrOneBooleanContent &&
1717 Op1.getValueType().getVectorElementType() == MVT::i1))
1718 return SDValue();
1719
1720 // If the mask and the type are different sizes, unroll the vector op. This
1721 // can occur when getSetCCResultType returns something that is different in
1722 // size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8.
1723 if (VT.getSizeInBits() != Op1.getValueSizeInBits())
1724 return SDValue();
1725
1726 // Bitcast the operands to be the same type as the mask.
1727 // This is needed when we select between FP types because
1728 // the mask is a vector of integers.
1729 Op1 = DAG.getNode(ISD::BITCAST, DL, VT, Op1);
1730 Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2);
1731
1732 SDValue NotMask = DAG.getNOT(DL, Mask, VT);
1733
1734 Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
1735 Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask);
1736 SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2);
1737 return DAG.getNode(ISD::BITCAST, DL, Node->getValueType(0), Val);
1738}
1739
1740SDValue VectorLegalizer::ExpandVP_SELECT(SDNode *Node) {
1741 // Implement VP_SELECT in terms of VP_XOR, VP_AND and VP_OR on platforms which
1742 // do not support it natively.
1743 SDLoc DL(Node);
1744
1745 SDValue Mask = Node->getOperand(0);
1746 SDValue Op1 = Node->getOperand(1);
1747 SDValue Op2 = Node->getOperand(2);
1748 SDValue EVL = Node->getOperand(3);
1749
1750 EVT VT = Mask.getValueType();
1751
1752 // If we can't even use the basic vector operations of
1753 // VP_AND,VP_OR,VP_XOR, we will have to scalarize the op.
1754 if (TLI.getOperationAction(ISD::VP_AND, VT) == TargetLowering::Expand ||
1755 TLI.getOperationAction(ISD::VP_XOR, VT) == TargetLowering::Expand ||
1756 TLI.getOperationAction(ISD::VP_OR, VT) == TargetLowering::Expand)
1757 return SDValue();
1758
1759 // This operation also isn't safe when the operands aren't also booleans.
1760 if (Op1.getValueType().getVectorElementType() != MVT::i1)
1761 return SDValue();
1762
1763 SDValue Ones = DAG.getAllOnesConstant(DL, VT);
1764 SDValue NotMask = DAG.getNode(ISD::VP_XOR, DL, VT, Mask, Ones, Ones, EVL);
1765
1766 Op1 = DAG.getNode(ISD::VP_AND, DL, VT, Op1, Mask, Ones, EVL);
1767 Op2 = DAG.getNode(ISD::VP_AND, DL, VT, Op2, NotMask, Ones, EVL);
1768 return DAG.getNode(ISD::VP_OR, DL, VT, Op1, Op2, Ones, EVL);
1769}
1770
SDValue VectorLegalizer::ExpandVP_MERGE(SDNode *Node) {
  // Implement VP_MERGE in terms of VSELECT. Construct a mask where vector
  // indices less than the EVL/pivot are true. Combine that with the original
  // mask for a full-length mask. Use a full-length VSELECT to select between
  // the true and false values.
  SDLoc DL(Node);

  SDValue Mask = Node->getOperand(0);
  SDValue Op1 = Node->getOperand(1);
  SDValue Op2 = Node->getOperand(2);
  SDValue EVL = Node->getOperand(3);

  EVT MaskVT = Mask.getValueType();
  bool IsFixedLen = MaskVT.isFixedLengthVector();

  // Vector with the mask's element count but EVL's scalar type, used to
  // compare a step vector against the splatted EVL below.
  EVT EVLVecVT = EVT::getVectorVT(*DAG.getContext(), EVL.getValueType(),
                                  MaskVT.getVectorElementCount());

  // If we can't construct the EVL mask efficiently, it's better to unroll.
  if ((IsFixedLen &&
      (!IsFixedLen &&
       (!TLI.isOperationLegalOrCustom(ISD::STEP_VECTOR, EVLVecVT) ||
    return SDValue();

  // If using a SETCC would result in a different type than the mask type,
  // unroll.
  if (TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                             EVLVecVT) != MaskVT)
    return SDValue();

  // EVLMask[i] = (i < EVL): lanes at or beyond the pivot become false.
  SDValue StepVec = DAG.getStepVector(DL, EVLVecVT);
  SDValue SplatEVL = DAG.getSplat(EVLVecVT, DL, EVL);
  SDValue EVLMask =
      DAG.getSetCC(DL, MaskVT, StepVec, SplatEVL, ISD::CondCode::SETULT);

  // AND with the incoming mask and select over the full vector length.
  SDValue FullMask = DAG.getNode(ISD::AND, DL, MaskVT, Mask, EVLMask);
  return DAG.getSelect(DL, Node->getValueType(0), FullMask, Op1, Op2);
}
1811
1812SDValue VectorLegalizer::ExpandVP_REM(SDNode *Node) {
1813 // Implement VP_SREM/UREM in terms of VP_SDIV/VP_UDIV, VP_MUL, VP_SUB.
1814 EVT VT = Node->getValueType(0);
1815
1816 unsigned DivOpc = Node->getOpcode() == ISD::VP_SREM ? ISD::VP_SDIV : ISD::VP_UDIV;
1817
1818 if (!TLI.isOperationLegalOrCustom(DivOpc, VT) ||
1819 !TLI.isOperationLegalOrCustom(ISD::VP_MUL, VT) ||
1820 !TLI.isOperationLegalOrCustom(ISD::VP_SUB, VT))
1821 return SDValue();
1822
1823 SDLoc DL(Node);
1824
1825 SDValue Dividend = Node->getOperand(0);
1826 SDValue Divisor = Node->getOperand(1);
1827 SDValue Mask = Node->getOperand(2);
1828 SDValue EVL = Node->getOperand(3);
1829
1830 // X % Y -> X-X/Y*Y
1831 SDValue Div = DAG.getNode(DivOpc, DL, VT, Dividend, Divisor, Mask, EVL);
1832 SDValue Mul = DAG.getNode(ISD::VP_MUL, DL, VT, Divisor, Div, Mask, EVL);
1833 return DAG.getNode(ISD::VP_SUB, DL, VT, Dividend, Mul, Mask, EVL);
1834}
1835
1836SDValue VectorLegalizer::ExpandVP_FNEG(SDNode *Node) {
1837 EVT VT = Node->getValueType(0);
1838 EVT IntVT = VT.changeVectorElementTypeToInteger();
1839
1840 if (!TLI.isOperationLegalOrCustom(ISD::VP_XOR, IntVT))
1841 return SDValue();
1842
1843 SDValue Mask = Node->getOperand(1);
1844 SDValue EVL = Node->getOperand(2);
1845
1846 SDLoc DL(Node);
1847 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, IntVT, Node->getOperand(0));
1848 SDValue SignMask = DAG.getConstant(
1849 APInt::getSignMask(IntVT.getScalarSizeInBits()), DL, IntVT);
1850 SDValue Xor = DAG.getNode(ISD::VP_XOR, DL, IntVT, Cast, SignMask, Mask, EVL);
1851 return DAG.getNode(ISD::BITCAST, DL, VT, Xor);
1852}
1853
// Predicated FABS: clear the sign bit with a VP_AND in the integer domain.
SDValue VectorLegalizer::ExpandVP_FABS(SDNode *Node) {
  EVT VT = Node->getValueType(0);
  EVT IntVT = VT.changeVectorElementTypeToInteger();

  if (!TLI.isOperationLegalOrCustom(ISD::VP_AND, IntVT))
    return SDValue();

  SDValue Mask = Node->getOperand(1);
  SDValue EVL = Node->getOperand(2);

  SDLoc DL(Node);
  SDValue Cast = DAG.getNode(ISD::BITCAST, DL, IntVT, Node->getOperand(0));
  // Constant with every bit set except the sign bit of each element.
  SDValue ClearSignMask = DAG.getConstant(
  SDValue ClearSign =
      DAG.getNode(ISD::VP_AND, DL, IntVT, Cast, ClearSignMask, Mask, EVL);
  return DAG.getNode(ISD::BITCAST, DL, VT, ClearSign);
}
1872
// Predicated FCOPYSIGN: combine the magnitude bits of operand 0 with the
// sign bit of operand 1 using predicated integer bit operations.
SDValue VectorLegalizer::ExpandVP_FCOPYSIGN(SDNode *Node) {
  EVT VT = Node->getValueType(0);

  // Only the same-type form is handled here.
  if (VT != Node->getOperand(1).getValueType())
    return SDValue();

  EVT IntVT = VT.changeVectorElementTypeToInteger();
  if (!TLI.isOperationLegalOrCustom(ISD::VP_AND, IntVT) ||
      !TLI.isOperationLegalOrCustom(ISD::VP_XOR, IntVT))
    return SDValue();

  SDValue Mask = Node->getOperand(2);
  SDValue EVL = Node->getOperand(3);

  SDLoc DL(Node);
  SDValue Mag = DAG.getNode(ISD::BITCAST, DL, IntVT, Node->getOperand(0));
  SDValue Sign = DAG.getNode(ISD::BITCAST, DL, IntVT, Node->getOperand(1));

  // Isolate the sign bit of the sign operand.
  SDValue SignMask = DAG.getConstant(
      APInt::getSignMask(IntVT.getScalarSizeInBits()), DL, IntVT);
  SDValue SignBit =
      DAG.getNode(ISD::VP_AND, DL, IntVT, Sign, SignMask, Mask, EVL);

  // Strip the sign bit from the magnitude operand.
  SDValue ClearSignMask = DAG.getConstant(
  SDValue ClearedSign =
      DAG.getNode(ISD::VP_AND, DL, IntVT, Mag, ClearSignMask, Mask, EVL);

  // The two halves occupy disjoint bits, so the OR can carry Disjoint.
  SDValue CopiedSign = DAG.getNode(ISD::VP_OR, DL, IntVT, ClearedSign, SignBit,
                                   Mask, EVL, SDNodeFlags::Disjoint);

  return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
}
1906
1907SDValue VectorLegalizer::ExpandLOOP_DEPENDENCE_MASK(SDNode *N) {
1908 SDLoc DL(N);
1909 EVT VT = N->getValueType(0);
1910 SDValue SourceValue = N->getOperand(0);
1911 SDValue SinkValue = N->getOperand(1);
1912 SDValue EltSizeInBytes = N->getOperand(2);
1913
1914 // Note: The lane offset is scalable if the mask is scalable.
1915 ElementCount LaneOffsetEC =
1916 ElementCount::get(N->getConstantOperandVal(3), VT.isScalableVT());
1917
1918 EVT PtrVT = SourceValue->getValueType(0);
1919 bool IsReadAfterWrite = N->getOpcode() == ISD::LOOP_DEPENDENCE_RAW_MASK;
1920
1921 // Take the difference between the pointers and divided by the element size,
1922 // to see how many lanes separate them.
1923 SDValue Diff = DAG.getNode(ISD::SUB, DL, PtrVT, SinkValue, SourceValue);
1924 if (IsReadAfterWrite)
1925 Diff = DAG.getNode(ISD::ABS, DL, PtrVT, Diff);
1926 Diff = DAG.getNode(ISD::SDIV, DL, PtrVT, Diff, EltSizeInBytes);
1927
1928 // The pointers do not alias if:
1929 // * Diff <= 0 (WAR_MASK)
1930 // * Diff == 0 (RAW_MASK)
1931 EVT CmpVT =
1932 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), PtrVT);
1933 SDValue Zero = DAG.getConstant(0, DL, PtrVT);
1934 SDValue Cmp = DAG.getSetCC(DL, CmpVT, Diff, Zero,
1935 IsReadAfterWrite ? ISD::SETEQ : ISD::SETLE);
1936
1937 // The pointers do not alias if:
1938 // Lane + LaneOffset < Diff (WAR/RAW_MASK)
1939 SDValue LaneOffset = DAG.getElementCount(DL, PtrVT, LaneOffsetEC);
1940 SDValue MaskN =
1941 DAG.getSelect(DL, PtrVT, Cmp, DAG.getConstant(-1, DL, PtrVT), Diff);
1942
1943 return DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, VT, LaneOffset, MaskN);
1944}
1945
1946SDValue VectorLegalizer::ExpandMaskedBinOp(SDNode *N) {
1947 // Masked bin ops don't have undefined behaviour when dividing by zero
1948 // on disabled lanes and produce poison instead. Replace the divisor on the
1949 // disabled lanes with 1 to avoid division by zero or overflow.
1950 SDLoc dl(N);
1951 EVT VT = N->getValueType(0);
1952 SDValue SafeDivisor = DAG.getSelect(
1953 dl, VT, N->getOperand(2), N->getOperand(1), DAG.getConstant(1, dl, VT));
1954 return DAG.getNode(ISD::getUnmaskedBinOpOpcode(N->getOpcode()), dl, VT,
1955 N->getOperand(0), SafeDivisor);
1956}
1957
1958void VectorLegalizer::ExpandFP_TO_UINT(SDNode *Node,
1959 SmallVectorImpl<SDValue> &Results) {
1960 // Attempt to expand using TargetLowering.
1961 SDValue Result, Chain;
1962 if (TLI.expandFP_TO_UINT(Node, Result, Chain, DAG)) {
1963 Results.push_back(Result);
1964 if (Node->isStrictFPOpcode())
1965 Results.push_back(Chain);
1966 return;
1967 }
1968
1969 // Otherwise go ahead and unroll.
1970 if (Node->isStrictFPOpcode()) {
1971 UnrollStrictFPOp(Node, Results);
1972 return;
1973 }
1974
1975 Results.push_back(DAG.UnrollVectorOp(Node));
1976}
1977
1978void VectorLegalizer::ExpandUINT_TO_FLOAT(SDNode *Node,
1979 SmallVectorImpl<SDValue> &Results) {
1980 bool IsStrict = Node->isStrictFPOpcode();
1981 unsigned OpNo = IsStrict ? 1 : 0;
1982 SDValue Src = Node->getOperand(OpNo);
1983 EVT SrcVT = Src.getValueType();
1984 EVT DstVT = Node->getValueType(0);
1985 SDLoc DL(Node);
1986
1987 // Attempt to expand using TargetLowering.
1989 SDValue Chain;
1990 if (TLI.expandUINT_TO_FP(Node, Result, Chain, DAG)) {
1991 Results.push_back(Result);
1992 if (IsStrict)
1993 Results.push_back(Chain);
1994 return;
1995 }
1996
1997 // Make sure that the SINT_TO_FP and SRL instructions are available.
1998 if (((!IsStrict && TLI.getOperationAction(ISD::SINT_TO_FP, SrcVT) ==
1999 TargetLowering::Expand) ||
2000 (IsStrict && TLI.getOperationAction(ISD::STRICT_SINT_TO_FP, SrcVT) ==
2001 TargetLowering::Expand)) ||
2002 TLI.getOperationAction(ISD::SRL, SrcVT) == TargetLowering::Expand) {
2003 if (IsStrict) {
2004 UnrollStrictFPOp(Node, Results);
2005 return;
2006 }
2007
2008 Results.push_back(DAG.UnrollVectorOp(Node));
2009 return;
2010 }
2011
2012 unsigned BW = SrcVT.getScalarSizeInBits();
2013 assert((BW == 64 || BW == 32) &&
2014 "Elements in vector-UINT_TO_FP must be 32 or 64 bits wide");
2015
2016 // If STRICT_/FMUL is not supported by the target (in case of f16) replace the
2017 // UINT_TO_FP with a larger float and round to the smaller type
2018 if ((!IsStrict && !TLI.isOperationLegalOrCustom(ISD::FMUL, DstVT)) ||
2019 (IsStrict && !TLI.isOperationLegalOrCustom(ISD::STRICT_FMUL, DstVT))) {
2020 EVT FPVT = BW == 32 ? MVT::f32 : MVT::f64;
2021 SDValue UIToFP;
2023 SDValue TargetZero = DAG.getIntPtrConstant(0, DL, /*isTarget=*/true);
2024 EVT FloatVecVT = SrcVT.changeVectorElementType(*DAG.getContext(), FPVT);
2025 if (IsStrict) {
2026 UIToFP = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {FloatVecVT, MVT::Other},
2027 {Node->getOperand(0), Src});
2028 Result = DAG.getNode(ISD::STRICT_FP_ROUND, DL, {DstVT, MVT::Other},
2029 {Node->getOperand(0), UIToFP, TargetZero});
2030 Results.push_back(Result);
2031 Results.push_back(Result.getValue(1));
2032 } else {
2033 UIToFP = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVecVT, Src);
2034 Result = DAG.getNode(ISD::FP_ROUND, DL, DstVT, UIToFP, TargetZero);
2035 Results.push_back(Result);
2036 }
2037
2038 return;
2039 }
2040
2041 SDValue HalfWord = DAG.getConstant(BW / 2, DL, SrcVT);
2042
2043 // Constants to clear the upper part of the word.
2044 // Notice that we can also use SHL+SHR, but using a constant is slightly
2045 // faster on x86.
2046 uint64_t HWMask = (BW == 64) ? 0x00000000FFFFFFFF : 0x0000FFFF;
2047 SDValue HalfWordMask = DAG.getConstant(HWMask, DL, SrcVT);
2048
2049 // Two to the power of half-word-size.
2050 SDValue TWOHW = DAG.getConstantFP(1ULL << (BW / 2), DL, DstVT);
2051
2052 // Clear upper part of LO, lower HI
2053 SDValue HI = DAG.getNode(ISD::SRL, DL, SrcVT, Src, HalfWord);
2054 SDValue LO = DAG.getNode(ISD::AND, DL, SrcVT, Src, HalfWordMask);
2055
2056 if (IsStrict) {
2057 // Convert hi and lo to floats
2058 // Convert the hi part back to the upper values
2059 // TODO: Can any fast-math-flags be set on these nodes?
2060 SDValue fHI = DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {DstVT, MVT::Other},
2061 {Node->getOperand(0), HI});
2062 fHI = DAG.getNode(ISD::STRICT_FMUL, DL, {DstVT, MVT::Other},
2063 {fHI.getValue(1), fHI, TWOHW});
2064 SDValue fLO = DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {DstVT, MVT::Other},
2065 {Node->getOperand(0), LO});
2066
2067 SDValue TF = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, fHI.getValue(1),
2068 fLO.getValue(1));
2069
2070 // Add the two halves
2071 SDValue Result =
2072 DAG.getNode(ISD::STRICT_FADD, DL, {DstVT, MVT::Other}, {TF, fHI, fLO});
2073
2074 Results.push_back(Result);
2075 Results.push_back(Result.getValue(1));
2076 return;
2077 }
2078
2079 // Convert hi and lo to floats
2080 // Convert the hi part back to the upper values
2081 // TODO: Can any fast-math-flags be set on these nodes?
2082 SDValue fHI = DAG.getNode(ISD::SINT_TO_FP, DL, DstVT, HI);
2083 fHI = DAG.getNode(ISD::FMUL, DL, DstVT, fHI, TWOHW);
2084 SDValue fLO = DAG.getNode(ISD::SINT_TO_FP, DL, DstVT, LO);
2085
2086 // Add the two halves
2087 Results.push_back(DAG.getNode(ISD::FADD, DL, DstVT, fHI, fLO));
2088}
2089
2090SDValue VectorLegalizer::ExpandFNEG(SDNode *Node) {
2091 EVT VT = Node->getValueType(0);
2092 EVT IntVT = VT.changeVectorElementTypeToInteger();
2093
2094 if (!TLI.isOperationLegalOrCustom(ISD::XOR, IntVT))
2095 return SDValue();
2096
2097 // FIXME: The FSUB check is here to force unrolling v1f64 vectors on AArch64.
2099 !VT.isScalableVector())
2100 return SDValue();
2101
2102 SDLoc DL(Node);
2103 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, IntVT, Node->getOperand(0));
2104 SDValue SignMask = DAG.getConstant(
2105 APInt::getSignMask(IntVT.getScalarSizeInBits()), DL, IntVT);
2106 SDValue Xor = DAG.getNode(ISD::XOR, DL, IntVT, Cast, SignMask);
2107 return DAG.getNode(ISD::BITCAST, DL, VT, Xor);
2108}
2109
2110SDValue VectorLegalizer::ExpandFABS(SDNode *Node) {
2111 EVT VT = Node->getValueType(0);
2112 EVT IntVT = VT.changeVectorElementTypeToInteger();
2113
2114 if (!TLI.isOperationLegalOrCustom(ISD::AND, IntVT))
2115 return SDValue();
2116
2117 // FIXME: The FSUB check is here to force unrolling v1f64 vectors on AArch64.
2119 !VT.isScalableVector())
2120 return SDValue();
2121
2122 SDLoc DL(Node);
2123 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, IntVT, Node->getOperand(0));
2124 SDValue ClearSignMask = DAG.getConstant(
2126 SDValue ClearedSign = DAG.getNode(ISD::AND, DL, IntVT, Cast, ClearSignMask);
2127 return DAG.getNode(ISD::BITCAST, DL, VT, ClearedSign);
2128}
2129
2130SDValue VectorLegalizer::ExpandFCOPYSIGN(SDNode *Node) {
2131 EVT VT = Node->getValueType(0);
2132 EVT IntVT = VT.changeVectorElementTypeToInteger();
2133
2134 if (VT != Node->getOperand(1).getValueType() ||
2135 !TLI.isOperationLegalOrCustom(ISD::AND, IntVT) ||
2136 !TLI.isOperationLegalOrCustom(ISD::OR, IntVT))
2137 return SDValue();
2138
2139 // FIXME: The FSUB check is here to force unrolling v1f64 vectors on AArch64.
2141 !VT.isScalableVector())
2142 return SDValue();
2143
2144 SDLoc DL(Node);
2145 SDValue Mag = DAG.getNode(ISD::BITCAST, DL, IntVT, Node->getOperand(0));
2146 SDValue Sign = DAG.getNode(ISD::BITCAST, DL, IntVT, Node->getOperand(1));
2147
2148 SDValue SignMask = DAG.getConstant(
2149 APInt::getSignMask(IntVT.getScalarSizeInBits()), DL, IntVT);
2150 SDValue SignBit = DAG.getNode(ISD::AND, DL, IntVT, Sign, SignMask);
2151
2152 SDValue ClearSignMask = DAG.getConstant(
2154 SDValue ClearedSign = DAG.getNode(ISD::AND, DL, IntVT, Mag, ClearSignMask);
2155
2156 SDValue CopiedSign = DAG.getNode(ISD::OR, DL, IntVT, ClearedSign, SignBit,
2158
2159 return DAG.getNode(ISD::BITCAST, DL, VT, CopiedSign);
2160}
2161
2162void VectorLegalizer::ExpandFSUB(SDNode *Node,
2163 SmallVectorImpl<SDValue> &Results) {
2164 // For floating-point values, (a-b) is the same as a+(-b). If FNEG is legal,
2165 // we can defer this to operation legalization where it will be lowered as
2166 // a+(-b).
2167 EVT VT = Node->getValueType(0);
2168 if (TLI.isOperationLegalOrCustom(ISD::FNEG, VT) &&
2170 return; // Defer to LegalizeDAG
2171
2172 if (SDValue Expanded = TLI.expandVectorNaryOpBySplitting(Node, DAG)) {
2173 Results.push_back(Expanded);
2174 return;
2175 }
2176
2177 SDValue Tmp = DAG.UnrollVectorOp(Node);
2178 Results.push_back(Tmp);
2179}
2180
2181void VectorLegalizer::ExpandSETCC(SDNode *Node,
2182 SmallVectorImpl<SDValue> &Results) {
2183 bool NeedInvert = false;
2184 bool IsVP = Node->getOpcode() == ISD::VP_SETCC;
2185 bool IsStrict = Node->getOpcode() == ISD::STRICT_FSETCC ||
2186 Node->getOpcode() == ISD::STRICT_FSETCCS;
2187 bool IsSignaling = Node->getOpcode() == ISD::STRICT_FSETCCS;
2188 unsigned Offset = IsStrict ? 1 : 0;
2189
2190 SDValue Chain = IsStrict ? Node->getOperand(0) : SDValue();
2191 SDValue LHS = Node->getOperand(0 + Offset);
2192 SDValue RHS = Node->getOperand(1 + Offset);
2193 SDValue CC = Node->getOperand(2 + Offset);
2194
2195 MVT OpVT = LHS.getSimpleValueType();
2196 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
2197
2198 if (TLI.getCondCodeAction(CCCode, OpVT) != TargetLowering::Expand) {
2199 if (IsStrict) {
2200 UnrollStrictFPOp(Node, Results);
2201 return;
2202 }
2203 Results.push_back(UnrollVSETCC(Node));
2204 return;
2205 }
2206
2207 SDValue Mask, EVL;
2208 if (IsVP) {
2209 Mask = Node->getOperand(3 + Offset);
2210 EVL = Node->getOperand(4 + Offset);
2211 }
2212
2213 SDLoc dl(Node);
2214 bool Legalized =
2215 TLI.LegalizeSetCCCondCode(DAG, Node->getValueType(0), LHS, RHS, CC, Mask,
2216 EVL, NeedInvert, dl, Chain, IsSignaling);
2217
2218 if (Legalized) {
2219 // If we expanded the SETCC by swapping LHS and RHS, or by inverting the
2220 // condition code, create a new SETCC node.
2221 if (CC.getNode()) {
2222 if (IsStrict) {
2223 LHS = DAG.getNode(Node->getOpcode(), dl, Node->getVTList(),
2224 {Chain, LHS, RHS, CC}, Node->getFlags());
2225 Chain = LHS.getValue(1);
2226 } else if (IsVP) {
2227 LHS = DAG.getNode(ISD::VP_SETCC, dl, Node->getValueType(0),
2228 {LHS, RHS, CC, Mask, EVL}, Node->getFlags());
2229 } else {
2230 LHS = DAG.getNode(ISD::SETCC, dl, Node->getValueType(0), LHS, RHS, CC,
2231 Node->getFlags());
2232 }
2233 }
2234
2235 // If we expanded the SETCC by inverting the condition code, then wrap
2236 // the existing SETCC in a NOT to restore the intended condition.
2237 if (NeedInvert) {
2238 if (!IsVP)
2239 LHS = DAG.getLogicalNOT(dl, LHS, LHS->getValueType(0));
2240 else
2241 LHS = DAG.getVPLogicalNOT(dl, LHS, Mask, EVL, LHS->getValueType(0));
2242 }
2243 } else {
2244 assert(!IsStrict && "Don't know how to expand for strict nodes.");
2245
2246 // Otherwise, SETCC for the given comparison type must be completely
2247 // illegal; expand it into a SELECT_CC.
2248 EVT VT = Node->getValueType(0);
2249 LHS = DAG.getNode(ISD::SELECT_CC, dl, VT, LHS, RHS,
2250 DAG.getBoolConstant(true, dl, VT, LHS.getValueType()),
2251 DAG.getBoolConstant(false, dl, VT, LHS.getValueType()),
2252 CC, Node->getFlags());
2253 }
2254
2255 Results.push_back(LHS);
2256 if (IsStrict)
2257 Results.push_back(Chain);
2258}
2259
2260void VectorLegalizer::ExpandUADDSUBO(SDNode *Node,
2261 SmallVectorImpl<SDValue> &Results) {
2262 SDValue Result, Overflow;
2263 TLI.expandUADDSUBO(Node, Result, Overflow, DAG);
2264 Results.push_back(Result);
2265 Results.push_back(Overflow);
2266}
2267
2268void VectorLegalizer::ExpandSADDSUBO(SDNode *Node,
2269 SmallVectorImpl<SDValue> &Results) {
2270 SDValue Result, Overflow;
2271 TLI.expandSADDSUBO(Node, Result, Overflow, DAG);
2272 Results.push_back(Result);
2273 Results.push_back(Overflow);
2274}
2275
2276void VectorLegalizer::ExpandMULO(SDNode *Node,
2277 SmallVectorImpl<SDValue> &Results) {
2278 SDValue Result, Overflow;
2279 if (!TLI.expandMULO(Node, Result, Overflow, DAG))
2280 std::tie(Result, Overflow) = DAG.UnrollVectorOverflowOp(Node);
2281
2282 Results.push_back(Result);
2283 Results.push_back(Overflow);
2284}
2285
2286void VectorLegalizer::ExpandFixedPointDiv(SDNode *Node,
2287 SmallVectorImpl<SDValue> &Results) {
2288 SDNode *N = Node;
2289 if (SDValue Expanded = TLI.expandFixedPointDiv(N->getOpcode(), SDLoc(N),
2290 N->getOperand(0), N->getOperand(1), N->getConstantOperandVal(2), DAG))
2291 Results.push_back(Expanded);
2292}
2293
2294void VectorLegalizer::ExpandStrictFPOp(SDNode *Node,
2295 SmallVectorImpl<SDValue> &Results) {
2296 if (Node->getOpcode() == ISD::STRICT_UINT_TO_FP) {
2297 ExpandUINT_TO_FLOAT(Node, Results);
2298 return;
2299 }
2300 if (Node->getOpcode() == ISD::STRICT_FP_TO_UINT) {
2301 ExpandFP_TO_UINT(Node, Results);
2302 return;
2303 }
2304
2305 if (Node->getOpcode() == ISD::STRICT_FSETCC ||
2306 Node->getOpcode() == ISD::STRICT_FSETCCS) {
2307 ExpandSETCC(Node, Results);
2308 return;
2309 }
2310
2311 UnrollStrictFPOp(Node, Results);
2312}
2313
2314void VectorLegalizer::ExpandREM(SDNode *Node,
2315 SmallVectorImpl<SDValue> &Results) {
2316 assert((Node->getOpcode() == ISD::SREM || Node->getOpcode() == ISD::UREM) &&
2317 "Expected REM node");
2318
2320 if (!TLI.expandREM(Node, Result, DAG))
2321 Result = DAG.UnrollVectorOp(Node);
2322 Results.push_back(Result);
2323}
2324
2325// Try to expand libm nodes into vector math routine calls. Callers provide the
2326// LibFunc equivalent of the passed in Node, which is used to lookup mappings
2327// within TargetLibraryInfo. The only mappings considered are those where the
2328// result and all operands are the same vector type. While predicated nodes are
2329// not supported, we will emit calls to masked routines by passing in an all
2330// true mask.
2331bool VectorLegalizer::tryExpandVecMathCall(SDNode *Node, RTLIB::Libcall LC,
2332 SmallVectorImpl<SDValue> &Results) {
2333 // Chain must be propagated but currently strict fp operations are down
2334 // converted to their none strict counterpart.
2335 assert(!Node->isStrictFPOpcode() && "Unexpected strict fp operation!");
2336
2337 RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(LC);
2338 if (LCImpl == RTLIB::Unsupported)
2339 return false;
2340
2341 EVT VT = Node->getValueType(0);
2342 const RTLIB::RuntimeLibcallsInfo &RTLCI = TLI.getRuntimeLibcallsInfo();
2343 LLVMContext &Ctx = *DAG.getContext();
2344
2345 auto [FuncTy, FuncAttrs] = RTLCI.getFunctionTy(
2346 Ctx, DAG.getSubtarget().getTargetTriple(), DAG.getDataLayout(), LCImpl);
2347
2348 SDLoc DL(Node);
2349 TargetLowering::ArgListTy Args;
2350
2351 bool HasMaskArg = RTLCI.hasVectorMaskArgument(LCImpl);
2352
2353 // Sanity check just in case function has unexpected parameters.
2354 assert(FuncTy->getNumParams() == Node->getNumOperands() + HasMaskArg &&
2355 EVT::getEVT(FuncTy->getReturnType(), true) == VT &&
2356 "mismatch in value type and call signature type");
2357
2358 for (unsigned I = 0, E = FuncTy->getNumParams(); I != E; ++I) {
2359 Type *ParamTy = FuncTy->getParamType(I);
2360
2361 if (HasMaskArg && I == E - 1) {
2362 assert(cast<VectorType>(ParamTy)->getElementType()->isIntegerTy(1) &&
2363 "unexpected vector mask type");
2364 EVT MaskVT = TLI.getSetCCResultType(DAG.getDataLayout(), Ctx, VT);
2365 Args.emplace_back(DAG.getBoolConstant(true, DL, MaskVT, VT),
2366 MaskVT.getTypeForEVT(Ctx));
2367
2368 } else {
2369 SDValue Op = Node->getOperand(I);
2370 assert(Op.getValueType() == EVT::getEVT(ParamTy, true) &&
2371 "mismatch in value type and call argument type");
2372 Args.emplace_back(Op, ParamTy);
2373 }
2374 }
2375
2376 // Emit a call to the vector function.
2377 SDValue Callee =
2378 DAG.getExternalSymbol(LCImpl, TLI.getPointerTy(DAG.getDataLayout()));
2379 CallingConv::ID CC = RTLCI.getLibcallImplCallingConv(LCImpl);
2380
2381 TargetLowering::CallLoweringInfo CLI(DAG);
2382 CLI.setDebugLoc(DL)
2383 .setChain(DAG.getEntryNode())
2384 .setLibCallee(CC, FuncTy->getReturnType(), Callee, std::move(Args));
2385
2386 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
2387 Results.push_back(CallResult.first);
2388 return true;
2389}
2390
2391void VectorLegalizer::UnrollStrictFPOp(SDNode *Node,
2392 SmallVectorImpl<SDValue> &Results) {
2393 EVT VT = Node->getValueType(0);
2394 EVT EltVT = VT.getVectorElementType();
2395 unsigned NumElems = VT.getVectorNumElements();
2396 unsigned NumOpers = Node->getNumOperands();
2397 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2398
2399 EVT TmpEltVT = EltVT;
2400 if (Node->getOpcode() == ISD::STRICT_FSETCC ||
2401 Node->getOpcode() == ISD::STRICT_FSETCCS)
2402 TmpEltVT = TLI.getSetCCResultType(DAG.getDataLayout(),
2403 *DAG.getContext(), TmpEltVT);
2404
2405 EVT ValueVTs[] = {TmpEltVT, MVT::Other};
2406 SDValue Chain = Node->getOperand(0);
2407 SDLoc dl(Node);
2408
2409 SmallVector<SDValue, 32> OpValues;
2410 SmallVector<SDValue, 32> OpChains;
2411 for (unsigned i = 0; i < NumElems; ++i) {
2413 SDValue Idx = DAG.getVectorIdxConstant(i, dl);
2414
2415 // The Chain is the first operand.
2416 Opers.push_back(Chain);
2417
2418 // Now process the remaining operands.
2419 for (unsigned j = 1; j < NumOpers; ++j) {
2420 SDValue Oper = Node->getOperand(j);
2421 EVT OperVT = Oper.getValueType();
2422
2423 if (OperVT.isVector())
2424 Oper = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
2425 OperVT.getVectorElementType(), Oper, Idx);
2426
2427 Opers.push_back(Oper);
2428 }
2429
2430 SDValue ScalarOp = DAG.getNode(Node->getOpcode(), dl, ValueVTs, Opers);
2431 SDValue ScalarResult = ScalarOp.getValue(0);
2432 SDValue ScalarChain = ScalarOp.getValue(1);
2433
2434 if (Node->getOpcode() == ISD::STRICT_FSETCC ||
2435 Node->getOpcode() == ISD::STRICT_FSETCCS)
2436 ScalarResult = DAG.getSelect(dl, EltVT, ScalarResult,
2437 DAG.getAllOnesConstant(dl, EltVT),
2438 DAG.getConstant(0, dl, EltVT));
2439
2440 OpValues.push_back(ScalarResult);
2441 OpChains.push_back(ScalarChain);
2442 }
2443
2444 SDValue Result = DAG.getBuildVector(VT, dl, OpValues);
2445 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OpChains);
2446
2447 Results.push_back(Result);
2448 Results.push_back(NewChain);
2449}
2450
2451SDValue VectorLegalizer::UnrollVSETCC(SDNode *Node) {
2452 EVT VT = Node->getValueType(0);
2453 unsigned NumElems = VT.getVectorNumElements();
2454 EVT EltVT = VT.getVectorElementType();
2455 SDValue LHS = Node->getOperand(0);
2456 SDValue RHS = Node->getOperand(1);
2457 SDValue CC = Node->getOperand(2);
2458 EVT TmpEltVT = LHS.getValueType().getVectorElementType();
2459 SDLoc dl(Node);
2460 SmallVector<SDValue, 8> Ops(NumElems);
2461 for (unsigned i = 0; i < NumElems; ++i) {
2462 SDValue LHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
2463 DAG.getVectorIdxConstant(i, dl));
2464 SDValue RHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
2465 DAG.getVectorIdxConstant(i, dl));
2466 // FIXME: We should use i1 setcc + boolext here, but it causes regressions.
2467 Ops[i] = DAG.getNode(ISD::SETCC, dl,
2469 *DAG.getContext(), TmpEltVT),
2470 LHSElem, RHSElem, CC);
2471 Ops[i] = DAG.getSelect(dl, EltVT, Ops[i],
2472 DAG.getBoolConstant(true, dl, EltVT, VT),
2473 DAG.getConstant(0, dl, EltVT));
2474 }
2475 return DAG.getBuildVector(VT, dl, Ops);
2476}
2477
2479 return VectorLegalizer(*this).Run();
2480}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file defines the DenseMap class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static void createBSWAPShuffleMask(EVT VT, SmallVectorImpl< int > &ShuffleMask)
#define I(x, y, z)
Definition MD5.cpp:57
#define T
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
BinaryOperator * Mul
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition APInt.h:230
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:210
bool isBigEndian() const
Definition DataLayout.h:216
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
size_t size() const
Definition Function.h:858
LLVM_ABI RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Return the lowering's selection of implementation call for Call.
const Triple & getTargetTriple() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition ArrayRef.h:298
Represents one node in the SelectionDAG.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI bool LegalizeVectors()
This transforms the SelectionDAG into a SelectionDAG that only uses vector math operations supported ...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
LLVM_ABI SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false, SDNodeFlags Flags={})
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI std::pair< SDValue, SDValue > UnrollVectorOverflowOp(SDNode *N, unsigned ResNE=0)
Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
allnodes_const_iterator allnodes_end() const
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask, SDValue EVL, EVT VT)
Create a vector-predicated logical NOT operation as (VP_XOR Val, BooleanOne, Mask,...
const LibcallLoweringInfo & getLibcalls() const
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and a assign a unique node id for each node in the DAG based on th...
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
LLVM_ABI SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
ilist< SDNode >::iterator allnodes_iterator
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void resize(size_type N)
void push_back(const T &Elt)
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool, EVT ValVT) const
Promote the given target boolean to a target boolean of the given type.
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
bool isStrictFPEnabled() const
Return true if the target support strict float operation.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeAction getPartialReduceMLAAction(unsigned Opc, EVT AccVT, EVT InputVT) const
Return how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type InputVT should be treated.
LegalizeAction getLoadAction(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace, unsigned ExtType, bool Atomic) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const
If the action for this operation is to promote, this method returns the ValueType to promote to.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
const RTLIB::RuntimeLibcallsInfo & getRuntimeLibcallsInfo() const
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
bool expandMultipleResultFPLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, SDNode *Node, SmallVectorImpl< SDValue > &Results, std::optional< unsigned > CallRetResNo={}) const
Expands a node with multiple results to an FP or vector libcall.
SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
SDValue expandFCANONICALIZE(SDNode *Node, SelectionDAG &DAG) const
Expand FCANONICALIZE to FMUL with 1.
SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_UNDEF nodes.
SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_UNDEF nodes.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
SDValue expandCLMUL(SDNode *N, SelectionDAG &DAG) const
Expand carryless multiply.
SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
SDValue expandCttzElts(SDNode *Node, SelectionDAG &DAG) const
Expand a CTTZ_ELTS or CTTZ_ELTS_ZERO_POISON by calculating (VL - i) for each active lane (i),...
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
SDValue expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimumnum/fmaximumnum into multiple comparison with selects.
SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
SDValue expandVectorNaryOpBySplitting(SDNode *Node, SelectionDAG &DAG) const
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimum/fmaximum into multiple comparison with selects.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]CMP.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
SDValue expandVectorFindLastActive(SDNode *N, SelectionDAG &DAG) const
Expand VECTOR_FIND_LAST_ACTIVE nodes.
SDValue expandPartialReduceMLA(SDNode *Node, SelectionDAG &DAG) const
Expands PARTIAL_REDUCE_S/UMLA nodes to a series of simpler operations, consisting of zext/sext,...
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:819
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition ISDOpcodes.h:261
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:788
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
Definition ISDOpcodes.h:511
@ PARTIAL_REDUCE_SMLA
PARTIAL_REDUCE_[U|S]MLA(Accumulator, Input1, Input2) The partial reduction nodes sign or zero extend ...
@ LOOP_DEPENDENCE_RAW_MASK
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:779
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
Definition ISDOpcodes.h:394
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition ISDOpcodes.h:400
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:853
@ CTTZ_ELTS
Returns the number of trailing (least significant) zero elements in a vector.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:518
@ VECTOR_FIND_LAST_ACTIVE
Finds the index of the last active mask element Operands: Mask.
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign...
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x),...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:880
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:747
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
Definition ISDOpcodes.h:910
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:280
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
Definition ISDOpcodes.h:515
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:993
@ CLMUL
Carry-less multiplication operations.
Definition ISDOpcodes.h:774
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
Definition ISDOpcodes.h:407
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
Definition ISDOpcodes.h:438
@ CONVERT_FROM_ARBITRARY_FP
CONVERT_FROM_ARBITRARY_FP - This operator converts from an arbitrary floating-point represented as an...
@ PARTIAL_REDUCE_UMLA
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:844
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
Definition ISDOpcodes.h:715
@ STRICT_UINT_TO_FP
Definition ISDOpcodes.h:485
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition ISDOpcodes.h:787
@ PARTIAL_REDUCE_FMLA
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
Definition ISDOpcodes.h:352
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
Definition ISDOpcodes.h:691
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition ISDOpcodes.h:541
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:374
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:796
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition ISDOpcodes.h:672
@ GET_ACTIVE_LANE_MASK
GET_ACTIVE_LANE_MASK - this corresponds to the llvm.get.active.lane.mask intrinsic.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:348
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:704
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:765
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:576
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:850
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:811
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition ISDOpcodes.h:386
@ SMULO
Same for multiplication.
Definition ISDOpcodes.h:356
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
Definition ISDOpcodes.h:899
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:888
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:727
@ MASKED_UDIV
Masked vector arithmetic that returns poison on disabled lanes.
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition ISDOpcodes.h:413
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:978
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition ISDOpcodes.h:805
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
Definition ISDOpcodes.h:484
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additiona...
@ STRICT_FP_TO_UINT
Definition ISDOpcodes.h:478
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
Definition ISDOpcodes.h:500
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:477
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:926
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:505
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:739
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
Definition ISDOpcodes.h:735
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
Definition ISDOpcodes.h:710
@ STRICT_FADD
Constrained versions of the binary floating point operators.
Definition ISDOpcodes.h:427
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition ISDOpcodes.h:53
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:959
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
Definition ISDOpcodes.h:699
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
Definition ISDOpcodes.h:921
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition ISDOpcodes.h:945
@ VECREDUCE_FMINIMUM
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:856
@ VECREDUCE_SEQ_FMUL
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition ISDOpcodes.h:62
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:534
@ PARTIAL_REDUCE_SUMLA
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:365
@ CTTZ_ELTS_ZERO_POISON
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
Definition ISDOpcodes.h:722
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:556
@ LOOP_DEPENDENCE_WAR_MASK
The llvm.loop.dependence.
LLVM_ABI NodeType getUnmaskedBinOpOpcode(unsigned MaskedOpc)
Given a MaskedOpc of ISD::MASKED_(U|S)(DIV|REM), returns the unmasked ISD::(U|S)(DIV|REM).
LLVM_ABI std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
LLVM_ABI std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LLVM_ABI bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
LLVM_ABI Libcall getREM(EVT VT)
LLVM_ABI Libcall getSINCOSPI(EVT RetVT)
getSINCOSPI - Return the SINCOSPI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getMODF(EVT VT)
getMODF - Return the MODF_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getCBRT(EVT RetVT)
getCBRT - Return the CBRT_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getPOW(EVT RetVT)
getPOW - Return the POW_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSINCOS(EVT RetVT)
getSINCOS - Return the SINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
MutableArrayRef(T &OneElt) -> MutableArrayRef< T >
@ Xor
Bitwise or logical XOR of integers.
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
#define N
Extended Value Type.
Definition ValueTypes.h:35
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition ValueTypes.h:90
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:70
ElementCount getVectorElementCount() const
Definition ValueTypes.h:358
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:393
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
Definition ValueTypes.h:98
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
bool isScalableVT() const
Return true if the type is a scalable type.
Definition ValueTypes.h:195
bool isFixedLengthVector() const
Definition ValueTypes.h:189
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:176
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:331
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition ValueTypes.h:182
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:336
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:344
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
Definition ValueTypes.h:316
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
CallingConv::ID getLibcallImplCallingConv(RTLIB::LibcallImpl Call) const
Get the CallingConv that should be used for the specified libcall.
std::pair< FunctionType *, AttributeList > getFunctionTy(LLVMContext &Ctx, const Triple &TT, const DataLayout &DL, RTLIB::LibcallImpl LibcallImpl) const
static bool hasVectorMaskArgument(RTLIB::LibcallImpl Impl)
Returns true if the function has a vector mask argument, which is assumed to be the last argument.