LegalizeDAG.cpp
1//===- LegalizeDAG.cpp - Implement SelectionDAG::Legalize -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the SelectionDAG::Legalize method.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
15#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/SetVector.h"
19#include "llvm/ADT/SmallSet.h"
36#include "llvm/IR/CallingConv.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/Function.h"
41#include "llvm/IR/Metadata.h"
42#include "llvm/IR/Type.h"
45#include "llvm/Support/Debug.h"
51#include <cassert>
52#include <cstdint>
53#include <tuple>
54#include <utility>
55
56using namespace llvm;
57
58#define DEBUG_TYPE "legalizedag"
59
60namespace {
61
62/// Keeps track of state when getting the sign of a floating-point value as an
63/// integer.
64struct FloatSignAsInt {
65 EVT FloatVT;
66 SDValue Chain;
67 SDValue FloatPtr;
68 SDValue IntPtr;
69 MachinePointerInfo IntPointerInfo;
70 MachinePointerInfo FloatPointerInfo;
71 SDValue IntValue;
72 APInt SignMask;
73 uint8_t SignBit;
74};
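// Illustrative use of this state (see getSignAsIntValue below): for an f64 on
// a target with a legal i64, IntValue is simply the bitcast of the float with
// SignBit == 63 and SignMask == 0x8000000000000000; when no such integer type
// is legal, only the byte holding the sign bit is reloaded from a stack slot,
// so SignBit == 7 and Chain/FloatPtr/IntPtr track the spill.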
75
76//===----------------------------------------------------------------------===//
77/// This takes an arbitrary SelectionDAG as input and
78/// hacks on it until the target machine can handle it. This involves
79/// eliminating value sizes the machine cannot handle (promoting small sizes to
80/// large sizes or splitting up large values into small values) as well as
81/// eliminating operations the machine cannot handle.
82///
83/// This code also does a small amount of optimization and recognition of idioms
84/// as part of its processing. For example, if a target does not support a
85/// 'setcc' instruction efficiently, but does support a 'brcc' instruction, this
86/// will attempt to merge the setcc and branch instructions into brcc's.
87class SelectionDAGLegalize {
88 const TargetMachine &TM;
89 const TargetLowering &TLI;
90 SelectionDAG &DAG;
91
92 /// The set of nodes which have already been legalized. We hold a
93 /// reference to it in order to update as necessary on node deletion.
94 SmallPtrSetImpl<SDNode *> &LegalizedNodes;
95
96 /// A set of all the nodes updated during legalization.
97 SmallSetVector<SDNode *, 16> *UpdatedNodes;
98
99 EVT getSetCCResultType(EVT VT) const {
100 return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
101 }
102
103 // Libcall insertion helpers.
104
105public:
106 SelectionDAGLegalize(SelectionDAG &DAG,
107 SmallPtrSetImpl<SDNode *> &LegalizedNodes,
108 SmallSetVector<SDNode *, 16> *UpdatedNodes = nullptr)
109 : TM(DAG.getTarget()), TLI(DAG.getTargetLoweringInfo()), DAG(DAG),
110 LegalizedNodes(LegalizedNodes), UpdatedNodes(UpdatedNodes) {}
111
112 /// Legalizes the given operation.
113 void LegalizeOp(SDNode *Node);
114
115private:
116 SDValue OptimizeFloatStore(StoreSDNode *ST);
117
118 void LegalizeLoadOps(SDNode *Node);
119 void LegalizeStoreOps(SDNode *Node);
120
121 SDValue ExpandINSERT_VECTOR_ELT(SDValue Op);
122
123 /// Return a vector shuffle operation which
124/// performs the same shuffle in terms of order of result bytes, but on a type
125 /// whose vector element type is narrower than the original shuffle type.
126 /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
127 SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, const SDLoc &dl,
128 SDValue N1, SDValue N2,
129 ArrayRef<int> Mask) const;
130
131 std::pair<SDValue, SDValue> ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
132 TargetLowering::ArgListTy &&Args, bool isSigned);
133 std::pair<SDValue, SDValue> ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
134
135 void ExpandFrexpLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
136 void ExpandFPLibCall(SDNode *Node, RTLIB::Libcall LC,
137 SmallVectorImpl<SDValue> &Results);
138 void ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
139 RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
140 RTLIB::Libcall Call_F128,
141 RTLIB::Libcall Call_PPCF128,
142 SmallVectorImpl<SDValue> &Results);
143 SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
144 RTLIB::Libcall Call_I8,
145 RTLIB::Libcall Call_I16,
146 RTLIB::Libcall Call_I32,
147 RTLIB::Libcall Call_I64,
148 RTLIB::Libcall Call_I128);
149 void ExpandArgFPLibCall(SDNode *Node,
150 RTLIB::Libcall Call_F32, RTLIB::Libcall Call_F64,
151 RTLIB::Libcall Call_F80, RTLIB::Libcall Call_F128,
152 RTLIB::Libcall Call_PPCF128,
153 SmallVectorImpl<SDValue> &Results);
154 void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
155 void ExpandSinCosLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
156
157 SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT,
158 const SDLoc &dl);
159 SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT,
160 const SDLoc &dl, SDValue ChainIn);
161 SDValue ExpandBUILD_VECTOR(SDNode *Node);
162 SDValue ExpandSPLAT_VECTOR(SDNode *Node);
163 SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
164 void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
165 SmallVectorImpl<SDValue> &Results);
166 void getSignAsIntValue(FloatSignAsInt &State, const SDLoc &DL,
167 SDValue Value) const;
168 SDValue modifySignAsInt(const FloatSignAsInt &State, const SDLoc &DL,
169 SDValue NewIntValue) const;
170 SDValue ExpandFCOPYSIGN(SDNode *Node) const;
171 SDValue ExpandFABS(SDNode *Node) const;
172 SDValue ExpandFNEG(SDNode *Node) const;
173 SDValue expandLdexp(SDNode *Node) const;
174 SDValue expandFrexp(SDNode *Node) const;
175
176 SDValue ExpandLegalINT_TO_FP(SDNode *Node, SDValue &Chain);
177 void PromoteLegalINT_TO_FP(SDNode *N, const SDLoc &dl,
178 SmallVectorImpl<SDValue> &Results);
179 void PromoteLegalFP_TO_INT(SDNode *N, const SDLoc &dl,
180 SmallVectorImpl<SDValue> &Results);
181 SDValue PromoteLegalFP_TO_INT_SAT(SDNode *Node, const SDLoc &dl);
182
183 /// Implements vector reduce operation promotion.
184 ///
185 /// All vector operands are promoted to a vector type with larger element
186 /// type, and the start value is promoted to a larger scalar type. Then the
187 /// result is truncated back to the original scalar type.
188 SDValue PromoteReduction(SDNode *Node);
189
190 SDValue ExpandPARITY(SDValue Op, const SDLoc &dl);
191
192 SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
193 SDValue ExpandInsertToVectorThroughStack(SDValue Op);
194 SDValue ExpandVectorBuildThroughStack(SDNode* Node);
195
196 SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP);
197 SDValue ExpandConstant(ConstantSDNode *CP);
198
199 // if ExpandNode returns false, LegalizeOp falls back to ConvertNodeToLibcall
200 bool ExpandNode(SDNode *Node);
201 void ConvertNodeToLibcall(SDNode *Node);
202 void PromoteNode(SDNode *Node);
203
204public:
205 // Node replacement helpers
206
207 void ReplacedNode(SDNode *N) {
208 LegalizedNodes.erase(N);
209 if (UpdatedNodes)
210 UpdatedNodes->insert(N);
211 }
212
213 void ReplaceNode(SDNode *Old, SDNode *New) {
214 LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
215 dbgs() << " with: "; New->dump(&DAG));
216
217 assert(Old->getNumValues() == New->getNumValues() &&
218 "Replacing one node with another that produces a different number "
219 "of values!");
220 DAG.ReplaceAllUsesWith(Old, New);
221 if (UpdatedNodes)
222 UpdatedNodes->insert(New);
223 ReplacedNode(Old);
224 }
225
226 void ReplaceNode(SDValue Old, SDValue New) {
227 LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
228 dbgs() << " with: "; New->dump(&DAG));
229
230 DAG.ReplaceAllUsesWith(Old, New);
231 if (UpdatedNodes)
232 UpdatedNodes->insert(New.getNode());
233 ReplacedNode(Old.getNode());
234 }
235
236 void ReplaceNode(SDNode *Old, const SDValue *New) {
237 LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG));
238
239 DAG.ReplaceAllUsesWith(Old, New);
240 for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i) {
241 LLVM_DEBUG(dbgs() << (i == 0 ? " with: " : " and: ");
242 New[i]->dump(&DAG));
243 if (UpdatedNodes)
244 UpdatedNodes->insert(New[i].getNode());
245 }
246 ReplacedNode(Old);
247 }
248
249 void ReplaceNodeWithValue(SDValue Old, SDValue New) {
250 LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
251 dbgs() << " with: "; New->dump(&DAG));
252
253 DAG.ReplaceAllUsesOfValueWith(Old, New);
254 if (UpdatedNodes)
255 UpdatedNodes->insert(New.getNode());
256 ReplacedNode(Old.getNode());
257 }
258};
259
260} // end anonymous namespace
261
262// Helper function that generates an MMO that considers the alignment of the
263// stack, and the size of the stack object
264static MachineMemOperand *getStackAlignedMMO(SDValue StackPtr,
265 MachineFunction &MF,
266 bool isObjectScalable) {
267 auto &MFI = MF.getFrameInfo();
268 int FI = cast<FrameIndexSDNode>(StackPtr)->getIndex();
269 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
270 LocationSize ObjectSize = isObjectScalable
271 ? LocationSize::beforeOrAfterPointer()
272 : LocationSize::precise(MFI.getObjectSize(FI));
273 return MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
274 ObjectSize, MFI.getObjectAlign(FI));
275}
276
277/// Return a vector shuffle operation which
278/// performs the same shuffle in terms of order of result bytes, but on a type
279/// whose vector element type is narrower than the original shuffle type.
280/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
281SDValue SelectionDAGLegalize::ShuffleWithNarrowerEltType(
282 EVT NVT, EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
283 ArrayRef<int> Mask) const {
284 unsigned NumMaskElts = VT.getVectorNumElements();
285 unsigned NumDestElts = NVT.getVectorNumElements();
286 unsigned NumEltsGrowth = NumDestElts / NumMaskElts;
287
288 assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");
289
290 if (NumEltsGrowth == 1)
291 return DAG.getVectorShuffle(NVT, dl, N1, N2, Mask);
292
293 SmallVector<int, 8> NewMask;
294 for (unsigned i = 0; i != NumMaskElts; ++i) {
295 int Idx = Mask[i];
296 for (unsigned j = 0; j != NumEltsGrowth; ++j) {
297 if (Idx < 0)
298 NewMask.push_back(-1);
299 else
300 NewMask.push_back(Idx * NumEltsGrowth + j);
301 }
302 }
303 assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
304 assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
305 return DAG.getVectorShuffle(NVT, dl, N1, N2, NewMask);
306}
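// Illustrative expansion of the loop above: widening a v4i32 mask <0, -1, 3, 1>
// to v8i16 yields <0, 1, -1, -1, 6, 7, 2, 3>; every index i becomes the pair
// (i * NumEltsGrowth, i * NumEltsGrowth + 1), and undef (-1) entries stay undef
// for each grown slot.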
307
308/// Expands the ConstantFP node to an integer constant or
309/// a load from the constant pool.
310SDValue
311SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) {
312 bool Extend = false;
313 SDLoc dl(CFP);
314
315 // If an FP immediate is precise when represented as a float and if the
316 // target can do an extending load from float to double, we put it into
317 // the constant pool as a float, even if it is statically typed as a
318 // double. This shrinks FP constants and canonicalizes them for targets where
319 // an FP extending load is the same cost as a normal load (such as on the x87
320 // fp stack or PPC FP unit).
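 // For example (illustrative), an f64 constant such as 2.5 that is exactly
 // representable as an f32 can be emitted as a 4-byte constant-pool entry and
 // materialized with an extending load, provided the checks below (value
 // exactness, EXTLOAD legality, ShouldShrinkFPConstant) all pass.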
321 EVT VT = CFP->getValueType(0);
322 ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
323 if (!UseCP) {
324 assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
325 return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(), dl,
326 (VT == MVT::f64) ? MVT::i64 : MVT::i32);
327 }
328
329 APFloat APF = CFP->getValueAPF();
330 EVT OrigVT = VT;
331 EVT SVT = VT;
332
333 // We don't want to shrink SNaNs. Converting the SNaN back to its real type
334 // can cause it to be changed into a QNaN on some platforms (e.g. on SystemZ).
335 if (!APF.isSignaling()) {
336 while (SVT != MVT::f32 && SVT != MVT::f16 && SVT != MVT::bf16) {
337 SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
338 if (ConstantFPSDNode::isValueValidForType(SVT, APF) &&
339 // Only do this if the target has a native EXTLOAD instruction from
340 // smaller type.
341 TLI.isLoadExtLegal(ISD::EXTLOAD, OrigVT, SVT) &&
342 TLI.ShouldShrinkFPConstant(OrigVT)) {
343 Type *SType = SVT.getTypeForEVT(*DAG.getContext());
344 LLVMC = cast<ConstantFP>(ConstantFoldCastOperand(
345 Instruction::FPTrunc, LLVMC, SType, DAG.getDataLayout()));
346 VT = SVT;
347 Extend = true;
348 }
349 }
350 }
351
352 SDValue CPIdx =
353 DAG.getConstantPool(LLVMC, TLI.getPointerTy(DAG.getDataLayout()));
354 Align Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlign();
355 if (Extend) {
356 SDValue Result = DAG.getExtLoad(
357 ISD::EXTLOAD, dl, OrigVT, DAG.getEntryNode(), CPIdx,
358 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), VT,
359 Alignment);
360 return Result;
361 }
362 SDValue Result = DAG.getLoad(
363 OrigVT, dl, DAG.getEntryNode(), CPIdx,
364 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Alignment);
365 return Result;
366}
367
368/// Expands the Constant node to a load from the constant pool.
369SDValue SelectionDAGLegalize::ExpandConstant(ConstantSDNode *CP) {
370 SDLoc dl(CP);
371 EVT VT = CP->getValueType(0);
372 SDValue CPIdx = DAG.getConstantPool(CP->getConstantIntValue(),
373 TLI.getPointerTy(DAG.getDataLayout()));
374 Align Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlign();
375 SDValue Result = DAG.getLoad(
376 VT, dl, DAG.getEntryNode(), CPIdx,
377 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Alignment);
378 return Result;
379}
380
381SDValue SelectionDAGLegalize::ExpandINSERT_VECTOR_ELT(SDValue Op) {
382 SDValue Vec = Op.getOperand(0);
383 SDValue Val = Op.getOperand(1);
384 SDValue Idx = Op.getOperand(2);
385 SDLoc dl(Op);
386
387 if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
388 // SCALAR_TO_VECTOR requires that the type of the value being inserted
389 // match the element type of the vector being created, except for
390 // integers in which case the inserted value can be over width.
391 EVT EltVT = Vec.getValueType().getVectorElementType();
392 if (Val.getValueType() == EltVT ||
393 (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
394 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
395 Vec.getValueType(), Val);
396
397 unsigned NumElts = Vec.getValueType().getVectorNumElements();
398 // We generate a shuffle of InVec and ScVec, so the shuffle mask
399 // should be 0,1,2,3,4,5... with the appropriate element replaced with
400 // elt 0 of the RHS.
401 SmallVector<int, 8> ShufOps;
402 for (unsigned i = 0; i != NumElts; ++i)
403 ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);
404
405 return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec, ShufOps);
406 }
407 }
408 return ExpandInsertToVectorThroughStack(Op);
409}
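// Illustrative mask from the code above: inserting at constant index 2 of a
// 4-element vector produces the shuffle mask <0, 1, 4, 3>, i.e. lanes 0, 1 and
// 3 come from the original vector and lane 2 takes element 0 of the
// SCALAR_TO_VECTOR node built from Val.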
410
411SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
412 if (!ISD::isNormalStore(ST))
413 return SDValue();
414
415 LLVM_DEBUG(dbgs() << "Optimizing float store operations\n");
416 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
417 // FIXME: move this to the DAG Combiner! Note that we can't regress due
418 // to phase ordering between legalized code and the dag combiner. This
419 // probably means that we need to integrate dag combiner and legalizer
420 // together.
421 // We generally can't do this one for long doubles.
422 SDValue Chain = ST->getChain();
423 SDValue Ptr = ST->getBasePtr();
424 SDValue Value = ST->getValue();
425 MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
426 AAMDNodes AAInfo = ST->getAAInfo();
427 SDLoc dl(ST);
428
429 // Don't optimise TargetConstantFP
430 if (Value.getOpcode() == ISD::TargetConstantFP)
431 return SDValue();
432
433 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
434 if (CFP->getValueType(0) == MVT::f32 &&
435 TLI.isTypeLegal(MVT::i32)) {
436 SDValue Con = DAG.getConstant(CFP->getValueAPF().
437 bitcastToAPInt().zextOrTrunc(32),
438 SDLoc(CFP), MVT::i32);
439 return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
440 ST->getOriginalAlign(), MMOFlags, AAInfo);
441 }
442
443 if (CFP->getValueType(0) == MVT::f64 &&
444 !TLI.isFPImmLegal(CFP->getValueAPF(), MVT::f64)) {
445 // If this target supports 64-bit registers, do a single 64-bit store.
446 if (TLI.isTypeLegal(MVT::i64)) {
447 SDValue Con = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
448 zextOrTrunc(64), SDLoc(CFP), MVT::i64);
449 return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
450 ST->getOriginalAlign(), MMOFlags, AAInfo);
451 }
452
453 if (TLI.isTypeLegal(MVT::i32) && !ST->isVolatile()) {
454 // Otherwise, if the target supports 32-bit registers, use 2 32-bit
455 // stores. If the target supports neither 32- nor 64-bits, this
456 // xform is certainly not worth it.
457 const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
458 SDValue Lo = DAG.getConstant(IntVal.trunc(32), dl, MVT::i32);
459 SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), dl, MVT::i32);
460 if (DAG.getDataLayout().isBigEndian())
461 std::swap(Lo, Hi);
462
463 Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(),
464 ST->getOriginalAlign(), MMOFlags, AAInfo);
465 Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(4), dl);
466 Hi = DAG.getStore(Chain, dl, Hi, Ptr,
467 ST->getPointerInfo().getWithOffset(4),
468 ST->getOriginalAlign(), MMOFlags, AAInfo);
469
470 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
471 }
472 }
473 }
474 return SDValue();
475}
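// Illustrative outcome of the expansion above: an f64 constant store on a
// target where only i32 is legal (and the FP immediate is not) becomes two
// i32 stores of the constant's low and high words (swapped on big-endian
// targets), joined by a TokenFactor node.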
476
477void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
478 StoreSDNode *ST = cast<StoreSDNode>(Node);
479 SDValue Chain = ST->getChain();
480 SDValue Ptr = ST->getBasePtr();
481 SDLoc dl(Node);
482
483 MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
484 AAMDNodes AAInfo = ST->getAAInfo();
485
486 if (!ST->isTruncatingStore()) {
487 LLVM_DEBUG(dbgs() << "Legalizing store operation\n");
488 if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
489 ReplaceNode(ST, OptStore);
490 return;
491 }
492
493 SDValue Value = ST->getValue();
494 MVT VT = Value.getSimpleValueType();
495 switch (TLI.getOperationAction(ISD::STORE, VT)) {
496 default: llvm_unreachable("This action is not supported yet!");
497 case TargetLowering::Legal: {
498 // If this is an unaligned store and the target doesn't support it,
499 // expand it.
500 EVT MemVT = ST->getMemoryVT();
501 const DataLayout &DL = DAG.getDataLayout();
502 if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
503 *ST->getMemOperand())) {
504 LLVM_DEBUG(dbgs() << "Expanding unsupported unaligned store\n");
505 SDValue Result = TLI.expandUnalignedStore(ST, DAG);
506 ReplaceNode(SDValue(ST, 0), Result);
507 } else
508 LLVM_DEBUG(dbgs() << "Legal store\n");
509 break;
510 }
511 case TargetLowering::Custom: {
512 LLVM_DEBUG(dbgs() << "Trying custom lowering\n");
513 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
514 if (Res && Res != SDValue(Node, 0))
515 ReplaceNode(SDValue(Node, 0), Res);
516 return;
517 }
518 case TargetLowering::Promote: {
519 MVT NVT = TLI.getTypeToPromoteTo(ISD::STORE, VT);
520 assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
521 "Can only promote stores to same size type");
522 Value = DAG.getNode(ISD::BITCAST, dl, NVT, Value);
523 SDValue Result = DAG.getStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
524 ST->getOriginalAlign(), MMOFlags, AAInfo);
525 ReplaceNode(SDValue(Node, 0), Result);
526 break;
527 }
528 }
529 return;
530 }
531
532 LLVM_DEBUG(dbgs() << "Legalizing truncating store operations\n");
533 SDValue Value = ST->getValue();
534 EVT StVT = ST->getMemoryVT();
535 TypeSize StWidth = StVT.getSizeInBits();
536 TypeSize StSize = StVT.getStoreSizeInBits();
537 auto &DL = DAG.getDataLayout();
538
539 if (StWidth != StSize) {
540 // Promote to a byte-sized store with upper bits zero if not
541 // storing an integral number of bytes. For example, promote
542 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
543 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), StSize.getFixedValue());
544 Value = DAG.getZeroExtendInReg(Value, dl, StVT);
545 SDValue Result =
546 DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), NVT,
547 ST->getOriginalAlign(), MMOFlags, AAInfo);
548 ReplaceNode(SDValue(Node, 0), Result);
549 } else if (!StVT.isVector() && !isPowerOf2_64(StWidth.getFixedValue())) {
550 // If not storing a power-of-2 number of bits, expand as two stores.
551 assert(!StVT.isVector() && "Unsupported truncstore!");
552 unsigned StWidthBits = StWidth.getFixedValue();
553 unsigned LogStWidth = Log2_32(StWidthBits);
554 assert(LogStWidth < 32);
555 unsigned RoundWidth = 1 << LogStWidth;
556 assert(RoundWidth < StWidthBits);
557 unsigned ExtraWidth = StWidthBits - RoundWidth;
558 assert(ExtraWidth < RoundWidth);
559 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
560 "Store size not an integral number of bytes!");
561 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
562 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
563 SDValue Lo, Hi;
564 unsigned IncrementSize;
565
566 if (DL.isLittleEndian()) {
567 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
568 // Store the bottom RoundWidth bits.
569 Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
570 RoundVT, ST->getOriginalAlign(), MMOFlags, AAInfo);
571
572 // Store the remaining ExtraWidth bits.
573 IncrementSize = RoundWidth / 8;
574 Ptr =
575 DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(IncrementSize), dl);
576 Hi = DAG.getNode(
577 ISD::SRL, dl, Value.getValueType(), Value,
578 DAG.getConstant(RoundWidth, dl,
579 TLI.getShiftAmountTy(Value.getValueType(), DL)));
580 Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr,
581 ST->getPointerInfo().getWithOffset(IncrementSize),
582 ExtraVT, ST->getOriginalAlign(), MMOFlags, AAInfo);
583 } else {
584 // Big endian - avoid unaligned stores.
585 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
586 // Store the top RoundWidth bits.
587 Hi = DAG.getNode(
588 ISD::SRL, dl, Value.getValueType(), Value,
589 DAG.getConstant(ExtraWidth, dl,
590 TLI.getShiftAmountTy(Value.getValueType(), DL)));
591 Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, ST->getPointerInfo(), RoundVT,
592 ST->getOriginalAlign(), MMOFlags, AAInfo);
593
594 // Store the remaining ExtraWidth bits.
595 IncrementSize = RoundWidth / 8;
596 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
597 DAG.getConstant(IncrementSize, dl,
598 Ptr.getValueType()));
599 Lo = DAG.getTruncStore(Chain, dl, Value, Ptr,
600 ST->getPointerInfo().getWithOffset(IncrementSize),
601 ExtraVT, ST->getOriginalAlign(), MMOFlags, AAInfo);
602 }
603
604 // The order of the stores doesn't matter.
605 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
606 ReplaceNode(SDValue(Node, 0), Result);
607 } else {
608 switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
609 default: llvm_unreachable("This action is not supported yet!");
610 case TargetLowering::Legal: {
611 EVT MemVT = ST->getMemoryVT();
612 // If this is an unaligned store and the target doesn't support it,
613 // expand it.
614 if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
615 *ST->getMemOperand())) {
616 SDValue Result = TLI.expandUnalignedStore(ST, DAG);
617 ReplaceNode(SDValue(ST, 0), Result);
618 }
619 break;
620 }
621 case TargetLowering::Custom: {
622 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
623 if (Res && Res != SDValue(Node, 0))
624 ReplaceNode(SDValue(Node, 0), Res);
625 return;
626 }
627 case TargetLowering::Expand:
628 assert(!StVT.isVector() &&
629 "Vector Stores are handled in LegalizeVectorOps");
630
631 SDValue Result;
632
633 // TRUNCSTORE:i16 i32 -> STORE i16
634 if (TLI.isTypeLegal(StVT)) {
635 Value = DAG.getNode(ISD::TRUNCATE, dl, StVT, Value);
636 Result = DAG.getStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
637 ST->getOriginalAlign(), MMOFlags, AAInfo);
638 } else {
639 // The in-memory type isn't legal. Truncate to the type it would promote
640 // to, and then do a truncstore.
641 Value = DAG.getNode(ISD::TRUNCATE, dl,
642 TLI.getTypeToTransformTo(*DAG.getContext(), StVT),
643 Value);
644 Result =
645 DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), StVT,
646 ST->getOriginalAlign(), MMOFlags, AAInfo);
647 }
648
649 ReplaceNode(SDValue(Node, 0), Result);
650 break;
651 }
652 }
653}
654
655void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
656 LoadSDNode *LD = cast<LoadSDNode>(Node);
657 SDValue Chain = LD->getChain(); // The chain.
658 SDValue Ptr = LD->getBasePtr(); // The base pointer.
659 SDValue Value; // The value returned by the load op.
660 SDLoc dl(Node);
661
662 ISD::LoadExtType ExtType = LD->getExtensionType();
663 if (ExtType == ISD::NON_EXTLOAD) {
664 LLVM_DEBUG(dbgs() << "Legalizing non-extending load operation\n");
665 MVT VT = Node->getSimpleValueType(0);
666 SDValue RVal = SDValue(Node, 0);
667 SDValue RChain = SDValue(Node, 1);
668
669 switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
670 default: llvm_unreachable("This action is not supported yet!");
671 case TargetLowering::Legal: {
672 EVT MemVT = LD->getMemoryVT();
673 const DataLayout &DL = DAG.getDataLayout();
674 // If this is an unaligned load and the target doesn't support it,
675 // expand it.
676 if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
677 *LD->getMemOperand())) {
678 std::tie(RVal, RChain) = TLI.expandUnalignedLoad(LD, DAG);
679 }
680 break;
681 }
682 case TargetLowering::Custom:
683 if (SDValue Res = TLI.LowerOperation(RVal, DAG)) {
684 RVal = Res;
685 RChain = Res.getValue(1);
686 }
687 break;
688
689 case TargetLowering::Promote: {
690 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
691 assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
692 "Can only promote loads to same size type");
693
694 SDValue Res = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getMemOperand());
695 RVal = DAG.getNode(ISD::BITCAST, dl, VT, Res);
696 RChain = Res.getValue(1);
697 break;
698 }
699 }
700 if (RChain.getNode() != Node) {
701 assert(RVal.getNode() != Node && "Load must be completely replaced");
702 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), RVal);
703 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), RChain);
704 if (UpdatedNodes) {
705 UpdatedNodes->insert(RVal.getNode());
706 UpdatedNodes->insert(RChain.getNode());
707 }
708 ReplacedNode(Node);
709 }
710 return;
711 }
712
713 LLVM_DEBUG(dbgs() << "Legalizing extending load operation\n");
714 EVT SrcVT = LD->getMemoryVT();
715 TypeSize SrcWidth = SrcVT.getSizeInBits();
716 MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
717 AAMDNodes AAInfo = LD->getAAInfo();
718
719 if (SrcWidth != SrcVT.getStoreSizeInBits() &&
720 // Some targets pretend to have an i1 loading operation, and actually
721 // load an i8. This trick is correct for ZEXTLOAD because the top 7
722 // bits are guaranteed to be zero; it helps the optimizers understand
723 // that these bits are zero. It is also useful for EXTLOAD, since it
724 // tells the optimizers that those bits are undefined. It would be
725 // nice to have an effective generic way of getting these benefits...
726 // Until such a way is found, don't insist on promoting i1 here.
727 (SrcVT != MVT::i1 ||
728 TLI.getLoadExtAction(ExtType, Node->getValueType(0), MVT::i1) ==
729 TargetLowering::Promote)) {
730 // Promote to a byte-sized load if not loading an integral number of
731 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
732 unsigned NewWidth = SrcVT.getStoreSizeInBits();
733 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
734 SDValue Ch;
735
736 // The extra bits are guaranteed to be zero, since we stored them that
737 // way. A zext load from NVT thus automatically gives zext from SrcVT.
738
739 ISD::LoadExtType NewExtType =
740 ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
741
742 SDValue Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
743 Chain, Ptr, LD->getPointerInfo(), NVT,
744 LD->getOriginalAlign(), MMOFlags, AAInfo);
745
746 Ch = Result.getValue(1); // The chain.
747
748 if (ExtType == ISD::SEXTLOAD)
749 // Having the top bits zero doesn't help when sign extending.
750 Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
751 Result.getValueType(),
752 Result, DAG.getValueType(SrcVT));
753 else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
754 // All the top bits are guaranteed to be zero - inform the optimizers.
755 Result = DAG.getNode(ISD::AssertZext, dl,
756 Result.getValueType(), Result,
757 DAG.getValueType(SrcVT));
758
759 Value = Result;
760 Chain = Ch;
761 } else if (!isPowerOf2_64(SrcWidth.getKnownMinValue())) {
762 // If not loading a power-of-2 number of bits, expand as two loads.
763 assert(!SrcVT.isVector() && "Unsupported extload!");
764 unsigned SrcWidthBits = SrcWidth.getFixedValue();
765 unsigned LogSrcWidth = Log2_32(SrcWidthBits);
766 assert(LogSrcWidth < 32);
767 unsigned RoundWidth = 1 << LogSrcWidth;
768 assert(RoundWidth < SrcWidthBits);
769 unsigned ExtraWidth = SrcWidthBits - RoundWidth;
770 assert(ExtraWidth < RoundWidth);
771 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
772 "Load size not an integral number of bytes!");
773 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
774 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
775 SDValue Lo, Hi, Ch;
776 unsigned IncrementSize;
777 auto &DL = DAG.getDataLayout();
778
779 if (DL.isLittleEndian()) {
780 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
781 // Load the bottom RoundWidth bits.
782 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), Chain, Ptr,
783 LD->getPointerInfo(), RoundVT, LD->getOriginalAlign(),
784 MMOFlags, AAInfo);
785
786 // Load the remaining ExtraWidth bits.
787 IncrementSize = RoundWidth / 8;
788 Ptr =
789 DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(IncrementSize), dl);
790 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
791 LD->getPointerInfo().getWithOffset(IncrementSize),
792 ExtraVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
793
794 // Build a factor node to remember that this load is independent of
795 // the other one.
796 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
797 Hi.getValue(1));
798
799 // Move the top bits to the right place.
800 Hi = DAG.getNode(
801 ISD::SHL, dl, Hi.getValueType(), Hi,
802 DAG.getConstant(RoundWidth, dl,
803 TLI.getShiftAmountTy(Hi.getValueType(), DL)));
804
805 // Join the hi and lo parts.
806 Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
807 } else {
808 // Big endian - avoid unaligned loads.
809 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
810 // Load the top RoundWidth bits.
811 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
812 LD->getPointerInfo(), RoundVT, LD->getOriginalAlign(),
813 MMOFlags, AAInfo);
814
815 // Load the remaining ExtraWidth bits.
816 IncrementSize = RoundWidth / 8;
817 Ptr =
818 DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(IncrementSize), dl);
819 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), Chain, Ptr,
820 LD->getPointerInfo().getWithOffset(IncrementSize),
821 ExtraVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
822
823 // Build a factor node to remember that this load is independent of
824 // the other one.
825 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
826 Hi.getValue(1));
827
828 // Move the top bits to the right place.
829 Hi = DAG.getNode(
830 ISD::SHL, dl, Hi.getValueType(), Hi,
831 DAG.getConstant(ExtraWidth, dl,
832 TLI.getShiftAmountTy(Hi.getValueType(), DL)));
833
834 // Join the hi and lo parts.
835 Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
836 }
837
838 Chain = Ch;
839 } else {
840 bool isCustom = false;
841 switch (TLI.getLoadExtAction(ExtType, Node->getValueType(0),
842 SrcVT.getSimpleVT())) {
843 default: llvm_unreachable("This action is not supported yet!");
844 case TargetLowering::Custom:
845 isCustom = true;
846 [[fallthrough]];
847 case TargetLowering::Legal:
848 Value = SDValue(Node, 0);
849 Chain = SDValue(Node, 1);
850
851 if (isCustom) {
852 if (SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG)) {
853 Value = Res;
854 Chain = Res.getValue(1);
855 }
856 } else {
857 // If this is an unaligned load and the target doesn't support it,
858 // expand it.
859 EVT MemVT = LD->getMemoryVT();
860 const DataLayout &DL = DAG.getDataLayout();
861 if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
862 *LD->getMemOperand())) {
863 std::tie(Value, Chain) = TLI.expandUnalignedLoad(LD, DAG);
864 }
865 }
866 break;
867
868 case TargetLowering::Expand: {
869 EVT DestVT = Node->getValueType(0);
870 if (!TLI.isLoadExtLegal(ISD::EXTLOAD, DestVT, SrcVT)) {
871 // If the source type is not legal, see if there is a legal extload to
872 // an intermediate type that we can then extend further.
873 EVT LoadVT = TLI.getRegisterType(SrcVT.getSimpleVT());
874 if ((LoadVT.isFloatingPoint() == SrcVT.isFloatingPoint()) &&
875 (TLI.isTypeLegal(SrcVT) || // Same as SrcVT == LoadVT?
876 TLI.isLoadExtLegal(ExtType, LoadVT, SrcVT))) {
877 // If we are loading a legal type, this is a non-extload followed by a
878 // full extend.
879 ISD::LoadExtType MidExtType =
880 (LoadVT == SrcVT) ? ISD::NON_EXTLOAD : ExtType;
881
882 SDValue Load = DAG.getExtLoad(MidExtType, dl, LoadVT, Chain, Ptr,
883 SrcVT, LD->getMemOperand());
884 unsigned ExtendOp =
885 ISD::getExtForLoadExtType(SrcVT.isFloatingPoint(), ExtType);
886 Value = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
887 Chain = Load.getValue(1);
888 break;
889 }
890
891 // Handle the special case of fp16 extloads. EXTLOAD doesn't have the
892 // normal undefined upper bits behavior to allow using an in-reg extend
893 // with the illegal FP type, so load as an integer and do the
894 // from-integer conversion.
895 EVT SVT = SrcVT.getScalarType();
896 if (SVT == MVT::f16 || SVT == MVT::bf16) {
897 EVT ISrcVT = SrcVT.changeTypeToInteger();
898 EVT IDestVT = DestVT.changeTypeToInteger();
899 EVT ILoadVT = TLI.getRegisterType(IDestVT.getSimpleVT());
900
901 SDValue Result = DAG.getExtLoad(ISD::ZEXTLOAD, dl, ILoadVT, Chain,
902 Ptr, ISrcVT, LD->getMemOperand());
903 Value =
904 DAG.getNode(SVT == MVT::f16 ? ISD::FP16_TO_FP : ISD::BF16_TO_FP,
905 dl, DestVT, Result);
906 Chain = Result.getValue(1);
907 break;
908 }
909 }
910
911 assert(!SrcVT.isVector() &&
912 "Vector Loads are handled in LegalizeVectorOps");
913
914 // FIXME: This does not work for vectors on most targets. Sign-
915 // and zero-extend operations are currently folded into extending
916 // loads, whether they are legal or not, and then we end up here
917 // without any support for legalizing them.
918 assert(ExtType != ISD::EXTLOAD &&
919 "EXTLOAD should always be supported!");
920 // Turn the unsupported load into an EXTLOAD followed by an
921 // explicit zero/sign extend inreg.
922 SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl,
923 Node->getValueType(0),
924 Chain, Ptr, SrcVT,
925 LD->getMemOperand());
926 SDValue ValRes;
927 if (ExtType == ISD::SEXTLOAD)
928 ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
929 Result.getValueType(),
930 Result, DAG.getValueType(SrcVT));
931 else
932 ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT);
933 Value = ValRes;
934 Chain = Result.getValue(1);
935 break;
936 }
937 }
938 }
939
940 // Since loads produce two values, make sure to remember that we legalized
941 // both of them.
942 if (Chain.getNode() != Node) {
943 assert(Value.getNode() != Node && "Load must be completely replaced");
944 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Value);
945 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
946 if (UpdatedNodes) {
947 UpdatedNodes->insert(Value.getNode());
948 UpdatedNodes->insert(Chain.getNode());
949 }
950 ReplacedNode(Node);
951 }
952}
953
954/// Return a legal replacement for the given operation, with all legal operands.
955void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
956 LLVM_DEBUG(dbgs() << "\nLegalizing: "; Node->dump(&DAG));
957
958 // Allow illegal target nodes and illegal registers.
959 if (Node->getOpcode() == ISD::TargetConstant ||
960 Node->getOpcode() == ISD::Register)
961 return;
962
963#ifndef NDEBUG
964 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
965 assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) ==
966 TargetLowering::TypeLegal &&
967 "Unexpected illegal type!");
968
969 for (const SDValue &Op : Node->op_values())
970 assert((TLI.getTypeAction(*DAG.getContext(), Op.getValueType()) ==
971 TargetLowering::TypeLegal ||
972 Op.getOpcode() == ISD::TargetConstant ||
973 Op.getOpcode() == ISD::Register) &&
974 "Unexpected illegal type!");
975#endif
976
977 // Figure out the correct action; the way to query this varies by opcode
978 TargetLowering::LegalizeAction Action = TargetLowering::Legal;
979 bool SimpleFinishLegalizing = true;
980 switch (Node->getOpcode()) {
984 case ISD::STACKSAVE:
985 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
986 break;
988 Action = TLI.getOperationAction(Node->getOpcode(),
989 Node->getValueType(0));
990 break;
991 case ISD::VAARG:
992 Action = TLI.getOperationAction(Node->getOpcode(),
993 Node->getValueType(0));
994 if (Action != TargetLowering::Promote)
995 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
996 break;
997 case ISD::SET_FPENV:
998 case ISD::SET_FPMODE:
999 Action = TLI.getOperationAction(Node->getOpcode(),
1000 Node->getOperand(1).getValueType());
1001 break;
1002 case ISD::FP_TO_FP16:
1003 case ISD::FP_TO_BF16:
1004 case ISD::SINT_TO_FP:
1005 case ISD::UINT_TO_FP:
1007 case ISD::LROUND:
1008 case ISD::LLROUND:
1009 case ISD::LRINT:
1010 case ISD::LLRINT:
1011 Action = TLI.getOperationAction(Node->getOpcode(),
1012 Node->getOperand(0).getValueType());
1013 break;
1018 case ISD::STRICT_LRINT:
1019 case ISD::STRICT_LLRINT:
1020 case ISD::STRICT_LROUND:
1021 case ISD::STRICT_LLROUND:
1022 // These pseudo-ops are the same as the other STRICT_ ops except
1023 // they are registered with setOperationAction() using the input type
1024 // instead of the output type.
1025 Action = TLI.getOperationAction(Node->getOpcode(),
1026 Node->getOperand(1).getValueType());
1027 break;
1028 case ISD::SIGN_EXTEND_INREG: {
1029 EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
1030 Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
1031 break;
1032 }
1033 case ISD::ATOMIC_STORE:
1034 Action = TLI.getOperationAction(Node->getOpcode(),
1035 Node->getOperand(1).getValueType());
1036 break;
1037 case ISD::SELECT_CC:
1038 case ISD::STRICT_FSETCC:
1040 case ISD::SETCC:
1041 case ISD::SETCCCARRY:
1042 case ISD::VP_SETCC:
1043 case ISD::BR_CC: {
1044 unsigned Opc = Node->getOpcode();
1045 unsigned CCOperand = Opc == ISD::SELECT_CC ? 4
1046 : Opc == ISD::STRICT_FSETCC ? 3
1047 : Opc == ISD::STRICT_FSETCCS ? 3
1048 : Opc == ISD::SETCCCARRY ? 3
1049 : (Opc == ISD::SETCC || Opc == ISD::VP_SETCC) ? 2
1050 : 1;
1051 unsigned CompareOperand = Opc == ISD::BR_CC ? 2
1052 : Opc == ISD::STRICT_FSETCC ? 1
1053 : Opc == ISD::STRICT_FSETCCS ? 1
1054 : 0;
1055 MVT OpVT = Node->getOperand(CompareOperand).getSimpleValueType();
1056 ISD::CondCode CCCode =
1057 cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
1058 Action = TLI.getCondCodeAction(CCCode, OpVT);
1059 if (Action == TargetLowering::Legal) {
1060 if (Node->getOpcode() == ISD::SELECT_CC)
1061 Action = TLI.getOperationAction(Node->getOpcode(),
1062 Node->getValueType(0));
1063 else
1064 Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
1065 }
1066 break;
1067 }
1068 case ISD::LOAD:
1069 case ISD::STORE:
1070 // FIXME: Model these properly. LOAD and STORE are complicated, and
1071 // STORE expects the unlegalized operand in some cases.
1072 SimpleFinishLegalizing = false;
1073 break;
1074 case ISD::CALLSEQ_START:
1075 case ISD::CALLSEQ_END:
1076 // FIXME: This shouldn't be necessary. These nodes have special properties
1077 // dealing with the recursive nature of legalization. Removing this
1078 // special case should be done as part of making LegalizeDAG non-recursive.
1079 SimpleFinishLegalizing = false;
1080 break;
1082 case ISD::GET_ROUNDING:
1083 case ISD::MERGE_VALUES:
1084 case ISD::EH_RETURN:
1086 case ISD::EH_DWARF_CFA:
1090 // These operations lie about being legal: when they claim to be legal,
1091 // they should actually be expanded.
1092 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1093 if (Action == TargetLowering::Legal)
1094 Action = TargetLowering::Expand;
1095 break;
1098 case ISD::FRAMEADDR:
1099 case ISD::RETURNADDR:
1101 case ISD::SPONENTRY:
1102 // These operations lie about being legal: when they claim to be legal,
1103 // they should actually be custom-lowered.
1104 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1105 if (Action == TargetLowering::Legal)
1106 Action = TargetLowering::Custom;
1107 break;
1108 case ISD::CLEAR_CACHE:
1109 // This operation is typically going to be LibCall unless the target wants
1110 // something different.
1111 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1112 break;
1113 case ISD::READCYCLECOUNTER:
1114 case ISD::READSTEADYCOUNTER:
1115 // READCYCLECOUNTER and READSTEADYCOUNTER return an i64, even if type
1116 // legalization might have expanded that to several smaller types.
1117 Action = TLI.getOperationAction(Node->getOpcode(), MVT::i64);
1118 break;
1119 case ISD::READ_REGISTER:
1120 case ISD::WRITE_REGISTER:
1121 // Named register is legal in the DAG, but blocked by register name
1122 // selection if not implemented by the target (to choose the correct register).
1123 // They'll be converted to Copy(To/From)Reg.
1124 Action = TargetLowering::Legal;
1125 break;
1126 case ISD::UBSANTRAP:
1127 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1128 if (Action == TargetLowering::Expand) {
1129 // replace ISD::UBSANTRAP with ISD::TRAP
1130 SDValue NewVal;
1131 NewVal = DAG.getNode(ISD::TRAP, SDLoc(Node), Node->getVTList(),
1132 Node->getOperand(0));
1133 ReplaceNode(Node, NewVal.getNode());
1134 LegalizeOp(NewVal.getNode());
1135 return;
1136 }
1137 break;
1138 case ISD::DEBUGTRAP:
1139 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1140 if (Action == TargetLowering::Expand) {
1141 // replace ISD::DEBUGTRAP with ISD::TRAP
1142 SDValue NewVal;
1143 NewVal = DAG.getNode(ISD::TRAP, SDLoc(Node), Node->getVTList(),
1144 Node->getOperand(0));
1145 ReplaceNode(Node, NewVal.getNode());
1146 LegalizeOp(NewVal.getNode());
1147 return;
1148 }
1149 break;
1150 case ISD::SADDSAT:
1151 case ISD::UADDSAT:
1152 case ISD::SSUBSAT:
1153 case ISD::USUBSAT:
1154 case ISD::SSHLSAT:
1155 case ISD::USHLSAT:
1158 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1159 break;
1160 case ISD::SMULFIX:
1161 case ISD::SMULFIXSAT:
1162 case ISD::UMULFIX:
1163 case ISD::UMULFIXSAT:
1164 case ISD::SDIVFIX:
1165 case ISD::SDIVFIXSAT:
1166 case ISD::UDIVFIX:
1167 case ISD::UDIVFIXSAT: {
1168 unsigned Scale = Node->getConstantOperandVal(2);
1169 Action = TLI.getFixedPointOperationAction(Node->getOpcode(),
1170 Node->getValueType(0), Scale);
1171 break;
1172 }
1173 case ISD::MSCATTER:
1174 Action = TLI.getOperationAction(Node->getOpcode(),
1175 cast<MaskedScatterSDNode>(Node)->getValue().getValueType());
1176 break;
1177 case ISD::MSTORE:
1178 Action = TLI.getOperationAction(Node->getOpcode(),
1179 cast<MaskedStoreSDNode>(Node)->getValue().getValueType());
1180 break;
1181 case ISD::VP_SCATTER:
1182 Action = TLI.getOperationAction(
1183 Node->getOpcode(),
1184 cast<VPScatterSDNode>(Node)->getValue().getValueType());
1185 break;
1186 case ISD::VP_STORE:
1187 Action = TLI.getOperationAction(
1188 Node->getOpcode(),
1189 cast<VPStoreSDNode>(Node)->getValue().getValueType());
1190 break;
1191 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
1192 Action = TLI.getOperationAction(
1193 Node->getOpcode(),
1194 cast<VPStridedStoreSDNode>(Node)->getValue().getValueType());
1195 break;
1198 case ISD::VECREDUCE_ADD:
1199 case ISD::VECREDUCE_MUL:
1200 case ISD::VECREDUCE_AND:
1201 case ISD::VECREDUCE_OR:
1202 case ISD::VECREDUCE_XOR:
1211 case ISD::IS_FPCLASS:
1212 Action = TLI.getOperationAction(
1213 Node->getOpcode(), Node->getOperand(0).getValueType());
1214 break;
1217 case ISD::VP_REDUCE_FADD:
1218 case ISD::VP_REDUCE_FMUL:
1219 case ISD::VP_REDUCE_ADD:
1220 case ISD::VP_REDUCE_MUL:
1221 case ISD::VP_REDUCE_AND:
1222 case ISD::VP_REDUCE_OR:
1223 case ISD::VP_REDUCE_XOR:
1224 case ISD::VP_REDUCE_SMAX:
1225 case ISD::VP_REDUCE_SMIN:
1226 case ISD::VP_REDUCE_UMAX:
1227 case ISD::VP_REDUCE_UMIN:
1228 case ISD::VP_REDUCE_FMAX:
1229 case ISD::VP_REDUCE_FMIN:
1230 case ISD::VP_REDUCE_FMAXIMUM:
1231 case ISD::VP_REDUCE_FMINIMUM:
1232 case ISD::VP_REDUCE_SEQ_FADD:
1233 case ISD::VP_REDUCE_SEQ_FMUL:
1234 Action = TLI.getOperationAction(
1235 Node->getOpcode(), Node->getOperand(1).getValueType());
1236 break;
1237 case ISD::VP_CTTZ_ELTS:
1238 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
1239 Action = TLI.getOperationAction(Node->getOpcode(),
1240 Node->getOperand(0).getValueType());
1241 break;
1242 default:
1243 if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
1244 Action = TLI.getCustomOperationAction(*Node);
1245 } else {
1246 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
1247 }
1248 break;
1249 }
1250
1251 if (SimpleFinishLegalizing) {
1252 SDNode *NewNode = Node;
1253 switch (Node->getOpcode()) {
1254 default: break;
1255 case ISD::SHL:
1256 case ISD::SRL:
1257 case ISD::SRA:
1258 case ISD::ROTL:
1259 case ISD::ROTR: {
1260 // Legalizing shifts/rotates requires adjusting the shift amount
1261 // to the appropriate width.
1262 SDValue Op0 = Node->getOperand(0);
1263 SDValue Op1 = Node->getOperand(1);
1264 if (!Op1.getValueType().isVector()) {
1265 SDValue SAO = DAG.getShiftAmountOperand(Op0.getValueType(), Op1);
1266 // The getShiftAmountOperand() may create a new operand node or
1267 // return the existing one. If a new operand is created, we need
1268 // to update the parent node.
1269 // Do not try to legalize SAO here! It will be automatically legalized
1270 // in the next round.
1271 if (SAO != Op1)
1272 NewNode = DAG.UpdateNodeOperands(Node, Op0, SAO);
1273 }
1274 }
1275 break;
1276 case ISD::FSHL:
1277 case ISD::FSHR:
1278 case ISD::SRL_PARTS:
1279 case ISD::SRA_PARTS:
1280 case ISD::SHL_PARTS: {
1281 // Legalizing shifts/rotates requires adjusting the shift amount
1282 // to the appropriate width.
1283 SDValue Op0 = Node->getOperand(0);
1284 SDValue Op1 = Node->getOperand(1);
1285 SDValue Op2 = Node->getOperand(2);
1286 if (!Op2.getValueType().isVector()) {
1287 SDValue SAO = DAG.getShiftAmountOperand(Op0.getValueType(), Op2);
1288 // The getShiftAmountOperand() may create a new operand node or
1289 // return the existing one. If a new operand is created, we need
1290 // to update the parent node.
1291 if (SAO != Op2)
1292 NewNode = DAG.UpdateNodeOperands(Node, Op0, Op1, SAO);
1293 }
1294 break;
1295 }
1296 }
1297
1298 if (NewNode != Node) {
1299 ReplaceNode(Node, NewNode);
1300 Node = NewNode;
1301 }
1302 switch (Action) {
1303 case TargetLowering::Legal:
1304 LLVM_DEBUG(dbgs() << "Legal node: nothing to do\n");
1305 return;
1306 case TargetLowering::Custom:
1307 LLVM_DEBUG(dbgs() << "Trying custom legalization\n");
1308 // FIXME: The handling for custom lowering with multiple results is
1309 // a complete mess.
1310 if (SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG)) {
1311 if (!(Res.getNode() != Node || Res.getResNo() != 0))
1312 return;
1313
1314 if (Node->getNumValues() == 1) {
1315 // Verify the new types match the original. Glue is waived because
1316 // ISD::ADDC can be legalized by replacing Glue with an integer type.
1317 assert((Res.getValueType() == Node->getValueType(0) ||
1318 Node->getValueType(0) == MVT::Glue) &&
1319 "Type mismatch for custom legalized operation");
1320 LLVM_DEBUG(dbgs() << "Successfully custom legalized node\n");
1321 // We can just directly replace this node with the lowered value.
1322 ReplaceNode(SDValue(Node, 0), Res);
1323 return;
1324 }
1325
1326 SmallVector<SDValue, 8> ResultVals;
1327 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
1328 // Verify the new types match the original. Glue is waived because
1329 // ISD::ADDC can be legalized by replacing Glue with an integer type.
1330 assert((Res->getValueType(i) == Node->getValueType(i) ||
1331 Node->getValueType(i) == MVT::Glue) &&
1332 "Type mismatch for custom legalized operation");
1333 ResultVals.push_back(Res.getValue(i));
1334 }
1335 LLVM_DEBUG(dbgs() << "Successfully custom legalized node\n");
1336 ReplaceNode(Node, ResultVals.data());
1337 return;
1338 }
1339 LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
1340 [[fallthrough]];
1341 case TargetLowering::Expand:
1342 if (ExpandNode(Node))
1343 return;
1344 [[fallthrough]];
1345 case TargetLowering::LibCall:
1346 ConvertNodeToLibcall(Node);
1347 return;
1348 case TargetLowering::Promote:
1349 PromoteNode(Node);
1350 return;
1351 }
1352 }
1353
1354 switch (Node->getOpcode()) {
1355 default:
1356#ifndef NDEBUG
1357 dbgs() << "NODE: ";
1358 Node->dump( &DAG);
1359 dbgs() << "\n";
1360#endif
1361 llvm_unreachable("Do not know how to legalize this operator!");
1362
1363 case ISD::CALLSEQ_START:
1364 case ISD::CALLSEQ_END:
1365 break;
1366 case ISD::LOAD:
1367 return LegalizeLoadOps(Node);
1368 case ISD::STORE:
1369 return LegalizeStoreOps(Node);
1370 }
1371}
1372
1373SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
1374 SDValue Vec = Op.getOperand(0);
1375 SDValue Idx = Op.getOperand(1);
1376 SDLoc dl(Op);
1377
1378 // Before we generate a new store to a temporary stack slot, see if there is
1379 // already one that we can use. There often is because when we scalarize
1380 // vector operations (using SelectionDAG::UnrollVectorOp for example) a whole
1381 // series of EXTRACT_VECTOR_ELT nodes are generated, one for each element in
1382 // the vector. If all are expanded here, we don't want one store per vector
1383 // element.
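 // Illustrative scenario: after unrolling a v4i32 operation, four
 // EXTRACT_VECTOR_ELT nodes may all reach this expansion; reusing one earlier
 // store of Vec lets a single stack spill feed four element loads instead of
 // emitting four separate store/load pairs.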
1384
1385 // Caches for hasPredecessorHelper
1386 SmallPtrSet<const SDNode *, 32> Visited;
1387 SmallVector<const SDNode *, 16> Worklist;
1388 Visited.insert(Op.getNode());
1389 Worklist.push_back(Idx.getNode());
1390 SDValue StackPtr, Ch;
1391 for (SDNode *User : Vec.getNode()->uses()) {
1392 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(User)) {
1393 if (ST->isIndexed() || ST->isTruncatingStore() ||
1394 ST->getValue() != Vec)
1395 continue;
1396
1397 // Make sure that nothing else could have stored into the destination of
1398 // this store.
1399 if (!ST->getChain().reachesChainWithoutSideEffects(DAG.getEntryNode()))
1400 continue;
1401
1402 // If the index is dependent on the store we will introduce a cycle when
1403 // creating the load (the load uses the index, and by replacing the chain
1404 // we will make the index dependent on the load). Also, the store might be
1405 // dependent on the extractelement and introduce a cycle when creating
1406 // the load.
1407 if (SDNode::hasPredecessorHelper(ST, Visited, Worklist) ||
1408 ST->hasPredecessor(Op.getNode()))
1409 continue;
1410
1411 StackPtr = ST->getBasePtr();
1412 Ch = SDValue(ST, 0);
1413 break;
1414 }
1415 }
1416
1417 EVT VecVT = Vec.getValueType();
1418
1419 if (!Ch.getNode()) {
1420 // Store the value to a temporary stack slot, then LOAD the returned part.
1421 StackPtr = DAG.CreateStackTemporary(VecVT);
1422 MachineMemOperand *StoreMMO = getStackAlignedMMO(
1423 StackPtr, DAG.getMachineFunction(), VecVT.isScalableVector());
1424 Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, StoreMMO);
1425 }
1426
1427 SDValue NewLoad;
1428 Align ElementAlignment =
1429 std::min(cast<StoreSDNode>(Ch)->getAlign(),
1430 DAG.getDataLayout().getPrefTypeAlign(
1431 Op.getValueType().getTypeForEVT(*DAG.getContext())));
1432
1433 if (Op.getValueType().isVector()) {
1434 StackPtr = TLI.getVectorSubVecPointer(DAG, StackPtr, VecVT,
1435 Op.getValueType(), Idx);
1436 NewLoad = DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,
1437 MachinePointerInfo(), ElementAlignment);
1438 } else {
1439 StackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
1440 NewLoad = DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
1441 MachinePointerInfo(), VecVT.getVectorElementType(),
1442 ElementAlignment);
1443 }
1444
1445 // Replace the chain going out of the store, by the one out of the load.
1446 DAG.ReplaceAllUsesOfValueWith(Ch, SDValue(NewLoad.getNode(), 1));
1447
1448 // We introduced a cycle though, so update the load's operands, making sure
1449 // to use the original store's chain as an incoming chain.
1450 SmallVector<SDValue, 6> NewLoadOperands(NewLoad->op_begin(),
1451 NewLoad->op_end());
1452 NewLoadOperands[0] = Ch;
1453 NewLoad =
1454 SDValue(DAG.UpdateNodeOperands(NewLoad.getNode(), NewLoadOperands), 0);
1455 return NewLoad;
1456}
1457
1458SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
1459 assert(Op.getValueType().isVector() && "Non-vector insert subvector!");
1460
1461 SDValue Vec = Op.getOperand(0);
1462 SDValue Part = Op.getOperand(1);
1463 SDValue Idx = Op.getOperand(2);
1464 SDLoc dl(Op);
1465
1466 // Store the value to a temporary stack slot, then LOAD the returned part.
1467 EVT VecVT = Vec.getValueType();
1468 EVT PartVT = Part.getValueType();
1469 SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
1470 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
1471 MachinePointerInfo PtrInfo =
1472 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
1473
1474 // First store the whole vector.
1475 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo);
1476
1477 // Freeze the index so we don't poison the clamping code we're about to emit.
1478 Idx = DAG.getFreeze(Idx);
1479
1480 // Then store the inserted part.
1481 if (PartVT.isVector()) {
1482 SDValue SubStackPtr =
1483 TLI.getVectorSubVecPointer(DAG, StackPtr, VecVT, PartVT, Idx);
1484
1485 // Store the subvector.
1486 Ch = DAG.getStore(
1487 Ch, dl, Part, SubStackPtr,
1488 MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
1489 } else {
1490 SDValue SubStackPtr =
1491 TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
1492
1493 // Store the scalar value.
1494 Ch = DAG.getTruncStore(
1495 Ch, dl, Part, SubStackPtr,
1496 MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
1497 VecVT.getVectorElementType());
1498 }
1499
1500 // Finally, load the updated vector.
1501 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo);
1502}
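// Illustrative flow of the function above: the whole vector is spilled to a
// stack temporary, the inserted part (subvector or scalar) is stored on top of
// it at the clamped element offset, and the updated vector is reloaded. The
// index is frozen first so the clamping arithmetic never operates on poison.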
1503
1504SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
1505 assert((Node->getOpcode() == ISD::BUILD_VECTOR ||
1506 Node->getOpcode() == ISD::CONCAT_VECTORS) &&
1507 "Unexpected opcode!");
1508
1509 // We can't handle this case efficiently. Allocate a sufficiently
1510 // aligned object on the stack, store each operand into it, then load
1511 // the result as a vector.
1512 // Create the stack frame object.
1513 EVT VT = Node->getValueType(0);
1514 EVT MemVT = isa<BuildVectorSDNode>(Node) ? VT.getVectorElementType()
1515 : Node->getOperand(0).getValueType();
1516 SDLoc dl(Node);
1517 SDValue FIPtr = DAG.CreateStackTemporary(VT);
1518 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
1519 MachinePointerInfo PtrInfo =
1520 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
1521
1522 // Emit a store of each element to the stack slot.
1523 SmallVector<SDValue, 8> Stores;
1524 unsigned TypeByteSize = MemVT.getSizeInBits() / 8;
1525 assert(TypeByteSize > 0 && "Vector element type too small for stack store!");
1526
1527 // If the destination vector element type of a BUILD_VECTOR is narrower than
1528 // the source element type, only store the bits necessary.
1529 bool Truncate = isa<BuildVectorSDNode>(Node) &&
1530 MemVT.bitsLT(Node->getOperand(0).getValueType());
1531
1532 // Store (in the right endianness) the elements to memory.
1533 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
1534 // Ignore undef elements.
1535 if (Node->getOperand(i).isUndef()) continue;
1536
1537 unsigned Offset = TypeByteSize*i;
1538
1539 SDValue Idx =
1540 DAG.getMemBasePlusOffset(FIPtr, TypeSize::getFixed(Offset), dl);
1541
1542 if (Truncate)
1543 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl,
1544 Node->getOperand(i), Idx,
1545 PtrInfo.getWithOffset(Offset), MemVT));
1546 else
1547 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, Node->getOperand(i),
1548 Idx, PtrInfo.getWithOffset(Offset)));
1549 }
1550
1551 SDValue StoreChain;
1552 if (!Stores.empty()) // Not all undef elements?
1553 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
1554 else
1555 StoreChain = DAG.getEntryNode();
1556
1557 // Result is a load from the stack slot.
1558 return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo);
1559}
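// Illustrative example for the expansion above: a BUILD_VECTOR of v4i16 whose
// operands are i32 (wider than the element type) emits one truncating i16
// store per non-undef operand at offsets 0, 2, 4 and 6, token-factors the
// stores, and reloads the slot as a single v4i16 value.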
1560
1561/// Bitcast a floating-point value to an integer value. Only bitcast the part
1562/// containing the sign bit if the target has no integer value capable of
1563/// holding all bits of the floating-point value.
1564void SelectionDAGLegalize::getSignAsIntValue(FloatSignAsInt &State,
1565 const SDLoc &DL,
1566 SDValue Value) const {
1567 EVT FloatVT = Value.getValueType();
1568 unsigned NumBits = FloatVT.getScalarSizeInBits();
1569 State.FloatVT = FloatVT;
1570 EVT IVT = EVT::getIntegerVT(*DAG.getContext(), NumBits);
1571 // Convert to an integer of the same size.
1572 if (TLI.isTypeLegal(IVT)) {
1573 State.IntValue = DAG.getNode(ISD::BITCAST, DL, IVT, Value);
1574 State.SignMask = APInt::getSignMask(NumBits);
1575 State.SignBit = NumBits - 1;
1576 return;
1577 }
1578
1579 auto &DataLayout = DAG.getDataLayout();
1580 // Store the float to memory, then load the sign part out as an integer.
1581 MVT LoadTy = TLI.getRegisterType(MVT::i8);
1582 // First create a temporary that is aligned for both the load and store.
1583 SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy);
1584 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
1585 // Then store the float to it.
1586 State.FloatPtr = StackPtr;
1587 MachineFunction &MF = DAG.getMachineFunction();
1588 State.FloatPointerInfo = MachinePointerInfo::getFixedStack(MF, FI);
1589 State.Chain = DAG.getStore(DAG.getEntryNode(), DL, Value, State.FloatPtr,
1590 State.FloatPointerInfo);
1591
1592 SDValue IntPtr;
1593 if (DataLayout.isBigEndian()) {
1594 assert(FloatVT.isByteSized() && "Unsupported floating point type!");
1595 // Load out a legal integer with the same sign bit as the float.
1596 IntPtr = StackPtr;
1597 State.IntPointerInfo = State.FloatPointerInfo;
1598 } else {
1599 // Advance the pointer so that the loaded byte will contain the sign bit.
1600 unsigned ByteOffset = (NumBits / 8) - 1;
1601 IntPtr =
1602 DAG.getMemBasePlusOffset(StackPtr, TypeSize::getFixed(ByteOffset), DL);
1603 State.IntPointerInfo = MachinePointerInfo::getFixedStack(MF, FI,
1604 ByteOffset);
1605 }
1606
1607 State.IntPtr = IntPtr;
1608 State.IntValue = DAG.getExtLoad(ISD::EXTLOAD, DL, LoadTy, State.Chain, IntPtr,
1609 State.IntPointerInfo, MVT::i8);
1610 State.SignMask = APInt::getOneBitSet(LoadTy.getScalarSizeInBits(), 7);
1611 State.SignBit = 7;
1612}
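// Illustrative note (not part of the lowering itself): for an f64 input on a
// target with no legal i64, the value is spilled to a stack slot and a single
// byte holding the sign is reloaded. On a little-endian target that byte is at
// offset (64 / 8) - 1 = 7, and the sign ends up in bit 7 of the loaded byte,
// matching the SignMask/SignBit values set above.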
1613
1614/// Replace the integer value produced by getSignAsIntValue() with a new value
1615/// and cast the result back to a floating-point type.
1616SDValue SelectionDAGLegalize::modifySignAsInt(const FloatSignAsInt &State,
1617 const SDLoc &DL,
1618 SDValue NewIntValue) const {
1619 if (!State.Chain)
1620 return DAG.getNode(ISD::BITCAST, DL, State.FloatVT, NewIntValue);
1621
1622 // Override the part containing the sign bit in the value stored on the stack.
1623 SDValue Chain = DAG.getTruncStore(State.Chain, DL, NewIntValue, State.IntPtr,
1624 State.IntPointerInfo, MVT::i8);
1625 return DAG.getLoad(State.FloatVT, DL, Chain, State.FloatPtr,
1626 State.FloatPointerInfo);
1627}
1628
1629SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode *Node) const {
1630 SDLoc DL(Node);
1631 SDValue Mag = Node->getOperand(0);
1632 SDValue Sign = Node->getOperand(1);
1633
1634 // Get sign bit into an integer value.
1635 FloatSignAsInt SignAsInt;
1636 getSignAsIntValue(SignAsInt, DL, Sign);
1637
1638 EVT IntVT = SignAsInt.IntValue.getValueType();
1639 SDValue SignMask = DAG.getConstant(SignAsInt.SignMask, DL, IntVT);
1640 SDValue SignBit = DAG.getNode(ISD::AND, DL, IntVT, SignAsInt.IntValue,
1641 SignMask);
1642
1643 // If FABS and FNEG are legal, transform FCOPYSIGN(x, y) => sign(y) ? -FABS(x) : FABS(x).
1644 EVT FloatVT = Mag.getValueType();
1645 if (TLI.isOperationLegalOrCustom(ISD::FABS, FloatVT) &&
1646 TLI.isOperationLegalOrCustom(ISD::FNEG, FloatVT)) {
1647 SDValue AbsValue = DAG.getNode(ISD::FABS, DL, FloatVT, Mag);
1648 SDValue NegValue = DAG.getNode(ISD::FNEG, DL, FloatVT, AbsValue);
1649 SDValue Cond = DAG.getSetCC(DL, getSetCCResultType(IntVT), SignBit,
1650 DAG.getConstant(0, DL, IntVT), ISD::SETNE);
1651 return DAG.getSelect(DL, FloatVT, Cond, NegValue, AbsValue);
1652 }
1653
1654 // Transform Mag value to integer, and clear the sign bit.
1655 FloatSignAsInt MagAsInt;
1656 getSignAsIntValue(MagAsInt, DL, Mag);
1657 EVT MagVT = MagAsInt.IntValue.getValueType();
1658 SDValue ClearSignMask = DAG.getConstant(~MagAsInt.SignMask, DL, MagVT);
1659 SDValue ClearedSign = DAG.getNode(ISD::AND, DL, MagVT, MagAsInt.IntValue,
1660 ClearSignMask);
1661
1662 // Get the signbit at the right position for MagAsInt.
1663 int ShiftAmount = SignAsInt.SignBit - MagAsInt.SignBit;
1664 EVT ShiftVT = IntVT;
1665 if (SignBit.getScalarValueSizeInBits() <
1666 ClearedSign.getScalarValueSizeInBits()) {
1667 SignBit = DAG.getNode(ISD::ZERO_EXTEND, DL, MagVT, SignBit);
1668 ShiftVT = MagVT;
1669 }
1670 if (ShiftAmount > 0) {
1671 SDValue ShiftCnst = DAG.getConstant(ShiftAmount, DL, ShiftVT);
1672 SignBit = DAG.getNode(ISD::SRL, DL, ShiftVT, SignBit, ShiftCnst);
1673 } else if (ShiftAmount < 0) {
1674 SDValue ShiftCnst = DAG.getConstant(-ShiftAmount, DL, ShiftVT);
1675 SignBit = DAG.getNode(ISD::SHL, DL, ShiftVT, SignBit, ShiftCnst);
1676 }
1677 if (SignBit.getScalarValueSizeInBits() >
1678 ClearedSign.getScalarValueSizeInBits()) {
1679 SignBit = DAG.getNode(ISD::TRUNCATE, DL, MagVT, SignBit);
1680 }
1681
1682 // Store the part with the modified sign and convert back to float.
1683 SDValue CopiedSign = DAG.getNode(ISD::OR, DL, MagVT, ClearedSign, SignBit);
1684 return modifySignAsInt(MagAsInt, DL, CopiedSign);
1685}
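// Worked example (illustrative only): for fcopysign(f32 %mag, f64 %sign) with
// both integer types legal, the sign bit sits at position 63 in the i64 and at
// position 31 in the i32, so ShiftAmount = 63 - 31 = 32; the extracted bit is
// shifted right by 32, truncated to i32, and OR'd into the cleared magnitude.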
1686
1687SDValue SelectionDAGLegalize::ExpandFNEG(SDNode *Node) const {
1688 // Get the sign bit as an integer.
1689 SDLoc DL(Node);
1690 FloatSignAsInt SignAsInt;
1691 getSignAsIntValue(SignAsInt, DL, Node->getOperand(0));
1692 EVT IntVT = SignAsInt.IntValue.getValueType();
1693
1694 // Flip the sign.
1695 SDValue SignMask = DAG.getConstant(SignAsInt.SignMask, DL, IntVT);
1696 SDValue SignFlip =
1697 DAG.getNode(ISD::XOR, DL, IntVT, SignAsInt.IntValue, SignMask);
1698
1699 // Convert back to float.
1700 return modifySignAsInt(SignAsInt, DL, SignFlip);
1701}
1702
1703SDValue SelectionDAGLegalize::ExpandFABS(SDNode *Node) const {
1704 SDLoc DL(Node);
1705 SDValue Value = Node->getOperand(0);
1706
1707 // Transform FABS(x) => FCOPYSIGN(x, 0.0) if FCOPYSIGN is legal.
1708 EVT FloatVT = Value.getValueType();
1709 if (TLI.isOperationLegalOrCustom(ISD::FCOPYSIGN, FloatVT)) {
1710 SDValue Zero = DAG.getConstantFP(0.0, DL, FloatVT);
1711 return DAG.getNode(ISD::FCOPYSIGN, DL, FloatVT, Value, Zero);
1712 }
1713
1714 // Transform value to integer, clear the sign bit and transform back.
1715 FloatSignAsInt ValueAsInt;
1716 getSignAsIntValue(ValueAsInt, DL, Value);
1717 EVT IntVT = ValueAsInt.IntValue.getValueType();
1718 SDValue ClearSignMask = DAG.getConstant(~ValueAsInt.SignMask, DL, IntVT);
1719 SDValue ClearedSign = DAG.getNode(ISD::AND, DL, IntVT, ValueAsInt.IntValue,
1720 ClearSignMask);
1721 return modifySignAsInt(ValueAsInt, DL, ClearedSign);
1722}
1723
1724 void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
1725 SmallVectorImpl<SDValue> &Results) {
1726 Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
1727 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
1728 " not tell us which reg is the stack pointer!");
1729 SDLoc dl(Node);
1730 EVT VT = Node->getValueType(0);
1731 SDValue Tmp1 = SDValue(Node, 0);
1732 SDValue Tmp2 = SDValue(Node, 1);
1733 SDValue Tmp3 = Node->getOperand(2);
1734 SDValue Chain = Tmp1.getOperand(0);
1735
1736 // Chain the dynamic stack allocation so that it doesn't modify the stack
1737 // pointer when other instructions are using the stack.
1738 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
1739
1740 SDValue Size = Tmp2.getOperand(1);
1741 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
1742 Chain = SP.getValue(1);
1743 Align Alignment = cast<ConstantSDNode>(Tmp3)->getAlignValue();
1744 const TargetFrameLowering *TFL = DAG.getSubtarget().getFrameLowering();
1745 unsigned Opc =
1746 TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ?
1747 ISD::ADD : ISD::SUB;
1748
1749 Align StackAlign = TFL->getStackAlign();
1750 Tmp1 = DAG.getNode(Opc, dl, VT, SP, Size); // Value
1751 if (Alignment > StackAlign)
1752 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
1753 DAG.getConstant(-Alignment.value(), dl, VT));
1754 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
1755
1756 Tmp2 = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
1757
1758 Results.push_back(Tmp1);
1759 Results.push_back(Tmp2);
1760}
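// The expansion above yields, in outline:
//   chain = CALLSEQ_START(chain)
//   sp    = CopyFromReg(stack pointer)
//   tmp   = sp +/- size            (direction follows the stack growth)
//   tmp   = tmp & -align           (only when align exceeds the stack align)
//   chain = CALLSEQ_END(CopyToReg(stack pointer, tmp))
// with {tmp, chain} pushed as the results.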
1761
1762/// Emit a store/load combination to the stack. This stores
1763/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does
1764/// a load from the stack slot to DestVT, extending it if needed.
1765/// The resultant code need not be legal.
1766SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,
1767 EVT DestVT, const SDLoc &dl) {
1768 return EmitStackConvert(SrcOp, SlotVT, DestVT, dl, DAG.getEntryNode());
1769}
1770
1771SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,
1772 EVT DestVT, const SDLoc &dl,
1773 SDValue Chain) {
1774 EVT SrcVT = SrcOp.getValueType();
1775 Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
1776 Align DestAlign = DAG.getDataLayout().getPrefTypeAlign(DestType);
1777
1778 // Don't convert with stack if the load/store is expensive.
1779 if ((SrcVT.bitsGT(SlotVT) &&
1780 !TLI.isTruncStoreLegalOrCustom(SrcOp.getValueType(), SlotVT)) ||
1781 (SlotVT.bitsLT(DestVT) &&
1782 !TLI.isLoadExtLegalOrCustom(ISD::EXTLOAD, DestVT, SlotVT)))
1783 return SDValue();
1784
1785 // Create the stack frame object.
1786 Align SrcAlign = DAG.getDataLayout().getPrefTypeAlign(
1787 SrcOp.getValueType().getTypeForEVT(*DAG.getContext()));
1788 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT.getStoreSize(), SrcAlign);
1789
1790 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
1791 int SPFI = StackPtrFI->getIndex();
1792 MachinePointerInfo PtrInfo =
1793 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
1794
1795 // Emit a store to the stack slot. Use a truncstore if the input value is
1796 // wider than SlotVT.
1797 SDValue Store;
1798
1799 if (SrcVT.bitsGT(SlotVT))
1800 Store = DAG.getTruncStore(Chain, dl, SrcOp, FIPtr, PtrInfo,
1801 SlotVT, SrcAlign);
1802 else {
1803 assert(SrcVT.bitsEq(SlotVT) && "Invalid store");
1804 Store = DAG.getStore(Chain, dl, SrcOp, FIPtr, PtrInfo, SrcAlign);
1805 }
1806
1807 // Result is a load from the stack slot.
1808 if (SlotVT.bitsEq(DestVT))
1809 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, DestAlign);
1810
1811 assert(SlotVT.bitsLT(DestVT) && "Unknown extension!");
1812 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, PtrInfo, SlotVT,
1813 DestAlign);
1814}
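// In short: the store is a truncating store when SrcVT is wider than SlotVT,
// and the reload is an any-extending load when SlotVT is narrower than DestVT;
// otherwise ordinary store/load pairs are emitted.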
1815
1816SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
1817 SDLoc dl(Node);
1818 // Create a vector sized/aligned stack slot, store the value to element #0,
1819 // then load the whole vector back out.
1820 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0));
1821
1822 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr);
1823 int SPFI = StackPtrFI->getIndex();
1824
1825 SDValue Ch = DAG.getTruncStore(
1826 DAG.getEntryNode(), dl, Node->getOperand(0), StackPtr,
1827 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI),
1828 Node->getValueType(0).getVectorElementType());
1829 return DAG.getLoad(
1830 Node->getValueType(0), dl, Ch, StackPtr,
1831 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI));
1832}
1833
1834 static bool
1835 ExpandBVWithShuffles(SDNode *Node, SelectionDAG &DAG,
1836 const TargetLowering &TLI, SDValue &Res) {
1837 unsigned NumElems = Node->getNumOperands();
1838 SDLoc dl(Node);
1839 EVT VT = Node->getValueType(0);
1840
1841 // Try to group the scalars into pairs, shuffle the pairs together, then
1842 // shuffle the pairs of pairs together, etc. until the vector has
1843 // been built. This will work only if all of the necessary shuffle masks
1844 // are legal.
1845
1846 // We do this in two phases; first to check the legality of the shuffles,
1847 // and next, assuming that all shuffles are legal, to create the new nodes.
1848 for (int Phase = 0; Phase < 2; ++Phase) {
1849 SmallVector<std::pair<SDValue, SmallVector<int, 16>>, 16> IntermedVals,
1850 NewIntermedVals;
1851 for (unsigned i = 0; i < NumElems; ++i) {
1852 SDValue V = Node->getOperand(i);
1853 if (V.isUndef())
1854 continue;
1855
1856 SDValue Vec;
1857 if (Phase)
1858 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, V);
1859 IntermedVals.push_back(std::make_pair(Vec, SmallVector<int, 16>(1, i)));
1860 }
1861
1862 while (IntermedVals.size() > 2) {
1863 NewIntermedVals.clear();
1864 for (unsigned i = 0, e = (IntermedVals.size() & ~1u); i < e; i += 2) {
1865 // This vector and the next vector are shuffled together (simply to
1866 // append the one to the other).
1867 SmallVector<int, 16> ShuffleVec(NumElems, -1);
1868
1869 SmallVector<int, 16> FinalIndices;
1870 FinalIndices.reserve(IntermedVals[i].second.size() +
1871 IntermedVals[i+1].second.size());
1872
1873 int k = 0;
1874 for (unsigned j = 0, f = IntermedVals[i].second.size(); j != f;
1875 ++j, ++k) {
1876 ShuffleVec[k] = j;
1877 FinalIndices.push_back(IntermedVals[i].second[j]);
1878 }
1879 for (unsigned j = 0, f = IntermedVals[i+1].second.size(); j != f;
1880 ++j, ++k) {
1881 ShuffleVec[k] = NumElems + j;
1882 FinalIndices.push_back(IntermedVals[i+1].second[j]);
1883 }
1884
1885 SDValue Shuffle;
1886 if (Phase)
1887 Shuffle = DAG.getVectorShuffle(VT, dl, IntermedVals[i].first,
1888 IntermedVals[i+1].first,
1889 ShuffleVec);
1890 else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT))
1891 return false;
1892 NewIntermedVals.push_back(
1893 std::make_pair(Shuffle, std::move(FinalIndices)));
1894 }
1895
1896 // If we had an odd number of defined values, then append the last
1897 // element to the array of new vectors.
1898 if ((IntermedVals.size() & 1) != 0)
1899 NewIntermedVals.push_back(IntermedVals.back());
1900
1901 IntermedVals.swap(NewIntermedVals);
1902 }
1903
1904 assert(IntermedVals.size() <= 2 && IntermedVals.size() > 0 &&
1905 "Invalid number of intermediate vectors");
1906 SDValue Vec1 = IntermedVals[0].first;
1907 SDValue Vec2;
1908 if (IntermedVals.size() > 1)
1909 Vec2 = IntermedVals[1].first;
1910 else if (Phase)
1911 Vec2 = DAG.getUNDEF(VT);
1912
1913 SmallVector<int, 16> ShuffleVec(NumElems, -1);
1914 for (unsigned i = 0, e = IntermedVals[0].second.size(); i != e; ++i)
1915 ShuffleVec[IntermedVals[0].second[i]] = i;
1916 for (unsigned i = 0, e = IntermedVals[1].second.size(); i != e; ++i)
1917 ShuffleVec[IntermedVals[1].second[i]] = NumElems + i;
1918
1919 if (Phase)
1920 Res = DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec);
1921 else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT))
1922 return false;
1923 }
1924
1925 return true;
1926}
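// Illustrative example: for a BUILD_VECTOR of four defined scalars a,b,c,d this
// forms va..vd via SCALAR_TO_VECTOR, shuffles {va,vb} and {vc,vd} into two
// intermediate vectors, then shuffles those two into the final result, provided
// every mask involved is legal for the target (checked in Phase 0).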
1927
1928/// Expand a BUILD_VECTOR node on targets that don't
1929/// support the operation, but do support the resultant vector type.
1930SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
1931 unsigned NumElems = Node->getNumOperands();
1932 SDValue Value1, Value2;
1933 SDLoc dl(Node);
1934 EVT VT = Node->getValueType(0);
1935 EVT OpVT = Node->getOperand(0).getValueType();
1936 EVT EltVT = VT.getVectorElementType();
1937
1938 // If the only non-undef value is the low element, turn this into a
1939 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X.
1940 bool isOnlyLowElement = true;
1941 bool MoreThanTwoValues = false;
1942 bool isConstant = true;
1943 for (unsigned i = 0; i < NumElems; ++i) {
1944 SDValue V = Node->getOperand(i);
1945 if (V.isUndef())
1946 continue;
1947 if (i > 0)
1948 isOnlyLowElement = false;
1949 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
1950 isConstant = false;
1951
1952 if (!Value1.getNode()) {
1953 Value1 = V;
1954 } else if (!Value2.getNode()) {
1955 if (V != Value1)
1956 Value2 = V;
1957 } else if (V != Value1 && V != Value2) {
1958 MoreThanTwoValues = true;
1959 }
1960 }
1961
1962 if (!Value1.getNode())
1963 return DAG.getUNDEF(VT);
1964
1965 if (isOnlyLowElement)
1966 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0));
1967
1968 // If all elements are constants, create a load from the constant pool.
1969 if (isConstant) {
1970 SmallVector<Constant*, 16> CV;
1971 for (unsigned i = 0, e = NumElems; i != e; ++i) {
1972 if (ConstantFPSDNode *V =
1973 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) {
1974 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue()));
1975 } else if (ConstantSDNode *V =
1976 dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
1977 if (OpVT==EltVT)
1978 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue()));
1979 else {
1980 // If OpVT and EltVT don't match, EltVT is not legal and the
1981 // element values have been promoted/truncated earlier. Undo this;
1982 // we don't want a v16i8 to become a v16i32 for example.
1983 const ConstantInt *CI = V->getConstantIntValue();
1984 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()),
1985 CI->getZExtValue()));
1986 }
1987 } else {
1988 assert(Node->getOperand(i).isUndef());
1989 Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
1990 CV.push_back(UndefValue::get(OpNTy));
1991 }
1992 }
1993 Constant *CP = ConstantVector::get(CV);
1994 SDValue CPIdx =
1995 DAG.getConstantPool(CP, TLI.getPointerTy(DAG.getDataLayout()));
1996 Align Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlign();
1997 return DAG.getLoad(
1998 VT, dl, DAG.getEntryNode(), CPIdx,
1999 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
2000 Alignment);
2001 }
2002
2003 SmallSet<SDValue, 16> DefinedValues;
2004 for (unsigned i = 0; i < NumElems; ++i) {
2005 if (Node->getOperand(i).isUndef())
2006 continue;
2007 DefinedValues.insert(Node->getOperand(i));
2008 }
2009
2010 if (TLI.shouldExpandBuildVectorWithShuffles(VT, DefinedValues.size())) {
2011 if (!MoreThanTwoValues) {
2012 SmallVector<int, 8> ShuffleVec(NumElems, -1);
2013 for (unsigned i = 0; i < NumElems; ++i) {
2014 SDValue V = Node->getOperand(i);
2015 if (V.isUndef())
2016 continue;
2017 ShuffleVec[i] = V == Value1 ? 0 : NumElems;
2018 }
2019 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) {
2020 // Get the splatted value into the low element of a vector register.
2021 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1);
2022 SDValue Vec2;
2023 if (Value2.getNode())
2024 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2);
2025 else
2026 Vec2 = DAG.getUNDEF(VT);
2027
2028 // Return shuffle(LowValVec, undef, <0,0,0,0>)
2029 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec);
2030 }
2031 } else {
2032 SDValue Res;
2033 if (ExpandBVWithShuffles(Node, DAG, TLI, Res))
2034 return Res;
2035 }
2036 }
2037
2038 // Otherwise, we can't handle this case efficiently.
2039 return ExpandVectorBuildThroughStack(Node);
2040}
2041
2042SDValue SelectionDAGLegalize::ExpandSPLAT_VECTOR(SDNode *Node) {
2043 SDLoc DL(Node);
2044 EVT VT = Node->getValueType(0);
2045 SDValue SplatVal = Node->getOperand(0);
2046
2047 return DAG.getSplatBuildVector(VT, DL, SplatVal);
2048}
2049
2050// Expand a node into a call to a libcall, returning the value as the first
2051// result and the chain as the second. If the result value does not fit into a
2052 // register, return the lo part and set the hi part via the by-reference
2053 // argument passed first. If it does fit into a single register, return the
2054 // result and leave the Hi part unset.
2055 std::pair<SDValue, SDValue> SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
2056 TargetLowering::ArgListTy &&Args,
2057 bool isSigned) {
2058 EVT CodePtrTy = TLI.getPointerTy(DAG.getDataLayout());
2059 SDValue Callee;
2060 if (const char *LibcallName = TLI.getLibcallName(LC))
2061 Callee = DAG.getExternalSymbol(LibcallName, CodePtrTy);
2062 else {
2063 Callee = DAG.getUNDEF(CodePtrTy);
2064 DAG.getContext()->emitError(Twine("no libcall available for ") +
2065 Node->getOperationName(&DAG));
2066 }
2067
2068 EVT RetVT = Node->getValueType(0);
2069 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
2070
2071 // By default, the input chain to this libcall is the entry node of the
2072 // function. If the libcall is going to be emitted as a tail call then
2073 // TLI.isUsedByReturnOnly will change it to the right chain if the return
2074 // node which is being folded has a non-entry input chain.
2075 SDValue InChain = DAG.getEntryNode();
2076
2077 // isTailCall may be true since the callee does not reference the caller's
2078 // stack frame. Check that it's in the right position and the return types match.
2079 SDValue TCChain = InChain;
2080 const Function &F = DAG.getMachineFunction().getFunction();
2081 bool isTailCall =
2082 TLI.isInTailCallPosition(DAG, Node, TCChain) &&
2083 (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());
2084 if (isTailCall)
2085 InChain = TCChain;
2086
2087 TargetLowering::CallLoweringInfo CLI(DAG);
2088 bool signExtend = TLI.shouldSignExtendTypeInLibCall(RetVT, isSigned);
2089 CLI.setDebugLoc(SDLoc(Node))
2090 .setChain(InChain)
2091 .setLibCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee,
2092 std::move(Args))
2093 .setTailCall(isTailCall)
2094 .setSExtResult(signExtend)
2095 .setZExtResult(!signExtend)
2096 .setIsPostTypeLegalization(true);
2097
2098 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
2099
2100 if (!CallInfo.second.getNode()) {
2101 LLVM_DEBUG(dbgs() << "Created tailcall: "; DAG.getRoot().dump(&DAG));
2102 // It's a tailcall, return the chain (which is the DAG root).
2103 return {DAG.getRoot(), DAG.getRoot()};
2104 }
2105
2106 LLVM_DEBUG(dbgs() << "Created libcall: "; CallInfo.first.dump(&DAG));
2107 return CallInfo;
2108}
2109
2110std::pair<SDValue, SDValue> SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
2111 bool isSigned) {
2112 TargetLowering::ArgListTy Args;
2113 TargetLowering::ArgListEntry Entry;
2114 for (const SDValue &Op : Node->op_values()) {
2115 EVT ArgVT = Op.getValueType();
2116 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2117 Entry.Node = Op;
2118 Entry.Ty = ArgTy;
2119 Entry.IsSExt = TLI.shouldSignExtendTypeInLibCall(ArgVT, isSigned);
2120 Entry.IsZExt = !Entry.IsSExt;
2121 Args.push_back(Entry);
2122 }
2123
2124 return ExpandLibCall(LC, Node, std::move(Args), isSigned);
2125}
2126
2127 void SelectionDAGLegalize::ExpandFrexpLibCall(
2128 SDNode *Node, SmallVectorImpl<SDValue> &Results) {
2129 SDLoc dl(Node);
2130 EVT VT = Node->getValueType(0);
2131 EVT ExpVT = Node->getValueType(1);
2132
2133 SDValue FPOp = Node->getOperand(0);
2134
2135 EVT ArgVT = FPOp.getValueType();
2136 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2137
2138 TargetLowering::ArgListEntry FPArgEntry;
2139 FPArgEntry.Node = FPOp;
2140 FPArgEntry.Ty = ArgTy;
2141
2142 SDValue StackSlot = DAG.CreateStackTemporary(ExpVT);
2143 TargetLowering::ArgListEntry PtrArgEntry;
2144 PtrArgEntry.Node = StackSlot;
2145 PtrArgEntry.Ty = PointerType::get(*DAG.getContext(),
2146 DAG.getDataLayout().getAllocaAddrSpace());
2147
2148 TargetLowering::ArgListTy Args = {FPArgEntry, PtrArgEntry};
2149
2150 RTLIB::Libcall LC = RTLIB::getFREXP(VT);
2151 auto [Call, Chain] = ExpandLibCall(LC, Node, std::move(Args), false);
2152
2153 // FIXME: Get type of int for libcall declaration and cast
2154
2155 int FrameIdx = cast<FrameIndexSDNode>(StackSlot)->getIndex();
2156 auto PtrInfo =
2157 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
2158
2159 SDValue LoadExp = DAG.getLoad(ExpVT, dl, Chain, StackSlot, PtrInfo);
2160 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2161 LoadExp.getValue(1), DAG.getRoot());
2162 DAG.setRoot(OutputChain);
2163
2164 Results.push_back(Call);
2165 Results.push_back(LoadExp);
2166}
2167
2168 void SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
2169 RTLIB::Libcall LC,
2170 SmallVectorImpl<SDValue> &Results) {
2171 if (LC == RTLIB::UNKNOWN_LIBCALL)
2172 llvm_unreachable("Can't create an unknown libcall!");
2173
2174 if (Node->isStrictFPOpcode()) {
2175 EVT RetVT = Node->getValueType(0);
2176 SmallVector<SDValue, 4> Ops(drop_begin(Node->ops()));
2177 TargetLowering::MakeLibCallOptions CallOptions;
2178 // FIXME: This doesn't support tail calls.
2179 std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RetVT,
2180 Ops, CallOptions,
2181 SDLoc(Node),
2182 Node->getOperand(0));
2183 Results.push_back(Tmp.first);
2184 Results.push_back(Tmp.second);
2185 } else {
2186 SDValue Tmp = ExpandLibCall(LC, Node, false).first;
2187 Results.push_back(Tmp);
2188 }
2189}
2190
2191/// Expand the node to a libcall based on the result type.
2192void SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
2193 RTLIB::Libcall Call_F32,
2194 RTLIB::Libcall Call_F64,
2195 RTLIB::Libcall Call_F80,
2196 RTLIB::Libcall Call_F128,
2197 RTLIB::Libcall Call_PPCF128,
2198 SmallVectorImpl<SDValue> &Results) {
2199 RTLIB::Libcall LC = RTLIB::getFPLibCall(Node->getSimpleValueType(0),
2200 Call_F32, Call_F64, Call_F80,
2201 Call_F128, Call_PPCF128);
2202 ExpandFPLibCall(Node, LC, Results);
2203}
2204
2205SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
2206 RTLIB::Libcall Call_I8,
2207 RTLIB::Libcall Call_I16,
2208 RTLIB::Libcall Call_I32,
2209 RTLIB::Libcall Call_I64,
2210 RTLIB::Libcall Call_I128) {
2211 RTLIB::Libcall LC;
2212 switch (Node->getSimpleValueType(0).SimpleTy) {
2213 default: llvm_unreachable("Unexpected request for libcall!");
2214 case MVT::i8: LC = Call_I8; break;
2215 case MVT::i16: LC = Call_I16; break;
2216 case MVT::i32: LC = Call_I32; break;
2217 case MVT::i64: LC = Call_I64; break;
2218 case MVT::i128: LC = Call_I128; break;
2219 }
2220 return ExpandLibCall(LC, Node, isSigned).first;
2221}
2222
2223/// Expand the node to a libcall based on first argument type (for instance
2224/// lround and its variant).
2225void SelectionDAGLegalize::ExpandArgFPLibCall(SDNode* Node,
2226 RTLIB::Libcall Call_F32,
2227 RTLIB::Libcall Call_F64,
2228 RTLIB::Libcall Call_F80,
2229 RTLIB::Libcall Call_F128,
2230 RTLIB::Libcall Call_PPCF128,
2231 SmallVectorImpl<SDValue> &Results) {
2232 EVT InVT = Node->getOperand(Node->isStrictFPOpcode() ? 1 : 0).getValueType();
2233 RTLIB::Libcall LC = RTLIB::getFPLibCall(InVT.getSimpleVT(),
2234 Call_F32, Call_F64, Call_F80,
2235 Call_F128, Call_PPCF128);
2236 ExpandFPLibCall(Node, LC, Results);
2237}
2238
2239/// Issue libcalls to __{u}divmod to compute div / rem pairs.
2240 void
2241 SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
2242 SmallVectorImpl<SDValue> &Results) {
2243 unsigned Opcode = Node->getOpcode();
2244 bool isSigned = Opcode == ISD::SDIVREM;
2245
2246 RTLIB::Libcall LC;
2247 switch (Node->getSimpleValueType(0).SimpleTy) {
2248 default: llvm_unreachable("Unexpected request for libcall!");
2249 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
2250 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
2251 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
2252 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
2253 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
2254 }
2255
2256 // The input chain to this libcall is the entry node of the function.
2257 // Legalizing the call will automatically add the previous call to the
2258 // dependence.
2259 SDValue InChain = DAG.getEntryNode();
2260
2261 EVT RetVT = Node->getValueType(0);
2262 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
2263
2264 TargetLowering::ArgListTy Args;
2265 TargetLowering::ArgListEntry Entry;
2266 for (const SDValue &Op : Node->op_values()) {
2267 EVT ArgVT = Op.getValueType();
2268 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2269 Entry.Node = Op;
2270 Entry.Ty = ArgTy;
2271 Entry.IsSExt = isSigned;
2272 Entry.IsZExt = !isSigned;
2273 Args.push_back(Entry);
2274 }
2275
2276 // Also pass the return address of the remainder.
2277 SDValue FIPtr = DAG.CreateStackTemporary(RetVT);
2278 Entry.Node = FIPtr;
2279 Entry.Ty = PointerType::getUnqual(RetTy->getContext());
2280 Entry.IsSExt = isSigned;
2281 Entry.IsZExt = !isSigned;
2282 Args.push_back(Entry);
2283
2284 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
2285 TLI.getPointerTy(DAG.getDataLayout()));
2286
2287 SDLoc dl(Node);
2288 TargetLowering::CallLoweringInfo CLI(DAG);
2289 CLI.setDebugLoc(dl)
2290 .setChain(InChain)
2291 .setLibCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee,
2292 std::move(Args))
2293 .setSExtResult(isSigned)
2294 .setZExtResult(!isSigned);
2295
2296 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
2297
2298 // Remainder is loaded back from the stack frame.
2299 SDValue Rem =
2300 DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr, MachinePointerInfo());
2301 Results.push_back(CallInfo.first);
2302 Results.push_back(Rem);
2303}
2304
2305 /// Return true if sincos libcall is available.
2306 static bool isSinCosLibcallAvailable(SDNode *Node, const TargetLowering &TLI) {
2307 RTLIB::Libcall LC;
2308 switch (Node->getSimpleValueType(0).SimpleTy) {
2309 default: llvm_unreachable("Unexpected request for libcall!");
2310 case MVT::f32: LC = RTLIB::SINCOS_F32; break;
2311 case MVT::f64: LC = RTLIB::SINCOS_F64; break;
2312 case MVT::f80: LC = RTLIB::SINCOS_F80; break;
2313 case MVT::f128: LC = RTLIB::SINCOS_F128; break;
2314 case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
2315 }
2316 return TLI.getLibcallName(LC) != nullptr;
2317}
2318
2319/// Only issue sincos libcall if both sin and cos are needed.
2320static bool useSinCos(SDNode *Node) {
2321 unsigned OtherOpcode = Node->getOpcode() == ISD::FSIN
2322 ? ISD::FCOS : ISD::FSIN;
2323
2324 SDValue Op0 = Node->getOperand(0);
2325 for (const SDNode *User : Op0.getNode()->uses()) {
2326 if (User == Node)
2327 continue;
2328 // The other user might have been turned into sincos already.
2329 if (User->getOpcode() == OtherOpcode || User->getOpcode() == ISD::FSINCOS)
2330 return true;
2331 }
2332 return false;
2333}
2334
2335/// Issue libcalls to sincos to compute sin / cos pairs.
2336 void
2337 SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
2338 SmallVectorImpl<SDValue> &Results) {
2339 RTLIB::Libcall LC;
2340 switch (Node->getSimpleValueType(0).SimpleTy) {
2341 default: llvm_unreachable("Unexpected request for libcall!");
2342 case MVT::f32: LC = RTLIB::SINCOS_F32; break;
2343 case MVT::f64: LC = RTLIB::SINCOS_F64; break;
2344 case MVT::f80: LC = RTLIB::SINCOS_F80; break;
2345 case MVT::f128: LC = RTLIB::SINCOS_F128; break;
2346 case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
2347 }
2348
2349 // The input chain to this libcall is the entry node of the function.
2350 // Legalizing the call will automatically add the previous call to the
2351 // dependence.
2352 SDValue InChain = DAG.getEntryNode();
2353
2354 EVT RetVT = Node->getValueType(0);
2355 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
2356
2359
2360 // Pass the argument.
2361 Entry.Node = Node->getOperand(0);
2362 Entry.Ty = RetTy;
2363 Entry.IsSExt = false;
2364 Entry.IsZExt = false;
2365 Args.push_back(Entry);
2366
2367 // Pass the return address of sin.
2368 SDValue SinPtr = DAG.CreateStackTemporary(RetVT);
2369 Entry.Node = SinPtr;
2370 Entry.Ty = PointerType::getUnqual(RetTy->getContext());
2371 Entry.IsSExt = false;
2372 Entry.IsZExt = false;
2373 Args.push_back(Entry);
2374
2375 // Also pass the return address of the cos.
2376 SDValue CosPtr = DAG.CreateStackTemporary(RetVT);
2377 Entry.Node = CosPtr;
2378 Entry.Ty = PointerType::getUnqual(RetTy->getContext());
2379 Entry.IsSExt = false;
2380 Entry.IsZExt = false;
2381 Args.push_back(Entry);
2382
2383 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
2384 TLI.getPointerTy(DAG.getDataLayout()));
2385
2386 SDLoc dl(Node);
2387 TargetLowering::CallLoweringInfo CLI(DAG);
2388 CLI.setDebugLoc(dl).setChain(InChain).setLibCallee(
2389 TLI.getLibcallCallingConv(LC), Type::getVoidTy(*DAG.getContext()), Callee,
2390 std::move(Args));
2391
2392 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
2393
2394 Results.push_back(
2395 DAG.getLoad(RetVT, dl, CallInfo.second, SinPtr, MachinePointerInfo()));
2396 Results.push_back(
2397 DAG.getLoad(RetVT, dl, CallInfo.second, CosPtr, MachinePointerInfo()));
2398}
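// The call built above has the C shape sincos(x, &sin_slot, &cos_slot), with
// both output slots allocated on the stack; the two loads pushed afterwards
// return the sin and cos results in that order.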
2399
2400SDValue SelectionDAGLegalize::expandLdexp(SDNode *Node) const {
2401 SDLoc dl(Node);
2402 EVT VT = Node->getValueType(0);
2403 SDValue X = Node->getOperand(0);
2404 SDValue N = Node->getOperand(1);
2405 EVT ExpVT = N.getValueType();
2406 EVT AsIntVT = VT.changeTypeToInteger();
2407 if (AsIntVT == EVT()) // TODO: How to handle f80?
2408 return SDValue();
2409
2410 if (Node->getOpcode() == ISD::STRICT_FLDEXP) // TODO
2411 return SDValue();
2412
2413 SDNodeFlags NSW;
2414 NSW.setNoSignedWrap(true);
2415 SDNodeFlags NUW_NSW;
2416 NUW_NSW.setNoUnsignedWrap(true);
2417 NUW_NSW.setNoSignedWrap(true);
2418
2419 EVT SetCCVT =
2420 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ExpVT);
2421 const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
2422
2423 const APFloat::ExponentType MaxExpVal = APFloat::semanticsMaxExponent(FltSem);
2424 const APFloat::ExponentType MinExpVal = APFloat::semanticsMinExponent(FltSem);
2425 const int Precision = APFloat::semanticsPrecision(FltSem);
2426
2427 const SDValue MaxExp = DAG.getConstant(MaxExpVal, dl, ExpVT);
2428 const SDValue MinExp = DAG.getConstant(MinExpVal, dl, ExpVT);
2429
2430 const SDValue DoubleMaxExp = DAG.getConstant(2 * MaxExpVal, dl, ExpVT);
2431
2432 const APFloat One(FltSem, "1.0");
2433 APFloat ScaleUpK = scalbn(One, MaxExpVal, APFloat::rmNearestTiesToEven);
2434
2435 // Offset by precision to avoid denormal range.
2436 APFloat ScaleDownK =
2437 scalbn(One, MinExpVal + Precision, APFloat::rmNearestTiesToEven);
2438
2439 // TODO: Should really introduce control flow and use a block for the >
2440 // MaxExp, < MinExp cases
2441
2442 // First, handle exponents Exp > MaxExp and scale down.
2443 SDValue NGtMaxExp = DAG.getSetCC(dl, SetCCVT, N, MaxExp, ISD::SETGT);
2444
2445 SDValue DecN0 = DAG.getNode(ISD::SUB, dl, ExpVT, N, MaxExp, NSW);
2446 SDValue ClampMaxVal = DAG.getConstant(3 * MaxExpVal, dl, ExpVT);
2447 SDValue ClampN_Big = DAG.getNode(ISD::SMIN, dl, ExpVT, N, ClampMaxVal);
2448 SDValue DecN1 =
2449 DAG.getNode(ISD::SUB, dl, ExpVT, ClampN_Big, DoubleMaxExp, NSW);
2450
2451 SDValue ScaleUpTwice =
2452 DAG.getSetCC(dl, SetCCVT, N, DoubleMaxExp, ISD::SETUGT);
2453
2454 const SDValue ScaleUpVal = DAG.getConstantFP(ScaleUpK, dl, VT);
2455 SDValue ScaleUp0 = DAG.getNode(ISD::FMUL, dl, VT, X, ScaleUpVal);
2456 SDValue ScaleUp1 = DAG.getNode(ISD::FMUL, dl, VT, ScaleUp0, ScaleUpVal);
2457
2458 SDValue SelectN_Big =
2459 DAG.getNode(ISD::SELECT, dl, ExpVT, ScaleUpTwice, DecN1, DecN0);
2460 SDValue SelectX_Big =
2461 DAG.getNode(ISD::SELECT, dl, VT, ScaleUpTwice, ScaleUp1, ScaleUp0);
2462
2463 // Now handle exponents Exp < MinExp
2464 SDValue NLtMinExp = DAG.getSetCC(dl, SetCCVT, N, MinExp, ISD::SETLT);
2465
2466 SDValue Increment0 = DAG.getConstant(-(MinExpVal + Precision), dl, ExpVT);
2467 SDValue Increment1 = DAG.getConstant(-2 * (MinExpVal + Precision), dl, ExpVT);
2468
2469 SDValue IncN0 = DAG.getNode(ISD::ADD, dl, ExpVT, N, Increment0, NUW_NSW);
2470
2471 SDValue ClampMinVal =
2472 DAG.getConstant(3 * MinExpVal + 2 * Precision, dl, ExpVT);
2473 SDValue ClampN_Small = DAG.getNode(ISD::SMAX, dl, ExpVT, N, ClampMinVal);
2474 SDValue IncN1 =
2475 DAG.getNode(ISD::ADD, dl, ExpVT, ClampN_Small, Increment1, NSW);
2476
2477 const SDValue ScaleDownVal = DAG.getConstantFP(ScaleDownK, dl, VT);
2478 SDValue ScaleDown0 = DAG.getNode(ISD::FMUL, dl, VT, X, ScaleDownVal);
2479 SDValue ScaleDown1 = DAG.getNode(ISD::FMUL, dl, VT, ScaleDown0, ScaleDownVal);
2480
2481 SDValue ScaleDownTwice = DAG.getSetCC(
2482 dl, SetCCVT, N, DAG.getConstant(2 * MinExpVal + Precision, dl, ExpVT),
2483 ISD::SETULT);
2484
2485 SDValue SelectN_Small =
2486 DAG.getNode(ISD::SELECT, dl, ExpVT, ScaleDownTwice, IncN1, IncN0);
2487 SDValue SelectX_Small =
2488 DAG.getNode(ISD::SELECT, dl, VT, ScaleDownTwice, ScaleDown1, ScaleDown0);
2489
2490 // Now combine the two out of range exponent handling cases with the base
2491 // case.
2492 SDValue NewX = DAG.getNode(
2493 ISD::SELECT, dl, VT, NGtMaxExp, SelectX_Big,
2494 DAG.getNode(ISD::SELECT, dl, VT, NLtMinExp, SelectX_Small, X));
2495
2496 SDValue NewN = DAG.getNode(
2497 ISD::SELECT, dl, ExpVT, NGtMaxExp, SelectN_Big,
2498 DAG.getNode(ISD::SELECT, dl, ExpVT, NLtMinExp, SelectN_Small, N));
2499
2500 SDValue BiasedN = DAG.getNode(ISD::ADD, dl, ExpVT, NewN, MaxExp, NSW);
2501
2502 SDValue ExponentShiftAmt =
2503 DAG.getShiftAmountConstant(Precision - 1, ExpVT, dl);
2504 SDValue CastExpToValTy = DAG.getZExtOrTrunc(BiasedN, dl, AsIntVT);
2505
2506 SDValue AsInt = DAG.getNode(ISD::SHL, dl, AsIntVT, CastExpToValTy,
2507 ExponentShiftAmt, NUW_NSW);
2508 SDValue AsFP = DAG.getNode(ISD::BITCAST, dl, VT, AsInt);
2509 return DAG.getNode(ISD::FMUL, dl, VT, NewX, AsFP);
2510}
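// Rough numbers for the scheme above, assuming IEEE single precision: MaxExpVal
// is 127, MinExpVal is -126 and Precision is 24, so ScaleUpK is 0x1.0p+127 and
// ScaleDownK is 0x1.0p-102; exponents outside that range are absorbed by one or
// two extra multiplies before the final 2^N is constructed via the shift above.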
2511
2512SDValue SelectionDAGLegalize::expandFrexp(SDNode *Node) const {
2513 SDLoc dl(Node);
2514 SDValue Val = Node->getOperand(0);
2515 EVT VT = Val.getValueType();
2516 EVT ExpVT = Node->getValueType(1);
2517 EVT AsIntVT = VT.changeTypeToInteger();
2518 if (AsIntVT == EVT()) // TODO: How to handle f80?
2519 return SDValue();
2520
2521 const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
2522 const APFloat::ExponentType MinExpVal = APFloat::semanticsMinExponent(FltSem);
2523 const unsigned Precision = APFloat::semanticsPrecision(FltSem);
2524 const unsigned BitSize = VT.getScalarSizeInBits();
2525
2526 // TODO: Could introduce control flow and skip over the denormal handling.
2527
2528 // scale_up = fmul value, scalbn(1.0, precision + 1)
2529 // extracted_exp = (bitcast value to uint) >> precision - 1
2530 // biased_exp = extracted_exp + min_exp
2531 // extracted_fract = (bitcast value to uint) & (fract_mask | sign_mask)
2532 //
2533 // is_denormal = val < smallest_normalized
2534 // computed_fract = is_denormal ? scale_up : extracted_fract
2535 // computed_exp = is_denormal ? biased_exp + (-precision - 1) : biased_exp
2536 //
2537 // result_0 = (!isfinite(val) || iszero(val)) ? val : computed_fract
2538 // result_1 = (!isfinite(val) || iszero(val)) ? 0 : computed_exp
2539
2540 SDValue NegSmallestNormalizedInt = DAG.getConstant(
2541 APFloat::getSmallestNormalized(FltSem, true).bitcastToAPInt(), dl,
2542 AsIntVT);
2543
2544 SDValue SmallestNormalizedInt = DAG.getConstant(
2545 APFloat::getSmallestNormalized(FltSem, false).bitcastToAPInt(), dl,
2546 AsIntVT);
2547
2548 // Masks out the exponent bits.
2549 SDValue ExpMask =
2550 DAG.getConstant(APFloat::getInf(FltSem).bitcastToAPInt(), dl, AsIntVT);
2551
2552 // Mask out the exponent part of the value.
2553 //
2554 // e.g., for f32 FractSignMaskVal = 0x807fffff
2555 APInt FractSignMaskVal = APInt::getBitsSet(BitSize, 0, Precision - 1);
2556 FractSignMaskVal.setBit(BitSize - 1); // Set the sign bit
2557
2558 APInt SignMaskVal = APInt::getSignedMaxValue(BitSize);
2559 SDValue SignMask = DAG.getConstant(SignMaskVal, dl, AsIntVT);
2560
2561 SDValue FractSignMask = DAG.getConstant(FractSignMaskVal, dl, AsIntVT);
2562
2563 const APFloat One(FltSem, "1.0");
2564 // Scale a possible denormal input.
2565 // e.g., for f64, 0x1p+54
2566 APFloat ScaleUpKVal =
2567 scalbn(One, Precision + 1, APFloat::rmNearestTiesToEven);
2568
2569 SDValue ScaleUpK = DAG.getConstantFP(ScaleUpKVal, dl, VT);
2570 SDValue ScaleUp = DAG.getNode(ISD::FMUL, dl, VT, Val, ScaleUpK);
2571
2572 EVT SetCCVT =
2573 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2574
2575 SDValue AsInt = DAG.getNode(ISD::BITCAST, dl, AsIntVT, Val);
2576
2577 SDValue Abs = DAG.getNode(ISD::AND, dl, AsIntVT, AsInt, SignMask);
2578
2579 SDValue AddNegSmallestNormal =
2580 DAG.getNode(ISD::ADD, dl, AsIntVT, Abs, NegSmallestNormalizedInt);
2581 SDValue DenormOrZero = DAG.getSetCC(dl, SetCCVT, AddNegSmallestNormal,
2582 NegSmallestNormalizedInt, ISD::SETULE);
2583
2584 SDValue IsDenormal =
2585 DAG.getSetCC(dl, SetCCVT, Abs, SmallestNormalizedInt, ISD::SETULT);
2586
2587 SDValue MinExp = DAG.getConstant(MinExpVal, dl, ExpVT);
2588 SDValue Zero = DAG.getConstant(0, dl, ExpVT);
2589
2590 SDValue ScaledAsInt = DAG.getNode(ISD::BITCAST, dl, AsIntVT, ScaleUp);
2591 SDValue ScaledSelect =
2592 DAG.getNode(ISD::SELECT, dl, AsIntVT, IsDenormal, ScaledAsInt, AsInt);
2593
2594 SDValue ExpMaskScaled =
2595 DAG.getNode(ISD::AND, dl, AsIntVT, ScaledAsInt, ExpMask);
2596
2597 SDValue ScaledValue =
2598 DAG.getNode(ISD::SELECT, dl, AsIntVT, IsDenormal, ExpMaskScaled, Abs);
2599
2600 // Extract the exponent bits.
2601 SDValue ExponentShiftAmt =
2602 DAG.getShiftAmountConstant(Precision - 1, AsIntVT, dl);
2603 SDValue ShiftedExp =
2604 DAG.getNode(ISD::SRL, dl, AsIntVT, ScaledValue, ExponentShiftAmt);
2605 SDValue Exp = DAG.getSExtOrTrunc(ShiftedExp, dl, ExpVT);
2606
2607 SDValue NormalBiasedExp = DAG.getNode(ISD::ADD, dl, ExpVT, Exp, MinExp);
2608 SDValue DenormalOffset = DAG.getConstant(-Precision - 1, dl, ExpVT);
2609 SDValue DenormalExpBias =
2610 DAG.getNode(ISD::SELECT, dl, ExpVT, IsDenormal, DenormalOffset, Zero);
2611
2612 SDValue MaskedFractAsInt =
2613 DAG.getNode(ISD::AND, dl, AsIntVT, ScaledSelect, FractSignMask);
2614 const APFloat Half(FltSem, "0.5");
2615 SDValue FPHalf = DAG.getConstant(Half.bitcastToAPInt(), dl, AsIntVT);
2616 SDValue Or = DAG.getNode(ISD::OR, dl, AsIntVT, MaskedFractAsInt, FPHalf);
2617 SDValue MaskedFract = DAG.getNode(ISD::BITCAST, dl, VT, Or);
2618
2619 SDValue ComputedExp =
2620 DAG.getNode(ISD::ADD, dl, ExpVT, NormalBiasedExp, DenormalExpBias);
2621
2622 SDValue Result0 =
2623 DAG.getNode(ISD::SELECT, dl, VT, DenormOrZero, Val, MaskedFract);
2624
2625 SDValue Result1 =
2626 DAG.getNode(ISD::SELECT, dl, ExpVT, DenormOrZero, Zero, ComputedExp);
2627
2628 return DAG.getMergeValues({Result0, Result1}, dl);
2629}
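// Quick sanity check (illustrative only): frexp(8.0) should yield a fraction of
// 0.5 and an exponent of 4, since 8.0 == 0.5 * 2^4; denormal inputs are first
// multiplied by 2^(Precision + 1) so the same exponent/fraction extraction
// applies, with the bias compensated through DenormalExpBias above.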
2630
2631 /// This function is responsible for legalizing an
2632/// INT_TO_FP operation of the specified operand when the target requests that
2633/// we expand it. At this point, we know that the result and operand types are
2634/// legal for the target.
2635SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(SDNode *Node,
2636 SDValue &Chain) {
2637 bool isSigned = (Node->getOpcode() == ISD::STRICT_SINT_TO_FP ||
2638 Node->getOpcode() == ISD::SINT_TO_FP);
2639 EVT DestVT = Node->getValueType(0);
2640 SDLoc dl(Node);
2641 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
2642 SDValue Op0 = Node->getOperand(OpNo);
2643 EVT SrcVT = Op0.getValueType();
2644
2645 // TODO: Should any fast-math-flags be set for the created nodes?
2646 LLVM_DEBUG(dbgs() << "Legalizing INT_TO_FP\n");
2647 if (SrcVT == MVT::i32 && TLI.isTypeLegal(MVT::f64) &&
2648 (DestVT.bitsLE(MVT::f64) ||
2649 TLI.isOperationLegal(Node->isStrictFPOpcode() ? ISD::STRICT_FP_EXTEND
2650 : ISD::FP_EXTEND,
2651 DestVT))) {
2652 LLVM_DEBUG(dbgs() << "32-bit [signed|unsigned] integer to float/double "
2653 "expansion\n");
2654
2655 // Get the stack frame index of an 8-byte buffer.
2656 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64);
2657
2658 SDValue Lo = Op0;
2659 // If the input is signed, map it into unsigned space first.
2660 if (isSigned) {
2661 // Invert sign bit (signed to unsigned mapping).
2662 Lo = DAG.getNode(ISD::XOR, dl, MVT::i32, Lo,
2663 DAG.getConstant(0x80000000u, dl, MVT::i32));
2664 }
2665 // Initial hi portion of constructed double.
2666 SDValue Hi = DAG.getConstant(0x43300000u, dl, MVT::i32);
2667
2668 // If this is a big-endian target, swap the lo and hi data.
2669 if (DAG.getDataLayout().isBigEndian())
2670 std::swap(Lo, Hi);
2671
2672 SDValue MemChain = DAG.getEntryNode();
2673
2674 // Store the lo of the constructed double.
2675 SDValue Store1 = DAG.getStore(MemChain, dl, Lo, StackSlot,
2676 MachinePointerInfo());
2677 // Store the hi of the constructed double.
2678 SDValue HiPtr =
2679 DAG.getMemBasePlusOffset(StackSlot, TypeSize::getFixed(4), dl);
2680 SDValue Store2 =
2681 DAG.getStore(MemChain, dl, Hi, HiPtr, MachinePointerInfo());
2682 MemChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
2683
2684 // load the constructed double
2685 SDValue Load =
2686 DAG.getLoad(MVT::f64, dl, MemChain, StackSlot, MachinePointerInfo());
2687 // FP constant to bias correct the final result
2688 SDValue Bias = DAG.getConstantFP(
2689 isSigned ? llvm::bit_cast<double>(0x4330000080000000ULL)
2690 : llvm::bit_cast<double>(0x4330000000000000ULL),
2691 dl, MVT::f64);
2692 // Subtract the bias and get the final result.
2693 SDValue Sub;
2694 SDValue Result;
2695 if (Node->isStrictFPOpcode()) {
2696 Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
2697 {Node->getOperand(0), Load, Bias});
2698 Chain = Sub.getValue(1);
2699 if (DestVT != Sub.getValueType()) {
2700 std::pair<SDValue, SDValue> ResultPair;
2701 ResultPair =
2702 DAG.getStrictFPExtendOrRound(Sub, Chain, dl, DestVT);
2703 Result = ResultPair.first;
2704 Chain = ResultPair.second;
2705 }
2706 else
2707 Result = Sub;
2708 } else {
2709 Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias);
2710 Result = DAG.getFPExtendOrRound(Sub, dl, DestVT);
2711 }
2712 return Result;
2713 }
2714
2715 if (isSigned)
2716 return SDValue();
2717
2718 // TODO: Generalize this for use with other types.
2719 if (((SrcVT == MVT::i32 || SrcVT == MVT::i64) && DestVT == MVT::f32) ||
2720 (SrcVT == MVT::i64 && DestVT == MVT::f64)) {
2721 LLVM_DEBUG(dbgs() << "Converting unsigned i32/i64 to f32/f64\n");
2722 // For unsigned conversions, convert them to signed conversions using the
2723 // algorithm from the x86_64 __floatundisf in compiler_rt. That method
2724 // should be valid for i32->f32 as well.
2725
2726 // More generally this transform should be valid if there are 3 more bits
2727 // in the integer type than the significand. Rounding uses the first bit
2728 // after the width of the significand and the OR of all bits after that. So
2729 // we need to be able to OR the shifted out bit into one of the bits that
2730 // participate in the OR.
2731
2732 // TODO: This really should be implemented using a branch rather than a
2733 // select. We happen to get lucky and machinesink does the right
2734 // thing most of the time. This would be a good candidate for a
2735 // pseudo-op, or, even better, for whole-function isel.
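// In outline, for the non-strict case the expansion below computes:
//   Fast   = sint_to_fp(x)                      // valid when the sign bit is clear
//   Slow   = 2 * sint_to_fp((x >> 1) | (x & 1)) // halves x, keeps rounding via the OR'd low bit
//   result = select(x <s 0, Slow, Fast)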
2736 EVT SetCCVT = getSetCCResultType(SrcVT);
2737
2738 SDValue SignBitTest = DAG.getSetCC(
2739 dl, SetCCVT, Op0, DAG.getConstant(0, dl, SrcVT), ISD::SETLT);
2740
2741 EVT ShiftVT = TLI.getShiftAmountTy(SrcVT, DAG.getDataLayout());
2742 SDValue ShiftConst = DAG.getConstant(1, dl, ShiftVT);
2743 SDValue Shr = DAG.getNode(ISD::SRL, dl, SrcVT, Op0, ShiftConst);
2744 SDValue AndConst = DAG.getConstant(1, dl, SrcVT);
2745 SDValue And = DAG.getNode(ISD::AND, dl, SrcVT, Op0, AndConst);
2746 SDValue Or = DAG.getNode(ISD::OR, dl, SrcVT, And, Shr);
2747
2748 SDValue Slow, Fast;
2749 if (Node->isStrictFPOpcode()) {
2750 // In strict mode, we must avoid spurious exceptions, and therefore
2751 // must make sure to only emit a single STRICT_SINT_TO_FP.
2752 SDValue InCvt = DAG.getSelect(dl, SrcVT, SignBitTest, Or, Op0);
2753 Fast = DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, { DestVT, MVT::Other },
2754 { Node->getOperand(0), InCvt });
2755 Slow = DAG.getNode(ISD::STRICT_FADD, dl, { DestVT, MVT::Other },
2756 { Fast.getValue(1), Fast, Fast });
2757 Chain = Slow.getValue(1);
2758 // The STRICT_SINT_TO_FP inherits the exception mode from the
2759 // incoming STRICT_UINT_TO_FP node; the STRICT_FADD node can
2760 // never raise any exception.
2761 SDNodeFlags Flags;
2762 Flags.setNoFPExcept(Node->getFlags().hasNoFPExcept());
2763 Fast->setFlags(Flags);
2764 Flags.setNoFPExcept(true);
2765 Slow->setFlags(Flags);
2766 } else {
2767 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Or);
2768 Slow = DAG.getNode(ISD::FADD, dl, DestVT, SignCvt, SignCvt);
2769 Fast = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);
2770 }
2771
2772 return DAG.getSelect(dl, DestVT, SignBitTest, Slow, Fast);
2773 }
2774
2775 // Don't expand it if there isn't cheap fadd.
2776 if (!TLI.isOperationLegalOrCustom(
2777 Node->isStrictFPOpcode() ? ISD::STRICT_FADD : ISD::FADD, DestVT))
2778 return SDValue();
2779
2780 // The following optimization is valid only if every value in SrcVT (when
2781 // treated as signed) is representable in DestVT. Check that the mantissa
2782 // size of DestVT is at least the number of bits in SrcVT minus 1.
2783 assert(APFloat::semanticsPrecision(DAG.EVTToAPFloatSemantics(DestVT)) >=
2784 SrcVT.getSizeInBits() - 1 &&
2785 "Cannot perform lossless SINT_TO_FP!");
2786
2787 SDValue Tmp1;
2788 if (Node->isStrictFPOpcode()) {
2789 Tmp1 = DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, { DestVT, MVT::Other },
2790 { Node->getOperand(0), Op0 });
2791 } else
2792 Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);
2793
2794 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(SrcVT), Op0,
2795 DAG.getConstant(0, dl, SrcVT), ISD::SETLT);
2796 SDValue Zero = DAG.getIntPtrConstant(0, dl),
2797 Four = DAG.getIntPtrConstant(4, dl);
2798 SDValue CstOffset = DAG.getSelect(dl, Zero.getValueType(),
2799 SignSet, Four, Zero);
2800
2801 // If the sign bit of the integer is set, the large number will be treated
2802 // as a negative number. To counteract this, the dynamic code adds an
2803 // offset depending on the data type.
2804 uint64_t FF;
2805 switch (SrcVT.getSimpleVT().SimpleTy) {
2806 default:
2807 return SDValue();
2808 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float)
2809 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float)
2810 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
2811 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float)
2812 }
2813 if (DAG.getDataLayout().isLittleEndian())
2814 FF <<= 32;
2815 Constant *FudgeFactor = ConstantInt::get(
2816 Type::getInt64Ty(*DAG.getContext()), FF);
2817
2818 SDValue CPIdx =
2819 DAG.getConstantPool(FudgeFactor, TLI.getPointerTy(DAG.getDataLayout()));
2820 Align Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlign();
2821 CPIdx = DAG.getNode(ISD::ADD, dl, CPIdx.getValueType(), CPIdx, CstOffset);
2822 Alignment = commonAlignment(Alignment, 4);
2823 SDValue FudgeInReg;
2824 if (DestVT == MVT::f32)
2825 FudgeInReg = DAG.getLoad(
2826 MVT::f32, dl, DAG.getEntryNode(), CPIdx,
2827 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
2828 Alignment);
2829 else {
2830 SDValue Load = DAG.getExtLoad(
2831 ISD::EXTLOAD, dl, DestVT, DAG.getEntryNode(), CPIdx,
2832 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
2833 Alignment);
2834 HandleSDNode Handle(Load);
2835 LegalizeOp(Load.getNode());
2836 FudgeInReg = Handle.getValue();
2837 }
2838
2839 if (Node->isStrictFPOpcode()) {
2840 SDValue Result = DAG.getNode(ISD::STRICT_FADD, dl, { DestVT, MVT::Other },
2841 { Tmp1.getValue(1), Tmp1, FudgeInReg });
2842 Chain = Result.getValue(1);
2843 return Result;
2844 }
2845
2846 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg);
2847}
2848
2849/// This function is responsible for legalizing a
2850/// *INT_TO_FP operation of the specified operand when the target requests that
2851/// we promote it. At this point, we know that the result and operand types are
2852/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
2853/// operation that takes a larger input.
2854 void SelectionDAGLegalize::PromoteLegalINT_TO_FP(
2855 SDNode *N, const SDLoc &dl, SmallVectorImpl<SDValue> &Results) {
2856 bool IsStrict = N->isStrictFPOpcode();
2857 bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
2858 N->getOpcode() == ISD::STRICT_SINT_TO_FP;
2859 EVT DestVT = N->getValueType(0);
2860 SDValue LegalOp = N->getOperand(IsStrict ? 1 : 0);
2861 unsigned UIntOp = IsStrict ? ISD::STRICT_UINT_TO_FP : ISD::UINT_TO_FP;
2862 unsigned SIntOp = IsStrict ? ISD::STRICT_SINT_TO_FP : ISD::SINT_TO_FP;
2863
2864 // First step, figure out the appropriate *INT_TO_FP operation to use.
2865 EVT NewInTy = LegalOp.getValueType();
2866
2867 unsigned OpToUse = 0;
2868
2869 // Scan for the appropriate larger type to use.
2870 while (true) {
2871 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1);
2872 assert(NewInTy.isInteger() && "Ran out of possibilities!");
2873
2874 // If the target supports SINT_TO_FP of this type, use it.
2875 if (TLI.isOperationLegalOrCustom(SIntOp, NewInTy)) {
2876 OpToUse = SIntOp;
2877 break;
2878 }
2879 if (IsSigned)
2880 continue;
2881
2882 // If the target supports UINT_TO_FP of this type, use it.
2883 if (TLI.isOperationLegalOrCustom(UIntOp, NewInTy)) {
2884 OpToUse = UIntOp;
2885 break;
2886 }
2887
2888 // Otherwise, try a larger type.
2889 }
2890
2891 // Okay, we found the operation and type to use. Sign- or zero-extend the
2892 // input to the desired type, then run the operation on it.
2893 if (IsStrict) {
2894 SDValue Res =
2895 DAG.getNode(OpToUse, dl, {DestVT, MVT::Other},
2896 {N->getOperand(0),
2897 DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
2898 dl, NewInTy, LegalOp)});
2899 Results.push_back(Res);
2900 Results.push_back(Res.getValue(1));
2901 return;
2902 }
2903
2904 Results.push_back(
2905 DAG.getNode(OpToUse, dl, DestVT,
2906 DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
2907 dl, NewInTy, LegalOp)));
2908}
2909
2910 /// This function is responsible for legalizing an
2911/// FP_TO_*INT operation of the specified operand when the target requests that
2912/// we promote it. At this point, we know that the result and operand types are
2913/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
2914/// operation that returns a larger result.
2915 void SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDNode *N, const SDLoc &dl,
2916 SmallVectorImpl<SDValue> &Results) {
2917 bool IsStrict = N->isStrictFPOpcode();
2918 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
2919 N->getOpcode() == ISD::STRICT_FP_TO_SINT;
2920 EVT DestVT = N->getValueType(0);
2921 SDValue LegalOp = N->getOperand(IsStrict ? 1 : 0);
2922 // First step, figure out the appropriate FP_TO*INT operation to use.
2923 EVT NewOutTy = DestVT;
2924
2925 unsigned OpToUse = 0;
2926
2927 // Scan for the appropriate larger type to use.
2928 while (true) {
2929 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1);
2930 assert(NewOutTy.isInteger() && "Ran out of possibilities!");
2931
2932 // A larger signed type can hold all unsigned values of the requested type,
2933 // so using FP_TO_SINT is valid
2934 OpToUse = IsStrict ? ISD::STRICT_FP_TO_SINT : ISD::FP_TO_SINT;
2935 if (TLI.isOperationLegalOrCustom(OpToUse, NewOutTy))
2936 break;
2937
2938 // However, if the value may be < 0.0, we *must* use some FP_TO_SINT.
2939 OpToUse = IsStrict ? ISD::STRICT_FP_TO_UINT : ISD::FP_TO_UINT;
2940 if (!IsSigned && TLI.isOperationLegalOrCustom(OpToUse, NewOutTy))
2941 break;
2942
2943 // Otherwise, try a larger type.
2944 }
2945
2946 // Okay, we found the operation and type to use.
2947 SDValue Operation;
2948 if (IsStrict) {
2949 SDVTList VTs = DAG.getVTList(NewOutTy, MVT::Other);
2950 Operation = DAG.getNode(OpToUse, dl, VTs, N->getOperand(0), LegalOp);
2951 } else
2952 Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp);
2953
2954 // Truncate the result of the extended FP_TO_*INT operation to the desired
2955 // size.
2956 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation);
2957 Results.push_back(Trunc);
2958 if (IsStrict)
2959 Results.push_back(Operation.getValue(1));
2960}
2961
2962/// Promote FP_TO_*INT_SAT operation to a larger result type. At this point
2963/// the result and operand types are legal and there must be a legal
2964/// FP_TO_*INT_SAT operation for a larger result type.
2965SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT_SAT(SDNode *Node,
2966 const SDLoc &dl) {
2967 unsigned Opcode = Node->getOpcode();
2968
2969 // Scan for the appropriate larger type to use.
2970 EVT NewOutTy = Node->getValueType(0);
2971 while (true) {
2972 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy + 1);
2973 assert(NewOutTy.isInteger() && "Ran out of possibilities!");
2974
2975 if (TLI.isOperationLegalOrCustom(Opcode, NewOutTy))
2976 break;
2977 }
2978
2979 // Saturation width is determined by second operand, so we don't have to
2980 // perform any fixup and can directly truncate the result.
2981 SDValue Result = DAG.getNode(Opcode, dl, NewOutTy, Node->getOperand(0),
2982 Node->getOperand(1));
2983 return DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Result);
2984}
2985
2986 /// Open-code the operations for PARITY of the specified value.
2987SDValue SelectionDAGLegalize::ExpandPARITY(SDValue Op, const SDLoc &dl) {
2988 EVT VT = Op.getValueType();
2989 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
2990 unsigned Sz = VT.getScalarSizeInBits();
2991
2992 // If CTPOP is legal, use it. Otherwise use shifts and xor.
2993 SDValue Result;
2994 if (TLI.isOperationLegalOrPromote(ISD::CTPOP, VT)) {
2995 Result = DAG.getNode(ISD::CTPOP, dl, VT, Op);
2996 } else {
2997 Result = Op;
2998 for (unsigned i = Log2_32_Ceil(Sz); i != 0;) {
2999 SDValue Shift = DAG.getNode(ISD::SRL, dl, VT, Result,
3000 DAG.getConstant(1ULL << (--i), dl, ShVT));
3001 Result = DAG.getNode(ISD::XOR, dl, VT, Result, Shift);
3002 }
3003 }
3004
3005 return DAG.getNode(ISD::AND, dl, VT, Result, DAG.getConstant(1, dl, VT));
3006}
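// Without a legal CTPOP the loop above folds the bits together; e.g. for an i8
// value it emits x ^= x >> 4; x ^= x >> 2; x ^= x >> 1; and returns x & 1.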
3007
3008SDValue SelectionDAGLegalize::PromoteReduction(SDNode *Node) {
3009 MVT VecVT = Node->getOperand(1).getSimpleValueType();
3010 MVT NewVecVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VecVT);
3011 MVT ScalarVT = Node->getSimpleValueType(0);
3012 MVT NewScalarVT = NewVecVT.getVectorElementType();
3013
3014 SDLoc DL(Node);
3015 SmallVector<SDValue, 4> Operands(Node->getNumOperands());
3016
3017 // promote the initial value.
3018 // FIXME: Support integer.
3019 assert(Node->getOperand(0).getValueType().isFloatingPoint() &&
3020 "Only FP promotion is supported");
3021 Operands[0] =
3022 DAG.getNode(ISD::FP_EXTEND, DL, NewScalarVT, Node->getOperand(0));
3023
3024 for (unsigned j = 1; j != Node->getNumOperands(); ++j)
3025 if (Node->getOperand(j).getValueType().isVector() &&
3026 !(ISD::isVPOpcode(Node->getOpcode()) &&
3027 ISD::getVPMaskIdx(Node->getOpcode()) == j)) { // Skip mask operand.
3028 // promote the vector operand.
3029 // FIXME: Support integer.
3030 assert(Node->getOperand(j).getValueType().isFloatingPoint() &&
3031 "Only FP promotion is supported");
3032 Operands[j] =
3033 DAG.getNode(ISD::FP_EXTEND, DL, NewVecVT, Node->getOperand(j));
3034 } else {
3035 Operands[j] = Node->getOperand(j); // Skip VL operand.
3036 }
3037
3038 SDValue Res = DAG.getNode(Node->getOpcode(), DL, NewScalarVT, Operands,
3039 Node->getFlags());
3040
3041 assert(ScalarVT.isFloatingPoint() && "Only FP promotion is supported");
3042 return DAG.getNode(ISD::FP_ROUND, DL, ScalarVT, Res,
3043 DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
3044}
3045
3046bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
3047 LLVM_DEBUG(dbgs() << "Trying to expand node\n");
3048 SmallVector<SDValue, 8> Results;
3049 SDLoc dl(Node);
3050 SDValue Tmp1, Tmp2, Tmp3, Tmp4;
3051 bool NeedInvert;
3052 switch (Node->getOpcode()) {
3053 case ISD::ABS:
3054 if ((Tmp1 = TLI.expandABS(Node, DAG)))
3055 Results.push_back(Tmp1);
3056 break;
3057 case ISD::ABDS:
3058 case ISD::ABDU:
3059 if ((Tmp1 = TLI.expandABD(Node, DAG)))
3060 Results.push_back(Tmp1);
3061 break;
3062 case ISD::AVGCEILS:
3063 case ISD::AVGCEILU:
3064 case ISD::AVGFLOORS:
3065 case ISD::AVGFLOORU:
3066 if ((Tmp1 = TLI.expandAVG(Node, DAG)))
3067 Results.push_back(Tmp1);
3068 break;
3069 case ISD::CTPOP:
3070 if ((Tmp1 = TLI.expandCTPOP(Node, DAG)))
3071 Results.push_back(Tmp1);
3072 break;
3073 case ISD::CTLZ:
3074 case ISD::CTLZ_ZERO_UNDEF:
3075 if ((Tmp1 = TLI.expandCTLZ(Node, DAG)))
3076 Results.push_back(Tmp1);
3077 break;
3078 case ISD::CTTZ:
3079 case ISD::CTTZ_ZERO_UNDEF:
3080 if ((Tmp1 = TLI.expandCTTZ(Node, DAG)))
3081 Results.push_back(Tmp1);
3082 break;
3083 case ISD::BITREVERSE:
3084 if ((Tmp1 = TLI.expandBITREVERSE(Node, DAG)))
3085 Results.push_back(Tmp1);
3086 break;
3087 case ISD::BSWAP:
3088 if ((Tmp1 = TLI.expandBSWAP(Node, DAG)))
3089 Results.push_back(Tmp1);
3090 break;
3091 case ISD::PARITY:
3092 Results.push_back(ExpandPARITY(Node->getOperand(0), dl));
3093 break;
3094 case ISD::FRAMEADDR:
3095 case ISD::RETURNADDR:
3096 case ISD::FRAME_TO_ARGS_OFFSET:
3097 Results.push_back(DAG.getConstant(0, dl, Node->getValueType(0)));
3098 break;
3099 case ISD::EH_DWARF_CFA: {
3100 SDValue CfaArg = DAG.getSExtOrTrunc(Node->getOperand(0), dl,
3101 TLI.getPointerTy(DAG.getDataLayout()));
3102 SDValue Offset = DAG.getNode(ISD::ADD, dl,
3103 CfaArg.getValueType(),
3104 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
3105 CfaArg.getValueType()),
3106 CfaArg);
3107 SDValue FA = DAG.getNode(
3108 ISD::FRAMEADDR, dl, TLI.getPointerTy(DAG.getDataLayout()),
3109 DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout())));
3110 Results.push_back(DAG.getNode(ISD::ADD, dl, FA.getValueType(),
3111 FA, Offset));
3112 break;
3113 }
3114 case ISD::GET_ROUNDING:
3115 Results.push_back(DAG.getConstant(1, dl, Node->getValueType(0)));
3116 Results.push_back(Node->getOperand(0));
3117 break;
3118 case ISD::EH_RETURN:
3119 case ISD::EH_LABEL:
3120 case ISD::PREFETCH:
3121 case ISD::VAEND:
3122 case ISD::EH_SJLJ_LONGJMP:
3123 // If the target didn't expand these, there's nothing to do, so just
3124 // preserve the chain and be done.
3125 Results.push_back(Node->getOperand(0));
3126 break;
3127 case ISD::READCYCLECOUNTER:
3128 case ISD::READSTEADYCOUNTER:
3129 // If the target didn't expand this, just return 'zero' and preserve the
3130 // chain.
3131 Results.append(Node->getNumValues() - 1,
3132 DAG.getConstant(0, dl, Node->getValueType(0)));
3133 Results.push_back(Node->getOperand(0));
3134 break;
3135 case ISD::EH_SJLJ_SETJMP:
3136 // If the target didn't expand this, just return 'zero' and preserve the
3137 // chain.
3138 Results.push_back(DAG.getConstant(0, dl, MVT::i32));
3139 Results.push_back(Node->getOperand(0));
3140 break;
3141 case ISD::ATOMIC_LOAD: {
3142 // There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP.
3143 SDValue Zero = DAG.getConstant(0, dl, Node->getValueType(0));
3144 SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
3145 SDValue Swap = DAG.getAtomicCmpSwap(
3146 ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs,
3147 Node->getOperand(0), Node->getOperand(1), Zero, Zero,
3148 cast<AtomicSDNode>(Node)->getMemOperand());
3149 Results.push_back(Swap.getValue(0));
3150 Results.push_back(Swap.getValue(1));
3151 break;
3152 }
3153 case ISD::ATOMIC_STORE: {
3154 // There is no libcall for atomic store; fake it with ATOMIC_SWAP.
3155 SDValue Swap = DAG.getAtomic(
3156 ISD::ATOMIC_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(),
3157 Node->getOperand(0), Node->getOperand(2), Node->getOperand(1),
3158 cast<AtomicSDNode>(Node)->getMemOperand());
3159 Results.push_back(Swap.getValue(1));
3160 break;
3161 }
3162 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
3163 // Expanding an ATOMIC_CMP_SWAP_WITH_SUCCESS produces an ATOMIC_CMP_SWAP and
3164 // splits out the success value as a comparison. Expanding the resulting
3165 // ATOMIC_CMP_SWAP will produce a libcall.
3166 SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
3167 SDValue Res = DAG.getAtomicCmpSwap(
3168 ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs,
3169 Node->getOperand(0), Node->getOperand(1), Node->getOperand(2),
3170 Node->getOperand(3), cast<MemSDNode>(Node)->getMemOperand());
3171
3172 SDValue ExtRes = Res;
3173 SDValue LHS = Res;
3174 SDValue RHS = Node->getOperand(1);
3175
3176 EVT AtomicType = cast<AtomicSDNode>(Node)->getMemoryVT();
3177 EVT OuterType = Node->getValueType(0);
3178 switch (TLI.getExtendForAtomicOps()) {
3179 case ISD::SIGN_EXTEND:
3180 LHS = DAG.getNode(ISD::AssertSext, dl, OuterType, Res,
3181 DAG.getValueType(AtomicType));
3182 RHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, OuterType,
3183 Node->getOperand(2), DAG.getValueType(AtomicType));
3184 ExtRes = LHS;
3185 break;
3186 case ISD::ZERO_EXTEND:
3187 LHS = DAG.getNode(ISD::AssertZext, dl, OuterType, Res,
3188 DAG.getValueType(AtomicType));
3189 RHS = DAG.getZeroExtendInReg(Node->getOperand(2), dl, AtomicType);
3190 ExtRes = LHS;
3191 break;
3192 case ISD::ANY_EXTEND:
3193 LHS = DAG.getZeroExtendInReg(Res, dl, AtomicType);
3194 RHS = DAG.getZeroExtendInReg(Node->getOperand(2), dl, AtomicType);
3195 break;
3196 default:
3197 llvm_unreachable("Invalid atomic op extension");
3198 }
3199
3200 SDValue Success =
3201 DAG.getSetCC(dl, Node->getValueType(1), LHS, RHS, ISD::SETEQ);
3202
3203 Results.push_back(ExtRes.getValue(0));
3204 Results.push_back(Success);
3205 Results.push_back(Res.getValue(1));
3206 break;
3207 }
3208 case ISD::ATOMIC_LOAD_SUB: {
3209 SDLoc DL(Node);
3210 EVT VT = Node->getValueType(0);
3211 SDValue RHS = Node->getOperand(2);
3212 AtomicSDNode *AN = cast<AtomicSDNode>(Node);
3213 if (RHS->getOpcode() == ISD::SIGN_EXTEND_INREG &&
3214 cast<VTSDNode>(RHS->getOperand(1))->getVT() == AN->getMemoryVT())
3215 RHS = RHS->getOperand(0);
3216 SDValue NewRHS =
3217 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
3218 SDValue Res = DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, AN->getMemoryVT(),
3219 Node->getOperand(0), Node->getOperand(1),
3220 NewRHS, AN->getMemOperand());
3221 Results.push_back(Res);
3222 Results.push_back(Res.getValue(1));
3223 break;
3224 }
3225 case ISD::DYNAMIC_STACKALLOC:
3226 ExpandDYNAMIC_STACKALLOC(Node, Results);
3227 break;
3228 case ISD::MERGE_VALUES:
3229 for (unsigned i = 0; i < Node->getNumValues(); i++)
3230 Results.push_back(Node->getOperand(i));
3231 break;
3232 case ISD::UNDEF: {
3233 EVT VT = Node->getValueType(0);
3234 if (VT.isInteger())
3235 Results.push_back(DAG.getConstant(0, dl, VT));
3236 else {
3237 assert(VT.isFloatingPoint() && "Unknown value type!");
3238 Results.push_back(DAG.getConstantFP(0, dl, VT));
3239 }
3240 break;
3241 }
3242 case ISD::STRICT_FP_ROUND:
3243 // When strict mode is enforced, we can't use this expansion because it
3244 // does not honor the "strict" properties. Only a libcall is allowed.
3245 if (TLI.isStrictFPEnabled())
3246 break;
3247 // We might as well mutate to FP_ROUND when the FP_ROUND operation is legal,
3248 // since it is more efficient than the stack-based expansion.
3249 if (TLI.getStrictFPOperationAction(Node->getOpcode(),
3250 Node->getValueType(0))
3251 == TargetLowering::Legal)
3252 break;
3253 // Fall back to the stack-based expansion when the FP_ROUND operation
3254 // isn't available.
3255 if ((Tmp1 = EmitStackConvert(Node->getOperand(1), Node->getValueType(0),
3256 Node->getValueType(0), dl,
3257 Node->getOperand(0)))) {
3258 ReplaceNode(Node, Tmp1.getNode());
3259 LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_ROUND node\n");
3260 return true;
3261 }
3262 break;
3263 case ISD::FP_ROUND: {
3264 if ((Tmp1 = TLI.expandFP_ROUND(Node, DAG))) {
3265 Results.push_back(Tmp1);
3266 break;
3267 }
3268
3269 [[fallthrough]];
3270 }
3271 case ISD::BITCAST:
3272 if ((Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
3273 Node->getValueType(0), dl)))
3274 Results.push_back(Tmp1);
3275 break;
3276 case ISD::STRICT_FP_EXTEND:
3277 // When strict mode is enforced, we can't use this expansion because it
3278 // does not honor the "strict" properties. Only a libcall is allowed.
3279 if (TLI.isStrictFPEnabled())
3280 break;
3281 // We might as well mutate to FP_EXTEND when the FP_EXTEND operation is legal,
3282 // since it is more efficient than the stack-based expansion.
3283 if (TLI.getStrictFPOperationAction(Node->getOpcode(),
3284 Node->getValueType(0))
3285 == TargetLowering::Legal)
3286 break;
3287 // Fall back to the stack-based expansion when the FP_EXTEND operation
3288 // isn't available.
3289 if ((Tmp1 = EmitStackConvert(
3290 Node->getOperand(1), Node->getOperand(1).getValueType(),
3291 Node->getValueType(0), dl, Node->getOperand(0)))) {
3292 ReplaceNode(Node, Tmp1.getNode());
3293 LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_EXTEND node\n");
3294 return true;
3295 }
3296 break;
3297 case ISD::FP_EXTEND: {
3298 SDValue Op = Node->getOperand(0);
3299 EVT SrcVT = Op.getValueType();
3300 EVT DstVT = Node->getValueType(0);
3301 if (SrcVT.getScalarType() == MVT::bf16) {
3302 Results.push_back(DAG.getNode(ISD::BF16_TO_FP, SDLoc(Node), DstVT, Op));
3303 break;
3304 }
3305
3306 if ((Tmp1 = EmitStackConvert(Op, SrcVT, DstVT, dl)))
3307 Results.push_back(Tmp1);
3308 break;
3309 }
3310 case ISD::BF16_TO_FP: {
3311 // Always expand bf16 to f32 casts; they lower to ext + shift.
3312 //
3313 // Note that the operand of this code can be bf16 or an integer type in case
3314 // bf16 is not supported on the target and was softened.
3315 SDValue Op = Node->getOperand(0);
3316 if (Op.getValueType() == MVT::bf16) {
3317 Op = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32,
3318 DAG.getNode(ISD::BITCAST, dl, MVT::i16, Op));
3319 } else {
3320 Op = DAG.getAnyExtOrTrunc(Op, dl, MVT::i32);
3321 }
3322 Op = DAG.getNode(
3323 ISD::SHL, dl, MVT::i32, Op,
3324 DAG.getConstant(16, dl,
3325 TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
3326 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op);
3327 // Add fp_extend in case the output is bigger than f32.
3328 if (Node->getValueType(0) != MVT::f32)
3329 Op = DAG.getNode(ISD::FP_EXTEND, dl, Node->getValueType(0), Op);
3330 Results.push_back(Op);
3331 break;
3332 }
3333 case ISD::FP_TO_BF16: {
3334 SDValue Op = Node->getOperand(0);
3335 if (Op.getValueType() != MVT::f32)
3336 Op = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Op,
3337 DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
3338 // Certain SNaNs will turn into infinities if we do a simple shift right.
3339 if (!DAG.isKnownNeverSNaN(Op)) {
3340 Op = DAG.getNode(ISD::FCANONICALIZE, dl, MVT::f32, Op, Node->getFlags());
3341 }
3342 Op = DAG.getNode(
3343 ISD::SRL, dl, MVT::i32, DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op),
3344 DAG.getConstant(16, dl,
3345 TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
3346 // The result of this node can be bf16 or an integer type in case bf16 is
3347 // not supported on the target and was softened to i16 for storage.
3348 if (Node->getValueType(0) == MVT::bf16) {
3349 Op = DAG.getNode(ISD::BITCAST, dl, MVT::bf16,
3350 DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Op));
3351 } else {
3352 Op = DAG.getAnyExtOrTrunc(Op, dl, Node->getValueType(0));
3353 }
3354 Results.push_back(Op);
3355 break;
3356 }
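// Worked example (illustrative): bf16 keeps the top 16 bits of the f32
// encoding. 1.0f is 0x3F800000, so FP_TO_BF16 produces 0x3F80, and the
// BF16_TO_FP expansion above rebuilds 0x3F800000 by shifting left 16.
// FCANONICALIZE is inserted first so an SNaN whose set mantissa bits live
// only in the discarded low 16 bits cannot silently become an infinity.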
3357 case ISD::SIGN_EXTEND_INREG: {
3358 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
3359 EVT VT = Node->getValueType(0);
3360
3361 // An in-register sign-extend of a boolean is a negation:
3362 // 'true' (1) sign-extended is -1.
3363 // 'false' (0) sign-extended is 0.
3364 // However, we must mask the high bits of the source operand because the
3365 // SIGN_EXTEND_INREG does not guarantee that the high bits are already zero.
3366
3367 // TODO: Do this for vectors too?
3368 if (ExtraVT.isScalarInteger() && ExtraVT.getSizeInBits() == 1) {
3369 SDValue One = DAG.getConstant(1, dl, VT);
3370 SDValue And = DAG.getNode(ISD::AND, dl, VT, Node->getOperand(0), One);
3371 SDValue Zero = DAG.getConstant(0, dl, VT);
3372 SDValue Neg = DAG.getNode(ISD::SUB, dl, VT, Zero, And);
3373 Results.push_back(Neg);
3374 break;
3375 }
3376
3377 // NOTE: we could fall back on load/store here too for targets without
3378 // SRA. However, it is doubtful that any exist.
3379 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
3380 unsigned BitsDiff = VT.getScalarSizeInBits() -
3381 ExtraVT.getScalarSizeInBits();
3382 SDValue ShiftCst = DAG.getConstant(BitsDiff, dl, ShiftAmountTy);
3383 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0),
3384 Node->getOperand(0), ShiftCst);
3385 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst);
3386 Results.push_back(Tmp1);
3387 break;
3388 }
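// Worked example (illustrative): sign_extend_inreg i32 from i8 becomes
// (x << 24) >>s 24, e.g. 0x000000FF -> 0xFF000000 -> 0xFFFFFFFF (-1).
// The i1 special case instead computes 0 - (x & 1): bit 1 becomes -1,
// bit 0 stays 0, with no shifts required.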
3389 case ISD::UINT_TO_FP:
3390 case ISD::STRICT_UINT_TO_FP:
3391 if (TLI.expandUINT_TO_FP(Node, Tmp1, Tmp2, DAG)) {
3392 Results.push_back(Tmp1);
3393 if (Node->isStrictFPOpcode())
3394 Results.push_back(Tmp2);
3395 break;
3396 }
3397 [[fallthrough]];
3398 case ISD::SINT_TO_FP:
3399 case ISD::STRICT_SINT_TO_FP:
3400 if ((Tmp1 = ExpandLegalINT_TO_FP(Node, Tmp2))) {
3401 Results.push_back(Tmp1);
3402 if (Node->isStrictFPOpcode())
3403 Results.push_back(Tmp2);
3404 }
3405 break;
3406 case ISD::FP_TO_SINT:
3407 if (TLI.expandFP_TO_SINT(Node, Tmp1, DAG))
3408 Results.push_back(Tmp1);
3409 break;
3410 case ISD::STRICT_FP_TO_SINT:
3411 if (TLI.expandFP_TO_SINT(Node, Tmp1, DAG)) {
3412 ReplaceNode(Node, Tmp1.getNode());
3413 LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_TO_SINT node\n");
3414 return true;
3415 }
3416 break;
3417 case ISD::FP_TO_UINT:
3418 if (TLI.expandFP_TO_UINT(Node, Tmp1, Tmp2, DAG))
3419 Results.push_back(Tmp1);
3420 break;
3421 case ISD::STRICT_FP_TO_UINT:
3422 if (TLI.expandFP_TO_UINT(Node, Tmp1, Tmp2, DAG)) {
3423 // Relink the chain.
3424 DAG.ReplaceAllUsesOfValueWith(SDValue(Node,1), Tmp2);
3425 // Replace the new UINT result.
3426 ReplaceNodeWithValue(SDValue(Node, 0), Tmp1);
3427 LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_TO_UINT node\n");
3428 return true;
3429 }
3430 break;
3431 case ISD::FP_TO_SINT_SAT:
3432 case ISD::FP_TO_UINT_SAT:
3433 Results.push_back(TLI.expandFP_TO_INT_SAT(Node, DAG));
3434 break;
3435 case ISD::VAARG:
3436 Results.push_back(DAG.expandVAArg(Node));
3437 Results.push_back(Results[0].getValue(1));
3438 break;
3439 case ISD::VACOPY:
3440 Results.push_back(DAG.expandVACopy(Node));
3441 break;
3442 case ISD::EXTRACT_VECTOR_ELT:
3443 if (Node->getOperand(0).getValueType().getVectorElementCount().isScalar())
3444 // This must be an access of the only element. Return it.
3445 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
3446 Node->getOperand(0));
3447 else
3448 Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
3449 Results.push_back(Tmp1);
3450 break;
3451 case ISD::EXTRACT_SUBVECTOR:
3452 Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
3453 break;
3454 case ISD::INSERT_SUBVECTOR:
3455 Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0)));
3456 break;
3457 case ISD::CONCAT_VECTORS:
3458 Results.push_back(ExpandVectorBuildThroughStack(Node));
3459 break;
3460 case ISD::SCALAR_TO_VECTOR:
3461 Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
3462 break;
3463 case ISD::INSERT_VECTOR_ELT:
3464 Results.push_back(ExpandINSERT_VECTOR_ELT(SDValue(Node, 0)));
3465 break;
3466 case ISD::VECTOR_SHUFFLE: {
3467 SmallVector<int, 32> NewMask;
3468 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();
3469
3470 EVT VT = Node->getValueType(0);
3471 EVT EltVT = VT.getVectorElementType();
3472 SDValue Op0 = Node->getOperand(0);
3473 SDValue Op1 = Node->getOperand(1);
3474 if (!TLI.isTypeLegal(EltVT)) {
3475 EVT NewEltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
3476
3477 // BUILD_VECTOR operands are allowed to be wider than the element type.
3478 // But if NewEltVT is smaller than EltVT, the BUILD_VECTOR does not accept
3479 // it.
3480 if (NewEltVT.bitsLT(EltVT)) {
3481 // Convert shuffle node.
3482 // If original node was v4i64 and the new EltVT is i32,
3483 // cast operands to v8i32 and re-build the mask.
3484
3485 // Calculate the new VT; its size should equal the original VT's size.
3486 EVT NewVT =
3487 EVT::getVectorVT(*DAG.getContext(), NewEltVT,
3488 VT.getSizeInBits() / NewEltVT.getSizeInBits());
3489 assert(NewVT.bitsEq(VT));
3490
3491 // cast operands to new VT
3492 Op0 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op0);
3493 Op1 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op1);
3494
3495 // Convert the shuffle mask
3496 unsigned int factor =
3497 NewVT.getVectorNumElements() / VT.getVectorNumElements();
3498
3499 // EltVT gets smaller
3500 assert(factor > 0);
3501
3502 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
3503 if (Mask[i] < 0) {
3504 for (unsigned fi = 0; fi < factor; ++fi)
3505 NewMask.push_back(Mask[i]);
3506 }
3507 else {
3508 for (unsigned fi = 0; fi < factor; ++fi)
3509 NewMask.push_back(Mask[i]*factor+fi);
3510 }
3511 }
3512 Mask = NewMask;
3513 VT = NewVT;
3514 }
3515 EltVT = NewEltVT;
3516 }
3517 unsigned NumElems = VT.getVectorNumElements();
3518 SmallVector<SDValue, 16> Ops;
3519 for (unsigned i = 0; i != NumElems; ++i) {
3520 if (Mask[i] < 0) {
3521 Ops.push_back(DAG.getUNDEF(EltVT));
3522 continue;
3523 }
3524 unsigned Idx = Mask[i];
3525 if (Idx < NumElems)
3526 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
3527 DAG.getVectorIdxConstant(Idx, dl)));
3528 else
3529 Ops.push_back(
3530 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op1,
3531 DAG.getVectorIdxConstant(Idx - NumElems, dl)));
3532 }
3533
3534 Tmp1 = DAG.getBuildVector(VT, dl, Ops);
3535 // We may have changed the BUILD_VECTOR type. Cast it back to the Node type.
3536 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Tmp1);
3537 Results.push_back(Tmp1);
3538 break;
3539 }
3540 case ISD::VECTOR_SPLICE: {
3541 Results.push_back(TLI.expandVectorSplice(Node, DAG));
3542 break;
3543 }
3544 case ISD::EXTRACT_ELEMENT: {
3545 EVT OpTy = Node->getOperand(0).getValueType();
3546 if (Node->getConstantOperandVal(1)) {
3547 // 1 -> Hi
3548 Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
3549 DAG.getConstant(OpTy.getSizeInBits() / 2, dl,
3550 TLI.getShiftAmountTy(
3551 Node->getOperand(0).getValueType(),
3552 DAG.getDataLayout())));
3553 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
3554 } else {
3555 // 0 -> Lo
3556 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0),
3557 Node->getOperand(0));
3558 }
3559 Results.push_back(Tmp1);
3560 break;
3561 }
3562 case ISD::STACKSAVE:
3563 // Expand to CopyFromReg if the target set
3564 // StackPointerRegisterToSaveRestore.
3565 if (Register SP = TLI.getStackPointerRegisterToSaveRestore()) {
3566 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
3567 Node->getValueType(0)));
3568 Results.push_back(Results[0].getValue(1));
3569 } else {
3570 Results.push_back(DAG.getUNDEF(Node->getValueType(0)));
3571 Results.push_back(Node->getOperand(0));
3572 }
3573 break;
3574 case ISD::STACKRESTORE:
3575 // Expand to CopyToReg if the target set
3576 // StackPointerRegisterToSaveRestore.
3577 if (Register SP = TLI.getStackPointerRegisterToSaveRestore()) {
3578 Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
3579 Node->getOperand(1)));
3580 } else {
3581 Results.push_back(Node->getOperand(0));
3582 }
3583 break;
3584 case ISD::GET_DYNAMIC_AREA_OFFSET:
3585 Results.push_back(DAG.getConstant(0, dl, Node->getValueType(0)));
3586 Results.push_back(Results[0].getValue(0));
3587 break;
3588 case ISD::FCOPYSIGN:
3589 Results.push_back(ExpandFCOPYSIGN(Node));
3590 break;
3591 case ISD::FNEG:
3592 Results.push_back(ExpandFNEG(Node));
3593 break;
3594 case ISD::FABS:
3595 Results.push_back(ExpandFABS(Node));
3596 break;
3597 case ISD::IS_FPCLASS: {
3598 auto Test = static_cast<FPClassTest>(Node->getConstantOperandVal(1));
3599 if (SDValue Expanded =
3600 TLI.expandIS_FPCLASS(Node->getValueType(0), Node->getOperand(0),
3601 Test, Node->getFlags(), SDLoc(Node), DAG))
3602 Results.push_back(Expanded);
3603 break;
3604 }
3605 case ISD::SMIN:
3606 case ISD::SMAX:
3607 case ISD::UMIN:
3608 case ISD::UMAX: {
3609 // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B
3610 ISD::CondCode Pred;
3611 switch (Node->getOpcode()) {
3612 default: llvm_unreachable("How did we get here?");
3613 case ISD::SMAX: Pred = ISD::SETGT; break;
3614 case ISD::SMIN: Pred = ISD::SETLT; break;
3615 case ISD::UMAX: Pred = ISD::SETUGT; break;
3616 case ISD::UMIN: Pred = ISD::SETULT; break;
3617 }
3618 Tmp1 = Node->getOperand(0);
3619 Tmp2 = Node->getOperand(1);
3620 Tmp1 = DAG.getSelectCC(dl, Tmp1, Tmp2, Tmp1, Tmp2, Pred);
3621 Results.push_back(Tmp1);
3622 break;
3623 }
3624 case ISD::FMINNUM:
3625 case ISD::FMAXNUM: {
3626 if (SDValue Expanded = TLI.expandFMINNUM_FMAXNUM(Node, DAG))
3627 Results.push_back(Expanded);
3628 break;
3629 }
3630 case ISD::FMINIMUM:
3631 case ISD::FMAXIMUM: {
3632 if (SDValue Expanded = TLI.expandFMINIMUM_FMAXIMUM(Node, DAG))
3633 Results.push_back(Expanded);
3634 break;
3635 }
3636 case ISD::FSIN:
3637 case ISD::FCOS: {
3638 EVT VT = Node->getValueType(0);
3639 // Turn fsin / fcos into ISD::FSINCOS node if there are a pair of fsin /
3640 // fcos which share the same operand and both are used.
3641 if ((TLI.isOperationLegalOrCustom(ISD::FSINCOS, VT) ||
3642 isSinCosLibcallAvailable(Node, TLI))
3643 && useSinCos(Node)) {
3644 SDVTList VTs = DAG.getVTList(VT, VT);
3645 Tmp1 = DAG.getNode(ISD::FSINCOS, dl, VTs, Node->getOperand(0));
3646 if (Node->getOpcode() == ISD::FCOS)
3647 Tmp1 = Tmp1.getValue(1);
3648 Results.push_back(Tmp1);
3649 }
3650 break;
3651 }
3652 case ISD::FLDEXP:
3653 case ISD::STRICT_FLDEXP: {
3654 EVT VT = Node->getValueType(0);
3655 RTLIB::Libcall LC = RTLIB::getLDEXP(VT);
3656 // Use the LibCall instead; it is very likely faster.
3657 // FIXME: Use separate LibCall action.
3658 if (TLI.getLibcallName(LC))
3659 break;
3660
3661 if (SDValue Expanded = expandLdexp(Node)) {
3662 Results.push_back(Expanded);
3663 if (Node->getOpcode() == ISD::STRICT_FLDEXP)
3664 Results.push_back(Expanded.getValue(1));
3665 }
3666
3667 break;
3668 }
3669 case ISD::FFREXP: {
3670 RTLIB::Libcall LC = RTLIB::getFREXP(Node->getValueType(0));
3671 // Use the LibCall instead; it is very likely faster.
3672 // FIXME: Use separate LibCall action.
3673 if (TLI.getLibcallName(LC))
3674 break;
3675
3676 if (SDValue Expanded = expandFrexp(Node)) {
3677 Results.push_back(Expanded);
3678 Results.push_back(Expanded.getValue(1));
3679 }
3680 break;
3681 }
3682 case ISD::FMAD:
3683 llvm_unreachable("Illegal fmad should never be formed");
3684
3685 case ISD::FP16_TO_FP:
3686 if (Node->getValueType(0) != MVT::f32) {
3687 // We can extend to types bigger than f32 in two steps without changing
3688 // the result. Since "f16 -> f32" is much more commonly available, give
3689 // CodeGen the option of emitting that before resorting to a libcall.
3690 SDValue Res =
3691 DAG.getNode(ISD::FP16_TO_FP, dl, MVT::f32, Node->getOperand(0));
3692 Results.push_back(
3693 DAG.getNode(ISD::FP_EXTEND, dl, Node->getValueType(0), Res));
3694 }
3695 break;
3696 case ISD::STRICT_BF16_TO_FP:
3697 case ISD::STRICT_FP16_TO_FP:
3698 if (Node->getValueType(0) != MVT::f32) {
3699 // We can extend to types bigger than f32 in two steps without changing
3700 // the result. Since "f16 -> f32" is much more commonly available, give
3701 // CodeGen the option of emitting that before resorting to a libcall.
3702 SDValue Res = DAG.getNode(Node->getOpcode(), dl, {MVT::f32, MVT::Other},
3703 {Node->getOperand(0), Node->getOperand(1)});
3704 Res = DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
3705 {Node->getValueType(0), MVT::Other},
3706 {Res.getValue(1), Res});
3707 Results.push_back(Res);
3708 Results.push_back(Res.getValue(1));
3709 }
3710 break;
3711 case ISD::FP_TO_FP16:
3712 LLVM_DEBUG(dbgs() << "Legalizing FP_TO_FP16\n");
3713 if (!TLI.useSoftFloat() && TM.Options.UnsafeFPMath) {
3714 SDValue Op = Node->getOperand(0);
3715 MVT SVT = Op.getSimpleValueType();
3716 if ((SVT == MVT::f64 || SVT == MVT::f80) &&
3717 TLI.isOperationLegalOrCustom(ISD::FP_TO_FP16, MVT::f32)) {
3718 // Under fastmath, we can expand this node into a fround followed by
3719 // a float-half conversion.
3720 SDValue FloatVal =
3721 DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Op,
3722 DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
3723 Results.push_back(
3724 DAG.getNode(ISD::FP_TO_FP16, dl, Node->getValueType(0), FloatVal));
3725 }
3726 }
3727 break;
3728 case ISD::ConstantFP: {
3729 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
3730 // Check to see if this FP immediate is already legal.
3731 // If this is a legal constant, turn it into a TargetConstantFP node.
3732 if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0),
3733 DAG.shouldOptForSize()))
3734 Results.push_back(ExpandConstantFP(CFP, true));
3735 break;
3736 }
3737 case ISD::Constant: {
3738 ConstantSDNode *CP = cast<ConstantSDNode>(Node);
3739 Results.push_back(ExpandConstant(CP));
3740 break;
3741 }
3742 case ISD::FSUB: {
3743 EVT VT = Node->getValueType(0);
3744 if (TLI.isOperationLegalOrCustom(ISD::FADD, VT) &&
3745 TLI.isOperationLegalOrCustom(ISD::FNEG, VT)) {
3746 const SDNodeFlags Flags = Node->getFlags();
3747 Tmp1 = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(1));
3748 Tmp1 = DAG.getNode(ISD::FADD, dl, VT, Node->getOperand(0), Tmp1, Flags);
3749 Results.push_back(Tmp1);
3750 }
3751 break;
3752 }
3753 case ISD::SUB: {
3754 EVT VT = Node->getValueType(0);
3755 assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
3756 TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
3757 "Don't know how to expand this subtraction!");
3758 Tmp1 = DAG.getNOT(dl, Node->getOperand(1), VT);
3759 Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, dl, VT));
3760 Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
3761 break;
3762 }
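// Worked example (illustrative): with only ADD and XOR legal, a - b is rebuilt
// as a + (~b + 1), i.e. two's-complement negation of b; for i32,
// 7 - 3 = 7 + 0xFFFFFFFD = 4 after wrapping.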
3763 case ISD::UREM:
3764 case ISD::SREM:
3765 if (TLI.expandREM(Node, Tmp1, DAG))
3766 Results.push_back(Tmp1);
3767 break;
3768 case ISD::UDIV:
3769 case ISD::SDIV: {
3770 bool isSigned = Node->getOpcode() == ISD::SDIV;
3771 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
3772 EVT VT = Node->getValueType(0);
3773 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT)) {
3774 SDVTList VTs = DAG.getVTList(VT, VT);
3775 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
3776 Node->getOperand(1));
3777 Results.push_back(Tmp1);
3778 }
3779 break;
3780 }
3781 case ISD::MULHU:
3782 case ISD::MULHS: {
3783 unsigned ExpandOpcode =
3784 Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI : ISD::SMUL_LOHI;
3785 EVT VT = Node->getValueType(0);
3786 SDVTList VTs = DAG.getVTList(VT, VT);
3787
3788 Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0),
3789 Node->getOperand(1));
3790 Results.push_back(Tmp1.getValue(1));
3791 break;
3792 }
3793 case ISD::UMUL_LOHI:
3794 case ISD::SMUL_LOHI: {
3795 SDValue LHS = Node->getOperand(0);
3796 SDValue RHS = Node->getOperand(1);
3797 MVT VT = LHS.getSimpleValueType();
3798 unsigned MULHOpcode =
3799 Node->getOpcode() == ISD::UMUL_LOHI ? ISD::MULHU : ISD::MULHS;
3800
3801 if (TLI.isOperationLegalOrCustom(MULHOpcode, VT)) {
3802 Results.push_back(DAG.getNode(ISD::MUL, dl, VT, LHS, RHS));
3803 Results.push_back(DAG.getNode(MULHOpcode, dl, VT, LHS, RHS));
3804 break;
3805 }
3806
3807 SmallVector<SDValue, 4> Halves;
3808 EVT HalfType = EVT(VT).getHalfSizedIntegerVT(*DAG.getContext());
3809 assert(TLI.isTypeLegal(HalfType));
3810 if (TLI.expandMUL_LOHI(Node->getOpcode(), VT, dl, LHS, RHS, Halves,
3811 HalfType, DAG,
3812 TargetLowering::MulExpansionKind::Always)) {
3813 for (unsigned i = 0; i < 2; ++i) {
3814 SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Halves[2 * i]);
3815 SDValue Hi = DAG.getNode(ISD::ANY_EXTEND, dl, VT, Halves[2 * i + 1]);
3816 SDValue Shift = DAG.getConstant(
3817 HalfType.getScalarSizeInBits(), dl,
3818 TLI.getShiftAmountTy(HalfType, DAG.getDataLayout()));
3819 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
3820 Results.push_back(DAG.getNode(ISD::OR, dl, VT, Lo, Hi));
3821 }
3822 break;
3823 }
3824 break;
3825 }
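// Illustrative reassembly of the half-width pieces (hypothetical widths, e.g.
// an i64 value rebuilt from i32 halves): Lo = zext(lo half), Hi =
// anyext(hi half) << HalfBits, and each MUL_LOHI result is Lo | Hi. The
// ZERO_EXTEND of the low half matters so stray upper bits cannot pollute the OR.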
3826 case ISD::MUL: {
3827 EVT VT = Node->getValueType(0);
3828 SDVTList VTs = DAG.getVTList(VT, VT);
3829 // See if multiply or divide can be lowered using two-result operations.
3830 // We just need the low half of the multiply; try both the signed
3831 // and unsigned forms. If the target supports both SMUL_LOHI and
3832 // UMUL_LOHI, form a preference by checking which forms of plain
3833 // MULH it supports.
3834 bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT);
3835 bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT);
3836 bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT);
3837 bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT);
3838 unsigned OpToUse = 0;
3839 if (HasSMUL_LOHI && !HasMULHS) {
3840 OpToUse = ISD::SMUL_LOHI;
3841 } else if (HasUMUL_LOHI && !HasMULHU) {
3842 OpToUse = ISD::UMUL_LOHI;
3843 } else if (HasSMUL_LOHI) {
3844 OpToUse = ISD::SMUL_LOHI;
3845 } else if (HasUMUL_LOHI) {
3846 OpToUse = ISD::UMUL_LOHI;
3847 }
3848 if (OpToUse) {
3849 Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0),
3850 Node->getOperand(1)));
3851 break;
3852 }
3853
3854 SDValue Lo, Hi;
3855 EVT HalfType = VT.getHalfSizedIntegerVT(*DAG.getContext());
3856 if (TLI.isOperationLegalOrCustom(ISD::ZERO_EXTEND, VT) &&
3857 TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND, VT) &&
3858 TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
3859 TLI.isOperationLegalOrCustom(ISD::OR, VT) &&
3860 TLI.expandMUL(Node, Lo, Hi, HalfType, DAG,
3861 TargetLowering::MulExpansionKind::OnlyLegalOrCustom)) {
3862 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
3863 Hi = DAG.getNode(ISD::ANY_EXTEND, dl, VT, Hi);
3864 SDValue Shift =
3865 DAG.getConstant(HalfType.getSizeInBits(), dl,
3866 TLI.getShiftAmountTy(HalfType, DAG.getDataLayout()));
3867 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
3868 Results.push_back(DAG.getNode(ISD::OR, dl, VT, Lo, Hi));
3869 }
3870 break;
3871 }
3872 case ISD::FSHL:
3873 case ISD::FSHR:
3874 if (SDValue Expanded = TLI.expandFunnelShift(Node, DAG))
3875 Results.push_back(Expanded);
3876 break;
3877 case ISD::ROTL:
3878 case ISD::ROTR:
3879 if (SDValue Expanded = TLI.expandROT(Node, true /*AllowVectorOps*/, DAG))
3880 Results.push_back(Expanded);
3881 break;
3882 case ISD::SADDSAT:
3883 case ISD::UADDSAT:
3884 case ISD::SSUBSAT:
3885 case ISD::USUBSAT:
3886 Results.push_back(TLI.expandAddSubSat(Node, DAG));
3887 break;
3888 case ISD::SSHLSAT:
3889 case ISD::USHLSAT:
3890 Results.push_back(TLI.expandShlSat(Node, DAG));
3891 break;
3892 case ISD::SMULFIX:
3893 case ISD::SMULFIXSAT:
3894 case ISD::UMULFIX:
3895 case ISD::UMULFIXSAT:
3896 Results.push_back(TLI.expandFixedPointMul(Node, DAG));
3897 break;
3898 case ISD::SDIVFIX:
3899 case ISD::SDIVFIXSAT:
3900 case ISD::UDIVFIX:
3901 case ISD::UDIVFIXSAT:
3902 if (SDValue V = TLI.expandFixedPointDiv(Node->getOpcode(), SDLoc(Node),
3903 Node->getOperand(0),
3904 Node->getOperand(1),
3905 Node->getConstantOperandVal(2),
3906 DAG)) {
3907 Results.push_back(V);
3908 break;
3909 }
3910 // FIXME: We might want to retry here with a wider type if we fail, if that
3911 // type is legal.
3912 // FIXME: Technically, so long as we only have sdivfixes where BW+Scale is
3913 // <= 128 (which is the case for all of the default Embedded-C types),
3914 // we will only get here with types and scales that we could always expand
3915 // if we were allowed to generate libcalls to division functions of illegal
3916 // type. But we cannot do that.
3917 llvm_unreachable("Cannot expand DIVFIX!");
3918 case ISD::UADDO_CARRY:
3919 case ISD::USUBO_CARRY: {
3920 SDValue LHS = Node->getOperand(0);
3921 SDValue RHS = Node->getOperand(1);
3922 SDValue Carry = Node->getOperand(2);
3923
3924 bool IsAdd = Node->getOpcode() == ISD::UADDO_CARRY;
3925
3926 // Initial add of the 2 operands.
3927 unsigned Op = IsAdd ? ISD::ADD : ISD::SUB;
3928 EVT VT = LHS.getValueType();
3929 SDValue Sum = DAG.getNode(Op, dl, VT, LHS, RHS);
3930
3931 // Initial check for overflow.
3932 EVT CarryType = Node->getValueType(1);
3933 EVT SetCCType = getSetCCResultType(Node->getValueType(0));
3934 ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
3935 SDValue Overflow = DAG.getSetCC(dl, SetCCType, Sum, LHS, CC);
3936
3937 // Add of the sum and the carry.
3938 SDValue One = DAG.getConstant(1, dl, VT);
3939 SDValue CarryExt =
3940 DAG.getNode(ISD::AND, dl, VT, DAG.getZExtOrTrunc(Carry, dl, VT), One);
3941 SDValue Sum2 = DAG.getNode(Op, dl, VT, Sum, CarryExt);
3942
3943 // Second check for overflow. If we are adding, we can only overflow if the
3944 // initial sum is all 1s and the carry is set, resulting in a new sum of 0.
3945 // If we are subtracting, we can only overflow if the initial sum is 0 and
3946 // the carry is set, resulting in a new sum of all 1s.
3947 SDValue Zero = DAG.getConstant(0, dl, VT);
3948 SDValue Overflow2 =
3949 IsAdd ? DAG.getSetCC(dl, SetCCType, Sum2, Zero, ISD::SETEQ)
3950 : DAG.getSetCC(dl, SetCCType, Sum, Zero, ISD::SETEQ);
3951 Overflow2 = DAG.getNode(ISD::AND, dl, SetCCType, Overflow2,
3952 DAG.getZExtOrTrunc(Carry, dl, SetCCType));
3953
3954 SDValue ResultCarry =
3955 DAG.getNode(ISD::OR, dl, SetCCType, Overflow, Overflow2);
3956
3957 Results.push_back(Sum2);
3958 Results.push_back(DAG.getBoolExtOrTrunc(ResultCarry, dl, CarryType, VT));
3959 break;
3960 }
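// Worked example (illustrative, i8): UADDO_CARRY with LHS=0xFF, RHS=0x00,
// Carry=1. Sum=0xFF, so the first check (Sum <u LHS) sees no overflow;
// Sum2 = Sum + 1 wraps to 0x00, so the second check (Sum2 == 0 with the carry
// set) reports the overflow instead. The result carry is the OR of both checks.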
3961 case ISD::SADDO:
3962 case ISD::SSUBO: {
3963 SDValue Result, Overflow;
3964 TLI.expandSADDSUBO(Node, Result, Overflow, DAG);
3965 Results.push_back(Result);
3966 Results.push_back(Overflow);
3967 break;
3968 }
3969 case ISD::UADDO:
3970 case ISD::USUBO: {
3971 SDValue Result, Overflow;
3972 TLI.expandUADDSUBO(Node, Result, Overflow, DAG);
3973 Results.push_back(Result);
3974 Results.push_back(Overflow);
3975 break;
3976 }
3977 case ISD::UMULO:
3978 case ISD::SMULO: {
3979 SDValue Result, Overflow;
3980 if (TLI.expandMULO(Node, Result, Overflow, DAG)) {
3981 Results.push_back(Result);
3982 Results.push_back(Overflow);
3983 }
3984 break;
3985 }
3986 case ISD::BUILD_PAIR: {
3987 EVT PairTy = Node->getValueType(0);
3988 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
3989 Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
3990 Tmp2 = DAG.getNode(
3991 ISD::SHL, dl, PairTy, Tmp2,
3992 DAG.getConstant(PairTy.getSizeInBits() / 2, dl,
3993 TLI.getShiftAmountTy(PairTy, DAG.getDataLayout())));
3994 Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
3995 break;
3996 }
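// Worked example (illustrative): BUILD_PAIR of two i32 halves lo=0xDEADBEEF
// and hi=0x00000001 into i64 yields zext(lo) | (anyext(hi) << 32)
// = 0x00000001DEADBEEF.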
3997 case ISD::SELECT:
3998 Tmp1 = Node->getOperand(0);
3999 Tmp2 = Node->getOperand(1);
4000 Tmp3 = Node->getOperand(2);
4001 if (Tmp1.getOpcode() == ISD::SETCC) {
4002 Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
4003 Tmp2, Tmp3,
4004 cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
4005 } else {
4006 Tmp1 = DAG.getSelectCC(dl, Tmp1,
4007 DAG.getConstant(0, dl, Tmp1.getValueType()),
4008 Tmp2, Tmp3, ISD::SETNE);
4009 }
4010 Tmp1->setFlags(Node->getFlags());
4011 Results.push_back(Tmp1);
4012 break;
4013 case ISD::BR_JT: {
4014 SDValue Chain = Node->getOperand(0);
4015 SDValue Table = Node->getOperand(1);
4016 SDValue Index = Node->getOperand(2);
4017 int JTI = cast<JumpTableSDNode>(Table.getNode())->getIndex();
4018
4019 const DataLayout &TD = DAG.getDataLayout();
4020 EVT PTy = TLI.getPointerTy(TD);
4021
4022 unsigned EntrySize =
4023 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
4024
4025 // For power-of-two jumptable entry sizes convert multiplication to a shift.
4026 // This transformation needs to be done here since otherwise the MIPS
4027 // backend will end up emitting a three instruction multiply sequence
4028 // instead of a single shift and MSP430 will call a runtime function.
4029 if (llvm::isPowerOf2_32(EntrySize))
4030 Index = DAG.getNode(
4031 ISD::SHL, dl, Index.getValueType(), Index,
4032 DAG.getConstant(llvm::Log2_32(EntrySize), dl, Index.getValueType()));
4033 else
4034 Index = DAG.getNode(ISD::MUL, dl, Index.getValueType(), Index,
4035 DAG.getConstant(EntrySize, dl, Index.getValueType()));
4036 SDValue Addr = DAG.getNode(ISD::ADD, dl, Index.getValueType(),
4037 Index, Table);
4038
4039 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
4040 SDValue LD = DAG.getExtLoad(
4041 ISD::SEXTLOAD, dl, PTy, Chain, Addr,
4042 MachinePointerInfo::getJumpTable(DAG.getMachineFunction()), MemVT);
4043 Addr = LD;
4044 if (TLI.isJumpTableRelative()) {
4045 // For PIC, the sequence is:
4046 // BRIND(load(Jumptable + index) + RelocBase)
4047 // RelocBase can be JumpTable, GOT or some sort of global base.
4048 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
4049 TLI.getPICJumpTableRelocBase(Table, DAG));
4050 }
4051
4052 Tmp1 = TLI.expandIndirectJTBranch(dl, LD.getValue(1), Addr, JTI, DAG);
4053 Results.push_back(Tmp1);
4054 break;
4055 }
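// Worked example (illustrative, 4-byte jump-table entries): the entry offset
// Index * 4 becomes Index << 2, the entry is read with a sign-extending load
// from Table + (Index << 2), and for PIC the loaded value is added to the
// relocation base before the indirect branch, i.e.
// BRIND(load(Table + Index*4) + RelocBase).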
4056 case ISD::BRCOND:
4057 // Expand brcond's setcc into its constituent parts and create a BR_CC
4058 // Node.
4059 Tmp1 = Node->getOperand(0);
4060 Tmp2 = Node->getOperand(1);
4061 if (Tmp2.getOpcode() == ISD::SETCC &&
4062 TLI.isOperationLegalOrCustom(ISD::BR_CC,
4063 Tmp2.getOperand(0).getValueType())) {
4064 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1, Tmp2.getOperand(2),
4065 Tmp2.getOperand(0), Tmp2.getOperand(1),
4066 Node->getOperand(2));
4067 } else {
4068 // We test only the i1 bit. Skip the AND if UNDEF or another AND.
4069 if (Tmp2.isUndef() ||
4070 (Tmp2.getOpcode() == ISD::AND && isOneConstant(Tmp2.getOperand(1))))
4071 Tmp3 = Tmp2;
4072 else
4073 Tmp3 = DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2,
4074 DAG.getConstant(1, dl, Tmp2.getValueType()));
4075 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
4076 DAG.getCondCode(ISD::SETNE), Tmp3,
4077 DAG.getConstant(0, dl, Tmp3.getValueType()),
4078 Node->getOperand(2));
4079 }
4080 Results.push_back(Tmp1);
4081 break;
4082 case ISD::SETCC:
4083 case ISD::VP_SETCC:
4084 case ISD::STRICT_FSETCC:
4085 case ISD::STRICT_FSETCCS: {
4086 bool IsVP = Node->getOpcode() == ISD::VP_SETCC;
4087 bool IsStrict = Node->getOpcode() == ISD::STRICT_FSETCC ||
4088 Node->getOpcode() == ISD::STRICT_FSETCCS;
4089 bool IsSignaling = Node->getOpcode() == ISD::STRICT_FSETCCS;
4090 SDValue Chain = IsStrict ? Node->getOperand(0) : SDValue();
4091 unsigned Offset = IsStrict ? 1 : 0;
4092 Tmp1 = Node->getOperand(0 + Offset);
4093 Tmp2 = Node->getOperand(1 + Offset);
4094 Tmp3 = Node->getOperand(2 + Offset);
4095 SDValue Mask, EVL;
4096 if (IsVP) {
4097 Mask = Node->getOperand(3 + Offset);
4098 EVL = Node->getOperand(4 + Offset);
4099 }
4100 bool Legalized = TLI.LegalizeSetCCCondCode(
4101 DAG, Node->getValueType(0), Tmp1, Tmp2, Tmp3, Mask, EVL, NeedInvert, dl,
4102 Chain, IsSignaling);
4103
4104 if (Legalized) {
4105 // If we expanded the SETCC by swapping LHS and RHS, or by inverting the
4106 // condition code, create a new SETCC node.
4107 if (Tmp3.getNode()) {
4108 if (IsStrict) {
4109 Tmp1 = DAG.getNode(Node->getOpcode(), dl, Node->getVTList(),
4110 {Chain, Tmp1, Tmp2, Tmp3}, Node->getFlags());
4111 Chain = Tmp1.getValue(1);
4112 } else if (IsVP) {
4113 Tmp1 = DAG.getNode(Node->getOpcode(), dl, Node->getValueType(0),
4114 {Tmp1, Tmp2, Tmp3, Mask, EVL}, Node->getFlags());
4115 } else {
4116 Tmp1 = DAG.getNode(Node->getOpcode(), dl, Node->getValueType(0), Tmp1,
4117 Tmp2, Tmp3, Node->getFlags());
4118 }
4119 }
4120
4121 // If we expanded the SETCC by inverting the condition code, then wrap
4122 // the existing SETCC in a NOT to restore the intended condition.
4123 if (NeedInvert) {
4124 if (!IsVP)
4125 Tmp1 = DAG.getLogicalNOT(dl, Tmp1, Tmp1->getValueType(0));
4126 else
4127 Tmp1 =
4128 DAG.getVPLogicalNOT(dl, Tmp1, Mask, EVL, Tmp1->getValueType(0));
4129 }
4130
4131 Results.push_back(Tmp1);
4132 if (IsStrict)
4133 Results.push_back(Chain);
4134
4135 break;
4136 }
4137
4138 // FIXME: It seems Legalized is false iff CCCode is Legal. I don't
4139 // understand if this code is useful for strict nodes.
4140 assert(!IsStrict && "Don't know how to expand for strict nodes.");
4141
4142 // Otherwise, SETCC for the given comparison type must be completely
4143 // illegal; expand it into a SELECT_CC.
4144 // FIXME: This drops the mask/evl for VP_SETCC.
4145 EVT VT = Node->getValueType(0);
4146 EVT Tmp1VT = Tmp1.getValueType();
4147 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
4148 DAG.getBoolConstant(true, dl, VT, Tmp1VT),
4149 DAG.getBoolConstant(false, dl, VT, Tmp1VT), Tmp3);
4150 Tmp1->setFlags(Node->getFlags());
4151 Results.push_back(Tmp1);
4152 break;
4153 }
4154 case ISD::SELECT_CC: {
4155 // TODO: need to add STRICT_SELECT_CC and STRICT_SELECT_CCS
4156 Tmp1 = Node->getOperand(0); // LHS
4157 Tmp2 = Node->getOperand(1); // RHS
4158 Tmp3 = Node->getOperand(2); // True
4159 Tmp4 = Node->getOperand(3); // False
4160 EVT VT = Node->getValueType(0);
4161 SDValue Chain;
4162 SDValue CC = Node->getOperand(4);
4163 ISD::CondCode CCOp = cast<CondCodeSDNode>(CC)->get();
4164
4165 if (TLI.isCondCodeLegalOrCustom(CCOp, Tmp1.getSimpleValueType())) {
4166 // If the condition code is legal, then we need to expand this
4167 // node using SETCC and SELECT.
4168 EVT CmpVT = Tmp1.getValueType();
4169 assert(!TLI.isOperationExpand(ISD::SELECT, VT) &&
4170 "Cannot expand ISD::SELECT_CC when ISD::SELECT also needs to be "
4171 "expanded.");
4172 EVT CCVT = getSetCCResultType(CmpVT);
4173 SDValue Cond = DAG.getNode(ISD::SETCC, dl, CCVT, Tmp1, Tmp2, CC, Node->getFlags());
4174 Results.push_back(
4175 DAG.getSelect(dl, VT, Cond, Tmp3, Tmp4, Node->getFlags()));
4176 break;
4177 }
4178
4179 // SELECT_CC is legal, so the condition code must not be.
4180 bool Legalized = false;
4181 // Try to legalize by inverting the condition. This is for targets that
4182 // might support an ordered version of a condition, but not the unordered
4183 // version (or vice versa).
4184 ISD::CondCode InvCC = ISD::getSetCCInverse(CCOp, Tmp1.getValueType());
4185 if (TLI.isCondCodeLegalOrCustom(InvCC, Tmp1.getSimpleValueType())) {
4186 // Use the new condition code and swap true and false
4187 Legalized = true;
4188 Tmp1 = DAG.getSelectCC(dl, Tmp1, Tmp2, Tmp4, Tmp3, InvCC);
4189 Tmp1->setFlags(Node->getFlags());
4190 } else {
4191 // If the inverse is not legal, then try to swap the arguments using
4192 // the inverse condition code.
4193 ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InvCC);
4194 if (TLI.isCondCodeLegalOrCustom(SwapInvCC, Tmp1.getSimpleValueType())) {
4195 // The swapped inverse condition is legal, so swap true and false,
4196 // lhs and rhs.
4197 Legalized = true;
4198 Tmp1 = DAG.getSelectCC(dl, Tmp2, Tmp1, Tmp4, Tmp3, SwapInvCC);
4199 Tmp1->setFlags(Node->getFlags());
4200 }
4201 }
4202
4203 if (!Legalized) {
4204 Legalized = TLI.LegalizeSetCCCondCode(
4205 DAG, getSetCCResultType(Tmp1.getValueType()), Tmp1, Tmp2, CC,
4206 /*Mask*/ SDValue(), /*EVL*/ SDValue(), NeedInvert, dl, Chain);
4207
4208 assert(Legalized && "Can't legalize SELECT_CC with legal condition!");
4209
4210 // If we expanded the SETCC by inverting the condition code, then swap
4211 // the True/False operands to match.
4212 if (NeedInvert)
4213 std::swap(Tmp3, Tmp4);
4214
4215 // If we expanded the SETCC by swapping LHS and RHS, or by inverting the
4216 // condition code, create a new SELECT_CC node.
4217 if (CC.getNode()) {
4218 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0),
4219 Tmp1, Tmp2, Tmp3, Tmp4, CC);
4220 } else {
4221 Tmp2 = DAG.getConstant(0, dl, Tmp1.getValueType());
4222 CC = DAG.getCondCode(ISD::SETNE);
4223 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1,
4224 Tmp2, Tmp3, Tmp4, CC);
4225 }
4226 Tmp1->setFlags(Node->getFlags());
4227 }
4228 Results.push_back(Tmp1);
4229 break;
4230 }
4231 case ISD::BR_CC: {
4232 // TODO: need to add STRICT_BR_CC and STRICT_BR_CCS
4233 SDValue Chain;
4234 Tmp1 = Node->getOperand(0); // Chain
4235 Tmp2 = Node->getOperand(2); // LHS
4236 Tmp3 = Node->getOperand(3); // RHS
4237 Tmp4 = Node->getOperand(1); // CC
4238
4239 bool Legalized = TLI.LegalizeSetCCCondCode(
4240 DAG, getSetCCResultType(Tmp2.getValueType()), Tmp2, Tmp3, Tmp4,
4241 /*Mask*/ SDValue(), /*EVL*/ SDValue(), NeedInvert, dl, Chain);
4242 (void)Legalized;
4243 assert(Legalized && "Can't legalize BR_CC with legal condition!");
4244
4245 // If we expanded the SETCC by swapping LHS and RHS, create a new BR_CC
4246 // node.
4247 if (Tmp4.getNode()) {
4248 assert(!NeedInvert && "Don't know how to invert BR_CC!");
4249
4250 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1,
4251 Tmp4, Tmp2, Tmp3, Node->getOperand(4));
4252 } else {
4253 Tmp3 = DAG.getConstant(0, dl, Tmp2.getValueType());
4254 Tmp4 = DAG.getCondCode(NeedInvert ? ISD::SETEQ : ISD::SETNE);
4255 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4,
4256 Tmp2, Tmp3, Node->getOperand(4));
4257 }
4258 Results.push_back(Tmp1);
4259 break;
4260 }
4261 case ISD::BUILD_VECTOR:
4262 Results.push_back(ExpandBUILD_VECTOR(Node));
4263 break;
4264 case ISD::SPLAT_VECTOR:
4265 Results.push_back(ExpandSPLAT_VECTOR(Node));
4266 break;
4267 case ISD::SRA:
4268 case ISD::SRL:
4269 case ISD::SHL: {
4270 // Scalarize vector SRA/SRL/SHL.
4271 EVT VT = Node->getValueType(0);
4272 assert(VT.isVector() && "Unable to legalize non-vector shift");
4273 assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal");
4274 unsigned NumElem = VT.getVectorNumElements();
4275
4276 SmallVector<SDValue, 8> Scalars;
4277 for (unsigned Idx = 0; Idx < NumElem; Idx++) {
4278 SDValue Ex =
4279 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT.getScalarType(),
4280 Node->getOperand(0), DAG.getVectorIdxConstant(Idx, dl));
4281 SDValue Sh =
4282 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT.getScalarType(),
4283 Node->getOperand(1), DAG.getVectorIdxConstant(Idx, dl));
4284 Scalars.push_back(DAG.getNode(Node->getOpcode(), dl,
4285 VT.getScalarType(), Ex, Sh));
4286 }
4287
4288 SDValue Result = DAG.getBuildVector(Node->getValueType(0), dl, Scalars);
4289 Results.push_back(Result);
4290 break;
4291 }
4292 case ISD::VECREDUCE_FADD:
4293 case ISD::VECREDUCE_FMUL:
4294 case ISD::VECREDUCE_ADD:
4295 case ISD::VECREDUCE_MUL:
4296 case ISD::VECREDUCE_AND:
4297 case ISD::VECREDUCE_OR:
4298 case ISD::VECREDUCE_XOR:
4299 case ISD::VECREDUCE_SMAX:
4300 case ISD::VECREDUCE_SMIN:
4301 case ISD::VECREDUCE_UMAX:
4302 case ISD::VECREDUCE_UMIN:
4303 case ISD::VECREDUCE_FMAX:
4304 case ISD::VECREDUCE_FMIN:
4305 case ISD::VECREDUCE_FMAXIMUM:
4306 case ISD::VECREDUCE_FMINIMUM:
4307 Results.push_back(TLI.expandVecReduce(Node, DAG));
4308 break;
4309 case ISD::VP_CTTZ_ELTS:
4310 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
4311 Results.push_back(TLI.expandVPCTTZElements(Node, DAG));
4312 break;
4313 case ISD::CLEAR_CACHE:
4314 // The default expansion of llvm.clear_cache is simply a no-op for those
4315 // targets where it is not needed.
4316 Results.push_back(Node->getOperand(0));
4317 break;
4318 case ISD::GLOBAL_OFFSET_TABLE:
4319 case ISD::GlobalAddress:
4320 case ISD::GlobalTLSAddress:
4321 case ISD::ExternalSymbol:
4322 case ISD::ConstantPool:
4323 case ISD::JumpTable:
4324 case ISD::INTRINSIC_W_CHAIN:
4325 case ISD::INTRINSIC_WO_CHAIN:
4326 case ISD::INTRINSIC_VOID:
4327 // FIXME: Custom lowering for these operations shouldn't return null!
4328 // Return true so that we don't call ConvertNodeToLibcall which also won't
4329 // do anything.
4330 return true;
4331 }
4332
4333 if (!TLI.isStrictFPEnabled() && Results.empty() && Node->isStrictFPOpcode()) {
4334 // FIXME: We were asked to expand a strict floating-point operation,
4335 // but there is currently no expansion implemented that would preserve
4336 // the "strict" properties. For now, we just fall back to the non-strict
4337 // version if that is legal on the target. The actual mutation of the
4338 // operation will happen in SelectionDAGISel::DoInstructionSelection.
4339 switch (Node->getOpcode()) {
4340 default:
4341 if (TLI.getStrictFPOperationAction(Node->getOpcode(),
4342 Node->getValueType(0))
4343 == TargetLowering::Legal)
4344 return true;
4345 break;
4346 case ISD::STRICT_FSUB: {
4347 if (TLI.getStrictFPOperationAction(
4348 ISD::STRICT_FSUB, Node->getValueType(0)) == TargetLowering::Legal)
4349 return true;
4350 if (TLI.getStrictFPOperationAction(
4351 ISD::STRICT_FADD, Node->getValueType(0)) != TargetLowering::Legal)
4352 break;
4353
4354 EVT VT = Node->getValueType(0);
4355 const SDNodeFlags Flags = Node->getFlags();
4356 SDValue Neg = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(2), Flags);
4357 SDValue Fadd = DAG.getNode(ISD::STRICT_FADD, dl, Node->getVTList(),
4358 {Node->getOperand(0), Node->getOperand(1), Neg},
4359 Flags);
4360
4361 Results.push_back(Fadd);
4362 Results.push_back(Fadd.getValue(1));
4363 break;
4364 }
4365 case ISD::STRICT_SINT_TO_FP:
4366 case ISD::STRICT_UINT_TO_FP:
4367 case ISD::STRICT_LRINT:
4368 case ISD::STRICT_LLRINT:
4369 case ISD::STRICT_LROUND:
4370 case ISD::STRICT_LLROUND:
4371 // These are registered by the operand type instead of the value
4372 // type. Reflect that here.
4373 if (TLI.getStrictFPOperationAction(Node->getOpcode(),
4374 Node->getOperand(1).getValueType())
4375 == TargetLowering::Legal)
4376 return true;
4377 break;
4378 }
4379 }
4380
4381 // Replace the original node with the legalized result.
4382 if (Results.empty()) {
4383 LLVM_DEBUG(dbgs() << "Cannot expand node\n");
4384 return false;
4385 }
4386
4387 LLVM_DEBUG(dbgs() << "Successfully expanded node\n");
4388 ReplaceNode(Node, Results.data());
4389 return true;
4390}
4391
4392void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) {
4393 LLVM_DEBUG(dbgs() << "Trying to convert node to libcall\n");
4394 SmallVector<SDValue, 8> Results;
4395 SDLoc dl(Node);
4396 // FIXME: Check flags on the node to see if we can use a finite call.
4397 unsigned Opc = Node->getOpcode();
4398 switch (Opc) {
4399 case ISD::ATOMIC_FENCE: {
4400 // If the target didn't lower this, lower it to '__sync_synchronize()' call
4401 // FIXME: handle "fence singlethread" more efficiently.
4402 TargetLowering::ArgListTy Args;
4403
4404 TargetLowering::CallLoweringInfo CLI(DAG);
4405 CLI.setDebugLoc(dl)
4406 .setChain(Node->getOperand(0))
4407 .setLibCallee(
4408 CallingConv::C, Type::getVoidTy(*DAG.getContext()),
4409 DAG.getExternalSymbol("__sync_synchronize",
4410 TLI.getPointerTy(DAG.getDataLayout())),
4411 std::move(Args));
4412
4413 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
4414
4415 Results.push_back(CallResult.second);
4416 break;
4417 }
4418 // By default, atomic intrinsics are marked Legal and lowered. Targets
4419 // which don't support them directly, however, may want libcalls, in which
4420 // case they mark them Expand, and we get here.
4421 case ISD::ATOMIC_SWAP:
4422 case ISD::ATOMIC_LOAD_ADD:
4423 case ISD::ATOMIC_LOAD_SUB:
4424 case ISD::ATOMIC_LOAD_AND:
4425 case ISD::ATOMIC_LOAD_CLR:
4426 case ISD::ATOMIC_LOAD_OR:
4427 case ISD::ATOMIC_LOAD_XOR:
4428 case ISD::ATOMIC_LOAD_NAND:
4429 case ISD::ATOMIC_LOAD_MIN:
4430 case ISD::ATOMIC_LOAD_MAX:
4431 case ISD::ATOMIC_LOAD_UMIN:
4432 case ISD::ATOMIC_LOAD_UMAX:
4433 case ISD::ATOMIC_CMP_SWAP: {
4434 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
4435 AtomicOrdering Order = cast<AtomicSDNode>(Node)->getMergedOrdering();
4436 RTLIB::Libcall LC = RTLIB::getOUTLINE_ATOMIC(Opc, Order, VT);
4437 EVT RetVT = Node->getValueType(0);
4438 SmallVector<SDValue, 4> Ops;
4439 TargetLowering::MakeLibCallOptions CallOptions;
4440 if (TLI.getLibcallName(LC)) {
4441 // If an outline atomic helper is available, prepare its arguments and expand.
4442 Ops.append(Node->op_begin() + 2, Node->op_end());
4443 Ops.push_back(Node->getOperand(1));
4444
4445 } else {
4446 LC = RTLIB::getSYNC(Opc, VT);
4447 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
4448 "Unexpected atomic op or value type!");
4449 // Arguments for expansion to sync libcall
4450 Ops.append(Node->op_begin() + 1, Node->op_end());
4451 }
4452 std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RetVT,
4453 Ops, CallOptions,
4454 SDLoc(Node),
4455 Node->getOperand(0));
4456 Results.push_back(Tmp.first);
4457 Results.push_back(Tmp.second);
4458 break;
4459 }
4460 case ISD::TRAP: {
4461 // If this operation is not supported, lower it to 'abort()' call
4462 TargetLowering::ArgListTy Args;
4463 TargetLowering::CallLoweringInfo CLI(DAG);
4464 CLI.setDebugLoc(dl)
4465 .setChain(Node->getOperand(0))
4466 .setLibCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
4467 DAG.getExternalSymbol(
4468 "abort", TLI.getPointerTy(DAG.getDataLayout())),
4469 std::move(Args));
4470 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
4471
4472 Results.push_back(CallResult.second);
4473 break;
4474 }
4475 case ISD::CLEAR_CACHE: {
4476 TargetLowering::MakeLibCallOptions CallOptions;
4477 SDValue InputChain = Node->getOperand(0);
4478 SDValue StartVal = Node->getOperand(1);
4479 SDValue EndVal = Node->getOperand(2);
4480 std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(
4481 DAG, RTLIB::CLEAR_CACHE, MVT::isVoid, {StartVal, EndVal}, CallOptions,
4482 SDLoc(Node), InputChain);
4483 Results.push_back(Tmp.second);
4484 break;
4485 }
4486 case ISD::FMINNUM:
4487 case ISD::STRICT_FMINNUM:
4488 ExpandFPLibCall(Node, RTLIB::FMIN_F32, RTLIB::FMIN_F64,
4489 RTLIB::FMIN_F80, RTLIB::FMIN_F128,
4490 RTLIB::FMIN_PPCF128, Results);
4491 break;
4492 // FIXME: We do not have libcalls for FMAXIMUM and FMINIMUM. So, we cannot use
4493 // libcall legalization for these nodes, but there is no default expansion for
4494 // these nodes either (see PR63267 for example).
4495 case ISD::FMAXNUM:
4496 case ISD::STRICT_FMAXNUM:
4497 ExpandFPLibCall(Node, RTLIB::FMAX_F32, RTLIB::FMAX_F64,
4498 RTLIB::FMAX_F80, RTLIB::FMAX_F128,
4499 RTLIB::FMAX_PPCF128, Results);
4500 break;
4501 case ISD::FSQRT:
4502 case ISD::STRICT_FSQRT:
4503 ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
4504 RTLIB::SQRT_F80, RTLIB::SQRT_F128,
4505 RTLIB::SQRT_PPCF128, Results);
4506 break;
4507 case ISD::FCBRT:
4508 ExpandFPLibCall(Node, RTLIB::CBRT_F32, RTLIB::CBRT_F64,
4509 RTLIB::CBRT_F80, RTLIB::CBRT_F128,
4510 RTLIB::CBRT_PPCF128, Results);
4511 break;
4512 case ISD::FSIN:
4513 case ISD::STRICT_FSIN:
4514 ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
4515 RTLIB::SIN_F80, RTLIB::SIN_F128,
4516 RTLIB::SIN_PPCF128, Results);
4517 break;
4518 case ISD::FCOS:
4519 case ISD::STRICT_FCOS:
4520 ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
4521 RTLIB::COS_F80, RTLIB::COS_F128,
4522 RTLIB::COS_PPCF128, Results);
4523 break;
4524 case ISD::FTAN:
4525 case ISD::STRICT_FTAN:
4526 ExpandFPLibCall(Node, RTLIB::TAN_F32, RTLIB::TAN_F64, RTLIB::TAN_F80,
4527 RTLIB::TAN_F128, RTLIB::TAN_PPCF128, Results);
4528 break;
4529 case ISD::FSINCOS:
4530 // Expand into sincos libcall.
4531 ExpandSinCosLibCall(Node, Results);
4532 break;
4533 case ISD::FLOG:
4534 case ISD::STRICT_FLOG:
4535 ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64, RTLIB::LOG_F80,
4536 RTLIB::LOG_F128, RTLIB::LOG_PPCF128, Results);
4537 break;
4538 case ISD::FLOG2:
4539 case ISD::STRICT_FLOG2:
4540 ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64, RTLIB::LOG2_F80,
4541 RTLIB::LOG2_F128, RTLIB::LOG2_PPCF128, Results);
4542 break;
4543 case ISD::FLOG10:
4544 case ISD::STRICT_FLOG10:
4545 ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64, RTLIB::LOG10_F80,
4546 RTLIB::LOG10_F128, RTLIB::LOG10_PPCF128, Results);
4547 break;
4548 case ISD::FEXP:
4549 case ISD::STRICT_FEXP:
4550 ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64, RTLIB::EXP_F80,
4551 RTLIB::EXP_F128, RTLIB::EXP_PPCF128, Results);
4552 break;
4553 case ISD::FEXP2:
4554 case ISD::STRICT_FEXP2:
4555 ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64, RTLIB::EXP2_F80,
4556 RTLIB::EXP2_F128, RTLIB::EXP2_PPCF128, Results);
4557 break;
4558 case ISD::FEXP10:
4559 ExpandFPLibCall(Node, RTLIB::EXP10_F32, RTLIB::EXP10_F64, RTLIB::EXP10_F80,
4560 RTLIB::EXP10_F128, RTLIB::EXP10_PPCF128, Results);
4561 break;
4562 case ISD::FTRUNC:
4563 case ISD::STRICT_FTRUNC:
4564 ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
4565 RTLIB::TRUNC_F80, RTLIB::TRUNC_F128,
4566 RTLIB::TRUNC_PPCF128, Results);
4567 break;
4568 case ISD::FFLOOR:
4569 case ISD::STRICT_FFLOOR:
4570 ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
4571 RTLIB::FLOOR_F80, RTLIB::FLOOR_F128,
4572 RTLIB::FLOOR_PPCF128, Results);
4573 break;
4574 case ISD::FCEIL:
4575 case ISD::STRICT_FCEIL:
4576 ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
4577 RTLIB::CEIL_F80, RTLIB::CEIL_F128,
4578 RTLIB::CEIL_PPCF128, Results);
4579 break;
4580 case ISD::FRINT:
4581 case ISD::STRICT_FRINT:
4582 ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
4583 RTLIB::RINT_F80, RTLIB::RINT_F128,
4584 RTLIB::RINT_PPCF128, Results);
4585 break;
4586 case ISD::FNEARBYINT:
4587 case ISD::STRICT_FNEARBYINT:
4588 ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
4589 RTLIB::NEARBYINT_F64,
4590 RTLIB::NEARBYINT_F80,
4591 RTLIB::NEARBYINT_F128,
4592 RTLIB::NEARBYINT_PPCF128, Results);
4593 break;
4594 case ISD::FROUND:
4595 case ISD::STRICT_FROUND:
4596 ExpandFPLibCall(Node, RTLIB::ROUND_F32,
4597 RTLIB::ROUND_F64,
4598 RTLIB::ROUND_F80,
4599 RTLIB::ROUND_F128,
4600 RTLIB::ROUND_PPCF128, Results);
4601 break;
4602 case ISD::FROUNDEVEN:
4603 case ISD::STRICT_FROUNDEVEN:
4604 ExpandFPLibCall(Node, RTLIB::ROUNDEVEN_F32,
4605 RTLIB::ROUNDEVEN_F64,
4606 RTLIB::ROUNDEVEN_F80,
4607 RTLIB::ROUNDEVEN_F128,
4608 RTLIB::ROUNDEVEN_PPCF128, Results);
4609 break;
4610 case ISD::FLDEXP:
4611 case ISD::STRICT_FLDEXP:
4612 ExpandFPLibCall(Node, RTLIB::LDEXP_F32, RTLIB::LDEXP_F64, RTLIB::LDEXP_F80,
4613 RTLIB::LDEXP_F128, RTLIB::LDEXP_PPCF128, Results);
4614 break;
4615 case ISD::FFREXP: {
4616 ExpandFrexpLibCall(Node, Results);
4617 break;
4618 }
4619 case ISD::FPOWI:
4620 case ISD::STRICT_FPOWI: {
4621 RTLIB::Libcall LC = RTLIB::getPOWI(Node->getSimpleValueType(0));
4622 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fpowi.");
4623 if (!TLI.getLibcallName(LC)) {
4624 // Some targets don't have a powi libcall; use pow instead.
4625 if (Node->isStrictFPOpcode()) {
4626 SDValue Exponent =
4627 DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(Node),
4628 {Node->getValueType(0), Node->getValueType(1)},
4629 {Node->getOperand(0), Node->getOperand(2)});
4630 SDValue FPOW =
4631 DAG.getNode(ISD::STRICT_FPOW, SDLoc(Node),
4632 {Node->getValueType(0), Node->getValueType(1)},
4633 {Exponent.getValue(1), Node->getOperand(1), Exponent});
4634 Results.push_back(FPOW);
4635 Results.push_back(FPOW.getValue(1));
4636 } else {
4637