//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

32#include "llvm/IR/Function.h"
33#include "llvm/IR/Intrinsics.h"
34#include "llvm/IR/IntrinsicsWebAssembly.h"
36#include "llvm/Support/Debug.h"
42using namespace llvm;
43
44#define DEBUG_TYPE "wasm-lower"
45
47 const TargetMachine &TM, const WebAssemblySubtarget &STI)
48 : TargetLowering(TM), Subtarget(&STI) {
49 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
50
51 // Booleans always contain 0 or 1.
53 // Except in SIMD vectors
55 // We don't know the microarchitecture here, so just reduce register pressure.
57 // Tell ISel that we have a stack pointer.
59 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
60 // Set up the register classes.
61 addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
62 addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
63 addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
64 addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
65 if (Subtarget->hasSIMD128()) {
66 addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
67 addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
68 addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
69 addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
70 addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
71 addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
72 }
73 if (Subtarget->hasHalfPrecision()) {
74 addRegisterClass(MVT::v8f16, &WebAssembly::V128RegClass);
75 }
76 if (Subtarget->hasReferenceTypes()) {
77 addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
78 addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
79 if (Subtarget->hasExceptionHandling()) {
80 addRegisterClass(MVT::exnref, &WebAssembly::EXNREFRegClass);
81 }
82 }
83 // Compute derived properties from the register classes.
85
  // Transform loads and stores to pointers in address space 1 to loads and
  // stores to WebAssembly global variables, outside linear memory.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
    setOperationAction(ISD::LOAD, T, Custom);
    setOperationAction(ISD::STORE, T, Custom);
  }
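  // For example (a sketch; the exact wasm symbol name depends on the global):
  //   %v = load i32, ptr addrspace(1) @g   ; IR load from address space 1
  // becomes a wasm global access rather than a linear-memory load:
  //   global.get $g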
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }
  if (Subtarget->hasReferenceTypes()) {
    // We need custom load and store lowering for externref, funcref, and
    // Other. The MVT::Other here represents tables of reference types.
    for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);
  setOperationAction(ISD::CLEAR_CACHE, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Mark as legal the floating-point library function operators that
    // otherwise default to expand.
    for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
                    ISD::FRINT, ISD::FROUNDEVEN})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  if (Subtarget->hasHalfPrecision()) {
    setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);
  }

  if (Subtarget->hasNontrappingFPToInt())
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      for (auto T : {MVT::i32, MVT::i64})
        setOperationAction(Op, T, Custom);

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Combine vector mask reductions into alltrue/anytrue
    setTargetDAGCombine(ISD::SETCC);

    // Convert vector to integer bitcasts to bitmask
    setTargetDAGCombine(ISD::BITCAST);

    // Hoist bitcasts out of shuffles
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

    // Combine extends of extract_subvectors into widening ops
    setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND});

    // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
    // conversion ops
    setTargetDAGCombine({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_EXTEND,
                         ISD::EXTRACT_SUBVECTOR});

    // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
    // into conversion ops
    setTargetDAGCombine({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                         ISD::FP_ROUND, ISD::CONCAT_VECTORS});

    setTargetDAGCombine(ISD::TRUNCATE);

    // Support saturating add for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Support integer abs
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
      setOperationAction(ISD::ABS, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Support splatting
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SPLAT_VECTOR, T, Legal);

    // Custom lowering since wasm shifts must have a scalar shift amount
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32,
                     MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Custom);

    // There is no i8x16.mul instruction
    setOperationAction(ISD::MUL, MVT::v16i8, Expand);

    // There is no vector conditional select instruction
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SELECT_CC, T, Expand);

    // Expand integer operations supported for scalars but not SIMD
    for (auto Op :
         {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);

    // But we do have integer min and max operations
    for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Legal);

    // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
    setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);

    // Custom lower bit counting operations for other types to scalarize them.
    for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
      for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Expand float operations supported for scalars but not SIMD
    for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2})
      for (auto T : {MVT::v4f32, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // Unsigned comparison operations are unavailable for i64x2 vectors.
    for (auto CC : {ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, MVT::v2i64, Custom);

    // 64x2 conversions are not in the spec
    for (auto Op :
         {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // But saturating fp_to_int conversions are
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      setOperationAction(Op, MVT::v4i32, Custom);

    // Support vector extending
    for (auto T : MVT::integer_fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Custom);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_fixedlen_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  // - Floating-point extending loads.
  // - Floating-point truncating stores.
  // - i1 extending loads.
  // - truncating SIMD stores and most extending loads.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
    // But some vector extending loads are legal
    for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
    }
    setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for return address helper.
  // TODO: when implementing other Wasm backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
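  // For example, a dense three-case switch becomes a single br_table
  // (illustrative wasm; label names are arbitrary):
  //   br_table $case0 $case1 $case2 $default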
  setMinimumJumpTableEntries(2);
}

MVT WebAssemblyTargetLowering::getPointerTy(const DataLayout &DL,
                                            uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerTy(DL, AS);
}

MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL,
                                               uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerMemTy(DL, AS);
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
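  // (for example, "atomicrmw add" maps onto i32.atomic.rmw.add); operations
  // without a wasm instruction, such as nand, are expanded to a
  // compare-exchange loop by the generic AtomicExpand pass.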
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
  // Implementation copied from X86TargetLowering.
  unsigned Opc = VecOp.getOpcode();

  // Assume target opcodes can't be scalarized.
  // TODO - do we have any exceptions?
  if (Opc >= ISD::BUILTIN_OP_END)
    return false;

  // If the vector op is not supported, try to convert to scalar.
  EVT VecVT = VecOp.getValueType();
  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
    return true;

  // If the vector op is supported, but the scalar op is not, the transform may
  // not be worthwhile.
  EVT ScalarVT = VecVT.getScalarType();
  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
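// In outline, the emitted control flow is (a sketch of the unsigned case):
//   CmpReg   = (InReg < CmpVal) & (InReg >= 0.0)   // range check
//   br_if !CmpReg -> TrueMBB                       // out of range
//   FalseMBB: FalseReg = trunc(InReg)              // the real conversion
//   TrueMBB:  TrueReg  = Substitute                // 0 (unsigned), INT_MIN (signed)
//   DoneMBB:  OutReg   = phi(FalseReg, TrueReg)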
462 const TargetInstrInfo &TII,
463 bool IsUnsigned, bool Int64,
464 bool Float64, unsigned LoweredOpcode) {
466
467 Register OutReg = MI.getOperand(0).getReg();
468 Register InReg = MI.getOperand(1).getReg();
469
470 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
471 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
472 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
473 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
474 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
475 unsigned Eqz = WebAssembly::EQZ_I32;
476 unsigned And = WebAssembly::AND_I32;
477 int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
478 int64_t Substitute = IsUnsigned ? 0 : Limit;
479 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
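  // e.g. for i32: Limit = INT32_MIN = -2^31, so unsigned CmpVal = 2^32 and
  // signed CmpVal = 2^31; the (fabs'd) input must be strictly below CmpVal.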
480 auto &Context = BB->getParent()->getFunction().getContext();
481 Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
482
483 const BasicBlock *LLVMBB = BB->getBasicBlock();
484 MachineFunction *F = BB->getParent();
485 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
486 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
487 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
488
490 F->insert(It, FalseMBB);
491 F->insert(It, TrueMBB);
492 F->insert(It, DoneMBB);
493
494 // Transfer the remainder of BB and its successor edges to DoneMBB.
495 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
497
498 BB->addSuccessor(TrueMBB);
499 BB->addSuccessor(FalseMBB);
500 TrueMBB->addSuccessor(DoneMBB);
501 FalseMBB->addSuccessor(DoneMBB);
502
503 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
504 Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
505 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
506 CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
507 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
508 FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
509 TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
510
511 MI.eraseFromParent();
512 // For signed numbers, we can do a single comparison to determine whether
513 // fabs(x) is within range.
514 if (IsUnsigned) {
515 Tmp0 = InReg;
516 } else {
517 BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
518 }
519 BuildMI(BB, DL, TII.get(FConst), Tmp1)
520 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
521 BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
522
523 // For unsigned numbers, we have to do a separate comparison with zero.
524 if (IsUnsigned) {
525 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
526 Register SecondCmpReg =
527 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
528 Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
529 BuildMI(BB, DL, TII.get(FConst), Tmp1)
530 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
531 BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
532 BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
533 CmpReg = AndReg;
534 }
535
536 BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
537
538 // Create the CFG diamond to select between doing the conversion or using
539 // the substitute value.
540 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
541 BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
542 BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
543 BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
544 BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
545 .addReg(FalseReg)
546 .addMBB(FalseMBB)
547 .addReg(TrueReg)
548 .addMBB(TrueMBB);
549
550 return DoneMBB;
551}

static MachineBasicBlock *
LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
                 const WebAssemblySubtarget *Subtarget,
                 const TargetInstrInfo &TII) {
  MachineInstr &CallParams = *CallResults.getPrevNode();
  assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
  assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
         CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);

  bool IsIndirect =
      CallParams.getOperand(0).isReg() || CallParams.getOperand(0).isFI();
  bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;

  bool IsFuncrefCall = false;
  if (IsIndirect && CallParams.getOperand(0).isReg()) {
    Register Reg = CallParams.getOperand(0).getReg();
    const MachineFunction *MF = BB->getParent();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
    IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
    assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
  }

  unsigned CallOp;
  if (IsIndirect && IsRetCall) {
    CallOp = WebAssembly::RET_CALL_INDIRECT;
  } else if (IsIndirect) {
    CallOp = WebAssembly::CALL_INDIRECT;
  } else if (IsRetCall) {
    CallOp = WebAssembly::RET_CALL;
  } else {
    CallOp = WebAssembly::CALL;
  }

  MachineFunction &MF = *BB->getParent();
  const MCInstrDesc &MCID = TII.get(CallOp);
  MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));

  // Move the function pointer to the end of the arguments for indirect calls
  if (IsIndirect) {
    auto FnPtr = CallParams.getOperand(0);
    CallParams.removeOperand(0);

    // For funcrefs, call_indirect is done through __funcref_call_table and the
    // funcref is always installed in slot 0 of the table; therefore, instead
    // of adding the function pointer at the end of the params list, we add a
    // zero (the funcref's index in __funcref_call_table).
    if (IsFuncrefCall) {
      Register RegZero =
          MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
      MachineInstrBuilder MIBC0 =
          BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);

      BB->insert(CallResults.getIterator(), MIBC0);
      MachineInstrBuilder(MF, CallParams).addReg(RegZero);
    } else
      CallParams.addOperand(FnPtr);
  }

  for (auto Def : CallResults.defs())
    MIB.add(Def);

  if (IsIndirect) {
    // Placeholder for the type index.
    MIB.addImm(0);
    // The table into which this call_indirect indexes.
    MCSymbolWasm *Table = IsFuncrefCall
                              ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
                                    MF.getContext(), Subtarget)
                              : WebAssembly::getOrCreateFunctionTableSymbol(
                                    MF.getContext(), Subtarget);
    if (Subtarget->hasReferenceTypes()) {
      MIB.addSym(Table);
    } else {
      // For the MVP there is at most one table whose number is 0, but we can't
      // write a table symbol or issue relocations. Instead we just ensure the
      // table is live and write a zero.
      Table->setNoStrip();
      MIB.addImm(0);
    }
  }

  for (auto Use : CallParams.uses())
    MIB.add(Use);

  BB->insert(CallResults.getIterator(), MIB);
  CallParams.eraseFromParent();
  CallResults.eraseFromParent();

  // If this is a funcref call, to avoid hidden GC roots, we need to clear the
  // table slot with ref.null upon call_indirect return.
  //
  // This generates the following code, which comes right after a call_indirect
  // of a funcref:
  //
  //   i32.const 0
  //   ref.null func
  //   table.set __funcref_call_table
  if (IsIndirect && IsFuncrefCall) {
    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    Register RegZero =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    MachineInstr *Const0 =
        BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
    BB->insertAfter(MIB.getInstr()->getIterator(), Const0);

    Register RegFuncref =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
    MachineInstr *RefNull =
        BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
    BB->insertAfter(Const0->getIterator(), RefNull);

    MachineInstr *TableSet =
        BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
            .addSym(Table)
            .addReg(RegZero)
            .addReg(RegFuncref);
    BB->insertAfter(RefNull->getIterator(), TableSet);
  }

  return BB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
  case WebAssembly::CALL_RESULTS:
  case WebAssembly::RET_CALL_RESULTS:
    return LowerCallResults(MI, DL, BB, Subtarget, TII);
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
  case WebAssemblyISD::FIRST_MEM_OPCODE:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
#include "WebAssemblyISD.def"
#undef HANDLE_MEM_NODETYPE
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      if (VT.isFloatingPoint() && !VT.isVector()) {
        switch (VT.getSizeInBits()) {
        case 32:
          return std::make_pair(0U, &WebAssembly::F32RegClass);
        case 64:
          return std::make_pair(0U, &WebAssembly::F64RegClass);
        default:
          break;
        }
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode gives us no way to determine if wrapping could be
  // happening, so we approximate this by accepting only non-negative offsets.
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
    MachineMemOperand::Flags /*Flags*/, unsigned *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = 1;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
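  // These correspond to wasm's extending loads; e.g. a sign-extending load of
  // <8 x i8> into <8 x i16> becomes a single i16x8.load8x8_s.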
  EVT ExtT = ExtVal.getValueType();
  EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
  return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
         (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
         (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
}

bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // Wasm doesn't support function addresses with offsets
  const GlobalValue *GV = GA->getGlobal();
  return isa<Function>(GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
}

bool WebAssemblyTargetLowering::shouldSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
  using namespace llvm::PatternMatch;

  if (!I->getType()->isVectorTy() || !I->isShift())
    return false;

  Value *V = I->getOperand(1);
  // We don't need to sink a constant splat.
  if (dyn_cast<Constant>(V))
    return false;

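  // What we sink is the canonical splat-shift-amount pattern, e.g. (IR):
  //   %ins = insertelement <4 x i32> poison, i32 %amt, i32 0
  //   %splat = shufflevector <4 x i32> %ins, <4 x i32> poison,
  //                          <4 x i32> zeroinitializer
  //   %r = shl <4 x i32> %v, %splat
  // Sinking the insert and shuffle next to the shift lets isel feed the
  // scalar %amt directly to a wasm shift, which takes a scalar shift amount.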
  if (match(V, m_Shuffle(m_InsertElt(m_Value(), m_Value(), m_ZeroInt()),
                         m_Value(), m_ZeroMask()))) {
    // Sink insert
    Ops.push_back(&cast<Instruction>(V)->getOperandUse(0));
    // Sink shuffle
    Ops.push_back(&I->getOperandUse(1));
    return true;
  }

  return false;
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  // So far, all branch instructions in Wasm take an I32 condition.
  // The default TargetLowering::getSetCCResultType returns the pointer size,
  // which would be useful to reduce instruction counts when testing
  // against 64-bit pointers/values if at some point Wasm supports that.
  return EVT::getIntegerVT(C, 32);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // atomic.notify instruction does not really load the memory specified with
    // this argument, but MachineMemOperand should either be load or store, so
    // we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_loadf16_f32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(2);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_storef16_f32:
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = Align(2);
    Info.flags = MachineMemOperand::MOStore;
    return true;
  default:
    return false;
  }
}

void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
    const SelectionDAG &DAG, unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Op.getConstantOperandVal(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::wasm_bitmask: {
      unsigned BitWidth = Known.getBitWidth();
      EVT VT = Op.getOperand(1).getSimpleValueType();
      unsigned PossibleBits = VT.getVectorNumElements();
      APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
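      // e.g. a v16i8 operand yields only 16 meaningful bits, so the upper
      // BitWidth - 16 bits of the result are known to be zero.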
      Known.Zero |= ZeroMask;
      break;
    }
    }
  }
  }
}

TargetLoweringBase::LegalizeTypeAction
WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
  if (VT.isFixedLengthVector()) {
    MVT EltVT = VT.getVectorElementType();
    // We have legal vector types with these lane types, so widening the
    // vector would let us use some of the lanes directly without having to
    // extend or truncate values.
    if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
        EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
      return TypeWidenVector;
  }

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
    SDValue Op, const TargetLoweringOpt &TLO) const {
  // The ISel process runs DAGCombiner after legalization; this step is called
  // the SelectionDAG optimization phase. This post-legalization combining
  // process runs DAGCombiner on each node, and if there was a change to be
  // made, re-runs legalization on it and its user nodes to make sure
  // everything is in a legalized state.
  //
  // The legalization calls lowering routines, and we do our custom lowering
  // for build_vectors (LowerBUILD_VECTOR), which converts undef vector
  // elements into zeros. But there is a set of routines in DAGCombiner that
  // turns unused (= not demanded) nodes into undef, among which
  // SimplifyDemandedVectorElts turns unused vector elements into undefs. This
  // routine does not work with our custom LowerBUILD_VECTOR, which turns
  // undefs into zeros. The combination can result in an infinite loop, in
  // which undefs are converted to zeros in legalization and back to undefs in
  // combining.
  //
  // So after the DAG is legalized, we prevent SimplifyDemandedVectorElts from
  // running for build_vectors.
  if (Op.getOpcode() == ISD::BUILD_VECTOR && TLO.LegalOps && TLO.LegalTys)
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent, target-independent
  // conventions. We don't yet have a way to annotate calls with properties
  // like "cold", and we don't have any call-clobbered registers, so these are
  // mostly all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS ||
         CallConv == CallingConv::WASM_EmscriptenInvoke ||
         CallConv == CallingConv::Swift;
}

SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }
1088
1090 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1091 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1092
1093 // The generic code may have added an sret argument. If we're lowering an
1094 // invoke function, the ABI requires that the function pointer be the first
1095 // argument, so we may have to swap the arguments.
1096 if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
1097 Outs[0].Flags.isSRet()) {
1098 std::swap(Outs[0], Outs[1]);
1099 std::swap(OutVals[0], OutVals[1]);
1100 }
1101
1102 bool HasSwiftSelfArg = false;
1103 bool HasSwiftErrorArg = false;
1104 unsigned NumFixedArgs = 0;
1105 for (unsigned I = 0; I < Outs.size(); ++I) {
1106 const ISD::OutputArg &Out = Outs[I];
1107 SDValue &OutVal = OutVals[I];
1108 HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
1109 HasSwiftErrorArg |= Out.Flags.isSwiftError();
1110 if (Out.Flags.isNest())
1111 fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
1112 if (Out.Flags.isInAlloca())
1113 fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
1114 if (Out.Flags.isInConsecutiveRegs())
1115 fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
1117 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
1118 if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
1119 auto &MFI = MF.getFrameInfo();
1120 int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
1122 /*isSS=*/false);
1123 SDValue SizeNode =
1124 DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
1125 SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
1126 Chain = DAG.getMemcpy(Chain, DL, FINode, OutVal, SizeNode,
1128 /*isVolatile*/ false, /*AlwaysInline=*/false,
1129 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
1131 OutVal = FINode;
1132 }
1133 // Count the number of fixed args *after* legalization.
1134 NumFixedArgs += Out.IsFixed;
1135 }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments if they
  // are not already present. These additional arguments are also added to the
  // callee signature; they are necessary to match the callee and caller
  // signatures for indirect calls.
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is), turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT, which is not needed for direct calls.
    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, PtrVT,
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  // Lastly, if this is a call to a funcref, we need to add a table.set
  // instruction to the chain and transform the call.
  if (CLI.CB && WebAssembly::isWebAssemblyFuncrefType(
                    CLI.CB->getCalledOperand()->getType())) {
    // In the absence of the function-references proposal, where a funcref call
    // would be lowered to call_ref, we use reference types: we generate a
    // table.set to install the funcref in a special table used solely for this
    // purpose, followed by a call_indirect. Here we just generate the
    // table.set, and return the SDValue of the table.set so that LowerCall can
    // finalize the lowering by generating the call_indirect.
    SDValue Chain = Ops[0];

    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
    SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
    SDValue TableSet = DAG.getMemIntrinsicNode(
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        // Machine Mem Operand args
        MachinePointerInfo(
            WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF),
        CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
        MachineMemOperand::MOStore);

    Ops[0] = TableSet; // The new chain is the TableSet itself
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can only handle returning tuples with multivalue enabled
  return WebAssembly::canLowerReturn(Outs.size(), Subtarget);
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(WebAssembly::canLowerReturn(Outs.size(), Subtarget) &&
         "MVP WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the
  // liveness of the incoming values before they're represented by virtual
  // registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc, emit additional swiftself and swifterror arguments if they
  // are not already present. These additional arguments are also added to the
  // callee signature; they are necessary to match the callee and caller
  // signatures for indirect calls.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
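  // For example, for a callee declared as f(int, ...) and a call f(1, 2.0),
  // the caller stores 2.0 into a stack buffer and passes the buffer's address
  // as an extra trailing argument; va_arg then walks that buffer.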
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
                      MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}

void WebAssemblyTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be
    // an illegal type.
    break;
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    // Do not add any results, signifying that N should not be custom lowered.
    // EXTEND_VECTOR_INREG is implemented for some vectors, but not all.
    break;
  default:
    llvm_unreachable(
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}

//===----------------------------------------------------------------------===//
// Custom lowering hooks.
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return LowerEXTEND_VECTOR_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    return LowerFP_TO_INT_SAT(Op, DAG);
  case ISD::LOAD:
    return LowerLoad(Op, DAG);
  case ISD::STORE:
    return LowerStore(Op, DAG);
  case ISD::CTPOP:
  case ISD::CTLZ:
  case ISD::CTTZ:
    return DAG.UnrollVectorOp(Op.getNode());
  case ISD::CLEAR_CACHE:
    report_fatal_error("llvm.clear_cache is not supported on wasm");
  }
}

static bool IsWebAssemblyGlobal(SDValue Op) {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());

  return false;
}

static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
                                                  SelectionDAG &DAG) {
  const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
  if (!FI)
    return std::nullopt;

  auto &MF = DAG.getMachineFunction();
  return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
}

SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  const SDValue &Value = SN->getValue();
  const SDValue &Base = SN->getBasePtr();
  const SDValue &Offset = SN->getOffset();

  if (IsWebAssemblyGlobal(Base)) {
    if (!Offset->isUndef())
      report_fatal_error("unexpected offset when storing to webassembly global",
                         false);

    SDVTList Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {SN->getChain(), Value, Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
                                   SN->getMemoryVT(), SN->getMemOperand());
  }

  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    if (!Offset->isUndef())
      report_fatal_error("unexpected offset when storing to webassembly local",
                         false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
    SDValue Ops[] = {SN->getChain(), Idx, Value};
    return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
  }

  if (WebAssembly::isWasmVarAddressSpace(SN->getAddressSpace()))
    report_fatal_error(
        "Encountered an unlowerable store to the wasm_var address space",
        false);

  return Op;
}

SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  const SDValue &Base = LN->getBasePtr();
  const SDValue &Offset = LN->getOffset();

  if (IsWebAssemblyGlobal(Base)) {
    if (!Offset->isUndef())
      report_fatal_error(
          "unexpected offset when loading from webassembly global", false);

    SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
    SDValue Ops[] = {LN->getChain(), Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
                                   LN->getMemoryVT(), LN->getMemOperand());
  }

  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    if (!Offset->isUndef())
      report_fatal_error(
          "unexpected offset when loading from webassembly local", false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    EVT LocalVT = LN->getValueType(0);
    SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
                                   {LN->getChain(), Idx});
    SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
    assert(Result->getNumValues() == 2 && "Loads must carry a chain!");
    return Result;
  }

  if (WebAssembly::isWasmVarAddressSpace(LN->getAddressSpace()))
    report_fatal_error(
        "Encountered an unlowerable load from the wasm_var address space",
        false);

  return Op;
}

SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // local.copy between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    Register Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                                   : WebAssembly::COPY_I64,
                                    DL, VT, Src),
                 0);
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy,
                                  Op.getNumOperands() == 4 ? Op.getOperand(3)
                                                           : SDValue());
  }
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);

  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
    fail(DL, DAG,
         "Non-Emscripten WebAssembly hasn't implemented "
         "__builtin_return_address");
    return SDValue();
  }

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = Op.getConstantOperandVal(0);
  MakeLibCallOptions CallOptions;
  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
                     {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
      .first;
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  Register FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

SDValue
WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);

  MachineFunction &MF = DAG.getMachineFunction();
  if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
    report_fatal_error("cannot use thread-local storage without bulk memory",
                       false);

  const GlobalValue *GV = GA->getGlobal();

  // Currently only Emscripten supports dynamic linking with threads.
  // Therefore, on other targets, if we have thread-local storage, only the
  // local-exec model is possible.
  auto model = Subtarget->getTargetTriple().isOSEmscripten()
                   ? GV->getThreadLocalMode()
                   : GlobalValue::LocalExecTLSModel;

  // Unsupported TLS modes
  assert(model != GlobalValue::NotThreadLocal);
  assert(model != GlobalValue::InitialExecTLSModel);

  if (model == GlobalValue::LocalExecTLSModel ||
      model == GlobalValue::LocalDynamicTLSModel ||
      (model == GlobalValue::GeneralDynamicTLSModel &&
       getTargetMachine().shouldAssumeDSOLocal(GV))) {
    // For DSO-local TLS variables we use offset from __tls_base

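    // i.e. the address is computed roughly as this instruction sequence:
    //   global.get __tls_base
    //   i32.const  <offset of GV within the TLS block>
    //   i32.add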
    MVT PtrVT = getPointerTy(DAG.getDataLayout());
    auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
                                       : WebAssembly::GLOBAL_GET_I32;
    const char *BaseName = MF.createExternalSymbolName("__tls_base");

    SDValue BaseAddr(
        DAG.getMachineNode(GlobalGet, DL, PtrVT,
                           DAG.getTargetExternalSymbol(BaseName, PtrVT)),
        0);

    SDValue TLSOffset = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
    SDValue SymOffset =
        DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);

    return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset);
  }

  assert(model == GlobalValue::GeneralDynamicTLSModel);

  EVT VT = Op.getValueType();
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(),
                                                WebAssemblyII::MO_GOT_TLS));
}
1732
1733SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1734 SelectionDAG &DAG) const {
1735 SDLoc DL(Op);
1736 const auto *GA = cast<GlobalAddressSDNode>(Op);
1737 EVT VT = Op.getValueType();
1738 assert(GA->getTargetFlags() == 0 &&
1739 "Unexpected target flags on generic GlobalAddressSDNode");
1740 if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
1741 fail(DL, DAG, "Invalid address space for WebAssembly target");
1742
1743 unsigned OperandFlags = 0;
1744 const GlobalValue *GV = GA->getGlobal();
1745 // Since WebAssembly tables cannot yet be shared across modules, we don't
1746 // need special treatment for tables in PIC mode.
1747 if (isPositionIndependent() &&
1748 !WebAssembly::isWebAssemblyTableType(GV->getValueType())) {
1749 if (getTargetMachine().shouldAssumeDSOLocal(GV)) {
1750 MachineFunction &MF = DAG.getMachineFunction();
1751 MVT PtrVT = getPointerTy(MF.getDataLayout());
1752 const char *BaseName;
1753 if (GV->getValueType()->isFunctionTy()) {
1754 BaseName = MF.createExternalSymbolName("__table_base");
1755 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1756 } else {
1757 BaseName = MF.createExternalSymbolName("__memory_base");
1758 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1759 }
1760 SDValue BaseAddr =
1761 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1762 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1763
1764 SDValue SymAddr = DAG.getNode(
1765 WebAssemblyISD::WrapperREL, DL, VT,
1766 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1767 OperandFlags));
1768
1769 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1770 }
1771 OperandFlags = WebAssemblyII::MO_GOT;
1772 }
1773
1774 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1775 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1776 GA->getOffset(), OperandFlags));
1777}
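// Illustration (sketch): under -fPIC, a DSO-local data symbol @g lowers to
// __memory_base plus a relative offset,
//   (add (Wrapper __memory_base), (WrapperREL tglobaladdr:@g))
// i.e. roughly `global.get __memory_base; i32.const g@MBREL; i32.add`,
// while functions use __table_base and non-DSO-local symbols are loaded from
// their GOT entry (MO_GOT).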
1778
1779SDValue
1780WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1781 SelectionDAG &DAG) const {
1782 SDLoc DL(Op);
1783 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1784 EVT VT = Op.getValueType();
1785 assert(ES->getTargetFlags() == 0 &&
1786 "Unexpected target flags on generic ExternalSymbolSDNode");
1787 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1788 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1789}
1790
1791SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1792 SelectionDAG &DAG) const {
1793 // There's no need for a Wrapper node because we always incorporate a jump
1794 // table operand into a BR_TABLE instruction, rather than ever
1795 // materializing it in a register.
1796 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1797 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1798 JT->getTargetFlags());
1799}
1800
1801SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1802 SelectionDAG &DAG) const {
1803 SDLoc DL(Op);
1804 SDValue Chain = Op.getOperand(0);
1805 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1806 SDValue Index = Op.getOperand(2);
1807 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1808
1809 SmallVector<SDValue, 8> Ops;
1810 Ops.push_back(Chain);
1811 Ops.push_back(Index);
1812
1813 const MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1814 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1815
1816 // Add an operand for each case.
1817 for (auto *MBB : MBBs)
1818 Ops.push_back(DAG.getBasicBlock(MBB));
1819
1820 // Add the first MBB as a dummy default target for now. This will be replaced
1821 // with the proper default target (and the preceding range check eliminated)
1822 // if possible by WebAssemblyFixBrTableDefaults.
1823 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1824 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1825}
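// Illustration (sketch): for a dense switch over cases 0..2, the BR_TABLE
// node built here has operands
//   {Chain, Index, bb0, bb1, bb2, bb0}
// where the trailing bb0 is the dummy default that
// WebAssemblyFixBrTableDefaults later replaces; it ultimately selects to a
// single `br_table` instruction.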
1826
1827SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1828 SelectionDAG &DAG) const {
1829 SDLoc DL(Op);
1830 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1831
1832 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1833 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1834
1835 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1836 MFI->getVarargBufferVreg(), PtrVT);
1837 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1838 MachinePointerInfo(SV));
1839}
1840
1841SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1842 SelectionDAG &DAG) const {
1843 MachineFunction &MF = DAG.getMachineFunction();
1844 unsigned IntNo;
1845 switch (Op.getOpcode()) {
1846 case ISD::INTRINSIC_VOID:
1847 case ISD::INTRINSIC_W_CHAIN:
1848 IntNo = Op.getConstantOperandVal(1);
1849 break;
1850 case ISD::INTRINSIC_WO_CHAIN:
1851 IntNo = Op.getConstantOperandVal(0);
1852 break;
1853 default:
1854 llvm_unreachable("Invalid intrinsic");
1855 }
1856 SDLoc DL(Op);
1857
1858 switch (IntNo) {
1859 default:
1860 return SDValue(); // Don't custom lower most intrinsics.
1861
1862 case Intrinsic::wasm_lsda: {
1863 auto PtrVT = getPointerTy(MF.getDataLayout());
1864 const char *SymName = MF.createExternalSymbolName(
1865 "GCC_except_table" + std::to_string(MF.getFunctionNumber()));
1866 if (isPositionIndependent()) {
1867 SDValue Node = DAG.getTargetExternalSymbol(
1868 SymName, PtrVT, WebAssemblyII::MO_MEMORY_BASE_REL);
1869 const char *BaseName = MF.createExternalSymbolName("__memory_base");
1870 SDValue BaseAddr =
1871 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1872 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1873 SDValue SymAddr =
1874 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node);
1875 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
1876 }
1877 SDValue Node = DAG.getTargetExternalSymbol(SymName, PtrVT);
1878 return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node);
1879 }
1880
1881 case Intrinsic::wasm_shuffle: {
1882 // Drop in-chain and replace undefs, but otherwise pass through unchanged
1883 SDValue Ops[18];
1884 size_t OpIdx = 0;
1885 Ops[OpIdx++] = Op.getOperand(1);
1886 Ops[OpIdx++] = Op.getOperand(2);
1887 while (OpIdx < 18) {
1888 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1889 if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsZExtVal() >= 32) {
1890 bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
1891 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
1892 } else {
1893 Ops[OpIdx++] = MaskIdx;
1894 }
1895 }
1896 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1897 }
1898 }
1899}
1900
1901SDValue
1902WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1903 SelectionDAG &DAG) const {
1904 SDLoc DL(Op);
1905 // If sign extension operations are disabled, allow sext_inreg only if operand
1906 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1907 // extension operations, but allowing sext_inreg in this context lets us have
1908 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1909 // everywhere would be simpler in this file, but would necessitate large and
1910 // brittle patterns to undo the expansion and select extract_lane_s
1911 // instructions.
1912 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
1913 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1914 return SDValue();
1915
1916 const SDValue &Extract = Op.getOperand(0);
1917 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1918 if (VecT.getVectorElementType().getSizeInBits() > 32)
1919 return SDValue();
1920 MVT ExtractedLaneT =
1921 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1922 MVT ExtractedVecT =
1923 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1924 if (ExtractedVecT == VecT)
1925 return Op;
1926
1927 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1928 const SDNode *Index = Extract.getOperand(1).getNode();
1929 if (!isa<ConstantSDNode>(Index))
1930 return SDValue();
1931 unsigned IndexVal = Index->getAsZExtVal();
1932 unsigned Scale =
1933 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1934 assert(Scale > 1);
1935 SDValue NewIndex =
1936 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1937 SDValue NewExtract = DAG.getNode(
1938 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1939 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1940 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1941 Op.getOperand(1));
1942}
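// Illustration (sketch):
//   (sext_inreg (extract_vector_elt (v16i8 $v), 3), i8)
// is returned unchanged and matches i8x16.extract_lane_s directly, whereas
//   (sext_inreg (extract_vector_elt (v4i32 $v), 1), i8)
// is rewritten to extract lane 4 (index 1 * Scale, Scale = 4) of the same
// register bitcast to v16i8, so the same pattern still applies.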
1943
1944SDValue
1945WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(SDValue Op,
1946 SelectionDAG &DAG) const {
1947 SDLoc DL(Op);
1948 EVT VT = Op.getValueType();
1949 SDValue Src = Op.getOperand(0);
1950 EVT SrcVT = Src.getValueType();
1951
1952 if (SrcVT.getVectorElementType() == MVT::i1 ||
1953 SrcVT.getVectorElementType() == MVT::i64)
1954 return SDValue();
1955
1956 assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 &&
1957 "Unexpected extension factor.");
1958 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
1959
1960 if (Scale != 2 && Scale != 4 && Scale != 8)
1961 return SDValue();
1962
1963 unsigned Ext;
1964 switch (Op.getOpcode()) {
1965 case ISD::ZERO_EXTEND_VECTOR_INREG:
1966 Ext = WebAssemblyISD::EXTEND_LOW_U;
1967 break;
1968 case ISD::SIGN_EXTEND_VECTOR_INREG:
1969 Ext = WebAssemblyISD::EXTEND_LOW_S;
1970 break;
1971 }
1972
1973 SDValue Ret = Src;
1974 while (Scale != 1) {
1975 Ret = DAG.getNode(Ext, DL,
1976 Ret.getValueType()
1977 .widenIntegerVectorElementType(*DAG.getContext())
1978 .getHalfNumVectorElementsVT(*DAG.getContext()),
1979 Ret);
1980 Scale /= 2;
1981 }
1982 assert(Ret.getValueType() == VT);
1983 return Ret;
1984}
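// Illustration (sketch): for
//   (zero_extend_vector_inreg v2i64 (v16i8 $v))
// Scale is 8, so the loop above emits three chained extends of the low half:
//   i16x8.extend_low_i8x16_u
//   i32x4.extend_low_i16x8_u
//   i64x2.extend_low_i32x4_u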
1985
1986 static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) {
1987 SDLoc DL(Op);
1988 if (Op.getValueType() != MVT::v2f64)
1989 return SDValue();
1990
1991 auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
1992 unsigned &Index) -> bool {
1993 switch (Op.getOpcode()) {
1994 case ISD::SINT_TO_FP:
1995 Opcode = WebAssemblyISD::CONVERT_LOW_S;
1996 break;
1997 case ISD::UINT_TO_FP:
1998 Opcode = WebAssemblyISD::CONVERT_LOW_U;
1999 break;
2000 case ISD::FP_EXTEND:
2001 Opcode = WebAssemblyISD::PROMOTE_LOW;
2002 break;
2003 default:
2004 return false;
2005 }
2006
2007 auto ExtractVector = Op.getOperand(0);
2008 if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2009 return false;
2010
2011 if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
2012 return false;
2013
2014 SrcVec = ExtractVector.getOperand(0);
2015 Index = ExtractVector.getConstantOperandVal(1);
2016 return true;
2017 };
2018
2019 unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
2020 SDValue LHSSrcVec, RHSSrcVec;
2021 if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
2022 !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
2023 return SDValue();
2024
2025 if (LHSOpcode != RHSOpcode)
2026 return SDValue();
2027
2028 MVT ExpectedSrcVT;
2029 switch (LHSOpcode) {
2030 case WebAssemblyISD::CONVERT_LOW_S:
2031 case WebAssemblyISD::CONVERT_LOW_U:
2032 ExpectedSrcVT = MVT::v4i32;
2033 break;
2034 case WebAssemblyISD::PROMOTE_LOW:
2035 ExpectedSrcVT = MVT::v4f32;
2036 break;
2037 }
2038 if (LHSSrcVec.getValueType() != ExpectedSrcVT)
2039 return SDValue();
2040
2041 auto Src = LHSSrcVec;
2042 if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
2043 // Shuffle the source vector so that the converted lanes are the low lanes.
2044 Src = DAG.getVectorShuffle(
2045 ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec,
2046 {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
2047 }
2048 return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src);
2049}
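// Illustration (sketch): a v2f64 build_vector of
//   (sint_to_fp (extract_vector_elt (v4i32 $v), 0)) and
//   (sint_to_fp (extract_vector_elt (v4i32 $v), 1))
// becomes (CONVERT_LOW_S $v), i.e. f64x2.convert_low_i32x4_s; other lane
// pairs are first shuffled into lanes 0 and 1 as above.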
2050
2051SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2052 SelectionDAG &DAG) const {
2053 if (auto ConvertLow = LowerConvertLow(Op, DAG))
2054 return ConvertLow;
2055
2056 SDLoc DL(Op);
2057 const EVT VecT = Op.getValueType();
2058 const EVT LaneT = Op.getOperand(0).getValueType();
2059 const size_t Lanes = Op.getNumOperands();
2060 bool CanSwizzle = VecT == MVT::v16i8;
2061
2062 // BUILD_VECTORs are lowered to the instruction that initializes the highest
2063 // possible number of lanes at once followed by a sequence of replace_lane
2064 // instructions to individually initialize any remaining lanes.
2065
2066 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
2067 // swizzled lanes should be given greater weight.
2068
2069 // TODO: Investigate looping rather than always extracting/replacing specific
2070 // lanes to fill gaps.
2071
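// Illustration (sketch): for
//   (v4i32 (build_vector $x, $x, $x, (i32 5)))
// the splat covers three of four lanes and wins, so the result is roughly
//   i32x4.splat $x
//   i32.const 5
//   i32x4.replace_lane 3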
2072 auto IsConstant = [](const SDValue &V) {
2073 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
2074 };
2075
2076 // Returns the source vector and index vector pair if they exist. Checks for:
2077 // (extract_vector_elt
2078 // $src,
2079 // (sign_extend_inreg (extract_vector_elt $indices, $i))
2080 // )
2081 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
2082 auto Bail = std::make_pair(SDValue(), SDValue());
2083 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2084 return Bail;
2085 const SDValue &SwizzleSrc = Lane->getOperand(0);
2086 const SDValue &IndexExt = Lane->getOperand(1);
2087 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
2088 return Bail;
2089 const SDValue &Index = IndexExt->getOperand(0);
2090 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2091 return Bail;
2092 const SDValue &SwizzleIndices = Index->getOperand(0);
2093 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
2094 SwizzleIndices.getValueType() != MVT::v16i8 ||
2095 Index->getOperand(1)->getOpcode() != ISD::Constant ||
2096 Index->getConstantOperandVal(1) != I)
2097 return Bail;
2098 return std::make_pair(SwizzleSrc, SwizzleIndices);
2099 };
2100
2101 // If the lane is extracted from another vector at a constant index, return
2102 // that vector. The source vector must not have more lanes than the dest
2103 // because the shufflevector indices are in terms of the destination lanes and
2104 // would not be able to address the smaller individual source lanes.
2105 auto GetShuffleSrc = [&](const SDValue &Lane) {
2106 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2107 return SDValue();
2108 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
2109 return SDValue();
2110 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
2111 VecT.getVectorNumElements())
2112 return SDValue();
2113 return Lane->getOperand(0);
2114 };
2115
2116 using ValueEntry = std::pair<SDValue, size_t>;
2117 SmallVector<ValueEntry, 16> SplatValueCounts;
2118
2119 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
2120 SmallVector<SwizzleEntry, 16> SwizzleCounts;
2121
2122 using ShuffleEntry = std::pair<SDValue, size_t>;
2123 SmallVector<ShuffleEntry, 16> ShuffleCounts;
2124
2125 auto AddCount = [](auto &Counts, const auto &Val) {
2126 auto CountIt =
2127 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
2128 if (CountIt == Counts.end()) {
2129 Counts.emplace_back(Val, 1);
2130 } else {
2131 CountIt->second++;
2132 }
2133 };
2134
2135 auto GetMostCommon = [](auto &Counts) {
2136 auto CommonIt =
2137 std::max_element(Counts.begin(), Counts.end(), llvm::less_second());
2138 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
2139 return *CommonIt;
2140 };
2141
2142 size_t NumConstantLanes = 0;
2143
2144 // Count eligible lanes for each type of vector creation op
2145 for (size_t I = 0; I < Lanes; ++I) {
2146 const SDValue &Lane = Op->getOperand(I);
2147 if (Lane.isUndef())
2148 continue;
2149
2150 AddCount(SplatValueCounts, Lane);
2151
2152 if (IsConstant(Lane))
2153 NumConstantLanes++;
2154 if (auto ShuffleSrc = GetShuffleSrc(Lane))
2155 AddCount(ShuffleCounts, ShuffleSrc);
2156 if (CanSwizzle) {
2157 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
2158 if (SwizzleSrcs.first)
2159 AddCount(SwizzleCounts, SwizzleSrcs);
2160 }
2161 }
2162
2163 SDValue SplatValue;
2164 size_t NumSplatLanes;
2165 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
2166
2167 SDValue SwizzleSrc;
2168 SDValue SwizzleIndices;
2169 size_t NumSwizzleLanes = 0;
2170 if (SwizzleCounts.size())
2171 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
2172 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
2173
2174 // Shuffles can draw from up to two vectors, so find the two most common
2175 // sources.
2176 SDValue ShuffleSrc1, ShuffleSrc2;
2177 size_t NumShuffleLanes = 0;
2178 if (ShuffleCounts.size()) {
2179 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
2180 llvm::erase_if(ShuffleCounts,
2181 [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
2182 }
2183 if (ShuffleCounts.size()) {
2184 size_t AdditionalShuffleLanes;
2185 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
2186 GetMostCommon(ShuffleCounts);
2187 NumShuffleLanes += AdditionalShuffleLanes;
2188 }
2189
2190 // Predicate returning true if the lane is properly initialized by the
2191 // original instruction
2192 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
2193 SDValue Result;
2194 // Prefer swizzles over shuffles over vector consts over splats
2195 if (NumSwizzleLanes >= NumShuffleLanes &&
2196 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
2197 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
2198 SwizzleIndices);
2199 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
2200 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
2201 return Swizzled == GetSwizzleSrcs(I, Lane);
2202 };
2203 } else if (NumShuffleLanes >= NumConstantLanes &&
2204 NumShuffleLanes >= NumSplatLanes) {
2205 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
2206 size_t DestLaneCount = VecT.getVectorNumElements();
2207 size_t Scale1 = 1;
2208 size_t Scale2 = 1;
2209 SDValue Src1 = ShuffleSrc1;
2210 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
2211 if (Src1.getValueType() != VecT) {
2212 size_t LaneSize =
2213 Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
2214 assert(LaneSize > DestLaneSize);
2215 Scale1 = LaneSize / DestLaneSize;
2216 Src1 = DAG.getBitcast(VecT, Src1);
2217 }
2218 if (Src2.getValueType() != VecT) {
2219 size_t LaneSize =
2220 Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
2221 assert(LaneSize > DestLaneSize);
2222 Scale2 = LaneSize / DestLaneSize;
2223 Src2 = DAG.getBitcast(VecT, Src2);
2224 }
2225
2226 int Mask[16];
2227 assert(DestLaneCount <= 16);
2228 for (size_t I = 0; I < DestLaneCount; ++I) {
2229 const SDValue &Lane = Op->getOperand(I);
2230 SDValue Src = GetShuffleSrc(Lane);
2231 if (Src == ShuffleSrc1) {
2232 Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
2233 } else if (Src && Src == ShuffleSrc2) {
2234 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
2235 } else {
2236 Mask[I] = -1;
2237 }
2238 }
2239 ArrayRef<int> MaskRef(Mask, DestLaneCount);
2240 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
2241 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
2242 auto Src = GetShuffleSrc(Lane);
2243 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
2244 };
2245 } else if (NumConstantLanes >= NumSplatLanes) {
2246 SmallVector<SDValue, 16> ConstLanes;
2247 for (const SDValue &Lane : Op->op_values()) {
2248 if (IsConstant(Lane)) {
2249 // Values may need to be fixed so that they will sign extend to be
2250 // within the expected range during ISel. Check whether the value is in
2251 // bounds based on the lane bit width and if it is out of bounds, lop
2252 // off the extra bits and subtract 2^n to reflect giving the high bit
2253 // value -2^(n-1) rather than +2^(n-1). Skip the i64 case because it
2254 // cannot possibly be out of range.
2255 auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode());
2256 int64_t Val = Const ? Const->getSExtValue() : 0;
2257 uint64_t LaneBits = 128 / Lanes;
2258 assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&
2259 "Unexpected out of bounds negative value");
2260 if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
2261 uint64_t Mask = (1ll << LaneBits) - 1;
2262 auto NewVal = (((uint64_t)Val & Mask) - (1ll << LaneBits)) & Mask;
2263 ConstLanes.push_back(DAG.getConstant(NewVal, SDLoc(Lane), LaneT));
2264 } else {
2265 ConstLanes.push_back(Lane);
2266 }
2267 } else if (LaneT.isFloatingPoint()) {
2268 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
2269 } else {
2270 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
2271 }
2272 }
2273 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
2274 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
2275 return IsConstant(Lane);
2276 };
2277 } else {
2278 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits();
2279 if (NumSplatLanes == 1 && Op->getOperand(0) == SplatValue &&
2280 (DestLaneSize == 32 || DestLaneSize == 64)) {
2281 // Could be selected to load_zero.
2282 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecT, SplatValue);
2283 } else {
2284 // Use a splat (which might be selected as a load splat)
2285 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
2286 }
2287 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
2288 return Lane == SplatValue;
2289 };
2290 }
2291
2292 assert(Result);
2293 assert(IsLaneConstructed);
2294
2295 // Add replace_lane instructions for any unhandled values
2296 for (size_t I = 0; I < Lanes; ++I) {
2297 const SDValue &Lane = Op->getOperand(I);
2298 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
2299 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
2300 DAG.getConstant(I, DL, MVT::i32));
2301 }
2302
2303 return Result;
2304}
2305
2306SDValue
2307WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2308 SelectionDAG &DAG) const {
2309 SDLoc DL(Op);
2310 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
2311 MVT VecType = Op.getOperand(0).getSimpleValueType();
2312 assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
2313 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
2314
2315 // Space for two vector args and sixteen mask indices
2316 SDValue Ops[18];
2317 size_t OpIdx = 0;
2318 Ops[OpIdx++] = Op.getOperand(0);
2319 Ops[OpIdx++] = Op.getOperand(1);
2320
2321 // Expand mask indices to byte indices and materialize them as operands
2322 for (int M : Mask) {
2323 for (size_t J = 0; J < LaneBytes; ++J) {
2324 // Lower undefs (represented by -1 in the mask) to {0..J}, which reuses a
2325 // whole lane of the vector input and allows further reduction in the VM,
2326 // e.g. matching an 8x16 byte shuffle to an equivalent, cheaper 32x4 shuffle.
2327 uint64_t ByteIndex = M == -1 ? J : (uint64_t)M * LaneBytes + J;
2328 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
2329 }
2330 }
2331
2332 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
2333}
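// Illustration (sketch): for a v4i32 shuffle with mask {0, 5, -1, 7}, each
// lane index expands to four byte indices, so the i8x16.shuffle immediate is
//   0,1,2,3, 20,21,22,23, 0,1,2,3, 28,29,30,31
// where the undef lane re-reads bytes {0..3} to stay reducible by the VM.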
2334
2335SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
2336 SelectionDAG &DAG) const {
2337 SDLoc DL(Op);
2338 // The legalizer does not know how to expand the unsupported comparison modes
2339 // of i64x2 vectors, so we manually unroll them here.
2340 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
2341 SmallVector<SDValue, 2> LHS, RHS;
2342 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
2343 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
2344 const SDValue &CC = Op->getOperand(2);
2345 auto MakeLane = [&](unsigned I) {
2346 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
2347 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
2348 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
2349 };
2350 return DAG.getBuildVector(Op->getValueType(0), DL,
2351 {MakeLane(0), MakeLane(1)});
2352}
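// Illustration (sketch): (setcc v2i64 $a, $b, setlt) becomes two scalar i64
// select_cc nodes, each producing all-ones or zero, whose results are
// rebuilt into a v2i64.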
2353
2354SDValue
2355WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
2356 SelectionDAG &DAG) const {
2357 // Allow constant lane indices, expand variable lane indices
2358 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
2359 if (isa<ConstantSDNode>(IdxNode)) {
2360 // Ensure the index type is i32 to match the tablegen patterns
2361 uint64_t Idx = IdxNode->getAsZExtVal();
2362 SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
2363 Ops[Op.getNumOperands() - 1] =
2364 DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
2365 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Ops);
2366 }
2367 // Perform default expansion
2368 return SDValue();
2369}
2370
2371 static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
2372 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
2373 // 32-bit and 64-bit unrolled shifts will have proper semantics
2374 if (LaneT.bitsGE(MVT::i32))
2375 return DAG.UnrollVectorOp(Op.getNode());
2376 // Otherwise mask the shift value to get proper semantics from 32-bit shift
2377 SDLoc DL(Op);
2378 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
2379 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
2380 unsigned ShiftOpcode = Op.getOpcode();
2381 SmallVector<SDValue, 16> ShiftedElements;
2382 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
2383 SmallVector<SDValue, 16> ShiftElements;
2384 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
2385 SmallVector<SDValue, 16> UnrolledOps;
2386 for (size_t i = 0; i < NumLanes; ++i) {
2387 SDValue MaskedShiftValue =
2388 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
2389 SDValue ShiftedValue = ShiftedElements[i];
2390 if (ShiftOpcode == ISD::SRA)
2391 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
2392 ShiftedValue, DAG.getValueType(LaneT));
2393 UnrolledOps.push_back(
2394 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2395 }
2396 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
2397}
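// Illustration (sketch): for (shl v16i8 $v, $amts), each lane is shifted as
// an i32, so every lane's amount is first masked with 8 - 1; a per-lane
// amount of 11 therefore behaves as 3, matching wasm's modular shift
// semantics.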
2398
2399SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2400 SelectionDAG &DAG) const {
2401 SDLoc DL(Op);
2402
2403 // Only manually lower vector shifts
2404 assert(Op.getSimpleValueType().isVector());
2405
2406 uint64_t LaneBits = Op.getValueType().getScalarSizeInBits();
2407 auto ShiftVal = Op.getOperand(1);
2408
2409 // Try to skip the bitmask operation, since it is implied by the shift instruction
2410 auto SkipImpliedMask = [](SDValue MaskOp, uint64_t MaskBits) {
2411 if (MaskOp.getOpcode() != ISD::AND)
2412 return MaskOp;
2413 SDValue LHS = MaskOp.getOperand(0);
2414 SDValue RHS = MaskOp.getOperand(1);
2415 if (MaskOp.getValueType().isVector()) {
2416 APInt MaskVal;
2417 if (!ISD::isConstantSplatVector(RHS.getNode(), MaskVal))
2418 std::swap(LHS, RHS);
2419
2420 if (ISD::isConstantSplatVector(RHS.getNode(), MaskVal) &&
2421 MaskVal == MaskBits)
2422 MaskOp = LHS;
2423 } else {
2424 if (!isa<ConstantSDNode>(RHS.getNode()))
2425 std::swap(LHS, RHS);
2426
2427 auto ConstantRHS = dyn_cast<ConstantSDNode>(RHS.getNode());
2428 if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
2429 MaskOp = LHS;
2430 }
2431
2432 return MaskOp;
2433 };
2434
2435 // Skip the vector AND operation
2436 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2437 ShiftVal = DAG.getSplatValue(ShiftVal);
2438 if (!ShiftVal)
2439 return unrollVectorShift(Op, DAG);
2440
2441 // Skip the scalar AND operation
2442 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2443 // Use anyext because none of the high bits can affect the shift
2444 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
2445
2446 unsigned Opcode;
2447 switch (Op.getOpcode()) {
2448 case ISD::SHL:
2449 Opcode = WebAssemblyISD::VEC_SHL;
2450 break;
2451 case ISD::SRA:
2452 Opcode = WebAssemblyISD::VEC_SHR_S;
2453 break;
2454 case ISD::SRL:
2455 Opcode = WebAssemblyISD::VEC_SHR_U;
2456 break;
2457 default:
2458 llvm_unreachable("unexpected opcode");
2459 }
2460
2461 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
2462}
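// Illustration (sketch): for
//   (shl (v4i32 $v), (splat (and $x, 31)))
// the AND is dropped because i32x4.shl already masks the amount by 31, and
// the node becomes (VEC_SHL $v, $x).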
2463
2464SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2465 SelectionDAG &DAG) const {
2466 SDLoc DL(Op);
2467 EVT ResT = Op.getValueType();
2468 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2469
2470 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2471 (SatVT == MVT::i32 || SatVT == MVT::i64))
2472 return Op;
2473
2474 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2475 return Op;
2476
2477 return SDValue();
2478}
2479
2480//===----------------------------------------------------------------------===//
2481// Custom DAG combine hooks
2482//===----------------------------------------------------------------------===//
2483static SDValue
2484 performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2485 auto &DAG = DCI.DAG;
2486 auto Shuffle = cast<ShuffleVectorSDNode>(N);
2487
2488 // Hoist vector bitcasts that don't change the number of lanes out of unary
2489 // shuffles, where they are less likely to get in the way of other combines.
2490 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
2491 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
2492 SDValue Bitcast = N->getOperand(0);
2493 if (Bitcast.getOpcode() != ISD::BITCAST)
2494 return SDValue();
2495 if (!N->getOperand(1).isUndef())
2496 return SDValue();
2497 SDValue CastOp = Bitcast.getOperand(0);
2498 EVT SrcType = CastOp.getValueType();
2499 EVT DstType = Bitcast.getValueType();
2500 if (!SrcType.is128BitVector() ||
2501 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2502 return SDValue();
2503 SDValue NewShuffle = DAG.getVectorShuffle(
2504 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
2505 return DAG.getBitcast(DstType, NewShuffle);
2506}
2507
2508/// Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get
2509/// split up into scalar instructions during legalization, and the vector
2510/// extending instructions are selected in performVectorExtendCombine below.
2511 static SDValue
2512 performVectorExtendToFPCombine(SDNode *N,
2513 TargetLowering::DAGCombinerInfo &DCI) {
2514 auto &DAG = DCI.DAG;
2515 assert(N->getOpcode() == ISD::UINT_TO_FP ||
2516 N->getOpcode() == ISD::SINT_TO_FP);
2517
2518 EVT InVT = N->getOperand(0)->getValueType(0);
2519 EVT ResVT = N->getValueType(0);
2520 MVT ExtVT;
2521 if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
2522 ExtVT = MVT::v4i32;
2523 else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
2524 ExtVT = MVT::v2i32;
2525 else
2526 return SDValue();
2527
2528 unsigned Op =
2529 N->getOpcode() == ISD::SINT_TO_FP ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
2530 SDValue Conv = DAG.getNode(Op, SDLoc(N), ExtVT, N->getOperand(0));
2531 return DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, Conv);
2532}
2533
2534static SDValue
2535 performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2536 auto &DAG = DCI.DAG;
2537 assert(N->getOpcode() == ISD::SIGN_EXTEND ||
2538 N->getOpcode() == ISD::ZERO_EXTEND);
2539
2540 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2541 // possible before the extract_subvector can be expanded.
2542 auto Extract = N->getOperand(0);
2543 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2544 return SDValue();
2545 auto Source = Extract.getOperand(0);
2546 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2547 if (IndexNode == nullptr)
2548 return SDValue();
2549 auto Index = IndexNode->getZExtValue();
2550
2551 // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2552 // extracted subvector is the low or high half of its source.
2553 EVT ResVT = N->getValueType(0);
2554 if (ResVT == MVT::v8i16) {
2555 if (Extract.getValueType() != MVT::v8i8 ||
2556 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2557 return SDValue();
2558 } else if (ResVT == MVT::v4i32) {
2559 if (Extract.getValueType() != MVT::v4i16 ||
2560 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2561 return SDValue();
2562 } else if (ResVT == MVT::v2i64) {
2563 if (Extract.getValueType() != MVT::v2i32 ||
2564 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2565 return SDValue();
2566 } else {
2567 return SDValue();
2568 }
2569
2570 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2571 bool IsLow = Index == 0;
2572
2573 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2574 : WebAssemblyISD::EXTEND_HIGH_S)
2575 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2576 : WebAssemblyISD::EXTEND_HIGH_U);
2577
2578 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2579}
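// Illustration (sketch):
//   (sign_extend (v8i8 (extract_subvector (v16i8 $v), 8))) : v8i16
// becomes (EXTEND_HIGH_S $v), i.e. i16x8.extend_high_i8x16_s, instead of
// letting the extract_subvector be expanded lane by lane.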
2580
2581static SDValue
2582 performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2583 auto &DAG = DCI.DAG;
2584
2585 auto GetWasmConversionOp = [](unsigned Op) {
2586 switch (Op) {
2587 case ISD::FP_TO_SINT_SAT:
2588 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
2589 case ISD::FP_TO_UINT_SAT:
2590 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
2591 case ISD::FP_ROUND:
2592 return WebAssemblyISD::DEMOTE_ZERO;
2593 }
2594 llvm_unreachable("unexpected op");
2595 };
2596
2597 auto IsZeroSplat = [](SDValue SplatVal) {
2598 auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
2599 APInt SplatValue, SplatUndef;
2600 unsigned SplatBitSize;
2601 bool HasAnyUndefs;
2602 // Endianness doesn't matter in this context because we are looking for
2603 // an all-zero value.
2604 return Splat &&
2605 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2606 HasAnyUndefs) &&
2607 SplatValue == 0;
2608 };
2609
2610 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
2611 // Combine this:
2612 //
2613 // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
2614 //
2615 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2616 //
2617 // Or this:
2618 //
2619 // (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
2620 //
2621 // into (f32x4.demote_zero_f64x2 $x).
2622 EVT ResVT;
2623 EVT ExpectedConversionType;
2624 auto Conversion = N->getOperand(0);
2625 auto ConversionOp = Conversion.getOpcode();
2626 switch (ConversionOp) {
2627 case ISD::FP_TO_SINT_SAT:
2628 case ISD::FP_TO_UINT_SAT:
2629 ResVT = MVT::v4i32;
2630 ExpectedConversionType = MVT::v2i32;
2631 break;
2632 case ISD::FP_ROUND:
2633 ResVT = MVT::v4f32;
2634 ExpectedConversionType = MVT::v2f32;
2635 break;
2636 default:
2637 return SDValue();
2638 }
2639
2640 if (N->getValueType(0) != ResVT)
2641 return SDValue();
2642
2643 if (Conversion.getValueType() != ExpectedConversionType)
2644 return SDValue();
2645
2646 auto Source = Conversion.getOperand(0);
2647 if (Source.getValueType() != MVT::v2f64)
2648 return SDValue();
2649
2650 if (!IsZeroSplat(N->getOperand(1)) ||
2651 N->getOperand(1).getValueType() != ExpectedConversionType)
2652 return SDValue();
2653
2654 unsigned Op = GetWasmConversionOp(ConversionOp);
2655 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2656 }
2657
2658 // Combine this:
2659 //
2660 // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
2661 //
2662 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2663 //
2664 // Or this:
2665 //
2666 // (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
2667 //
2668 // into (f32x4.demote_zero_f64x2 $x).
2669 EVT ResVT;
2670 auto ConversionOp = N->getOpcode();
2671 switch (ConversionOp) {
2672 case ISD::FP_TO_SINT_SAT:
2673 case ISD::FP_TO_UINT_SAT:
2674 ResVT = MVT::v4i32;
2675 break;
2676 case ISD::FP_ROUND:
2677 ResVT = MVT::v4f32;
2678 break;
2679 default:
2680 llvm_unreachable("unexpected op");
2681 }
2682
2683 if (N->getValueType(0) != ResVT)
2684 return SDValue();
2685
2686 auto Concat = N->getOperand(0);
2687 if (Concat.getValueType() != MVT::v4f64)
2688 return SDValue();
2689
2690 auto Source = Concat.getOperand(0);
2691 if (Source.getValueType() != MVT::v2f64)
2692 return SDValue();
2693
2694 if (!IsZeroSplat(Concat.getOperand(1)) ||
2695 Concat.getOperand(1).getValueType() != MVT::v2f64)
2696 return SDValue();
2697
2698 unsigned Op = GetWasmConversionOp(ConversionOp);
2699 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2700}
2701
2702// Helper to extract VectorWidth bits from Vec, starting from IdxVal.
2703static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
2704 const SDLoc &DL, unsigned VectorWidth) {
2705 EVT VT = Vec.getValueType();
2706 EVT ElVT = VT.getVectorElementType();
2707 unsigned Factor = VT.getSizeInBits() / VectorWidth;
2708 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
2709 VT.getVectorNumElements() / Factor);
2710
2711 // Extract the relevant VectorWidth bits. Generate an EXTRACT_SUBVECTOR
2712 unsigned ElemsPerChunk = VectorWidth / ElVT.getSizeInBits();
2713 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
2714
2715 // This is the index of the first element of the VectorWidth-bit chunk
2716 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
2717 IdxVal &= ~(ElemsPerChunk - 1);
2718
2719 // If the input is a buildvector just emit a smaller one.
2720 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
2721 return DAG.getBuildVector(ResultVT, DL,
2722 Vec->ops().slice(IdxVal, ElemsPerChunk));
2723
2724 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, DL);
2725 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, VecIdx);
2726}
2727
2728// Helper to recursively truncate vector elements in half with NARROW_U. DstVT
2729// is the expected destination value type after recursion. In is the initial
2730// input. Note that the input should have enough leading zero bits to prevent
2731// NARROW_U from saturating results.
2732 static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL,
2733 SelectionDAG &DAG) {
2734 EVT SrcVT = In.getValueType();
2735
2736 // No truncation required; we might get here due to recursive calls.
2737 if (SrcVT == DstVT)
2738 return In;
2739
2740 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
2741 unsigned NumElems = SrcVT.getVectorNumElements();
2742 if (!isPowerOf2_32(NumElems))
2743 return SDValue();
2744 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
2745 assert(SrcSizeInBits > DstVT.getSizeInBits() && "Illegal truncation");
2746
2747 LLVMContext &Ctx = *DAG.getContext();
2748 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
2749
2750 // Narrow to the largest type possible:
2751 // vXi64/vXi32 -> i16x8.narrow_i32x4_u and vXi16 -> i8x16.narrow_i16x8_u.
2752 EVT InVT = MVT::i16, OutVT = MVT::i8;
2753 if (SrcVT.getScalarSizeInBits() > 16) {
2754 InVT = MVT::i32;
2755 OutVT = MVT::i16;
2756 }
2757 unsigned SubSizeInBits = SrcSizeInBits / 2;
2758 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
2759 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
2760
2761 // Split lower/upper subvectors.
2762 SDValue Lo = extractSubVector(In, 0, DAG, DL, SubSizeInBits);
2763 SDValue Hi = extractSubVector(In, NumElems / 2, DAG, DL, SubSizeInBits);
2764
2765 // 256bit -> 128bit truncate - Narrow lower/upper 128-bit subvectors.
2766 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
2767 Lo = DAG.getBitcast(InVT, Lo);
2768 Hi = DAG.getBitcast(InVT, Hi);
2769 SDValue Res = DAG.getNode(WebAssemblyISD::NARROW_U, DL, OutVT, Lo, Hi);
2770 return DAG.getBitcast(DstVT, Res);
2771 }
2772
2773 // Recursively narrow lower/upper subvectors, concat result and narrow again.
2774 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
2775 Lo = truncateVectorWithNARROW(PackedVT, Lo, DL, DAG);
2776 Hi = truncateVectorWithNARROW(PackedVT, Hi, DL, DAG);
2777
2778 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
2779 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
2780 return truncateVectorWithNARROW(DstVT, Res, DL, DAG);
2781}
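// Illustration (sketch): truncating a masked v8i32 to v8i16 splits the
// 256-bit input into two v4i32 halves and emits a single
// i16x8.narrow_i32x4_u; wider sources recurse, narrowing one halving step at
// a time.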
2782
2783 static SDValue performTruncateCombine(SDNode *N,
2784 TargetLowering::DAGCombinerInfo &DCI) {
2785 auto &DAG = DCI.DAG;
2786
2787 SDValue In = N->getOperand(0);
2788 EVT InVT = In.getValueType();
2789 if (!InVT.isSimple())
2790 return SDValue();
2791
2792 EVT OutVT = N->getValueType(0);
2793 if (!OutVT.isVector())
2794 return SDValue();
2795
2796 EVT OutSVT = OutVT.getVectorElementType();
2797 EVT InSVT = InVT.getVectorElementType();
2798 // Currently we only cover truncation to v16i8 or v8i16.
2799 if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
2800 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.is128BitVector()))
2801 return SDValue();
2802
2803 SDLoc DL(N);
2804 APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
2805 OutVT.getScalarSizeInBits());
2806 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
2807 return truncateVectorWithNARROW(OutVT, In, DL, DAG);
2808}
2809
2810 static SDValue performBitcastCombine(SDNode *N,
2811 TargetLowering::DAGCombinerInfo &DCI) {
2812 auto &DAG = DCI.DAG;
2813 SDLoc DL(N);
2814 SDValue Src = N->getOperand(0);
2815 EVT VT = N->getValueType(0);
2816 EVT SrcVT = Src.getValueType();
2817
2818 // bitcast <N x i1> to iN
2819 // ==> bitmask
2820 if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
2821 SrcVT.isFixedLengthVector() && SrcVT.getScalarType() == MVT::i1) {
2822 unsigned NumElts = SrcVT.getVectorNumElements();
2823 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
2824 return SDValue();
2825 EVT Width = MVT::getIntegerVT(128 / NumElts);
2826 return DAG.getZExtOrTrunc(
2827 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
2828 {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
2829 DAG.getSExtOrTrunc(N->getOperand(0), DL,
2830 SrcVT.changeVectorElementType(Width))}),
2831 DL, VT);
2832 }
2833
2834 return SDValue();
2835}
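// Illustration (sketch): (i16 (bitcast (v16i1 $m))) is rewritten to the
// wasm.bitmask intrinsic applied to $m sign-extended to v16i8, then
// zero-extended/truncated to i16; it selects to i8x16.bitmask.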
2836
2837 static SDValue performSETCCCombine(SDNode *N,
2838 TargetLowering::DAGCombinerInfo &DCI) {
2839 auto &DAG = DCI.DAG;
2840
2841 SDValue LHS = N->getOperand(0);
2842 SDValue RHS = N->getOperand(1);
2843 ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
2844 SDLoc DL(N);
2845 EVT VT = N->getValueType(0);
2846
2847 // setcc (iN (bitcast (vNi1 X))), 0, ne
2848 // ==> any_true (vNi1 X)
2849 // setcc (iN (bitcast (vNi1 X))), 0, eq
2850 // ==> xor (any_true (vNi1 X)), -1
2851 // setcc (iN (bitcast (vNi1 X))), -1, eq
2852 // ==> all_true (vNi1 X)
2853 // setcc (iN (bitcast (vNi1 X))), -1, ne
2854 // ==> xor (all_true (vNi1 X)), -1
2855 if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
2856 (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2857 (isNullConstant(RHS) || isAllOnesConstant(RHS)) &&
2858 LHS->getOpcode() == ISD::BITCAST) {
2859 EVT FromVT = LHS->getOperand(0).getValueType();
2860 if (FromVT.isFixedLengthVector() &&
2861 FromVT.getVectorElementType() == MVT::i1) {
2862 int Intrin = isNullConstant(RHS) ? Intrinsic::wasm_anytrue
2863 : Intrinsic::wasm_alltrue;
2864 unsigned NumElts = FromVT.getVectorNumElements();
2865 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
2866 return SDValue();
2867 EVT Width = MVT::getIntegerVT(128 / NumElts);
2868 SDValue Ret = DAG.getZExtOrTrunc(
2869 DAG.getNode(
2870 ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
2871 {DAG.getConstant(Intrin, DL, MVT::i32),
2872 DAG.getSExtOrTrunc(LHS->getOperand(0), DL,
2873 FromVT.changeVectorElementType(Width))}),
2874 DL, MVT::i1);
2875 if ((isNullConstant(RHS) && (Cond == ISD::SETEQ)) ||
2876 (isAllOnesConstant(RHS) && (Cond == ISD::SETNE))) {
2877 Ret = DAG.getNOT(DL, Ret, MVT::i1);
2878 }
2879 return DAG.getZExtOrTrunc(Ret, DL, VT);
2880 }
2881 }
2882
2883 return SDValue();
2884}
2885
2886SDValue
2887WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2888 DAGCombinerInfo &DCI) const {
2889 switch (N->getOpcode()) {
2890 default:
2891 return SDValue();
2892 case ISD::BITCAST:
2893 return performBitcastCombine(N, DCI);
2894 case ISD::SETCC:
2895 return performSETCCCombine(N, DCI);
2896 case ISD::VECTOR_SHUFFLE:
2897 return performVECTOR_SHUFFLECombine(N, DCI);
2898 case ISD::SIGN_EXTEND:
2899 case ISD::ZERO_EXTEND:
2900 return performVectorExtendCombine(N, DCI);
2901 case ISD::UINT_TO_FP:
2902 case ISD::SINT_TO_FP:
2903 return performVectorExtendToFPCombine(N, DCI);
2904 case ISD::FP_TO_SINT_SAT:
2905 case ISD::FP_TO_UINT_SAT:
2906 case ISD::FP_ROUND:
2907 case ISD::CONCAT_VECTORS:
2908 return performVectorTruncZeroCombine(N, DCI);
2909 case ISD::TRUNCATE:
2910 return performTruncateCombine(N, DCI);
2911 }
2912}
unsigned const MachineRegisterInfo * MRI
static SDValue performTruncateCombine(SDNode *N, SelectionDAG &DAG)
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Symbol * Sym
Definition: ELF_riscv.cpp:479
Hexagon Common GEP
const HexagonInstrInfo * TII
#define _
IRTranslator LLVM IR MI
static unsigned NumFixedArgs
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
static bool callingConvSupported(CallingConv::ID CallConv)
static std::optional< unsigned > IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG)
static SDValue performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
static MachineBasicBlock * LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB, const WebAssemblySubtarget *Subtarget, const TargetInstrInfo &TII)
static SDValue performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool IsWebAssemblyGlobal(SDValue Op)
static SDValue performVectorExtendToFPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get split up into scalar instr...
static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG)
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, const SDLoc &DL, unsigned VectorWidth)
static SDValue performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL, SelectionDAG &DAG)
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG.
This file provides WebAssembly-specific target descriptions.
This file declares WebAssembly-specific per-machine-function information.
This file declares the WebAssembly-specific subclass of TargetSubtarget.
This file declares the WebAssembly-specific subclass of TargetMachine.
This file contains the declaration of the WebAssembly-specific type parsing utility functions.
This file contains the declaration of the WebAssembly-specific utility functions.
X86 cmov Conversion
static constexpr int Concat[]
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:284
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:274
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
@ Add
*p = old + v
Definition: Instructions.h:712
@ Or
*p = old | v
Definition: Instructions.h:720
@ Sub
*p = old - v
Definition: Instructions.h:714
@ And
*p = old & v
Definition: Instructions.h:716
@ Xor
*p = old ^ v
Definition: Instructions.h:722
BinOp getOperation() const
Definition: Instructions.h:787
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
CCState - This class holds information needed while lowering arguments and return values.
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
This class represents a function call, abstracting a target machine's calling convention.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
Diagnostic information for unsupported feature in backend.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:214
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:380
unsigned getAddressSpace() const
const GlobalValue * getGlobal() const
ThreadLocalMode getThreadLocalMode() const
Definition: GlobalValue.h:271
Type * getValueType() const
Definition: GlobalValue.h:296
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
void setNoStrip() const
Definition: MCSymbolWasm.h:66
Machine Value Type.
@ INVALID_SIMPLE_VALUE_TYPE
static auto integer_fixedlen_vector_valuetypes()
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:231
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isFixedLengthVector() const
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, DebugLoc DL, bool NoImplicit=false)
CreateMachineInstr - Allocate a new MachineInstr.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
iterator_range< mop_iterator > uses()
Returns a range that includes all operands that are register uses.
Definition: MachineInstr.h:733
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
iterator_range< mop_iterator > defs()
Returns a range over all explicit operands that are register definitions.
Definition: MachineInstr.h:722
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:579
const std::vector< MachineJumpTableEntry > & getJumpTables() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:226
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:736
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:746
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:842
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:487
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:741
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getBasicBlock(MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:488
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncating it.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:787
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:690
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:482
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:813
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
Definition: SelectionDAG.h:859
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
LLVMContext * getContext() const
Definition: SelectionDAG.h:500
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:570
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
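Taken together, the factory methods above are how lowering code builds new DAG fragments. A minimal sketch in the style of a LowerOperation hook (the function name and the choice of ISD::MUL are illustrative only, not from this file):

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Illustrative only: lower some unary op to (mul x, 2).
static SDValue lowerAsDouble(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);                              // reuse IR ordering/debug location
  EVT VT = Op.getValueType();
  SDValue Two = DAG.getConstant(2, DL, VT);  // interned ConstantSDNode
  return DAG.getNode(ISD::MUL, DL, VT, Op.getOperand(0), Two);
}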
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:586
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
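For orientation, the SmallVector entries above fit together as in this self-contained sketch (smallVectorDemo is just a demo name):

#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <iterator>
using namespace llvm;

void smallVectorDemo() {
  SmallVector<int, 4> Vals;   // inline storage for up to 4 elements
  Vals.push_back(1);          // no heap allocation yet
  int More[] = {2, 3};
  Vals.append(std::begin(More), std::end(More));
  assert(!Vals.empty() && Vals.size() == 3);
}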
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
LegalizeTypeAction
This enum indicates whether a type is legal for a target, and if not, what action should be used to make it legal.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to do about it.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combine for.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the data layout.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion.
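These setters are normally invoked once, from a target's TargetLowering constructor, to populate the legality tables that the type and operation legalizers consult. A hedged sketch for a made-up backend (MyTargetLowering and every (op, type) choice below are hypothetical, not taken from any real target):

#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

// Hypothetical backend; none of these choices describe a real target.
class MyTargetLowering : public TargetLowering {
public:
  explicit MyTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {
    setBooleanContents(ZeroOrOneBooleanContent);
    setSchedulingPreference(Sched::RegPressure);
    setOperationAction(ISD::SDIV, MVT::i64, Expand);   // no native 64-bit sdiv
    setOperationAction(ISD::CTPOP, MVT::i32, Custom);  // handled in LowerOperation
    setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }
};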
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
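makeLibCall, listed above, is the usual escape hatch when an operation must become a runtime-library call. A sketch under the assumption that the caller sits in a lowering hook (expandViaLibcall is a hypothetical name, and RTLIB::SIN_F64 is just one illustrative libcall):

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

// Illustrative: expand an f64 operation into a call to the sin() libcall.
static std::pair<SDValue, SDValue>
expandViaLibcall(SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI) {
  TargetLowering::MakeLibCallOptions CallOptions;
  // Returns {result, chain}; the caller threads the chain back into the DAG.
  return TLI.makeLibCall(DAG, RTLIB::SIN_F64, MVT::f64, Op.getOperand(0),
                         CallOptions, SDLoc(Op));
}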
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:698
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getDoubleTy(LLVMContext &C)
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:242
static Type * getFloatTy(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition: Value.cpp:698
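stripPointerCastsAndAliases, above, is how analysis code looks through bitcasts, all-zero GEPs, addrspacecasts, and aliases to reach the underlying object, e.g. to check whether a call target is really a plain function. A small sketch (getDirectCallee is a hypothetical helper):

#include "llvm/IR/Function.h"
#include "llvm/IR/Value.h"
using namespace llvm;

static const Function *getDirectCallee(const Value *Callee) {
  // Peel casts/aliases; dyn_cast yields null if something else remains.
  return dyn_cast<Function>(Callee->stripPointerCastsAndAliases());
}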
static std::optional< unsigned > getLocalForStackObject(MachineFunction &MF, int FrameIndex)
This class is derived from MachineFunctionInfo and contains private WebAssembly-specific information for each MachineFunction.
Register getFrameRegister(const MachineFunction &MF) const override
const Triple & getTargetTriple() const
const WebAssemblyInstrInfo * getInstrInfo() const override
const WebAssemblyRegisterInfo * getRegisterInfo() const override
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const override
Return the in-memory pointer type for the given address space, defaults to the pointer type from the data layout.
self_iterator getIterator()
Definition: ilist_node.h:132
#define INT64_MIN
Definition: DataTypes.h:74
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ Swift
Calling convention for Swift.
Definition: CallingConv.h:69
@ PreserveMost
Used for runtime calls that preserve most registers.
Definition: CallingConv.h:63
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
@ WASM_EmscriptenInvoke
For emscripten __invoke_* functions.
Definition: CallingConv.h:229
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is not commonly executed.
Definition: CallingConv.h:47
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
Definition: CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:779
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
Definition: ISDOpcodes.h:1194
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1190
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
Definition: ISDOpcodes.h:257
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:743
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1223
@ ConstantFP
Definition: ISDOpcodes.h:77
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:276
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
Definition: ISDOpcodes.h:1099
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:497
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
Definition: ISDOpcodes.h:205
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
Definition: ISDOpcodes.h:840
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value.
Definition: ISDOpcodes.h:557
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition: ISDOpcodes.h:716
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low lanes of an integer vector.
Definition: ISDOpcodes.h:870
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:262
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision (16-bit) floating-point numbers.
Definition: ISDOpcodes.h:963
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:953
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:236
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1480
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ FrameIndex
Definition: ISDOpcodes.h:80
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:804
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the specified vector.
Definition: ISDOpcodes.h:634
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:1056
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1145
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1120
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1124
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:641
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, and a SRCVALUE for each pointer.
Definition: ISDOpcodes.h:1219
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:673
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:734
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2, whose elements are drawn from VEC1 and VEC2 according to a constant shuffle mask.
Definition: ISDOpcodes.h:614
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:587
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
Definition: ISDOpcodes.h:549
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.
Definition: ISDOpcodes.h:209
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:810
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
Definition: ISDOpcodes.h:1279
@ FP_TO_UINT_SAT
Definition: ISDOpcodes.h:906
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing ops #0 and #1.
Definition: ISDOpcodes.h:771
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1109
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register.
Definition: ISDOpcodes.h:848
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:696
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:938
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:100
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
Definition: ISDOpcodes.h:1047
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:886
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
Definition: ISDOpcodes.h:164
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:708
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1276
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
Definition: ISDOpcodes.h:190
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:286
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:538
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ ExternalSymbol
Definition: ISDOpcodes.h:83
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
Definition: ISDOpcodes.h:919
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low lanes of an integer vector.
Definition: ISDOpcodes.h:881
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer type given in operand 1.
Definition: ISDOpcodes.h:905
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:816
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1214
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:793
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:507
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
Definition: ISDOpcodes.h:347
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
Definition: ISDOpcodes.h:198
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable elements.
Definition: ISDOpcodes.h:529
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1603
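To make the opcode taxonomy above concrete, here is a generic (not WebAssembly-specific) peephole in the style of a DAG combine: fold (xor (setcc ...), -1) into a setcc with the inverted condition code. It assumes an i1 setcc result, and foldNotOfSetCC is a hypothetical name:

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue foldNotOfSetCC(SDNode *N, SelectionDAG &DAG) {
  // Match (xor (setcc a, b, cc), -1) where the setcc produces an i1.
  if (N->getOpcode() != ISD::XOR || !isAllOnesConstant(N->getOperand(1)))
    return SDValue();
  SDValue Cmp = N->getOperand(0);
  if (Cmp.getOpcode() != ISD::SETCC || Cmp.getValueType() != MVT::i1)
    return SDValue();
  ISD::CondCode CC = cast<CondCodeSDNode>(Cmp.getOperand(2))->get();
  ISD::CondCode Inv =
      ISD::getSetCCInverse(CC, Cmp.getOperand(0).getValueType());
  return DAG.getSetCC(SDLoc(N), MVT::i1, Cmp.getOperand(0), Cmp.getOperand(1),
                      Inv);
}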
OperandFlags
These are flags set on operands, but should be considered private; all access should go through the MCOperandInfo accessors.
Definition: MCInstrDesc.h:50
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
Definition: PatternMatch.h:599
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
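The three matchers above compose as in this sketch, which recognizes the front half of the canonical IR splat idiom (insertelement into lane 0, then shufflevector). Note that m_Shuffle deliberately ignores the mask, so a real caller would still verify it is all zeros; looksLikeSplatOfValue is a hypothetical name:

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool looksLikeSplatOfValue(Value *V, Value *&Scalar) {
  // insertelement <vec>, %Scalar, 0 ; then shufflevector of that result.
  return match(V, m_Shuffle(m_InsertElt(m_Value(), m_Value(Scalar),
                                        m_ZeroInt()),
                            m_Value()));
}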
MCSymbolWasm * getOrCreateFunctionTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __indirect_function_table, for use in call_indirect and in function bitcasts.
bool isWebAssemblyFuncrefType(const Type *Ty)
Return true if this is a WebAssembly Funcref Type.
bool isWebAssemblyTableType(const Type *Ty)
Return true if the table represents a WebAssembly table type.
MCSymbolWasm * getOrCreateFuncrefCallTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __funcref_call_table, for use in funcref calls when lowered to table.set + call_indirect.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
bool isValidAddressSpace(unsigned AS)
bool canLowerReturn(size_t ResultSize, const WebAssemblySubtarget *Subtarget)
Returns true if the function's return value(s) can be lowered directly, i.e., not indirectly via a pointer parameter.
bool isWasmVarAddressSpace(unsigned AS)
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:353
@ Offset
Definition: DWP.cpp:480
void computeSignatureVTs(const FunctionType *Ty, const Function *TargetFunc, const Function &ContextFunc, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:291
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if, which is equivalent to C.erase(remove_if(C, pred), C.end()).
Definition: STLExtras.h:2082
void computeLegalValueVTs(const WebAssemblyTargetLowering &TLI, LLVMContext &Ctx, const DataLayout &DL, Type *Ty, SmallVectorImpl< MVT > &ValueVTs)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition: MathExtras.h:382
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
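The arithmetic helpers above are easy to sanity-check by hand; a short self-contained demo (the function name is ours):

#include "llvm/Support/MathExtras.h"
#include <cassert>
using namespace llvm;

void mathExtrasDemo() {
  assert(isPowerOf2_32(64));         // 64 == 2^6, a power of two > 0
  assert(!isPowerOf2_32(48));
  assert(Log2_32_Ceil(48) == 6);     // smallest n with 2^n >= 48
  assert(NextPowerOf2(64) == 128);   // strictly greater than the input
}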
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
Definition: ValueTypes.h:94
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition: ValueTypes.h:74
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:147
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:359
uint64_t getScalarSizeInBits() const
Definition: ValueTypes.h:371
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:307
bool is128BitVector() const
Return true if this is a 128-bit vector type.
Definition: ValueTypes.h:204
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:65
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition: ValueTypes.h:367
bool isFixedLengthVector() const
Definition: ValueTypes.h:178
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition: ValueTypes.h:314
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Definition: ValueTypes.h:283
bool is256BitVector() const
Return true if this is a 256-bit vector type.
Definition: ValueTypes.h:209
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:204
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:319
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:157
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:327
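The EVT predicates above typically combine into shape checks like the following sketch (the predicate itself is illustrative, not from this file):

#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

// Illustrative: a fixed 128-bit vector with integer lanes narrower than i64.
static bool isNarrowIntVec128(EVT VT) {
  return VT.isFixedLengthVector() && VT.is128BitVector() &&
         VT.getVectorElementType().isInteger() &&
         VT.getScalarSizeInBits() < 64;
}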
bool isInConsecutiveRegs() const
Align getNonZeroOrigAlign() const
unsigned getByValSize() const
bool isInConsecutiveRegsLast() const
Align getNonZeroByValAlign() const
InputArg - This struct carries flags and type information about a single incoming (formal) argument or incoming return value.
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing return value.
bool IsFixed
IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:40
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Function object to check whether the second component of a container supported by std::get (like std::pair) compares less than the second component of another container.
Definition: STLExtras.h:1459