//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasHalfPrecision()) {
    addRegisterClass(MVT::v8f16, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasReferenceTypes()) {
    addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
    addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
    if (Subtarget->hasExceptionHandling()) {
      addRegisterClass(MVT::exnref, &WebAssembly::EXNREFRegClass);
    }
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Transform loads and stores to pointers in address space 1 to loads and
  // stores to WebAssembly global variables, outside linear memory.
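  // For example (illustrative, not a specific test case): a load of
  // "@g = global i32 0, addrspace(1)" becomes a global.get of the
  // corresponding wasm global rather than a linear-memory i32.load, and a
  // store to it becomes a global.set.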
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
    setOperationAction(ISD::LOAD, T, Custom);
    setOperationAction(ISD::STORE, T, Custom);
  }
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }
  if (Subtarget->hasReferenceTypes()) {
    // We need custom load and store lowering for both externref, funcref and
    // Other. The MVT::Other here represents tables of reference types.
    for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);
  setOperationAction(ISD::CLEAR_CACHE, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
    for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
                    ISD::FRINT, ISD::FROUNDEVEN})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  if (Subtarget->hasHalfPrecision()) {
    setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);
  }

  if (Subtarget->hasNontrappingFPToInt())
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      for (auto T : {MVT::i32, MVT::i64})
        setOperationAction(Op, T, Custom);

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Combine vector mask reductions into alltrue/anytrue
    setTargetDAGCombine(ISD::SETCC);

    // Convert vector to integer bitcasts to bitmask
    setTargetDAGCombine(ISD::BITCAST);

    // Hoist bitcasts out of shuffles
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

    // Combine extends of extract_subvectors into widening ops
    setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND});

    // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
    // conversion ops
    setTargetDAGCombine({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_EXTEND,
                         ISD::EXTRACT_SUBVECTOR});

    // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
    // into conversion ops
    setTargetDAGCombine({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                         ISD::FP_ROUND, ISD::CONCAT_VECTORS});

    setTargetDAGCombine(ISD::TRUNCATE);

    // Support saturating add for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Support integer abs
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
      setOperationAction(ISD::ABS, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Support splatting
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SPLAT_VECTOR, T, Legal);

    // Custom lowering since wasm shifts must have a scalar shift amount
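    // E.g. (a sketch of the intent): a v4i32 shl whose amount is a splat
    // becomes a single i32x4.shl taking a scalar i32 amount, while a
    // non-splat vector amount has no wasm instruction and gets unrolled.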
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32,
                     MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Custom);

    // There is no i8x16.mul instruction
    setOperationAction(ISD::MUL, MVT::v16i8, Expand);

    // There is no vector conditional select instruction
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SELECT_CC, T, Expand);

    // Expand integer operations supported for scalars but not SIMD
    for (auto Op :
         {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);

    // But we do have integer min and max operations
    for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Legal);

    // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
    setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);

    // Custom lower bit counting operations for other types to scalarize them.
    for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
      for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Expand float operations supported for scalars but not SIMD
    for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2})
      for (auto T : {MVT::v4f32, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // Unsigned comparison operations are unavailable for i64x2 vectors.
    for (auto CC : {ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, MVT::v2i64, Custom);

    // 64x2 conversions are not in the spec
    for (auto Op :
         {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // But saturating fp_to_int conversions are
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      setOperationAction(Op, MVT::v4i32, Custom);

    // Support vector extending
    for (auto T : MVT::integer_fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Custom);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
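  // E.g. SIGN_EXTEND_INREG with MVT::i8 means "sign-extend from the low 8
  // bits", which maps onto wasm's i32.extend8_s when the sign-ext feature is
  // available (illustrative).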
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_fixedlen_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  // - Floating-point extending loads.
  // - Floating-point truncating stores.
  // - i1 extending loads.
  // - truncating SIMD stores and most extending loads
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
    // But some vector extending loads are legal
    for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
    }
    setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for return address helper.
  // TODO: when implementing other Wasm backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
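  // E.g. (illustrative) a dense three-case switch becomes one br_table with
  // three targets plus a default, instead of a chain of compares and branches.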
  setMinimumJumpTableEntries(2);
}

MVT WebAssemblyTargetLowering::getPointerTy(const DataLayout &DL,
                                            uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerTy(DL, AS);
}

MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL,
                                               uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerMemTy(DL, AS);
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
  // Implementation copied from X86TargetLowering.
  unsigned Opc = VecOp.getOpcode();

  // Assume target opcodes can't be scalarized.
  // TODO - do we have any exceptions?
  if (Opc >= ISD::BUILTIN_OP_END)
    return false;

  // If the vector op is not supported, try to convert to scalar.
  EVT VecVT = VecOp.getValueType();
  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
    return true;

  // If the vector op is supported, but the scalar op is not, the transform may
  // not be worthwhile.
  EVT ScalarVT = VecVT.getScalarType();
  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
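// A rough sketch (signed f32 -> i32 case): the emitted code checks
// fabs(x) < 2^31 and only runs i32.trunc_f32_s when the check passes;
// otherwise it substitutes INT32_MIN, so the out-of-range input that LLVM
// leaves undefined never reaches the trapping wasm instruction.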
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  Register OutReg = MI.getOperand(0).getReg();
  Register InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    Register SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}

static MachineBasicBlock *
LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
                 const WebAssemblySubtarget *Subtarget,
                 const TargetInstrInfo &TII) {
  MachineInstr &CallParams = *CallResults.getPrevNode();
  assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
  assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
         CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);

  bool IsIndirect =
      CallParams.getOperand(0).isReg() || CallParams.getOperand(0).isFI();
  bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;

  bool IsFuncrefCall = false;
  if (IsIndirect && CallParams.getOperand(0).isReg()) {
    Register Reg = CallParams.getOperand(0).getReg();
    const MachineFunction *MF = BB->getParent();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
    IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
    assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
  }

  unsigned CallOp;
  if (IsIndirect && IsRetCall) {
    CallOp = WebAssembly::RET_CALL_INDIRECT;
  } else if (IsIndirect) {
    CallOp = WebAssembly::CALL_INDIRECT;
  } else if (IsRetCall) {
    CallOp = WebAssembly::RET_CALL;
  } else {
    CallOp = WebAssembly::CALL;
  }

  MachineFunction &MF = *BB->getParent();
  const MCInstrDesc &MCID = TII.get(CallOp);
  MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));

  // Move the function pointer to the end of the arguments for indirect calls
  if (IsIndirect) {
    auto FnPtr = CallParams.getOperand(0);
    CallParams.removeOperand(0);

    // For funcrefs, call_indirect is done through __funcref_call_table and the
    // funcref is always installed in slot 0 of the table, therefore instead of
    // having the function pointer added at the end of the params list, a zero
    // (the index in __funcref_call_table) is added.
    if (IsFuncrefCall) {
      Register RegZero =
          MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
      MachineInstrBuilder MIBC0 =
          BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);

      BB->insert(CallResults.getIterator(), MIBC0);
      MachineInstrBuilder(MF, CallParams).addReg(RegZero);
    } else
      CallParams.addOperand(FnPtr);
  }

  for (auto Def : CallResults.defs())
    MIB.add(Def);

  if (IsIndirect) {
    // Placeholder for the type index.
    MIB.addImm(0);
    // The table into which this call_indirect indexes.
    MCSymbolWasm *Table = IsFuncrefCall
                              ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
                                    MF.getContext(), Subtarget)
                              : WebAssembly::getOrCreateFunctionTableSymbol(
                                    MF.getContext(), Subtarget);
    if (Subtarget->hasReferenceTypes()) {
      MIB.addSym(Table);
    } else {
      // For the MVP there is at most one table whose number is 0, but we can't
      // write a table symbol or issue relocations. Instead we just ensure the
      // table is live and write a zero.
      Table->setNoStrip();
      MIB.addImm(0);
    }
  }

  for (auto Use : CallParams.uses())
    MIB.add(Use);

  BB->insert(CallResults.getIterator(), MIB);
  CallParams.eraseFromParent();
  CallResults.eraseFromParent();

  // If this is a funcref call, to avoid hidden GC roots, we need to clear the
  // table slot with ref.null upon call_indirect return.
  //
  // This generates the following code, which comes right after a call_indirect
  // of a funcref:
  //
  //    i32.const 0
  //    ref.null func
  //    table.set __funcref_call_table
  if (IsIndirect && IsFuncrefCall) {
    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    Register RegZero =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    MachineInstr *Const0 =
        BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
    BB->insertAfter(MIB.getInstr()->getIterator(), Const0);

    Register RegFuncref =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
    MachineInstr *RefNull =
        BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
    BB->insertAfter(Const0->getIterator(), RefNull);

    MachineInstr *TableSet =
        BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
            .addSym(Table)
            .addReg(RegZero)
            .addReg(RegFuncref);
    BB->insertAfter(RefNull->getIterator(), TableSet);
  }

  return BB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
  case WebAssembly::CALL_RESULTS:
  case WebAssembly::RET_CALL_RESULTS:
    return LowerCallResults(MI, DL, BB, Subtarget, TII);
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
  case WebAssemblyISD::FIRST_MEM_OPCODE:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
#include "WebAssemblyISD.def"
#undef HANDLE_MEM_NODETYPE
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      if (VT.isFloatingPoint() && !VT.isVector()) {
        switch (VT.getSizeInBits()) {
        case 32:
          return std::make_pair(0U, &WebAssembly::F32RegClass);
        case 64:
          return std::make_pair(0U, &WebAssembly::F64RegClass);
        default:
          break;
        }
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode gives us no way to determine if wrapping could be
  // happening, so we approximate this by accepting only non-negative offsets.
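  // E.g. (illustrative) folding the -4 from an access like "p[-1]" into a
  // load's offset immediate could wrap the unsigned effective address, so it
  // is rejected here and the -4 is added with an explicit add instead.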
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
    MachineMemOperand::Flags /*Flags*/, unsigned *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = 1;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT ExtT = ExtVal.getValueType();
  EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
  return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
         (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
         (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
}

bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // Wasm doesn't support function addresses with offsets
  const GlobalValue *GV = GA->getGlobal();
  return isa<Function>(GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
}

bool WebAssemblyTargetLowering::shouldSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
  using namespace llvm::PatternMatch;

  if (!I->getType()->isVectorTy() || !I->isShift())
    return false;

  Value *V = I->getOperand(1);
  // We don't need to sink constant splats.
  if (dyn_cast<Constant>(V))
    return false;

  if (match(V, m_Shuffle(m_InsertElt(m_Value(), m_Value(), m_ZeroInt()),
                         m_Value(), m_ZeroMask()))) {
    // Sink insert
    Ops.push_back(&cast<Instruction>(V)->getOperandUse(0));
    // Sink shuffle
    Ops.push_back(&I->getOperandUse(1));
    return true;
  }

  return false;
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  // So far, all branch instructions in Wasm take an I32 condition.
  // The default TargetLowering::getSetCCResultType returns the pointer size,
  // which would be useful to reduce instruction counts when testing
  // against 64-bit pointers/values if at some point Wasm supports that.
  return EVT::getIntegerVT(C, 32);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // atomic.notify instruction does not really load the memory specified with
    // this argument, but MachineMemOperand should either be load or store, so
    // we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_loadf16_f32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(2);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_storef16_f32:
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::f16;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = Align(2);
    Info.flags = MachineMemOperand::MOStore;
    return true;
  default:
    return false;
  }
}

void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
    const SelectionDAG &DAG, unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Op.getConstantOperandVal(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::wasm_bitmask: {
      unsigned BitWidth = Known.getBitWidth();
      EVT VT = Op.getOperand(1).getSimpleValueType();
      unsigned PossibleBits = VT.getVectorNumElements();
      APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
      Known.Zero |= ZeroMask;
      break;
    }
    }
  }
  }
}

TargetLoweringBase::LegalizeTypeAction
WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
  if (VT.isFixedLengthVector()) {
    MVT EltVT = VT.getVectorElementType();
    // We have legal vector types with these lane types, so widening the
    // vector would let us use some of the lanes directly without having to
    // extend or truncate values.
    if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
        EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
      return TypeWidenVector;
  }

  return TargetLowering::getPreferredVectorAction(VT);
}

bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
    SDValue Op, const TargetLoweringOpt &TLO) const {
  // ISel process runs DAGCombiner after legalization; this step is called
  // SelectionDAG optimization phase. This post-legalization combining process
  // runs DAGCombiner on each node, and if there was a change to be made,
  // re-runs legalization again on it and its user nodes to make sure
  // everything is in a legalized state.
  //
  // The legalization calls lowering routines, and we do our custom lowering
  // for build_vectors (LowerBUILD_VECTOR), which converts undef vector
  // elements into zeros. But there is a set of routines in DAGCombiner that
  // turns unused (= not demanded) nodes into undef, among which
  // SimplifyDemandedVectorElts turns unused vector elements into undefs. But
  // this routine does not work with our custom LowerBUILD_VECTOR, which turns
  // undefs into zeros. This combination can result in an infinite loop, in
  // which undefs are converted to zeros in legalization and back to undefs in
  // combining.
  //
  // So after DAG is legalized, we prevent SimplifyDemandedVectorElts from
  // running for build_vectors.
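  //
  // A sketch of the cycle this guards against: legalization rewrites
  // BUILD_VECTOR(x, undef) to BUILD_VECTOR(x, 0); the combiner then notices
  // lane 1 is not demanded and rewrites it back to BUILD_VECTOR(x, undef);
  // legalization runs again, and so on.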
  if (Op.getOpcode() == ISD::BUILD_VECTOR && TLO.LegalOps && TLO.LegalTys)
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties
  // like "cold", and we don't have any call-clobbered registers, so these are
  // mostly all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS ||
         CallConv == CallingConv::WASM_EmscriptenInvoke ||
         CallConv == CallingConv::Swift;
}

SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types "
             "to match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments if there
  // aren't any. These additional arguments are also added for the callee
  // signature; they are necessary to match callee and caller signature for
  // indirect calls.
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT, which is not needed for direct calls.
    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  // Lastly, if this is a call to a funcref we need to add an instruction
  // table.set to the chain and transform the call.
  if (CLI.CB && WebAssembly::isWebAssemblyFuncrefType(
                    CLI.CB->getCalledOperand()->getType())) {
    // In the absence of the function references proposal, where a funcref call
    // would be lowered to call_ref, we use reference types to generate a
    // table.set that installs the funcref in a special table used solely for
    // this purpose, followed by a call_indirect. Here we just generate the
    // table.set, and return the SDValue of the table.set so that LowerCall can
    // finalize the lowering by generating the call_indirect.
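    //
    // Roughly, for a call through a funcref value $f, the emitted wasm is:
    //   table.set __funcref_call_table  (slot 0 := $f)
    //   i32.const 0
    //   call_indirect __funcref_call_table
    // followed by the ref.null cleanup emitted in LowerCallResults above.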
    SDValue Chain = Ops[0];

    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
    SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
    SDValue TableSet = DAG.getMemIntrinsicNode(
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        // Machine Mem Operand args
        MachinePointerInfo(
            WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF),
        CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
        MachineMemOperand::MOStore);

    Ops[0] = TableSet; // The new chain is the TableSet itself
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can only handle returning tuples with multivalue enabled
  return WebAssembly::canLowerReturn(Outs.size(), Subtarget);
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(WebAssembly::canLowerReturn(Outs.size(), Subtarget) &&
         "MVP WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the
  // liveness of the incoming values before they're represented by virtual
  // registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc, emit additional swiftself and swifterror arguments if there
  // aren't any. These additional arguments are also added for the callee
  // signature; they are necessary to match callee and caller signature for
  // indirect calls.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
                      MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}

void WebAssemblyTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be
    // an illegal type.
    break;
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    // Do not add any results, signifying that N should not be custom lowered.
    // EXTEND_VECTOR_INREG is implemented for some vectors, but not all.
    break;
  default:
    llvm_unreachable(
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}

//===----------------------------------------------------------------------===//
// Custom lowering hooks.
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return LowerEXTEND_VECTOR_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    return LowerFP_TO_INT_SAT(Op, DAG);
  case ISD::LOAD:
    return LowerLoad(Op, DAG);
  case ISD::STORE:
    return LowerStore(Op, DAG);
  case ISD::CTPOP:
  case ISD::CTLZ:
  case ISD::CTTZ:
    return DAG.UnrollVectorOp(Op.getNode());
  case ISD::CLEAR_CACHE:
    report_fatal_error("llvm.clear_cache is not supported on wasm");
  }
}

static bool IsWebAssemblyGlobal(SDValue Op) {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());

  return false;
}

static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
                                                  SelectionDAG &DAG) {
  const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
  if (!FI)
    return std::nullopt;

  auto &MF = DAG.getMachineFunction();
  return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
}

SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  const SDValue &Value = SN->getValue();
  const SDValue &Base = SN->getBasePtr();
  const SDValue &Offset = SN->getOffset();

  if (IsWebAssemblyGlobal(Base)) {
    if (!Offset->isUndef())
      report_fatal_error("unexpected offset when storing to webassembly global",
                         false);

    SDVTList Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {SN->getChain(), Value, Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
                                   SN->getMemoryVT(), SN->getMemOperand());
  }

  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    if (!Offset->isUndef())
      report_fatal_error("unexpected offset when storing to webassembly local",
                         false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
    SDValue Ops[] = {SN->getChain(), Idx, Value};
    return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
  }

  if (WebAssembly::isWasmVarAddressSpace(SN->getAddressSpace()))
    report_fatal_error(
        "Encountered an unlowerable store to the wasm_var address space",
        false);

  return Op;
}

SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  const SDValue &Base = LN->getBasePtr();
  const SDValue &Offset = LN->getOffset();

  if (IsWebAssemblyGlobal(Base)) {
    if (!Offset->isUndef())
      report_fatal_error(
          "unexpected offset when loading from webassembly global", false);

    SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
    SDValue Ops[] = {LN->getChain(), Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
                                   LN->getMemoryVT(), LN->getMemOperand());
  }

  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    if (!Offset->isUndef())
      report_fatal_error(
          "unexpected offset when loading from webassembly local", false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    EVT LocalVT = LN->getValueType(0);
    SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
                                   {LN->getChain(), Idx});
    SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
    assert(Result->getNumValues() == 2 && "Loads must carry a chain!");
    return Result;
  }

  if (WebAssembly::isWasmVarAddressSpace(LN->getAddressSpace()))
    report_fatal_error(
        "Encountered an unlowerable load from the wasm_var address space",
        false);

  return Op;
}

SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // local.copy between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    Register Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                                   : WebAssembly::COPY_I64,
                                    DL, VT, Src),
                 0);
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy,
                                  Op.getNumOperands() == 4 ? Op.getOperand(3)
                                                           : SDValue());
  }
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);

  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
    fail(DL, DAG,
         "Non-Emscripten WebAssembly hasn't implemented "
         "__builtin_return_address");
    return SDValue();
  }

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = Op.getConstantOperandVal(0);
  MakeLibCallOptions CallOptions;
  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
                     {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
      .first;
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  Register FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

SDValue
WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);

  MachineFunction &MF = DAG.getMachineFunction();
  if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
    report_fatal_error("cannot use thread-local storage without bulk memory",
                       false);

  const GlobalValue *GV = GA->getGlobal();

  // Currently only Emscripten supports dynamic linking with threads.
  // Therefore, on other targets, if we have thread-local storage, only the
  // local-exec model is possible.
  auto model = Subtarget->getTargetTriple().isOSEmscripten()
                   ? GV->getThreadLocalMode()
                   : GlobalValue::LocalExecTLSModel;

  // Unsupported TLS modes
  assert(model != GlobalValue::NotThreadLocal);
  assert(model != GlobalValue::InitialExecTLSModel);

  if (model == GlobalValue::LocalExecTLSModel ||
      model == GlobalValue::LocalDynamicTLSModel ||
      (model == GlobalValue::GeneralDynamicTLSModel &&
       getTargetMachine().shouldAssumeDSOLocal(GV))) {
    // For DSO-local TLS variables we use offset from __tls_base

    MVT PtrVT = getPointerTy(DAG.getDataLayout());
    auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
                                       : WebAssembly::GLOBAL_GET_I32;
    const char *BaseName = MF.createExternalSymbolName("__tls_base");

    SDValue BaseAddr(
        DAG.getMachineNode(GlobalGet, DL, PtrVT,
                           DAG.getTargetExternalSymbol(BaseName, PtrVT)),
        0);

    SDValue TLSOffset = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
    SDValue SymOffset =
        DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);

    return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset);
  }

  assert(model == GlobalValue::GeneralDynamicTLSModel);

  EVT VT = Op.getValueType();
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(),
                                                WebAssemblyII::MO_GOT_TLS));
}
1731
1732SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1733 SelectionDAG &DAG) const {
1734 SDLoc DL(Op);
1735 const auto *GA = cast<GlobalAddressSDNode>(Op);
1736 EVT VT = Op.getValueType();
1737 assert(GA->getTargetFlags() == 0 &&
1738 "Unexpected target flags on generic GlobalAddressSDNode");
1739 if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
1740 fail(DL, DAG, "Invalid address space for WebAssembly target");
1741
1742 unsigned OperandFlags = 0;
1743 const GlobalValue *GV = GA->getGlobal();
1744 // Since WebAssembly tables cannot yet be shared across modules, we don't
1745 // need special treatment for tables in PIC mode.
1746 if (isPositionIndependent() &&
1747 !WebAssembly::isWebAssemblyTableType(GV->getValueType())) {
1748 if (getTargetMachine().shouldAssumeDSOLocal(GV)) {
1749 MachineFunction &MF = DAG.getMachineFunction();
1750 MVT PtrVT = getPointerTy(MF.getDataLayout());
1751 const char *BaseName;
1752 if (GV->getValueType()->isFunctionTy()) {
1753 BaseName = MF.createExternalSymbolName("__table_base");
1754 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1755 } else {
1756 BaseName = MF.createExternalSymbolName("__memory_base");
1757 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1758 }
1759 SDValue BaseAddr =
1760 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1761 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1762
1763 SDValue SymAddr = DAG.getNode(
1764 WebAssemblyISD::WrapperREL, DL, VT,
1765 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1766 OperandFlags));
1767
1768 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1769 }
1770 OperandFlags = WebAssemblyII::MO_GOT;
1771 }
1772
1773 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1774 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1775 GA->getOffset(), OperandFlags));
1776}
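// An illustrative sketch of the PIC, DSO-local path above on wasm32 (g is a
// placeholder data symbol; the exact relocation spelling may differ):
//
//   global.get __memory_base
//   i32.const g@MBREL          ;; offset of g from __memory_base
//   i32.add
//
// Function symbols are offset from __table_base instead, and non-DSO-local
// symbols are addressed through GOT imports via the MO_GOT flag.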
1777
1778SDValue
1779WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1780 SelectionDAG &DAG) const {
1781 SDLoc DL(Op);
1782 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1783 EVT VT = Op.getValueType();
1784 assert(ES->getTargetFlags() == 0 &&
1785 "Unexpected target flags on generic ExternalSymbolSDNode");
1786 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1787 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1788}
1789
1790SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1791 SelectionDAG &DAG) const {
1792 // There's no need for a Wrapper node because we always incorporate a jump
1793 // table operand into a BR_TABLE instruction, rather than ever
1794 // materializing it in a register.
1795 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1796 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1797 JT->getTargetFlags());
1798}
1799
1800SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1801 SelectionDAG &DAG) const {
1802 SDLoc DL(Op);
1803 SDValue Chain = Op.getOperand(0);
1804 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1805 SDValue Index = Op.getOperand(2);
1806 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1807
1808 SmallVector<SDValue, 8> Ops;
1809 Ops.push_back(Chain);
1810 Ops.push_back(Index);
1811
1812 const MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1813 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1814
1815 // Add an operand for each case.
1816 for (auto *MBB : MBBs)
1817 Ops.push_back(DAG.getBasicBlock(MBB));
1818
1819 // Add the first MBB as a dummy default target for now. This will be replaced
1820 // with the proper default target (and the preceding range check eliminated)
1821 // if possible by WebAssemblyFixBrTableDefaults.
1822 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1823 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1824}
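// The resulting BR_TABLE node carries (chain, index, case MBBs..., default
// MBB) and is eventually selected to a wasm br_table instruction, roughly
// (label names are placeholders):
//
//   br_table $case0 $case1 ... $default
//
// with the dummy default fixed up later by WebAssemblyFixBrTableDefaults.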
1825
1826SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1827 SelectionDAG &DAG) const {
1828 SDLoc DL(Op);
1829 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1830
1831 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1832 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1833
1834 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1835 MFI->getVarargBufferVreg(), PtrVT);
1836 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1837 MachinePointerInfo(SV));
1838}
1839
1840SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1841 SelectionDAG &DAG) const {
1842 MachineFunction &MF = DAG.getMachineFunction();
1843 unsigned IntNo;
1844 switch (Op.getOpcode()) {
1845 case ISD::INTRINSIC_VOID:
1846 case ISD::INTRINSIC_W_CHAIN:
1847 IntNo = Op.getConstantOperandVal(1);
1848 break;
1849 case ISD::INTRINSIC_WO_CHAIN:
1850 IntNo = Op.getConstantOperandVal(0);
1851 break;
1852 default:
1853 llvm_unreachable("Invalid intrinsic");
1854 }
1855 SDLoc DL(Op);
1856
1857 switch (IntNo) {
1858 default:
1859 return SDValue(); // Don't custom lower most intrinsics.
1860
1861 case Intrinsic::wasm_lsda: {
1862 auto PtrVT = getPointerTy(MF.getDataLayout());
1863 const char *SymName = MF.createExternalSymbolName(
1864 "GCC_except_table" + std::to_string(MF.getFunctionNumber()));
1865 if (isPositionIndependent()) {
1866 SDValue Node = DAG.getTargetExternalSymbol(
1867 SymName, PtrVT, WebAssemblyII::MO_MEMORY_BASE_REL);
1868 const char *BaseName = MF.createExternalSymbolName("__memory_base");
1869 SDValue BaseAddr =
1870 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1871 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1872 SDValue SymAddr =
1873 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node);
1874 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
1875 }
1876 SDValue Node = DAG.getTargetExternalSymbol(SymName, PtrVT);
1877 return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node);
1878 }
1879
1880 case Intrinsic::wasm_shuffle: {
1881 // Drop in-chain and replace undefs, but otherwise pass through unchanged
1882 SDValue Ops[18];
1883 size_t OpIdx = 0;
1884 Ops[OpIdx++] = Op.getOperand(1);
1885 Ops[OpIdx++] = Op.getOperand(2);
1886 while (OpIdx < 18) {
1887 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1888 if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsZExtVal() >= 32) {
1889 bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
1890 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
1891 } else {
1892 Ops[OpIdx++] = MaskIdx;
1893 }
1894 }
1895 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1896 }
1897 }
1898}
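// For wasm_shuffle above, an undef or out-of-range lane index is
// canonicalized to 0; e.g. a mask beginning <0, undef, 34, 3, ...> becomes
// <0, 0, 0, 3, ...>, since i8x16.shuffle requires all 16 immediates to be
// in [0, 31].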
1899
1900SDValue
1901WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1902 SelectionDAG &DAG) const {
1903 SDLoc DL(Op);
1904 // If sign extension operations are disabled, allow sext_inreg only if operand
1905 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1906 // extension operations, but allowing sext_inreg in this context lets us have
1907 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1908 // everywhere would be simpler in this file, but would necessitate large and
1909 // brittle patterns to undo the expansion and select extract_lane_s
1910 // instructions.
1911 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
1912 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1913 return SDValue();
1914
1915 const SDValue &Extract = Op.getOperand(0);
1916 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1917 if (VecT.getVectorElementType().getSizeInBits() > 32)
1918 return SDValue();
1919 MVT ExtractedLaneT =
1920 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1921 MVT ExtractedVecT =
1922 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1923 if (ExtractedVecT == VecT)
1924 return Op;
1925
1926 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1927 const SDNode *Index = Extract.getOperand(1).getNode();
1928 if (!isa<ConstantSDNode>(Index))
1929 return SDValue();
1930 unsigned IndexVal = Index->getAsZExtVal();
1931 unsigned Scale =
1932 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1933 assert(Scale > 1);
1934 SDValue NewIndex =
1935 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1936 SDValue NewExtract = DAG.getNode(
1937 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1938 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1939 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1940 Op.getOperand(1));
1941}
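// Example: (sext_inreg (extract_vector_elt (v4i32 $v), 1), i8) becomes an
// extract of lane 4 from the same bits viewed as v16i8 (the index is scaled
// by 16/4 = 4), which then matches the i8x16.extract_lane_s pattern.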
1942
1943SDValue
1944WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(SDValue Op,
1945 SelectionDAG &DAG) const {
1946 SDLoc DL(Op);
1947 EVT VT = Op.getValueType();
1948 SDValue Src = Op.getOperand(0);
1949 EVT SrcVT = Src.getValueType();
1950
1951 if (SrcVT.getVectorElementType() == MVT::i1 ||
1952 SrcVT.getVectorElementType() == MVT::i64)
1953 return SDValue();
1954
1955 assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 &&
1956 "Unexpected extension factor.");
1957 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
1958
1959 if (Scale != 2 && Scale != 4 && Scale != 8)
1960 return SDValue();
1961
1962 unsigned Ext;
1963 switch (Op.getOpcode()) {
1964 case ISD::ZERO_EXTEND_VECTOR_INREG:
1965 Ext = WebAssemblyISD::EXTEND_LOW_U;
1966 break;
1967 case ISD::SIGN_EXTEND_VECTOR_INREG:
1968 Ext = WebAssemblyISD::EXTEND_LOW_S;
1969 break;
1970 }
1971
1972 SDValue Ret = Src;
1973 while (Scale != 1) {
1974 Ret = DAG.getNode(Ext, DL,
1975 Ret.getValueType()
1976 .widenIntegerVectorElementType(*DAG.getContext())
1977 .getHalfNumVectorElementsVT(*DAG.getContext()),
1978 Ret);
1979 Scale /= 2;
1980 }
1981 assert(Ret.getValueType() == VT);
1982 return Ret;
1983}
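// Example: a v4i32 zero_extend_vector_inreg of a v16i8 source (Scale == 4)
// is emitted as two EXTEND_LOW_U steps, roughly:
//
//   i16x8.extend_low_i8x16_u   ;; v16i8 -> v8i16
//   i32x4.extend_low_i16x8_u   ;; v8i16 -> v4i32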
1984
1985 static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) {
1986 SDLoc DL(Op);
1987 if (Op.getValueType() != MVT::v2f64)
1988 return SDValue();
1989
1990 auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
1991 unsigned &Index) -> bool {
1992 switch (Op.getOpcode()) {
1993 case ISD::SINT_TO_FP:
1994 Opcode = WebAssemblyISD::CONVERT_LOW_S;
1995 break;
1996 case ISD::UINT_TO_FP:
1997 Opcode = WebAssemblyISD::CONVERT_LOW_U;
1998 break;
1999 case ISD::FP_EXTEND:
2000 Opcode = WebAssemblyISD::PROMOTE_LOW;
2001 break;
2002 default:
2003 return false;
2004 }
2005
2006 auto ExtractVector = Op.getOperand(0);
2007 if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2008 return false;
2009
2010 if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
2011 return false;
2012
2013 SrcVec = ExtractVector.getOperand(0);
2014 Index = ExtractVector.getConstantOperandVal(1);
2015 return true;
2016 };
2017
2018 unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
2019 SDValue LHSSrcVec, RHSSrcVec;
2020 if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
2021 !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
2022 return SDValue();
2023
2024 if (LHSOpcode != RHSOpcode)
2025 return SDValue();
2026
2027 MVT ExpectedSrcVT;
2028 switch (LHSOpcode) {
2029 case WebAssemblyISD::CONVERT_LOW_S:
2030 case WebAssemblyISD::CONVERT_LOW_U:
2031 ExpectedSrcVT = MVT::v4i32;
2032 break;
2033 case WebAssemblyISD::PROMOTE_LOW:
2034 ExpectedSrcVT = MVT::v4f32;
2035 break;
2036 }
2037 if (LHSSrcVec.getValueType() != ExpectedSrcVT)
2038 return SDValue();
2039
2040 auto Src = LHSSrcVec;
2041 if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
2042 // Shuffle the source vector so that the converted lanes are the low lanes.
2043 Src = DAG.getVectorShuffle(
2044 ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec,
2045 {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
2046 }
2047 return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src);
2048}
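// Example: (build_vector (sitofp (extract_elt $v, 0)),
//                        (sitofp (extract_elt $v, 1))) with $v : v4i32
// becomes f64x2.convert_low_i32x4_s applied to $v; if the converted lanes
// are not already lanes 0 and 1 of a single vector, a shuffle first moves
// them into the low lanes.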
2049
2050SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2051 SelectionDAG &DAG) const {
2052 if (auto ConvertLow = LowerConvertLow(Op, DAG))
2053 return ConvertLow;
2054
2055 SDLoc DL(Op);
2056 const EVT VecT = Op.getValueType();
2057 const EVT LaneT = Op.getOperand(0).getValueType();
2058 const size_t Lanes = Op.getNumOperands();
2059 bool CanSwizzle = VecT == MVT::v16i8;
2060
2061 // BUILD_VECTORs are lowered to the instruction that initializes the highest
2062 // possible number of lanes at once followed by a sequence of replace_lane
2063 // instructions to individually initialize any remaining lanes.
2064
2065 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
2066 // swizzled lanes should be given greater weight.
2067
2068 // TODO: Investigate looping rather than always extracting/replacing specific
2069 // lanes to fill gaps.
2070
2071 auto IsConstant = [](const SDValue &V) {
2072 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
2073 };
2074
2075 // Returns the source vector and index vector pair if they exist. Checks for:
2076 // (extract_vector_elt
2077 // $src,
2078 // (sign_extend_inreg (extract_vector_elt $indices, $i))
2079 // )
2080 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
2081 auto Bail = std::make_pair(SDValue(), SDValue());
2082 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2083 return Bail;
2084 const SDValue &SwizzleSrc = Lane->getOperand(0);
2085 const SDValue &IndexExt = Lane->getOperand(1);
2086 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
2087 return Bail;
2088 const SDValue &Index = IndexExt->getOperand(0);
2089 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2090 return Bail;
2091 const SDValue &SwizzleIndices = Index->getOperand(0);
2092 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
2093 SwizzleIndices.getValueType() != MVT::v16i8 ||
2094 Index->getOperand(1)->getOpcode() != ISD::Constant ||
2095 Index->getConstantOperandVal(1) != I)
2096 return Bail;
2097 return std::make_pair(SwizzleSrc, SwizzleIndices);
2098 };
2099
2100 // If the lane is extracted from another vector at a constant index, return
2101 // that vector. The source vector must not have more lanes than the dest
2102 // because the shufflevector indices are in terms of the destination lanes and
2103 // would not be able to address the smaller individual source lanes.
2104 auto GetShuffleSrc = [&](const SDValue &Lane) {
2105 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2106 return SDValue();
2107 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
2108 return SDValue();
2109 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
2110 VecT.getVectorNumElements())
2111 return SDValue();
2112 return Lane->getOperand(0);
2113 };
2114
2115 using ValueEntry = std::pair<SDValue, size_t>;
2116 SmallVector<ValueEntry, 16> SplatValueCounts;
2117
2118 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
2119 SmallVector<SwizzleEntry, 16> SwizzleCounts;
2120
2121 using ShuffleEntry = std::pair<SDValue, size_t>;
2122 SmallVector<ShuffleEntry, 16> ShuffleCounts;
2123
2124 auto AddCount = [](auto &Counts, const auto &Val) {
2125 auto CountIt =
2126 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
2127 if (CountIt == Counts.end()) {
2128 Counts.emplace_back(Val, 1);
2129 } else {
2130 CountIt->second++;
2131 }
2132 };
2133
2134 auto GetMostCommon = [](auto &Counts) {
2135 auto CommonIt =
2136 std::max_element(Counts.begin(), Counts.end(), llvm::less_second());
2137 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
2138 return *CommonIt;
2139 };
2140
2141 size_t NumConstantLanes = 0;
2142
2143 // Count eligible lanes for each type of vector creation op
2144 for (size_t I = 0; I < Lanes; ++I) {
2145 const SDValue &Lane = Op->getOperand(I);
2146 if (Lane.isUndef())
2147 continue;
2148
2149 AddCount(SplatValueCounts, Lane);
2150
2151 if (IsConstant(Lane))
2152 NumConstantLanes++;
2153 if (auto ShuffleSrc = GetShuffleSrc(Lane))
2154 AddCount(ShuffleCounts, ShuffleSrc);
2155 if (CanSwizzle) {
2156 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
2157 if (SwizzleSrcs.first)
2158 AddCount(SwizzleCounts, SwizzleSrcs);
2159 }
2160 }
2161
2162 SDValue SplatValue;
2163 size_t NumSplatLanes;
2164 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
2165
2166 SDValue SwizzleSrc;
2167 SDValue SwizzleIndices;
2168 size_t NumSwizzleLanes = 0;
2169 if (SwizzleCounts.size())
2170 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
2171 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
2172
2173 // Shuffles can draw from up to two vectors, so find the two most common
2174 // sources.
2175 SDValue ShuffleSrc1, ShuffleSrc2;
2176 size_t NumShuffleLanes = 0;
2177 if (ShuffleCounts.size()) {
2178 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
2179 llvm::erase_if(ShuffleCounts,
2180 [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
2181 }
2182 if (ShuffleCounts.size()) {
2183 size_t AdditionalShuffleLanes;
2184 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
2185 GetMostCommon(ShuffleCounts);
2186 NumShuffleLanes += AdditionalShuffleLanes;
2187 }
2188
2189 // Predicate returning true if the lane is properly initialized by the
2190 // original instruction
2191 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
2192 SDValue Result;
2193 // Prefer swizzles over shuffles over vector consts over splats
2194 if (NumSwizzleLanes >= NumShuffleLanes &&
2195 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
2196 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
2197 SwizzleIndices);
2198 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
2199 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
2200 return Swizzled == GetSwizzleSrcs(I, Lane);
2201 };
2202 } else if (NumShuffleLanes >= NumConstantLanes &&
2203 NumShuffleLanes >= NumSplatLanes) {
2204 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
2205 size_t DestLaneCount = VecT.getVectorNumElements();
2206 size_t Scale1 = 1;
2207 size_t Scale2 = 1;
2208 SDValue Src1 = ShuffleSrc1;
2209 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
2210 if (Src1.getValueType() != VecT) {
2211 size_t LaneSize =
2212 Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
2213 assert(LaneSize > DestLaneSize);
2214 Scale1 = LaneSize / DestLaneSize;
2215 Src1 = DAG.getBitcast(VecT, Src1);
2216 }
2217 if (Src2.getValueType() != VecT) {
2218 size_t LaneSize =
2219 Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
2220 assert(LaneSize > DestLaneSize);
2221 Scale2 = LaneSize / DestLaneSize;
2222 Src2 = DAG.getBitcast(VecT, Src2);
2223 }
2224
2225 int Mask[16];
2226 assert(DestLaneCount <= 16);
2227 for (size_t I = 0; I < DestLaneCount; ++I) {
2228 const SDValue &Lane = Op->getOperand(I);
2229 SDValue Src = GetShuffleSrc(Lane);
2230 if (Src == ShuffleSrc1) {
2231 Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
2232 } else if (Src && Src == ShuffleSrc2) {
2233 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
2234 } else {
2235 Mask[I] = -1;
2236 }
2237 }
2238 ArrayRef<int> MaskRef(Mask, DestLaneCount);
2239 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
2240 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
2241 auto Src = GetShuffleSrc(Lane);
2242 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
2243 };
2244 } else if (NumConstantLanes >= NumSplatLanes) {
2245 SmallVector<SDValue, 16> ConstLanes;
2246 for (const SDValue &Lane : Op->op_values()) {
2247 if (IsConstant(Lane)) {
2248 // Values may need to be fixed so that they will sign extend to be
2249 // within the expected range during ISel. Check whether the value is in
2250 // bounds based on the lane bit width and if it is out of bounds, lop
2251 // off the extra bits and subtract 2^n to reflect giving the high bit
2252 // value -2^(n-1) rather than +2^(n-1). Skip the i64 case because it
2253 // cannot possibly be out of range.
2254 auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode());
2255 int64_t Val = Const ? Const->getSExtValue() : 0;
2256 uint64_t LaneBits = 128 / Lanes;
2257 assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&
2258 "Unexpected out of bounds negative value");
2259 if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
2260 uint64_t Mask = (1ll << LaneBits) - 1;
2261 auto NewVal = (((uint64_t)Val & Mask) - (1ll << LaneBits)) & Mask;
2262 ConstLanes.push_back(DAG.getConstant(NewVal, SDLoc(Lane), LaneT));
2263 } else {
2264 ConstLanes.push_back(Lane);
2265 }
2266 } else if (LaneT.isFloatingPoint()) {
2267 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
2268 } else {
2269 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
2270 }
2271 }
2272 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
2273 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
2274 return IsConstant(Lane);
2275 };
2276 } else {
2277 // Use a splat (which might be selected as a load splat)
2278 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
2279 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
2280 return Lane == SplatValue;
2281 };
2282 }
2283
2284 assert(Result);
2285 assert(IsLaneConstructed);
2286
2287 // Add replace_lane instructions for any unhandled values
2288 for (size_t I = 0; I < Lanes; ++I) {
2289 const SDValue &Lane = Op->getOperand(I);
2290 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
2291 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
2292 DAG.getConstant(I, DL, MVT::i32));
2293 }
2294
2295 return Result;
2296}
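// Example: building <x, x, x, y> : v4f32 from non-constant x and y takes the
// splat path above (three of the four lanes match), roughly:
//
//   f32x4.splat            ;; x broadcast to all lanes
//   f32x4.replace_lane 3   ;; patch in y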
2297
2298SDValue
2299WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2300 SelectionDAG &DAG) const {
2301 SDLoc DL(Op);
2302 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
2303 MVT VecType = Op.getOperand(0).getSimpleValueType();
2304 assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
2305 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
2306
2307 // Space for two vector args and sixteen mask indices
2308 SDValue Ops[18];
2309 size_t OpIdx = 0;
2310 Ops[OpIdx++] = Op.getOperand(0);
2311 Ops[OpIdx++] = Op.getOperand(1);
2312
2313 // Expand mask indices to byte indices and materialize them as operands
2314 for (int M : Mask) {
2315 for (size_t J = 0; J < LaneBytes; ++J) {
2316 // Lower undefs (represented by -1 in the mask) to byte indices
2317 // {0..LaneBytes-1}, i.e. a whole lane of the first input, so the VM can
2318 // reduce further, e.g. match an 8x16 byte shuffle to a cheaper 32x4 one.
2319 uint64_t ByteIndex = M == -1 ? J : (uint64_t)M * LaneBytes + J;
2320 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
2321 }
2322 }
2323
2324 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
2325}
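// Example: a v4i32 shuffle with mask <0, 4, 1, 5> expands each lane index to
// four byte indices, i.e. i8x16.shuffle 0 1 2 3 16 17 18 19 4 5 6 7 20 21 22
// 23, where byte indices >= 16 select from the second input vector.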
2326
2327SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
2328 SelectionDAG &DAG) const {
2329 SDLoc DL(Op);
2330 // The legalizer does not know how to expand the unsupported comparison modes
2331 // of i64x2 vectors, so we manually unroll them here.
2332 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
2333 SmallVector<SDValue> LHS, RHS;
2334 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
2335 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
2336 const SDValue &CC = Op->getOperand(2);
2337 auto MakeLane = [&](unsigned I) {
2338 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
2339 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
2340 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
2341 };
2342 return DAG.getBuildVector(Op->getValueType(0), DL,
2343 {MakeLane(0), MakeLane(1)});
2344}
2345
2346SDValue
2347WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
2348 SelectionDAG &DAG) const {
2349 // Allow constant lane indices, expand variable lane indices
2350 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
2351 if (isa<ConstantSDNode>(IdxNode)) {
2352 // Ensure the index type is i32 to match the tablegen patterns
2353 uint64_t Idx = IdxNode->getAsZExtVal();
2354 SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
2355 Ops[Op.getNumOperands() - 1] =
2356 DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
2357 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Ops);
2358 }
2359 // Perform default expansion
2360 return SDValue();
2361}
2362
2363 static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
2364 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
2365 // 32-bit and 64-bit unrolled shifts will have proper semantics
2366 if (LaneT.bitsGE(MVT::i32))
2367 return DAG.UnrollVectorOp(Op.getNode());
2368 // Otherwise mask the shift value to get proper semantics from 32-bit shift
2369 SDLoc DL(Op);
2370 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
2371 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
2372 unsigned ShiftOpcode = Op.getOpcode();
2373 SmallVector<SDValue, 16> ShiftedElements;
2374 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
2375 SmallVector<SDValue, 16> ShiftElements;
2376 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
2377 SmallVector<SDValue, 16> UnrolledOps;
2378 for (size_t i = 0; i < NumLanes; ++i) {
2379 SDValue MaskedShiftValue =
2380 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
2381 SDValue ShiftedValue = ShiftedElements[i];
2382 if (ShiftOpcode == ISD::SRA)
2383 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
2384 ShiftedValue, DAG.getValueType(LaneT));
2385 UnrolledOps.push_back(
2386 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2387 }
2388 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
2389}
2390
2391SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2392 SelectionDAG &DAG) const {
2393 SDLoc DL(Op);
2394
2395 // Only manually lower vector shifts
2396 assert(Op.getSimpleValueType().isVector());
2397
2398 uint64_t LaneBits = Op.getValueType().getScalarSizeInBits();
2399 auto ShiftVal = Op.getOperand(1);
2400
2401 // Try to skip the bitmask operation, since it is implied by the shift instruction
2402 auto SkipImpliedMask = [](SDValue MaskOp, uint64_t MaskBits) {
2403 if (MaskOp.getOpcode() != ISD::AND)
2404 return MaskOp;
2405 SDValue LHS = MaskOp.getOperand(0);
2406 SDValue RHS = MaskOp.getOperand(1);
2407 if (MaskOp.getValueType().isVector()) {
2408 APInt MaskVal;
2409 if (!ISD::isConstantSplatVector(RHS.getNode(), MaskVal))
2410 std::swap(LHS, RHS);
2411
2412 if (ISD::isConstantSplatVector(RHS.getNode(), MaskVal) &&
2413 MaskVal == MaskBits)
2414 MaskOp = LHS;
2415 } else {
2416 if (!isa<ConstantSDNode>(RHS.getNode()))
2417 std::swap(LHS, RHS);
2418
2419 auto ConstantRHS = dyn_cast<ConstantSDNode>(RHS.getNode());
2420 if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
2421 MaskOp = LHS;
2422 }
2423
2424 return MaskOp;
2425 };
2426
2427 // Skip the vector AND operation, if present
2428 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2429 ShiftVal = DAG.getSplatValue(ShiftVal);
2430 if (!ShiftVal)
2431 return unrollVectorShift(Op, DAG);
2432
2433 // Skip the scalar AND operation, if present
2434 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2435 // Use anyext because none of the high bits can affect the shift
2436 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
2437
2438 unsigned Opcode;
2439 switch (Op.getOpcode()) {
2440 case ISD::SHL:
2441 Opcode = WebAssemblyISD::VEC_SHL;
2442 break;
2443 case ISD::SRA:
2444 Opcode = WebAssemblyISD::VEC_SHR_S;
2445 break;
2446 case ISD::SRL:
2447 Opcode = WebAssemblyISD::VEC_SHR_U;
2448 break;
2449 default:
2450 llvm_unreachable("unexpected opcode");
2451 }
2452
2453 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
2454}
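// Example: (shl $v:v4i32, (splat (and $x, 31))) drops both AND operations
// (i32x4.shl already takes its shift count modulo the 32-bit lane width) and
// lowers to VEC_SHL of $v by the scalar $x.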
2455
2456SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2457 SelectionDAG &DAG) const {
2458 SDLoc DL(Op);
2459 EVT ResT = Op.getValueType();
2460 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2461
2462 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2463 (SatVT == MVT::i32 || SatVT == MVT::i64))
2464 return Op;
2465
2466 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2467 return Op;
2468
2469 return SDValue();
2470}
2471
2472//===----------------------------------------------------------------------===//
2473// Custom DAG combine hooks
2474//===----------------------------------------------------------------------===//
2475static SDValue
2476 performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2477 auto &DAG = DCI.DAG;
2478 auto Shuffle = cast<ShuffleVectorSDNode>(N);
2479
2480 // Hoist vector bitcasts that don't change the number of lanes out of unary
2481 // shuffles, where they are less likely to get in the way of other combines.
2482 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
2483 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
2484 SDValue Bitcast = N->getOperand(0);
2485 if (Bitcast.getOpcode() != ISD::BITCAST)
2486 return SDValue();
2487 if (!N->getOperand(1).isUndef())
2488 return SDValue();
2489 SDValue CastOp = Bitcast.getOperand(0);
2490 EVT SrcType = CastOp.getValueType();
2491 EVT DstType = Bitcast.getValueType();
2492 if (!SrcType.is128BitVector() ||
2493 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2494 return SDValue();
2495 SDValue NewShuffle = DAG.getVectorShuffle(
2496 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
2497 return DAG.getBitcast(DstType, NewShuffle);
2498}
2499
2500/// Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get
2501/// split up into scalar instructions during legalization, and the vector
2502/// extending instructions are selected in performVectorExtendCombine below.
2503static SDValue
2504 performVectorExtendToFPCombine(SDNode *N,
2505 TargetLowering::DAGCombinerInfo &DCI) {
2506 auto &DAG = DCI.DAG;
2507 assert(N->getOpcode() == ISD::UINT_TO_FP ||
2508 N->getOpcode() == ISD::SINT_TO_FP);
2509
2510 EVT InVT = N->getOperand(0)->getValueType(0);
2511 EVT ResVT = N->getValueType(0);
2512 MVT ExtVT;
2513 if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
2514 ExtVT = MVT::v4i32;
2515 else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
2516 ExtVT = MVT::v2i32;
2517 else
2518 return SDValue();
2519
2520 unsigned Op =
2521 N->getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
2522 SDValue Conv = DAG.getNode(Op, SDLoc(N), ExtVT, N->getOperand(0));
2523 return DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, Conv);
2524}
2525
2526static SDValue
2527 performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2528 auto &DAG = DCI.DAG;
2529 assert(N->getOpcode() == ISD::SIGN_EXTEND ||
2530 N->getOpcode() == ISD::ZERO_EXTEND);
2531
2532 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2533 // possible before the extract_subvector can be expanded.
2534 auto Extract = N->getOperand(0);
2535 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2536 return SDValue();
2537 auto Source = Extract.getOperand(0);
2538 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2539 if (IndexNode == nullptr)
2540 return SDValue();
2541 auto Index = IndexNode->getZExtValue();
2542
2543 // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2544 // extracted subvector is the low or high half of its source.
2545 EVT ResVT = N->getValueType(0);
2546 if (ResVT == MVT::v8i16) {
2547 if (Extract.getValueType() != MVT::v8i8 ||
2548 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2549 return SDValue();
2550 } else if (ResVT == MVT::v4i32) {
2551 if (Extract.getValueType() != MVT::v4i16 ||
2552 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2553 return SDValue();
2554 } else if (ResVT == MVT::v2i64) {
2555 if (Extract.getValueType() != MVT::v2i32 ||
2556 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2557 return SDValue();
2558 } else {
2559 return SDValue();
2560 }
2561
2562 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2563 bool IsLow = Index == 0;
2564
2565 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2566 : WebAssemblyISD::EXTEND_HIGH_S)
2567 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2568 : WebAssemblyISD::EXTEND_HIGH_U);
2569
2570 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2571}
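// Example: (v8i16 (zero_extend (extract_subvector $v:v16i8, 8))) becomes
// EXTEND_HIGH_U, i.e. i16x8.extend_high_i8x16_u $v, before type legalization
// would otherwise expand the subvector extract.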
2572
2573static SDValue
2574 performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2575 auto &DAG = DCI.DAG;
2576
2577 auto GetWasmConversionOp = [](unsigned Op) {
2578 switch (Op) {
2579 case ISD::FP_TO_SINT_SAT:
2580 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
2581 case ISD::FP_TO_UINT_SAT:
2582 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
2583 case ISD::FP_ROUND:
2584 return WebAssemblyISD::DEMOTE_ZERO;
2585 }
2586 llvm_unreachable("unexpected op");
2587 };
2588
2589 auto IsZeroSplat = [](SDValue SplatVal) {
2590 auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
2591 APInt SplatValue, SplatUndef;
2592 unsigned SplatBitSize;
2593 bool HasAnyUndefs;
2594 // Endianness doesn't matter in this context because we are looking for
2595 // an all-zero value.
2596 return Splat &&
2597 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2598 HasAnyUndefs) &&
2599 SplatValue == 0;
2600 };
2601
2602 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
2603 // Combine this:
2604 //
2605 // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
2606 //
2607 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2608 //
2609 // Or this:
2610 //
2611 // (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
2612 //
2613 // into (f32x4.demote_zero_f64x2 $x).
2614 EVT ResVT;
2615 EVT ExpectedConversionType;
2616 auto Conversion = N->getOperand(0);
2617 auto ConversionOp = Conversion.getOpcode();
2618 switch (ConversionOp) {
2619 case ISD::FP_TO_SINT_SAT:
2620 case ISD::FP_TO_UINT_SAT:
2621 ResVT = MVT::v4i32;
2622 ExpectedConversionType = MVT::v2i32;
2623 break;
2624 case ISD::FP_ROUND:
2625 ResVT = MVT::v4f32;
2626 ExpectedConversionType = MVT::v2f32;
2627 break;
2628 default:
2629 return SDValue();
2630 }
2631
2632 if (N->getValueType(0) != ResVT)
2633 return SDValue();
2634
2635 if (Conversion.getValueType() != ExpectedConversionType)
2636 return SDValue();
2637
2638 auto Source = Conversion.getOperand(0);
2639 if (Source.getValueType() != MVT::v2f64)
2640 return SDValue();
2641
2642 if (!IsZeroSplat(N->getOperand(1)) ||
2643 N->getOperand(1).getValueType() != ExpectedConversionType)
2644 return SDValue();
2645
2646 unsigned Op = GetWasmConversionOp(ConversionOp);
2647 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2648 }
2649
2650 // Combine this:
2651 //
2652 // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
2653 //
2654 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2655 //
2656 // Or this:
2657 //
2658 // (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
2659 //
2660 // into (f32x4.demote_zero_f64x2 $x).
2661 EVT ResVT;
2662 auto ConversionOp = N->getOpcode();
2663 switch (ConversionOp) {
2664 case ISD::FP_TO_SINT_SAT:
2665 case ISD::FP_TO_UINT_SAT:
2666 ResVT = MVT::v4i32;
2667 break;
2668 case ISD::FP_ROUND:
2669 ResVT = MVT::v4f32;
2670 break;
2671 default:
2672 llvm_unreachable("unexpected op");
2673 }
2674
2675 if (N->getValueType(0) != ResVT)
2676 return SDValue();
2677
2678 auto Concat = N->getOperand(0);
2679 if (Concat.getValueType() != MVT::v4f64)
2680 return SDValue();
2681
2682 auto Source = Concat.getOperand(0);
2683 if (Source.getValueType() != MVT::v2f64)
2684 return SDValue();
2685
2686 if (!IsZeroSplat(Concat.getOperand(1)) ||
2687 Concat.getOperand(1).getValueType() != MVT::v2f64)
2688 return SDValue();
2689
2690 unsigned Op = GetWasmConversionOp(ConversionOp);
2691 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2692}
2693
2694// Helper to extract VectorWidth bits from Vec, starting from IdxVal.
2695static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
2696 const SDLoc &DL, unsigned VectorWidth) {
2697 EVT VT = Vec.getValueType();
2698 EVT ElVT = VT.getVectorElementType();
2699 unsigned Factor = VT.getSizeInBits() / VectorWidth;
2700 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
2701 VT.getVectorNumElements() / Factor);
2702
2703 // Extract the relevant VectorWidth bits. Generate an EXTRACT_SUBVECTOR
2704 unsigned ElemsPerChunk = VectorWidth / ElVT.getSizeInBits();
2705 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
2706
2707 // This is the index of the first element of the VectorWidth-bit chunk
2708 // we want. Since ElemsPerChunk is a power of 2, we just clear the low bits.
2709 IdxVal &= ~(ElemsPerChunk - 1);
2710
2711 // If the input is a buildvector just emit a smaller one.
2712 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
2713 return DAG.getBuildVector(ResultVT, DL,
2714 Vec->ops().slice(IdxVal, ElemsPerChunk));
2715
2716 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, DL);
2717 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, VecIdx);
2718}
2719
2720// Helper to recursively truncate vector elements in half with NARROW_U. DstVT
2721// is the expected destination value type after recursion. In is the initial
2722// input. Note that the input should have enough leading zero bits to prevent
2723// NARROW_U from saturating results.
2724 static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL,
2725 SelectionDAG &DAG) {
2726 EVT SrcVT = In.getValueType();
2727
2728 // No truncation required, we might get here due to recursive calls.
2729 if (SrcVT == DstVT)
2730 return In;
2731
2732 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
2733 unsigned NumElems = SrcVT.getVectorNumElements();
2734 if (!isPowerOf2_32(NumElems))
2735 return SDValue();
2736 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
2737 assert(SrcSizeInBits > DstVT.getSizeInBits() && "Illegal truncation");
2738
2739 LLVMContext &Ctx = *DAG.getContext();
2740 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
2741
2742 // Narrow to the largest type possible:
2743 // vXi64/vXi32 -> i16x8.narrow_i32x4_u and vXi16 -> i8x16.narrow_i16x8_u.
2744 EVT InVT = MVT::i16, OutVT = MVT::i8;
2745 if (SrcVT.getScalarSizeInBits() > 16) {
2746 InVT = MVT::i32;
2747 OutVT = MVT::i16;
2748 }
2749 unsigned SubSizeInBits = SrcSizeInBits / 2;
2750 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
2751 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
2752
2753 // Split lower/upper subvectors.
2754 SDValue Lo = extractSubVector(In, 0, DAG, DL, SubSizeInBits);
2755 SDValue Hi = extractSubVector(In, NumElems / 2, DAG, DL, SubSizeInBits);
2756
2757 // 256bit -> 128bit truncate - Narrow lower/upper 128-bit subvectors.
2758 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
2759 Lo = DAG.getBitcast(InVT, Lo);
2760 Hi = DAG.getBitcast(InVT, Hi);
2761 SDValue Res = DAG.getNode(WebAssemblyISD::NARROW_U, DL, OutVT, Lo, Hi);
2762 return DAG.getBitcast(DstVT, Res);
2763 }
2764
2765 // Recursively narrow lower/upper subvectors, concat result and narrow again.
2766 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
2767 Lo = truncateVectorWithNARROW(PackedVT, Lo, DL, DAG);
2768 Hi = truncateVectorWithNARROW(PackedVT, Hi, DL, DAG);
2769
2770 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
2771 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
2772 return truncateVectorWithNARROW(DstVT, Res, DL, DAG);
2773}
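// Example: truncating a (256-bit) v8i32 value to v8i16 splits it into two
// v4i32 halves and emits a single i16x8.narrow_i32x4_u; because callers mask
// off the high bits first, the saturating narrow acts as a plain truncation.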
2774
2775 static SDValue
2776 performTruncateCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2777 auto &DAG = DCI.DAG;
2778
2779 SDValue In = N->getOperand(0);
2780 EVT InVT = In.getValueType();
2781 if (!InVT.isSimple())
2782 return SDValue();
2783
2784 EVT OutVT = N->getValueType(0);
2785 if (!OutVT.isVector())
2786 return SDValue();
2787
2788 EVT OutSVT = OutVT.getVectorElementType();
2789 EVT InSVT = InVT.getVectorElementType();
2790 // Currently only cover truncate to v16i8 or v8i16.
2791 if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
2792 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.is128BitVector()))
2793 return SDValue();
2794
2795 SDLoc DL(N);
2796 APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
2797 OutVT.getScalarSizeInBits());
2798 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
2799 return truncateVectorWithNARROW(OutVT, In, DL, DAG);
2800}
2801
2802 static SDValue
2803 performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2804 auto &DAG = DCI.DAG;
2805 SDLoc DL(N);
2806 SDValue Src = N->getOperand(0);
2807 EVT VT = N->getValueType(0);
2808 EVT SrcVT = Src.getValueType();
2809
2810 // bitcast <N x i1> to iN
2811 // ==> bitmask
2812 if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
2813 SrcVT.isFixedLengthVector() && SrcVT.getScalarType() == MVT::i1) {
2814 unsigned NumElts = SrcVT.getVectorNumElements();
2815 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
2816 return SDValue();
2817 EVT Width = MVT::getIntegerVT(128 / NumElts);
2818 return DAG.getZExtOrTrunc(
2819 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
2820 {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
2821 DAG.getSExtOrTrunc(N->getOperand(0), DL,
2822 SrcVT.changeVectorElementType(Width))}),
2823 DL, VT);
2824 }
2825
2826 return SDValue();
2827}
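// Example: (i16 (bitcast (v16i1 $m))) is rewritten to the wasm_bitmask
// intrinsic: $m is sign-extended to v16i8 and i8x16.bitmask packs the lane
// sign bits into a scalar mask.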
2828
2829 static SDValue
2830 performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2831 auto &DAG = DCI.DAG;
2832
2833 SDValue LHS = N->getOperand(0);
2834 SDValue RHS = N->getOperand(1);
2835 ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
2836 SDLoc DL(N);
2837 EVT VT = N->getValueType(0);
2838
2839 // setcc (iN (bitcast (vNi1 X))), 0, ne
2840 // ==> any_true (vNi1 X)
2841 // setcc (iN (bitcast (vNi1 X))), 0, eq
2842 // ==> xor (any_true (vNi1 X)), -1
2843 // setcc (iN (bitcast (vNi1 X))), -1, eq
2844 // ==> all_true (vNi1 X)
2845 // setcc (iN (bitcast (vNi1 X))), -1, ne
2846 // ==> xor (all_true (vNi1 X)), -1
2847 if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
2848 (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2849 (isNullConstant(RHS) || isAllOnesConstant(RHS)) &&
2850 LHS->getOpcode() == ISD::BITCAST) {
2851 EVT FromVT = LHS->getOperand(0).getValueType();
2852 if (FromVT.isFixedLengthVector() &&
2853 FromVT.getVectorElementType() == MVT::i1) {
2854 int Intrin = isNullConstant(RHS) ? Intrinsic::wasm_anytrue
2855 : Intrinsic::wasm_alltrue;
2856 unsigned NumElts = FromVT.getVectorNumElements();
2857 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
2858 return SDValue();
2859 EVT Width = MVT::getIntegerVT(128 / NumElts);
2860 SDValue Ret = DAG.getZExtOrTrunc(
2861 DAG.getNode(
2862 ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
2863 {DAG.getConstant(Intrin, DL, MVT::i32),
2864 DAG.getSExtOrTrunc(LHS->getOperand(0), DL,
2865 FromVT.changeVectorElementType(Width))}),
2866 DL, MVT::i1);
2867 if ((isNullConstant(RHS) && (Cond == ISD::SETEQ)) ||
2868 (isAllOnesConstant(RHS) && (Cond == ISD::SETNE))) {
2869 Ret = DAG.getNOT(DL, Ret, MVT::i1);
2870 }
2871 return DAG.getZExtOrTrunc(Ret, DL, VT);
2872 }
2873 }
2874
2875 return SDValue();
2876}
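// Example: (setcc (iN (bitcast (v16i1 $m))), 0, ne) becomes v128.any_true of
// the sign-extended $m; the eq and all-ones forms use the negation or
// i8x16.all_true as listed in the comment above.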
2877
2878SDValue
2879WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2880 DAGCombinerInfo &DCI) const {
2881 switch (N->getOpcode()) {
2882 default:
2883 return SDValue();
2884 case ISD::BITCAST:
2885 return performBitcastCombine(N, DCI);
2886 case ISD::SETCC:
2887 return performSETCCCombine(N, DCI);
2888 case ISD::VECTOR_SHUFFLE:
2889 return performVECTOR_SHUFFLECombine(N, DCI);
2890 case ISD::SIGN_EXTEND:
2891 case ISD::ZERO_EXTEND:
2892 return performVectorExtendCombine(N, DCI);
2893 case ISD::UINT_TO_FP:
2894 case ISD::SINT_TO_FP:
2895 return performVectorExtendToFPCombine(N, DCI);
2896 case ISD::FP_TO_SINT_SAT:
2897 case ISD::FP_TO_UINT_SAT:
2898 case ISD::FP_ROUND:
2899 case ISD::CONCAT_VECTORS:
2900 return performVectorTruncZeroCombine(N, DCI);
2901 case ISD::TRUNCATE:
2902 return performTruncateCombine(N, DCI);
2903 }
2904}
unsigned const MachineRegisterInfo * MRI
static SDValue performTruncateCombine(SDNode *N, SelectionDAG &DAG)
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Symbol * Sym
Definition: ELF_riscv.cpp:479
Hexagon Common GEP
const HexagonInstrInfo * TII
#define _
IRTranslator LLVM IR MI
static unsigned NumFixedArgs
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
static bool callingConvSupported(CallingConv::ID CallConv)
static std::optional< unsigned > IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG)
static SDValue performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
static MachineBasicBlock * LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB, const WebAssemblySubtarget *Subtarget, const TargetInstrInfo &TII)
static SDValue performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool IsWebAssemblyGlobal(SDValue Op)
static SDValue performVectorExtendToFPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get split up into scalar instr...
static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG)
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, const SDLoc &DL, unsigned VectorWidth)
static SDValue performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL, SelectionDAG &DAG)
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG.
This file provides WebAssembly-specific target descriptions.
This file declares WebAssembly-specific per-machine-function information.
This file declares the WebAssembly-specific subclass of TargetSubtarget.
This file declares the WebAssembly-specific subclass of TargetMachine.
This file contains the declaration of the WebAssembly-specific type parsing utility functions.
This file contains the declaration of the WebAssembly-specific utility functions.
X86 cmov Conversion
static constexpr int Concat[]
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:77
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:285
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:275
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:695
@ Add
*p = old + v
Definition: Instructions.h:711
@ Or
*p = old | v
Definition: Instructions.h:719
@ Sub
*p = old - v
Definition: Instructions.h:713
@ And
*p = old & v
Definition: Instructions.h:715
@ Xor
*p = old ^ v
Definition: Instructions.h:721
BinOp getOperation() const
Definition: Instructions.h:786
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
CCState - This class holds information needed while lowering arguments and return values.
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
This class represents a function call, abstracting a target machine's calling convention.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
Diagnostic information for unsupported feature in backend.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:207
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:358
unsigned getAddressSpace() const
const GlobalValue * getGlobal() const
ThreadLocalMode getThreadLocalMode() const
Definition: GlobalValue.h:271
Type * getValueType() const
Definition: GlobalValue.h:296
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
void setNoStrip() const
Definition: MCSymbolWasm.h:66
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
@ INVALID_SIMPLE_VALUE_TYPE
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:230
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isFixedLengthVector() const
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, DebugLoc DL, bool NoImplicit=false)
CreateMachineInstr - Allocate a new MachineInstr.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
iterator_range< mop_iterator > uses()
Returns a range that includes all operands that are register uses.
Definition: MachineInstr.h:733
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
iterator_range< mop_iterator > defs()
Returns a range over all explicit operands that are register definitions.
Definition: MachineInstr.h:722
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:579
const std::vector< MachineJumpTableEntry > & getJumpTables() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:227
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:736
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:746
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:842
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:486
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:741
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getBasicBlock(MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:487
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncating it.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:787
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
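getNode is the central factory for DAG construction, and it uniques nodes: requesting an identical node twice yields the same SDValue. A small sketch combining it with getConstant (DAG, DL, and X assumed in scope):
// Build (add (mul X, 3), 1) over i32.
SDValue Three = DAG.getConstant(3, DL, MVT::i32);
SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i32, X, Three);
SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i32, Mul,
                          DAG.getConstant(1, DL, MVT::i32));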
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:690
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:481
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:813
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
Definition: SelectionDAG.h:859
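For fixed-width vectors this is the usual way to materialize a splat. A sketch, assuming DAG and DL in scope:
// Build the v4i32 splat <1, 1, 1, 1> from a single scalar constant.
SDValue One = DAG.getConstant(1, DL, MVT::i32);
SDValue Splat = DAG.getSplatBuildVector(MVT::v4i32, DL, One);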
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
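These helpers avoid emitting a no-op cast when the types already match. A sketch normalizing an index to pointer width inside a TargetLowering hook (Idx, DAG, and DL are assumed in scope):
// Whether Idx is i32 or i64, PtrIdx ends up with the pointer type; if the
// types already match, Idx is returned unchanged.
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue PtrIdx = DAG.getZExtOrTrunc(Idx, DL, PtrVT);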
LLVMContext * getContext() const
Definition: SelectionDAG.h:499
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:568
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:586
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
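A sketch of the idiomatic use of these members when assembling operand lists (Chain and CallArgs are hypothetical names assumed in scope):
#include "llvm/ADT/SmallVector.h"
SmallVector<SDValue, 8> Ops; // Inline storage for 8 elements; heap beyond that.
Ops.push_back(Chain); // The chain conventionally comes first.
Ops.append(CallArgs.begin(), CallArgs.end()); // Bulk-append a whole range.
// Ops.size() and Ops.empty() behave like their std::vector counterparts.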
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to make them valid.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating-point boolean values from i1 to a wider type.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space; defaults to the pointer type from the data layout.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to do about it.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target-independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space; defaults to the pointer type from the data layout.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target, or can be made legal with custom lowering or using promotion.
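Combines typically guard node creation with this query so that post-legalization runs never reintroduce unsupported operations. A hypothetical sketch (TLI, DAG, DL, VT, LHS, and RHS assumed in scope):
// Only form a saturating add if the target can handle it somehow.
if (!TLI.isOperationLegalOrCustomOrPromote(ISD::SADDSAT, VT))
  return SDValue();
return DAG.getNode(ISD::SADDSAT, DL, VT, LHS, RHS);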
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
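A sketch of how a lowering hook might forward an unsupported floating-point operation to a runtime routine; the choice of RTLIB::SIN_F64 is illustrative only, and Op/DAG are assumed in scope inside a TargetLowering member:
TargetLowering::MakeLibCallOptions CallOptions;
SDValue Ops[] = {Op.getOperand(0)};
// Returns {return value, output chain}; calls without chains can drop .second.
std::pair<SDValue, SDValue> Res =
    makeLibCall(DAG, RTLIB::SIN_F64, MVT::f64, Ops, CallOptions, SDLoc(Op));
return Res.first;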
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:698
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getDoubleTy(LLVMContext &C)
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:246
static Type * getFloatTy(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition: Value.cpp:697
static std::optional< unsigned > getLocalForStackObject(MachineFunction &MF, int FrameIndex)
This class is derived from MachineFunctionInfo and contains private WebAssembly-specific information for each MachineFunction.
Register getFrameRegister(const MachineFunction &MF) const override
const Triple & getTargetTriple() const
const WebAssemblyInstrInfo * getInstrInfo() const override
const WebAssemblyRegisterInfo * getRegisterInfo() const override
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space; defaults to the pointer type from the data layout.
MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const override
Return the in-memory pointer type for the given address space; defaults to the pointer type from the data layout.
self_iterator getIterator()
Definition: ilist_node.h:132
#define INT64_MIN
Definition: DataTypes.h:74
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ Swift
Calling convention for Swift.
Definition: CallingConv.h:69
@ PreserveMost
Used for runtime calls that preserves most registers.
Definition: CallingConv.h:63
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
@ WASM_EmscriptenInvoke
For emscripten __invoke_* functions.
Definition: CallingConv.h:229
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is not commonly executed.
Definition: CallingConv.h:47
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition: CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:764
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to. It returns an output chain.
Definition: ISDOpcodes.h:1147
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1143
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
Definition: ISDOpcodes.h:257
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:728
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1176
@ ConstantFP
Definition: ISDOpcodes.h:77
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:276
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
Definition: ISDOpcodes.h:1052
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:491
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
Definition: ISDOpcodes.h:205
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
Definition: ISDOpcodes.h:804
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value, with length equal to the sum of the lengths of the input vectors.
Definition: ISDOpcodes.h:551
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition: ISDOpcodes.h:702
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low lanes of an integer vector.
Definition: ISDOpcodes.h:834
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:262
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision (16-bit) floating-point numbers.
Definition: ISDOpcodes.h:927
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
Definition: ISDOpcodes.h:917
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:236
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1431
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ FrameIndex
Definition: ISDOpcodes.h:80
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:788
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:1009
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1098
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1073
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1077
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:635
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:1172
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high bits of the result.
Definition: ISDOpcodes.h:659
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:719
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:608
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:581
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
Definition: ISDOpcodes.h:543
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.
Definition: ISDOpcodes.h:209
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:794
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
Definition: ISDOpcodes.h:1232
@ FP_TO_UINT_SAT
Definition: ISDOpcodes.h:870
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing the lhs and rhs (ops #0 and #1) of a conditional expression with the condition code in operand 4, a CondCodeSDNode.
Definition: ISDOpcodes.h:756
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1062
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register.
Definition: ISDOpcodes.h:812
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:682
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:902
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:100
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
Definition: ISDOpcodes.h:1005
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:850
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
Definition: ISDOpcodes.h:164
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:694
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1229
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
Definition: ISDOpcodes.h:190
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:286
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:532
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ ExternalSymbol
Definition: ISDOpcodes.h:83
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
Definition: ISDOpcodes.h:883
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low lanes of an integer vector.
Definition: ISDOpcodes.h:845
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer with saturating semantics: out-of-range values clamp to the minimum or maximum representable result, and NaN maps to zero.
Definition: ISDOpcodes.h:869
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:800
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1167
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:777
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:501
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
Definition: ISDOpcodes.h:347
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
Definition: ISDOpcodes.h:198
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified scalar operands.
Definition: ISDOpcodes.h:523
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when and/or'd with each other.
Definition: ISDOpcodes.h:1554
OperandFlags
These are flags set on operands, but should be considered private; all access should go through the MCOperandInfo accessors.
Definition: MCInstrDesc.h:50
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
Definition: PatternMatch.h:599
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
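Together these matchers recognize the canonical IR-level splat idiom, an insertelement into lane 0 followed by a shufflevector. A sketch (V is any Value*; the shuffle mask itself is not inspected here):
#include "llvm/IR/PatternMatch.h"
using namespace llvm::PatternMatch;
Value *SplatSrc = nullptr;
bool IsSplatLike =
    match(V, m_Shuffle(m_InsertElt(m_Value(), m_Value(SplatSrc), m_ZeroInt()),
                       m_Value()));
// On success, SplatSrc names the scalar that was inserted into lane 0.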
MCSymbolWasm * getOrCreateFunctionTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __indirect_function_table, for use in call_indirect and in function bitcasts.
bool isWebAssemblyFuncrefType(const Type *Ty)
Return true if this is a WebAssembly Funcref Type.
bool isWebAssemblyTableType(const Type *Ty)
Return true if the type represents a WebAssembly table type.
MCSymbolWasm * getOrCreateFuncrefCallTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __funcref_call_table, for use in funcref calls when lowered to table.set + call_indirect.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
bool isValidAddressSpace(unsigned AS)
bool canLowerReturn(size_t ResultSize, const WebAssemblySubtarget *Subtarget)
Returns true if the function's return value(s) can be lowered directly, i.e., not indirectly via a pointer parameter that points to the value in memory.
bool isWasmVarAddressSpace(unsigned AS)
Reg
All possible values of the reg field in the ModR/M byte.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:337
@ Offset
Definition: DWP.cpp:480
void computeSignatureVTs(const FunctionType *Ty, const Function *TargetFunc, const Function &ContextFunc, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
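Helpers like this keep folds readable by hiding the ConstantSDNode cast. A hypothetical sketch (N is an SDNode* in a combine):
// Fold (sub X, 0) -> X.
if (N->getOpcode() == ISD::SUB && isNullConstant(N->getOperand(1)))
  return N->getOperand(0);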
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:275
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to: C.erase(remove_if(C, pred), C.end());
Definition: STLExtras.h:2051
void computeLegalValueVTs(const WebAssemblyTargetLowering &TLI, LLVMContext &Ctx, const DataLayout &DL, Type *Ty, SmallVectorImpl< MVT > &ValueVTs)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition: MathExtras.h:360
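A sketch relating these bit-math helpers for a sample value (pure arithmetic, no LLVM state required):
#include "llvm/Support/MathExtras.h"
static_assert(llvm::isPowerOf2_32(16), "16 == 2^4");
unsigned CeilLog = llvm::Log2_32_Ceil(20); // 5, since 2^5 = 32 >= 20
uint64_t Next = llvm::NextPowerOf2(20);    // 32, strictly greater than 20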
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:34
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
Definition: ValueTypes.h:93
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:136
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition: ValueTypes.h:73
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:146
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:358
uint64_t getScalarSizeInBits() const
Definition: ValueTypes.h:370
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:306
bool is128BitVector() const
Return true if this is a 128-bit vector type.
Definition: ValueTypes.h:203
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition: ValueTypes.h:366
bool isFixedLengthVector() const
Definition: ValueTypes.h:177
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:167
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition: ValueTypes.h:313
bool bitsGE(EVT VT) const
Return true if this has at least as many bits as VT.
Definition: ValueTypes.h:282
bool is256BitVector() const
Return true if this is a 256-bit vector type.
Definition: ValueTypes.h:208
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:203
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:318
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:156
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:326
bool isInConsecutiveRegs() const
Align getNonZeroOrigAlign() const
unsigned getByValSize() const
bool isInConsecutiveRegsLast() const
Align getNonZeroByValAlign() const
InputArg - This struct carries flags and type information about a single incoming (formal) argument or incoming (from the perspective of the caller) return value virtual register.
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing return value virtual register.
bool IsFixed
IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:40
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR-level pointer.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
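A sketch tying this to SelectionDAG::getStore from the list above: spilling a value to an existing frame slot. FI, Chain, Val, PtrVT, DAG, and DL are assumed in scope, and Align(4) is an assumed alignment for illustration:
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Store = DAG.getStore(
    Chain, DL, Val, FIN,
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align(4));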
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Function object to check whether the second component of a container supported by std::get (like std::pair or std::tuple) compares less than the second component of another container.
Definition: STLExtras.h:1459