WebAssemblyISelLowering.cpp (LLVM 9.0.0svn)
1 //=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the WebAssemblyTargetLowering class.
11 ///
12 //===----------------------------------------------------------------------===//
13 
17 #include "WebAssemblySubtarget.h"
19 #include "llvm/CodeGen/Analysis.h"
27 #include "llvm/IR/DiagnosticInfo.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/Intrinsics.h"
31 #include "llvm/Support/Debug.h"
35 using namespace llvm;
36 
37 #define DEBUG_TYPE "wasm-lower"
38 
39 WebAssemblyTargetLowering::WebAssemblyTargetLowering(
40  const TargetMachine &TM, const WebAssemblySubtarget &STI)
41  : TargetLowering(TM), Subtarget(&STI) {
42  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
43 
44  // Booleans always contain 0 or 1.
46  // Except in SIMD vectors
48  // We don't know the microarchitecture here, so just reduce register pressure.
50  // Tell ISel that we have a stack pointer.
51  setStackPointerRegisterToSaveRestore(
52  Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
53  // Set up the register classes.
54  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
55  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
56  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
57  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
58  if (Subtarget->hasSIMD128()) {
59  addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
60  addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
61  addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
62  addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
63  }
64  if (Subtarget->hasUnimplementedSIMD128()) {
65  addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
66  addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
67  }
68  // Compute derived properties from the register classes.
70 
76 
77  // Take the default expansion for va_arg, va_copy, and va_end. There is no
78  // default action for va_start, so we handle it with custom lowering.
83 
84  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
85  // Don't expand the floating-point types to constant pools.
87  // Expand floating-point comparisons.
88  for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
91  // Expand floating-point library function operators.
92  for (auto Op :
95  // Note supported floating-point library function operators that otherwise
96  // default to expand.
97  for (auto Op :
100  // Support minimum and maximum, which otherwise default to expand.
103  // WebAssembly currently has no builtin f16 support.
108  }
109 
110  // Expand unavailable integer operations.
111  for (auto Op :
115  for (auto T : {MVT::i32, MVT::i64})
117  if (Subtarget->hasSIMD128())
118  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
120  if (Subtarget->hasUnimplementedSIMD128())
122  }
123 
124  // SIMD-specific configuration
125  if (Subtarget->hasSIMD128()) {
126  // Support saturating add for i8x16 and i16x8
127  for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
128  for (auto T : {MVT::v16i8, MVT::v8i16})
130 
131  // Custom lower BUILD_VECTORs to minimize number of replace_lanes
132  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
134  if (Subtarget->hasUnimplementedSIMD128())
135  for (auto T : {MVT::v2i64, MVT::v2f64})
137 
138  // We have custom shuffle lowering to expose the shuffle mask
139  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
141  if (Subtarget->hasUnimplementedSIMD128())
142  for (auto T: {MVT::v2i64, MVT::v2f64})
144 
145  // Custom lowering since wasm shifts must have a scalar shift amount
146  for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) {
147  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
149  if (Subtarget->hasUnimplementedSIMD128())
151  }
152 
153  // Custom lower lane accesses to expand out variable indices
155  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
157  if (Subtarget->hasUnimplementedSIMD128())
158  for (auto T : {MVT::v2i64, MVT::v2f64})
160  }
161 
162  // There is no i64x2.mul instruction
164 
165  // There are no vector select instructions
166  for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) {
167  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
169  if (Subtarget->hasUnimplementedSIMD128())
170  for (auto T : {MVT::v2i64, MVT::v2f64})
172  }
173 
174  // Expand integer operations supported for scalars but not SIMD
177  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
179  if (Subtarget->hasUnimplementedSIMD128())
181  }
182 
183  // Expand float operations supported for scalars but not SIMD
188  if (Subtarget->hasUnimplementedSIMD128())
190  }
191 
192  // Expand additional SIMD ops that V8 hasn't implemented yet
193  if (!Subtarget->hasUnimplementedSIMD128()) {
196  }
197  }
198 
199  // As a special case, these operators use the type to mean the type to
200  // sign-extend from.
202  if (!Subtarget->hasSignExt()) {
203  // Sign extends are legal only when extending a vector extract
204  auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
205  for (auto T : {MVT::i8, MVT::i16, MVT::i32})
207  }
208  for (auto T : MVT::integer_vector_valuetypes())
210 
211  // Dynamic stack allocation: use the default expansion.
215 
218 
219  // Expand these forms; we pattern-match the forms that we can handle in isel.
220  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
221  for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
223 
224  // We have custom switch handling.
226 
227  // WebAssembly doesn't have:
228  //  - Floating-point extending loads.
229  //  - Floating-point truncating stores.
230  //  - i1 extending loads.
231  //  - Extending/truncating SIMD loads/stores.
234  for (auto T : MVT::integer_valuetypes())
237  if (Subtarget->hasSIMD128()) {
239  MVT::v2f64}) {
240  for (auto MemT : MVT::vector_valuetypes()) {
241  if (MVT(T) != MemT) {
242  setTruncStoreAction(T, MemT, Expand);
244  setLoadExtAction(Ext, T, MemT, Expand);
245  }
246  }
247  }
248  }
249 
250  // Don't do anything clever with build_pairs
252 
253  // Trap lowers to wasm unreachable
255 
256  // Exception handling intrinsics
259 
261 
262  if (Subtarget->hasBulkMemory()) {
263  // Use memory.copy and friends over multiple loads and stores
264  MaxStoresPerMemcpy = 1;
268  MaxStoresPerMemset = 1;
270  }
271 
272  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
273  // consistent with the f64 and f128 names.
274  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
275  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
276 
277  // Define the emscripten name for return address helper.
278  // TODO: when implementing other WASM backends, make this generic or only do
279  // this on emscripten depending on what they end up doing.
280  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
281 
282  // Always convert switches to br_tables unless there is only one case, which
283  // is equivalent to a simple branch. This reduces code size for wasm, and we
284  // defer possible jump table optimizations to the VM.
286 }
287 
288 TargetLowering::AtomicExpansionKind
289 WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
290  // We have wasm instructions for these
291  switch (AI->getOperation()) {
292  case AtomicRMWInst::Add:
293  case AtomicRMWInst::Sub:
294  case AtomicRMWInst::And:
295  case AtomicRMWInst::Or:
296  case AtomicRMWInst::Xor:
297  case AtomicRMWInst::Xchg:
298  return AtomicExpansionKind::None;
299  default:
300  break;
301  }
302  return AtomicExpansionKind::CmpXChg;
303 }
304 
305 FastISel *WebAssemblyTargetLowering::createFastISel(
306  FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
307  return WebAssembly::createFastISel(FuncInfo, LibInfo);
308 }
309 
310 MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
311  EVT VT) const {
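  // Use a shift-amount type whose width is the shifted value's width rounded
  // up to a power of two (and to at least 8 bits for multi-bit types), so the
  // shift amount is always representable as a simple integer type.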
312  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
313  if (BitWidth > 1 && BitWidth < 8)
314  BitWidth = 8;
315 
316  if (BitWidth > 64) {
317  // The shift will be lowered to a libcall, and compiler-rt libcalls expect
318  // the count to be an i32.
319  BitWidth = 32;
320  assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
321  "32-bit shift counts ought to be enough for anyone");
322  }
323 
324  MVT Result = MVT::getIntegerVT(BitWidth);
325  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
326  "Unable to represent scalar shift amount type");
327  return Result;
328 }
329 
330 // Lower an fp-to-int conversion operator from the LLVM opcode, which has an
331 // undefined result on invalid/overflow, to the WebAssembly opcode, which
332 // traps on invalid/overflow.
333 static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
334  MachineBasicBlock *BB,
335  const TargetInstrInfo &TII,
336  bool IsUnsigned, bool Int64,
337  bool Float64, unsigned LoweredOpcode) {
339 
340  unsigned OutReg = MI.getOperand(0).getReg();
341  unsigned InReg = MI.getOperand(1).getReg();
342 
343  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
344  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
345  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
346  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
347  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
348  unsigned Eqz = WebAssembly::EQZ_I32;
349  unsigned And = WebAssembly::AND_I32;
350  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
351  int64_t Substitute = IsUnsigned ? 0 : Limit;
352  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
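  // This lowering only performs the trapping wasm conversion when
  // fabs(input) < CmpVal (2^31 or 2^63 for signed, 2^32 or 2^64 for unsigned,
  // plus an input >= 0 check for unsigned); otherwise it produces Substitute
  // (0 for unsigned, INT_MIN for signed), which is acceptable because the LLVM
  // operation's result is undefined on overflow.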
353  auto &Context = BB->getParent()->getFunction().getContext();
355 
356  const BasicBlock *LLVMBB = BB->getBasicBlock();
357  MachineFunction *F = BB->getParent();
358  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
359  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
360  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
361 
363  F->insert(It, FalseMBB);
364  F->insert(It, TrueMBB);
365  F->insert(It, DoneMBB);
366 
367  // Transfer the remainder of BB and its successor edges to DoneMBB.
368  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
369  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
370 
371  BB->addSuccessor(TrueMBB);
372  BB->addSuccessor(FalseMBB);
373  TrueMBB->addSuccessor(DoneMBB);
374  FalseMBB->addSuccessor(DoneMBB);
375 
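  // FalseMBB will perform the real (trapping) conversion, TrueMBB will produce
  // the substitute value, and DoneMBB joins the two results with a PHI.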
376  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
377  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
378  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
379  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
380  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
381  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
382  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
383 
384  MI.eraseFromParent();
385  // For signed numbers, we can do a single comparison to determine whether
386  // fabs(x) is within range.
387  if (IsUnsigned) {
388  Tmp0 = InReg;
389  } else {
390  BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
391  }
392  BuildMI(BB, DL, TII.get(FConst), Tmp1)
393  .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
394  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
395 
396  // For unsigned numbers, we have to do a separate comparison with zero.
397  if (IsUnsigned) {
398  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
399  unsigned SecondCmpReg =
400  MRI.createVirtualRegister(&WebAssembly::I32RegClass);
401  unsigned AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
402  BuildMI(BB, DL, TII.get(FConst), Tmp1)
403  .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
404  BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
405  BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
406  CmpReg = AndReg;
407  }
408 
409  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
410 
411  // Create the CFG diamond to select between doing the conversion or using
412  // the substitute value.
413  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
414  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
415  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
416  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
417  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
418  .addReg(FalseReg)
419  .addMBB(FalseMBB)
420  .addReg(TrueReg)
421  .addMBB(TrueMBB);
422 
423  return DoneMBB;
424 }
425 
426 MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
427  MachineInstr &MI, MachineBasicBlock *BB) const {
428  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
429  DebugLoc DL = MI.getDebugLoc();
430 
431  switch (MI.getOpcode()) {
432  default:
433  llvm_unreachable("Unexpected instr type to insert");
434  case WebAssembly::FP_TO_SINT_I32_F32:
435  return LowerFPToInt(MI, DL, BB, TII, false, false, false,
436  WebAssembly::I32_TRUNC_S_F32);
437  case WebAssembly::FP_TO_UINT_I32_F32:
438  return LowerFPToInt(MI, DL, BB, TII, true, false, false,
439  WebAssembly::I32_TRUNC_U_F32);
440  case WebAssembly::FP_TO_SINT_I64_F32:
441  return LowerFPToInt(MI, DL, BB, TII, false, true, false,
442  WebAssembly::I64_TRUNC_S_F32);
443  case WebAssembly::FP_TO_UINT_I64_F32:
444  return LowerFPToInt(MI, DL, BB, TII, true, true, false,
445  WebAssembly::I64_TRUNC_U_F32);
446  case WebAssembly::FP_TO_SINT_I32_F64:
447  return LowerFPToInt(MI, DL, BB, TII, false, false, true,
448  WebAssembly::I32_TRUNC_S_F64);
449  case WebAssembly::FP_TO_UINT_I32_F64:
450  return LowerFPToInt(MI, DL, BB, TII, true, false, true,
451  WebAssembly::I32_TRUNC_U_F64);
452  case WebAssembly::FP_TO_SINT_I64_F64:
453  return LowerFPToInt(MI, DL, BB, TII, false, true, true,
454  WebAssembly::I64_TRUNC_S_F64);
455  case WebAssembly::FP_TO_UINT_I64_F64:
456  return LowerFPToInt(MI, DL, BB, TII, true, true, true,
457  WebAssembly::I64_TRUNC_U_F64);
458  llvm_unreachable("Unexpected instruction to emit with custom inserter");
459  }
460 }
461 
462 const char *
463 WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
464  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
466  break;
467 #define HANDLE_NODETYPE(NODE) \
468  case WebAssemblyISD::NODE: \
469  return "WebAssemblyISD::" #NODE;
470 #include "WebAssemblyISD.def"
471 #undef HANDLE_NODETYPE
472  }
473  return nullptr;
474 }
475 
476 std::pair<unsigned, const TargetRegisterClass *>
477 WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
478  const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
479  // First, see if this is a constraint that directly corresponds to a
480  // WebAssembly register class.
481  if (Constraint.size() == 1) {
482  switch (Constraint[0]) {
483  case 'r':
484  assert(VT != MVT::iPTR && "Pointer MVT not expected here");
485  if (Subtarget->hasSIMD128() && VT.isVector()) {
486  if (VT.getSizeInBits() == 128)
487  return std::make_pair(0U, &WebAssembly::V128RegClass);
488  }
489  if (VT.isInteger() && !VT.isVector()) {
490  if (VT.getSizeInBits() <= 32)
491  return std::make_pair(0U, &WebAssembly::I32RegClass);
492  if (VT.getSizeInBits() <= 64)
493  return std::make_pair(0U, &WebAssembly::I64RegClass);
494  }
495  break;
496  default:
497  break;
498  }
499  }
500 
501  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
502 }
503 
504 bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
505  // Assume ctz is a relatively cheap operation.
506  return true;
507 }
508 
509 bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
510  // Assume clz is a relatively cheap operation.
511  return true;
512 }
513 
514 bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
515  const AddrMode &AM,
516  Type *Ty, unsigned AS,
517  Instruction *I) const {
518  // WebAssembly offsets are added as unsigned without wrapping. The
519  // isLegalAddressingMode gives us no way to determine if wrapping could be
520  // happening, so we approximate this by accepting only non-negative offsets.
521  if (AM.BaseOffs < 0)
522  return false;
523 
524  // WebAssembly has no scale register operands.
525  if (AM.Scale != 0)
526  return false;
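  // So only [reg + non-negative immediate] forms are folded into wasm
  // load/store offsets; scaled indices and negative offsets are computed with
  // explicit adds instead.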
527 
528  // Everything else is legal.
529  return true;
530 }
531 
532 bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
533  EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
534  MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
535  // WebAssembly supports unaligned accesses, though loads and stores that do
536  // so should be marked with the p2align attribute, and there may be a
537  // performance impact. We tell LLVM they're "fast" because
538  // for the kinds of things that LLVM uses this for (merging adjacent stores
539  // of constants, etc.), WebAssembly implementations will either want the
540  // unaligned access or they'll split anyway.
541  if (Fast)
542  *Fast = true;
543  return true;
544 }
545 
546 bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
547  AttributeList Attr) const {
548  // The current thinking is that wasm engines will perform this optimization,
549  // so we can save on code size.
550  return true;
551 }
552 
553 EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
554  LLVMContext &C,
555  EVT VT) const {
556  if (VT.isVector())
558 
559  return TargetLowering::getSetCCResultType(DL, C, VT);
560 }
561 
562 bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
563  const CallInst &I,
564  MachineFunction &MF,
565  unsigned Intrinsic) const {
566  switch (Intrinsic) {
567  case Intrinsic::wasm_atomic_notify:
569  Info.memVT = MVT::i32;
570  Info.ptrVal = I.getArgOperand(0);
571  Info.offset = 0;
572  Info.align = 4;
573  // The atomic.notify instruction does not really load from the memory
574  // specified by this argument, but a MachineMemOperand must be either a
575  // load or a store, so we mark this as a load.
576  // FIXME: Volatile isn't really correct, but currently all LLVM atomic
577  // instructions are treated as volatile in the backend, so we should be
578  // consistent. The same applies to the wasm_atomic_wait intrinsics too.
580  return true;
581  case Intrinsic::wasm_atomic_wait_i32:
583  Info.memVT = MVT::i32;
584  Info.ptrVal = I.getArgOperand(0);
585  Info.offset = 0;
586  Info.align = 4;
588  return true;
589  case Intrinsic::wasm_atomic_wait_i64:
591  Info.memVT = MVT::i64;
592  Info.ptrVal = I.getArgOperand(0);
593  Info.offset = 0;
594  Info.align = 8;
596  return true;
597  default:
598  return false;
599  }
600 }
601 
602 //===----------------------------------------------------------------------===//
603 // WebAssembly Lowering private implementation.
604 //===----------------------------------------------------------------------===//
605 
606 //===----------------------------------------------------------------------===//
607 // Lowering Code
608 //===----------------------------------------------------------------------===//
609 
610 static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
612  DAG.getContext()->diagnose(
614 }
615 
616 // Test whether the given calling convention is supported.
617 static bool callingConvSupported(CallingConv::ID CallConv) {
618  // We currently support the language-independent target-independent
619  // conventions. We don't yet have a way to annotate calls with properties like
620  // "cold", and we don't have any call-clobbered registers, so these are mostly
621  // all handled the same.
622  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
623  CallConv == CallingConv::Cold ||
624  CallConv == CallingConv::PreserveMost ||
625  CallConv == CallingConv::PreserveAll ||
626  CallConv == CallingConv::CXX_FAST_TLS;
627 }
628 
629 SDValue
630 WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
631  SmallVectorImpl<SDValue> &InVals) const {
632  SelectionDAG &DAG = CLI.DAG;
633  SDLoc DL = CLI.DL;
634  SDValue Chain = CLI.Chain;
635  SDValue Callee = CLI.Callee;
637  auto Layout = MF.getDataLayout();
638 
639  CallingConv::ID CallConv = CLI.CallConv;
640  if (!callingConvSupported(CallConv))
641  fail(DL, DAG,
642  "WebAssembly doesn't support language-specific or target-specific "
643  "calling conventions yet");
644  if (CLI.IsPatchPoint)
645  fail(DL, DAG, "WebAssembly doesn't support patch point yet");
646 
647  // WebAssembly doesn't currently support explicit tail calls. If they are
648  // required, fail. Otherwise, just disable them.
649  if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
651  (CLI.CS && CLI.CS.isMustTailCall()))
652  fail(DL, DAG, "WebAssembly doesn't support tail call yet");
653  CLI.IsTailCall = false;
654 
656  if (Ins.size() > 1)
657  fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");
658 
659  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
660  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
661  unsigned NumFixedArgs = 0;
662  for (unsigned I = 0; I < Outs.size(); ++I) {
663  const ISD::OutputArg &Out = Outs[I];
664  SDValue &OutVal = OutVals[I];
665  if (Out.Flags.isNest())
666  fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
667  if (Out.Flags.isInAlloca())
668  fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
669  if (Out.Flags.isInConsecutiveRegs())
670  fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
671  if (Out.Flags.isInConsecutiveRegsLast())
672  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
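  // Lower byval arguments by making a copy into a fresh stack object and
  // passing a pointer to that copy in place of the original argument.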
673  if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
674  auto &MFI = MF.getFrameInfo();
675  int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
676  Out.Flags.getByValAlign(),
677  /*isSS=*/false);
678  SDValue SizeNode =
679  DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
680  SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
681  Chain = DAG.getMemcpy(
682  Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
683  /*isVolatile*/ false, /*AlwaysInline=*/false,
684  /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
685  OutVal = FINode;
686  }
687  // Count the number of fixed args *after* legalization.
688  NumFixedArgs += Out.IsFixed;
689  }
690 
691  bool IsVarArg = CLI.IsVarArg;
692  auto PtrVT = getPointerTy(Layout);
693 
694  // Analyze operands of the call, assigning locations to each operand.
696  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
697 
698  if (IsVarArg) {
699  // Outgoing non-fixed arguments are placed in a buffer. First
700  // compute their offsets and the total amount of buffer space needed.
701  for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
702  const ISD::OutputArg &Out = Outs[I];
703  SDValue &Arg = OutVals[I];
704  EVT VT = Arg.getValueType();
705  assert(VT != MVT::iPTR && "Legalized args should be concrete");
706  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
707  unsigned Align = std::max(Out.Flags.getOrigAlign(),
708  Layout.getABITypeAlignment(Ty));
709  unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
710  Align);
711  CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
712  Offset, VT.getSimpleVT(),
714  }
715  }
716 
717  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
718 
719  SDValue FINode;
720  if (IsVarArg && NumBytes) {
721  // For the non-fixed arguments, emit stores of the argument values into
722  // the stack buffer at the offsets computed above.
723  int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
724  Layout.getStackAlignment(),
725  /*isSS=*/false);
726  unsigned ValNo = 0;
728  for (SDValue Arg :
729  make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
730  assert(ArgLocs[ValNo].getValNo() == ValNo &&
731  "ArgLocs should remain in order and only hold varargs args");
732  unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
733  FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
734  SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
735  DAG.getConstant(Offset, DL, PtrVT));
736  Chains.push_back(
737  DAG.getStore(Chain, DL, Arg, Add,
738  MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
739  }
740  if (!Chains.empty())
741  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
742  } else if (IsVarArg) {
743  FINode = DAG.getIntPtrConstant(0, DL);
744  }
745 
746  if (Callee->getOpcode() == ISD::GlobalAddress) {
747  // If the callee is a GlobalAddress node (quite common, every direct call
748  // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
749  // doesn't add the MO_GOT flag, which is not needed for direct calls.
750  GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
751  Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
753  GA->getOffset());
754  Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
755  getPointerTy(DAG.getDataLayout()), Callee);
756  }
757 
758  // Compute the operands for the CALLn node.
760  Ops.push_back(Chain);
761  Ops.push_back(Callee);
762 
763  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
764  // isn't reliable.
765  Ops.append(OutVals.begin(),
766  IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
767  // Add a pointer to the vararg buffer.
768  if (IsVarArg)
769  Ops.push_back(FINode);
770 
771  SmallVector<EVT, 8> InTys;
772  for (const auto &In : Ins) {
773  assert(!In.Flags.isByVal() && "byval is not valid for return values");
774  assert(!In.Flags.isNest() && "nest is not valid for return values");
775  if (In.Flags.isInAlloca())
776  fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
777  if (In.Flags.isInConsecutiveRegs())
778  fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
779  if (In.Flags.isInConsecutiveRegsLast())
780  fail(DL, DAG,
781  "WebAssembly hasn't implemented cons regs last return values");
782  // Ignore In.getOrigAlign() because all our arguments are passed in
783  // registers.
784  InTys.push_back(In.VT);
785  }
786  InTys.push_back(MVT::Other);
787  SDVTList InTyList = DAG.getVTList(InTys);
788  SDValue Res =
789  DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
790  DL, InTyList, Ops);
791  if (Ins.empty()) {
792  Chain = Res;
793  } else {
794  InVals.push_back(Res);
795  Chain = Res.getValue(1);
796  }
797 
798  return Chain;
799 }
800 
801 bool WebAssemblyTargetLowering::CanLowerReturn(
802  CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
804  LLVMContext & /*Context*/) const {
805  // WebAssembly can't currently handle returning tuples.
806  return Outs.size() <= 1;
807 }
808 
809 SDValue WebAssemblyTargetLowering::LowerReturn(
810  SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
812  const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
813  SelectionDAG &DAG) const {
814  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
815  if (!callingConvSupported(CallConv))
816  fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
817 
818  SmallVector<SDValue, 4> RetOps(1, Chain);
819  RetOps.append(OutVals.begin(), OutVals.end());
820  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
821 
822  // Record the number and types of the return values.
823  for (const ISD::OutputArg &Out : Outs) {
824  assert(!Out.Flags.isByVal() && "byval is not valid for return values");
825  assert(!Out.Flags.isNest() && "nest is not valid for return values");
826  assert(Out.IsFixed && "non-fixed return value is not valid");
827  if (Out.Flags.isInAlloca())
828  fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
829  if (Out.Flags.isInConsecutiveRegs())
830  fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
831  if (Out.Flags.isInConsecutiveRegsLast())
832  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
833  }
834 
835  return Chain;
836 }
837 
838 SDValue WebAssemblyTargetLowering::LowerFormalArguments(
839  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
840  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
841  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
842  if (!callingConvSupported(CallConv))
843  fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
844 
846  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
847 
848  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
849  // of the incoming values before they're represented by virtual registers.
850  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
851 
852  for (const ISD::InputArg &In : Ins) {
853  if (In.Flags.isInAlloca())
854  fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
855  if (In.Flags.isNest())
856  fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
857  if (In.Flags.isInConsecutiveRegs())
858  fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
859  if (In.Flags.isInConsecutiveRegsLast())
860  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
861  // Ignore In.getOrigAlign() because all our arguments are passed in
862  // registers.
863  InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
864  DAG.getTargetConstant(InVals.size(),
865  DL, MVT::i32))
866  : DAG.getUNDEF(In.VT));
867 
868  // Record the number and types of arguments.
869  MFI->addParam(In.VT);
870  }
871 
872  // Varargs are copied into a buffer allocated by the caller, and a pointer to
873  // the buffer is passed as an argument.
874  if (IsVarArg) {
875  MVT PtrVT = getPointerTy(MF.getDataLayout());
876  unsigned VarargVreg =
878  MFI->setVarargBufferVreg(VarargVreg);
879  Chain = DAG.getCopyToReg(
880  Chain, DL, VarargVreg,
881  DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
882  DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
883  MFI->addParam(PtrVT);
884  }
885 
886  // Record the number and types of arguments and results.
887  SmallVector<MVT, 4> Params;
890  DAG.getTarget(), Params, Results);
891  for (MVT VT : Results)
892  MFI->addResult(VT);
893  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
894  // the param logic here with ComputeSignatureVTs
895  assert(MFI->getParams().size() == Params.size() &&
896  std::equal(MFI->getParams().begin(), MFI->getParams().end(),
897  Params.begin()));
898 
899  return Chain;
900 }
901 
902 void WebAssemblyTargetLowering::ReplaceNodeResults(
904  switch (N->getOpcode()) {
906  // Do not add any results, signifying that N should not be custom lowered
907  // after all. This happens because simd128 turns on custom lowering for
908  // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
909  // illegal type.
910  break;
911  default:
912  llvm_unreachable(
913  "ReplaceNodeResults not implemented for this op for WebAssembly!");
914  }
915 }
916 
917 //===----------------------------------------------------------------------===//
918 // Custom lowering hooks.
919 //===----------------------------------------------------------------------===//
920 
921 SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
922  SelectionDAG &DAG) const {
923  SDLoc DL(Op);
924  switch (Op.getOpcode()) {
925  default:
926  llvm_unreachable("unimplemented operation lowering");
927  return SDValue();
928  case ISD::FrameIndex:
929  return LowerFrameIndex(Op, DAG);
930  case ISD::GlobalAddress:
931  return LowerGlobalAddress(Op, DAG);
932  case ISD::ExternalSymbol:
933  return LowerExternalSymbol(Op, DAG);
934  case ISD::JumpTable:
935  return LowerJumpTable(Op, DAG);
936  case ISD::BR_JT:
937  return LowerBR_JT(Op, DAG);
938  case ISD::VASTART:
939  return LowerVASTART(Op, DAG);
940  case ISD::BlockAddress:
941  case ISD::BRIND:
942  fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
943  return SDValue();
944  case ISD::RETURNADDR:
945  return LowerRETURNADDR(Op, DAG);
946  case ISD::FRAMEADDR:
947  return LowerFRAMEADDR(Op, DAG);
948  case ISD::CopyToReg:
949  return LowerCopyToReg(Op, DAG);
952  return LowerAccessVectorElement(Op, DAG);
953  case ISD::INTRINSIC_VOID:
956  return LowerIntrinsic(Op, DAG);
958  return LowerSIGN_EXTEND_INREG(Op, DAG);
959  case ISD::BUILD_VECTOR:
960  return LowerBUILD_VECTOR(Op, DAG);
961  case ISD::VECTOR_SHUFFLE:
962  return LowerVECTOR_SHUFFLE(Op, DAG);
963  case ISD::SHL:
964  case ISD::SRA:
965  case ISD::SRL:
966  return LowerShift(Op, DAG);
967  }
968 }
969 
970 SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
971  SelectionDAG &DAG) const {
972  SDValue Src = Op.getOperand(2);
973  if (isa<FrameIndexSDNode>(Src.getNode())) {
974  // CopyToReg nodes don't support FrameIndex operands. Other targets select
975  // the FI to some LEA-like instruction, but since we don't have that, we
976  // need to insert some kind of instruction that takes an FI operand and
977  // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
978  // local.copy between Op and its FI operand.
979  SDValue Chain = Op.getOperand(0);
980  SDLoc DL(Op);
981  unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
982  EVT VT = Src.getValueType();
983  SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
984  : WebAssembly::COPY_I64,
985  DL, VT, Src),
986  0);
987  return Op.getNode()->getNumValues() == 1
988  ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
989  : DAG.getCopyToReg(Chain, DL, Reg, Copy,
990  Op.getNumOperands() == 4 ? Op.getOperand(3)
991  : SDValue());
992  }
993  return SDValue();
994 }
995 
996 SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
997  SelectionDAG &DAG) const {
998  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
999  return DAG.getTargetFrameIndex(FI, Op.getValueType());
1000 }
1001 
1002 SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1003  SelectionDAG &DAG) const {
1004  SDLoc DL(Op);
1005 
1006  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1007  fail(DL, DAG,
1008  "Non-Emscripten WebAssembly hasn't implemented "
1009  "__builtin_return_address");
1010  return SDValue();
1011  }
1012 
1014  return SDValue();
1015 
1016  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1017  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1018  {DAG.getConstant(Depth, DL, MVT::i32)}, false, DL)
1019  .first;
1020 }
1021 
1022 SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1023  SelectionDAG &DAG) const {
1024  // Non-zero depths are not supported by WebAssembly currently. Use the
1025  // legalizer's default expansion, which is to return 0 (what this function is
1026  // documented to do).
1027  if (Op.getConstantOperandVal(0) > 0)
1028  return SDValue();
1029 
1031  EVT VT = Op.getValueType();
1032  unsigned FP =
1034  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1035 }
1036 
1037 SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1038  SelectionDAG &DAG) const {
1039  SDLoc DL(Op);
1040  const auto *GA = cast<GlobalAddressSDNode>(Op);
1041  EVT VT = Op.getValueType();
1042  assert(GA->getTargetFlags() == 0 &&
1043  "Unexpected target flags on generic GlobalAddressSDNode");
1044  if (GA->getAddressSpace() != 0)
1045  fail(DL, DAG, "WebAssembly only expects the 0 address space");
1046 
1047  unsigned OperandFlags = 0;
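  // In PIC code, DSO-local symbols are addressed relative to __memory_base
  // (data) or __table_base (functions); other symbols are loaded from the GOT
  // via the MO_GOT flag.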
1048  if (isPositionIndependent()) {
1049  const GlobalValue *GV = GA->getGlobal();
1050  if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1051  MachineFunction &MF = DAG.getMachineFunction();
1052  MVT PtrVT = getPointerTy(MF.getDataLayout());
1053  const char *BaseName;
1054  if (GV->getValueType()->isFunctionTy()) {
1055  BaseName = MF.createExternalSymbolName("__table_base");
1056  OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1057  }
1058  else {
1059  BaseName = MF.createExternalSymbolName("__memory_base");
1060  OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1061  }
1062  SDValue BaseAddr =
1063  DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1064  DAG.getTargetExternalSymbol(BaseName, PtrVT));
1065 
1066  SDValue SymAddr = DAG.getNode(
1068  DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1069  OperandFlags));
1070 
1071  return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1072  } else {
1073  OperandFlags = WebAssemblyII::MO_GOT;
1074  }
1075  }
1076 
1077  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1078  DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1079  GA->getOffset(), OperandFlags));
1080 }
1081 
1082 SDValue
1083 WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1084  SelectionDAG &DAG) const {
1085  SDLoc DL(Op);
1086  const auto *ES = cast<ExternalSymbolSDNode>(Op);
1087  EVT VT = Op.getValueType();
1088  assert(ES->getTargetFlags() == 0 &&
1089  "Unexpected target flags on generic ExternalSymbolSDNode");
1090  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1091  DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1092 }
1093 
1094 SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1095  SelectionDAG &DAG) const {
1096  // There's no need for a Wrapper node because we always incorporate a jump
1097  // table operand into a BR_TABLE instruction, rather than ever
1098  // materializing it in a register.
1099  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1100  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1101  JT->getTargetFlags());
1102 }
1103 
1104 SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1105  SelectionDAG &DAG) const {
1106  SDLoc DL(Op);
1107  SDValue Chain = Op.getOperand(0);
1108  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1109  SDValue Index = Op.getOperand(2);
1110  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1111 
1113  Ops.push_back(Chain);
1114  Ops.push_back(Index);
1115 
1117  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1118 
1119  // Add an operand for each case.
1120  for (auto MBB : MBBs)
1121  Ops.push_back(DAG.getBasicBlock(MBB));
1122 
1123  // TODO: For now, we just pick something arbitrary for the default case. We
1124  // really want to sniff out the guard and put in the real default case (and
1125  // delete the guard).
1126  Ops.push_back(DAG.getBasicBlock(MBBs[0]));
1127 
1128  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1129 }
1130 
1131 SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1132  SelectionDAG &DAG) const {
1133  SDLoc DL(Op);
1135 
1136  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1137  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1138 
1139  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1140  MFI->getVarargBufferVreg(), PtrVT);
1141  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1142  MachinePointerInfo(SV), 0);
1143 }
1144 
1145 SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1146  SelectionDAG &DAG) const {
1147  MachineFunction &MF = DAG.getMachineFunction();
1148  unsigned IntNo;
1149  switch (Op.getOpcode()) {
1150  case ISD::INTRINSIC_VOID:
1152  IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1153  break;
1155  IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1156  break;
1157  default:
1158  llvm_unreachable("Invalid intrinsic");
1159  }
1160  SDLoc DL(Op);
1161 
1162  switch (IntNo) {
1163  default:
1164  return SDValue(); // Don't custom lower most intrinsics.
1165 
1166  case Intrinsic::wasm_lsda: {
1167  EVT VT = Op.getValueType();
1168  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1169  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1170  auto &Context = MF.getMMI().getContext();
1171  MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1172  Twine(MF.getFunctionNumber()));
1173  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1174  DAG.getMCSymbol(S, PtrVT));
1175  }
1176 
1177  case Intrinsic::wasm_throw: {
1178  // We only support C++ exceptions for now
1179  int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1180  if (Tag != CPP_EXCEPTION)
1181  llvm_unreachable("Invalid tag!");
1182  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1183  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1184  const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1185  SDValue SymNode = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1186  DAG.getTargetExternalSymbol(SymName, PtrVT));
1187  return DAG.getNode(WebAssemblyISD::THROW, DL,
1188  MVT::Other, // outchain type
1189  {
1190  Op.getOperand(0), // inchain
1191  SymNode, // exception symbol
1192  Op.getOperand(3) // thrown value
1193  });
1194  }
1195  }
1196 }
1197 
1198 SDValue
1199 WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1200  SelectionDAG &DAG) const {
1201  SDLoc DL(Op);
1202  // If sign extension operations are disabled, allow sext_inreg only if operand
1203  // is a vector extract. SIMD does not depend on sign extension operations, but
1204  // allowing sext_inreg in this context lets us have simple patterns to select
1205  // extract_lane_s instructions. Expanding sext_inreg everywhere would be
1206  // simpler in this file, but would necessitate large and brittle patterns to
1207  // undo the expansion and select extract_lane_s instructions.
1208  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
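  // For example, a sext_inreg from i8 of a lane extracted from a v4i32 vector
  // is rewritten as an extract from the same vector bitcast to v16i8, with the
  // lane index scaled by 4, so it can match an i8x16 extract_lane_s pattern.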
1209  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
1210  const SDValue &Extract = Op.getOperand(0);
1211  MVT VecT = Extract.getOperand(0).getSimpleValueType();
1212  MVT ExtractedLaneT = static_cast<VTSDNode *>(Op.getOperand(1).getNode())
1213  ->getVT()
1214  .getSimpleVT();
1215  MVT ExtractedVecT =
1216  MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1217  if (ExtractedVecT == VecT)
1218  return Op;
1219  // Bitcast vector to appropriate type to ensure ISel pattern coverage
1220  const SDValue &Index = Extract.getOperand(1);
1221  unsigned IndexVal =
1222  static_cast<ConstantSDNode *>(Index.getNode())->getZExtValue();
1223  unsigned Scale =
1224  ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1225  assert(Scale > 1);
1226  SDValue NewIndex =
1227  DAG.getConstant(IndexVal * Scale, DL, Index.getValueType());
1228  SDValue NewExtract = DAG.getNode(
1229  ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1230  DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1231  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(),
1232  NewExtract, Op.getOperand(1));
1233  }
1234  // Otherwise expand
1235  return SDValue();
1236 }
1237 
1238 SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1239  SelectionDAG &DAG) const {
1240  SDLoc DL(Op);
1241  const EVT VecT = Op.getValueType();
1242  const EVT LaneT = Op.getOperand(0).getValueType();
1243  const size_t Lanes = Op.getNumOperands();
1244  auto IsConstant = [](const SDValue &V) {
1245  return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1246  };
1247 
1248  // Find the most common operand, which is approximately the best to splat
1249  using Entry = std::pair<SDValue, size_t>;
1250  SmallVector<Entry, 16> ValueCounts;
1251  size_t NumConst = 0, NumDynamic = 0;
1252  for (const SDValue &Lane : Op->op_values()) {
1253  if (Lane.isUndef()) {
1254  continue;
1255  } else if (IsConstant(Lane)) {
1256  NumConst++;
1257  } else {
1258  NumDynamic++;
1259  }
1260  auto CountIt = std::find_if(ValueCounts.begin(), ValueCounts.end(),
1261  [&Lane](Entry A) { return A.first == Lane; });
1262  if (CountIt == ValueCounts.end()) {
1263  ValueCounts.emplace_back(Lane, 1);
1264  } else {
1265  CountIt->second++;
1266  }
1267  }
1268  auto CommonIt =
1269  std::max_element(ValueCounts.begin(), ValueCounts.end(),
1270  [](Entry A, Entry B) { return A.second < B.second; });
1271  assert(CommonIt != ValueCounts.end() && "Unexpected all-undef build_vector");
1272  SDValue SplatValue = CommonIt->first;
1273  size_t NumCommon = CommonIt->second;
1274 
1275  // If v128.const is available, consider using it instead of a splat
1276  if (Subtarget->hasUnimplementedSIMD128()) {
1277  // {i32,i64,f32,f64}.const opcode, and value
1278  const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes);
1279  // SIMD prefix and opcode
1280  const size_t SplatBytes = 2;
1281  const size_t SplatConstBytes = SplatBytes + ConstBytes;
1282  // SIMD prefix, opcode, and lane index
1283  const size_t ReplaceBytes = 3;
1284  const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes;
1285  // SIMD prefix, v128.const opcode, and 128-bit value
1286  const size_t VecConstBytes = 18;
1287  // Initial v128.const and a replace_lane for each non-const operand
1288  const size_t ConstInitBytes = VecConstBytes + NumDynamic * ReplaceBytes;
1289  // Initial splat and all necessary replace_lanes
1290  const size_t SplatInitBytes =
1291  IsConstant(SplatValue)
1292  // Initial constant splat
1293  ? (SplatConstBytes +
1294  // Constant replace_lanes
1295  (NumConst - NumCommon) * ReplaceConstBytes +
1296  // Dynamic replace_lanes
1297  (NumDynamic * ReplaceBytes))
1298  // Initial dynamic splat
1299  : (SplatBytes +
1300  // Constant replace_lanes
1301  (NumConst * ReplaceConstBytes) +
1302  // Dynamic replace_lanes
1303  (NumDynamic - NumCommon) * ReplaceBytes);
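    // Rough example with these size estimates: a v4i32 build_vector with two
    // distinct constant lanes and one dynamic value used in the other two
    // lanes gives ConstInitBytes = 18 + 2*3 = 24 and
    // SplatInitBytes = 2 + 2*8 + 0 = 18, so the splat-then-replace_lane
    // sequence is chosen.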
1304  if (ConstInitBytes < SplatInitBytes) {
1305  // Create build_vector that will lower to initial v128.const
1306  SmallVector<SDValue, 16> ConstLanes;
1307  for (const SDValue &Lane : Op->op_values()) {
1308  if (IsConstant(Lane)) {
1309  ConstLanes.push_back(Lane);
1310  } else if (LaneT.isFloatingPoint()) {
1311  ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1312  } else {
1313  ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1314  }
1315  }
1316  SDValue Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1317  // Add replace_lane instructions for non-const lanes
1318  for (size_t I = 0; I < Lanes; ++I) {
1319  const SDValue &Lane = Op->getOperand(I);
1320  if (!Lane.isUndef() && !IsConstant(Lane))
1321  Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1322  DAG.getConstant(I, DL, MVT::i32));
1323  }
1324  return Result;
1325  }
1326  }
1327  // Use a splat for the initial vector
1328  SDValue Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
1329  // Add replace_lane instructions for other values
1330  for (size_t I = 0; I < Lanes; ++I) {
1331  const SDValue &Lane = Op->getOperand(I);
1332  if (Lane != SplatValue)
1333  Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1334  DAG.getConstant(I, DL, MVT::i32));
1335  }
1336  return Result;
1337 }
1338 
1339 SDValue
1340 WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1341  SelectionDAG &DAG) const {
1342  SDLoc DL(Op);
1343  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1345  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
1346  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1347 
1348  // Space for two vector args and sixteen mask indices
1349  SDValue Ops[18];
1350  size_t OpIdx = 0;
1351  Ops[OpIdx++] = Op.getOperand(0);
1352  Ops[OpIdx++] = Op.getOperand(1);
1353 
1354  // Expand mask indices to byte indices and materialize them as operands
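  // For example, with v4i32 lanes a mask element M becomes the four byte
  // indices 4*M, 4*M+1, 4*M+2, 4*M+3 (or four zeros for an undef lane).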
1355  for (int M : Mask) {
1356  for (size_t J = 0; J < LaneBytes; ++J) {
1357  // Lower undefs (represented by -1 in mask) to zero
1358  uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1359  Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1360  }
1361  }
1362 
1363  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1364 }
1365 
1366 SDValue
1367 WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1368  SelectionDAG &DAG) const {
1369  // Allow constant lane indices, expand variable lane indices
1370  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1371  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1372  return Op;
1373  else
1374  // Perform default expansion
1375  return SDValue();
1376 }
1377 
1380  // 32-bit and 64-bit unrolled shifts will have proper semantics
1381  if (LaneT.bitsGE(MVT::i32))
1382  return DAG.UnrollVectorOp(Op.getNode());
1383  // Otherwise mask the shift value to get proper semantics from 32-bit shift
1384  SDLoc DL(Op);
1385  SDValue ShiftVal = Op.getOperand(1);
1386  uint64_t MaskVal = LaneT.getSizeInBits() - 1;
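  // e.g. for i8 lanes MaskVal is 7, so a shift amount of 9 is reduced to 1
  // before the element-wise 32-bit shifts are emitted, mirroring the
  // modulo-lane-width behavior of the wasm SIMD shift instructions.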
1387  SDValue MaskedShiftVal = DAG.getNode(
1388  ISD::AND, // mask opcode
1389  DL, ShiftVal.getValueType(), // masked value type
1390  ShiftVal, // original shift value operand
1391  DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
1392  );
1393 
1394  return DAG.UnrollVectorOp(
1395  DAG.getNode(Op.getOpcode(), // original shift opcode
1396  DL, Op.getValueType(), // original return type
1397  Op.getOperand(0), // original vector operand,
1398  MaskedShiftVal // new masked shift value operand
1399  )
1400  .getNode());
1401 }
1402 
1403 SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
1404  SelectionDAG &DAG) const {
1405  SDLoc DL(Op);
1406 
1407  // Only manually lower vector shifts
1409 
1410  // Expand all vector shifts until V8 fixes its implementation
1411  // TODO: remove this once V8 is fixed
1412  if (!Subtarget->hasUnimplementedSIMD128())
1413  return unrollVectorShift(Op, DAG);
1414 
1415  // Unroll non-splat vector shifts
1416  BuildVectorSDNode *ShiftVec;
1417  SDValue SplatVal;
1418  if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
1419  !(SplatVal = ShiftVec->getSplatValue()))
1420  return unrollVectorShift(Op, DAG);
1421 
1422  // All splats except i64x2 const splats are handled by patterns
1423  auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
1424  if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
1425  return Op;
1426 
1427  // i64x2 const splats are custom lowered to avoid unnecessary wraps
1428  unsigned Opcode;
1429  switch (Op.getOpcode()) {
1430  case ISD::SHL:
1431  Opcode = WebAssemblyISD::VEC_SHL;
1432  break;
1433  case ISD::SRA:
1434  Opcode = WebAssemblyISD::VEC_SHR_S;
1435  break;
1436  case ISD::SRL:
1437  Opcode = WebAssemblyISD::VEC_SHR_U;
1438  break;
1439  default:
1440  llvm_unreachable("unexpected opcode");
1441  }
1442  APInt Shift = SplatConst->getAPIntValue().zextOrTrunc(32);
1443  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
1444  DAG.getConstant(Shift, DL, MVT::i32));
1445 }
1446 
1447 //===----------------------------------------------------------------------===//
1448 // WebAssembly Optimization Hooks
1449 //===----------------------------------------------------------------------===//
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
void setFrameAddressIsTaken(bool T)
uint64_t CallInst * C
static MVT getIntegerVT(unsigned BitWidth)
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:551
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG...
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:601
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:641
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
static Type * getDoubleTy(LLVMContext &C)
Definition: Type.cpp:164
bool isUndef() const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
const GlobalValue * getGlobal() const
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVMContext & Context
Diagnostic information for unsupported feature in backend.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:678
This class represents lattice values for constants.
Definition: AllocatorList.h:23
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
static MVT getVectorVT(MVT VT, unsigned NumElements)
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0...
Definition: ISDOpcodes.h:633
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:391
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
bool isVector() const
Return true if this is a vector value type.
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:222
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:384
unsigned getReg() const
getReg - Returns the register number.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:730
This class represents a function call, abstracting a target machine&#39;s calling convention.
unsigned Reg
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:252
SDValue getBasicBlock(MachineBasicBlock *MBB)
unsigned getVectorNumElements() const
Function Alias Analysis Results
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
F(f)
MachineModuleInfo & getMMI() const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:875
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:691
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void computeSignatureVTs(const FunctionType *Ty, const Function &F, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:459
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:158
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:209
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1241
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:135
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:480
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
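Illustrative constructor fragment, assuming a target whose widest native atomic is 64 bits; wider atomic operations are then expanded into __atomic_* library calls.
  setMaxAtomicSizeInBitsSupported(64);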
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:80
const HexagonInstrInfo * TII
static Type * getFloatTy(LLVMContext &C)
Definition: Type.cpp:163
#define INT64_MIN
Definition: DataTypes.h:80
Shift and rotation operations.
Definition: ISDOpcodes.h:434
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:205
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BinOp getOperation() const
Definition: Instructions.h:750
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:190
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
CopyToReg - This node has three operands: a chain, a register number to set to this value...
Definition: ISDOpcodes.h:169
uint64_t getConstantOperandVal(unsigned i) const
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn&#39;t supported on the target and indicate what to d...
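Illustrative only (hypothetical target): an unordered FP comparison with no native instruction is marked Expand so legalization rewrites it using supported predicates.
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);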
bool isInConsecutiveRegs() const
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:410
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:463
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
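Illustrative constructor fragment for a hypothetical target, not this file's exact table: one operation is expanded, another is routed to the target's custom lowering hook.
  setOperationAction(ISD::SDIV, MVT::i64, Expand);  // no native 64-bit divide
  setOperationAction(ISD::FABS, MVT::f32, Custom);  // handled in LowerOperation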
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:403
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
This class is used to represent EVTs, which are used to parameterize some operations.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:72
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
This represents a list of ValueTypes that has been intern'd by a SelectionDAG.
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:726
unsigned getSizeInBits() const
This is a fast-path instruction selection class that generates poor code and doesn&#39;t support illegal ...
Definition: FastISel.h:66
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:291
This file declares the WebAssembly-specific subclass of TargetMachine.
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:400
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:634
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:404
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:453
const MCContext & getContext() const
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
LLVM_NODISCARD size_t size() const
size - Get the string size.
Definition: StringRef.h:130
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
Definition: SelectionDAG.h:868
static mvt_range integer_vector_valuetypes()
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:582
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements. ...
Definition: SelectionDAG.h:757
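A minimal sketch inside a lowering hook; DAG, DL, and Scalar are assumed from the surrounding code:
  // Broadcast one i32 value into all four lanes of a v4i32 BUILD_VECTOR.
  SDValue Splat = DAG.getSplatBuildVector(MVT::v4i32, DL, Scalar);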
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, bool isSigned, const SDLoc &dl, bool doesNotReturn=false, bool isReturnValueUsed=true, bool isPostTypeLegalization=false) const
Returns a pair of (return value, chain).
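A hedged sketch of turning an unsupported node into a runtime-library call; Op, DAG, and DL are assumed from the surrounding hook, and RTLIB::FMIN_F32 is only an example libcall.
  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
  std::pair<SDValue, SDValue> Result =
      makeLibCall(DAG, RTLIB::FMIN_F32, MVT::f32, Ops, /*isSigned=*/false, DL);
  return Result.first; // .first is the call's value, .second the output chain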
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:150
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
MVT getVectorElementType() const
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
unsigned getByValSize() const
bool IsFixed
IsFixed - Is this a "fixed" value, i.e., not passed through a vararg "...".
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:351
TargetInstrInfo - Interface to description of machine instruction set.
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
bool is128BitVector() const
Return true if this is a 128-bit vector type.
The memory access is volatile.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
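A hedged sketch of emitting one instruction from a custom inserter; BB, MI, DL, TII, and MRI are assumed from the surrounding code, and the opcode is only an example.
  unsigned DstReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  BuildMI(*BB, MI, DL, TII.get(WebAssembly::CONST_I32), DstReg)
      .addImm(0); // materialize the constant 0 into DstReg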
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Definition: ValueTypes.h:234
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:165
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:657
unsigned const MachineRegisterInfo * MRI
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
unsigned getOrigAlign() const
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
const Triple & getTargetTriple() const
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
Definition: ISDOpcodes.h:759
iterator_range< value_op_iterator > op_values() const
const SDValue & getOperand(unsigned Num) const
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
Definition: ISDOpcodes.h:356
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:231
OperandFlags
These are flags set on operands, but should be considered private; all access should go through the M...
Definition: MCInstrDesc.h:40
This file provides WebAssembly-specific target descriptions.
unsigned char getTargetFlags() const
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:798
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
static mvt_range vector_valuetypes()
self_iterator getIterator()
Definition: ilist_node.h:81
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:750
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1220
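A minimal self-contained sketch of the range form:
  SmallVector<MVT, 4> Params = {MVT::i32, MVT::f32};
  auto It = llvm::find_if(Params, [](MVT T) { return T.isFloatingPoint(); });
  // It points at MVT::f32 here; it equals Params.end() when nothing matches.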
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:205
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
static unsigned NumFixedArgs
Extended Value Type.
Definition: ValueTypes.h:33
uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64 bits) that is strictly greater than A.
Definition: MathExtras.h:639
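Because the result is strictly greater than the argument, an exact power of two is bumped to the next one:
  // NextPowerOf2(3) == 4, NextPowerOf2(4) == 8, NextPowerOf2(0) == 1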
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isPositionIndependent() const
size_t size() const
Definition: SmallVector.h:52
const TargetMachine & getTargetMachine() const
This class contains a discriminated union of information about pointers in memory operands...
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
unsigned first
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
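A minimal sketch wrapping a plain begin/end pair so it can be used in a range-for:
  int Vals[] = {10, 20, 30, 40};
  for (int V : llvm::make_range(Vals, Vals + 2))
    (void)V; // the loop visits 10 and 20 only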
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition: ISDOpcodes.h:264
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:639
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:49
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
This file declares the WebAssembly-specific subclass of TargetSubtarget.
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:406
Iterator for intrusive lists based on ilist_node.
CCState - This class holds information needed while lowering arguments and return values...
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:363
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:740
Provides information about what library functions are available for the current target.
const DebugLoc & getDebugLoc() const
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:95
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:437
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:605
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:694
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:685
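A hedged sketch pairing this with getCopyFromReg; Chain, Value, and DL are assumptions from the surrounding lowering code.
  // Write Value into the stack-pointer register, then read it back as i32.
  Chain = DAG.getCopyToReg(Chain, DL, WebAssembly::SP32, Value);
  SDValue SP = DAG.getCopyFromReg(Chain, DL, WebAssembly::SP32, MVT::i32);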
const Function & getFunction() const
Return the LLVM function that this machine code represents.
static mvt_range integer_valuetypes()
unsigned getFrameRegister(const MachineFunction &MF) const override
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:163
Class for arbitrary precision integers.
Definition: APInt.h:69
unsigned getByValAlign() const
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:444
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:336
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:387
const WebAssemblyRegisterInfo * getRegisterInfo() const override
Flags
Flags values. These may be or'd together.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:666
Representation of each machine instruction.
Definition: MachineInstr.h:63
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:755
This class is derived from MachineFunctionInfo and contains private WebAssembly-specific information ...
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:150
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:214
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:411
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:711
static bool callingConvSupported(CallingConv::ID CallConv)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:205
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:510
bool isInConsecutiveRegsLast() const
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:44
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
TargetOptions Options
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
This file declares WebAssembly-specific per-machine-function information.
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
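Illustrative constructor fragment; the routine name is a placeholder, not a real WebAssembly runtime symbol.
  setLibcallName(RTLIB::FPROUND_F64_F32, "__my_truncdfsf2");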
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:332
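A minimal sketch of the null-checking cast on a DAG operand; Op is assumed to be an SDValue in scope.
  if (const auto *CN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    uint64_t Imm = CN->getZExtValue(); // operand 1 really is a constant
    (void)Imm;
  }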
Type * getValueType() const
Definition: GlobalValue.h:279
static CCValAssign getMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
unsigned getOpcode() const
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:636
SDValue getValue(unsigned R) const
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:575
LLVM Value Representation.
Definition: Value.h:72
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:326
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
bool isUndef() const
Return true if this node is an UNDEF value.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:65
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for a call to memset, used for functions with OptSize attribute.
const WebAssemblyInstrInfo * getInstrInfo() const override
unsigned getNumOperands() const
const SDValue & getOperand(unsigned i) const
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg)
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:415
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
LLVMContext * getContext() const
Definition: SelectionDAG.h:410
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:628
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:404
BRIND - Indirect branch.
Definition: ISDOpcodes.h:662
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Definition: ISDOpcodes.h:651