WebAssemblyISelLowering.cpp
1 //=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the WebAssemblyTargetLowering class.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "WebAssemblyISelLowering.h"
15 #include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16 #include "WebAssemblyMachineFunctionInfo.h"
17 #include "WebAssemblySubtarget.h"
18 #include "WebAssemblyTargetMachine.h"
19 #include "llvm/CodeGen/Analysis.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineJumpTableInfo.h"
23 #include "llvm/CodeGen/MachineModuleInfo.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/SelectionDAG.h"
26 #include "llvm/CodeGen/WasmEHFuncInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/DiagnosticPrinter.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/Intrinsics.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/Target/TargetOptions.h"
35 using namespace llvm;
36 
37 #define DEBUG_TYPE "wasm-lower"
38 
39 WebAssemblyTargetLowering::WebAssemblyTargetLowering(
40  const TargetMachine &TM, const WebAssemblySubtarget &STI)
41  : TargetLowering(TM), Subtarget(&STI) {
42  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
43 
44  // Booleans always contain 0 or 1.
45  setBooleanContents(ZeroOrOneBooleanContent);
46  // Except in SIMD vectors
47  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
48  // We don't know the microarchitecture here, so just reduce register pressure.
49  setSchedulingPreference(Sched::RegPressure);
50  // Tell ISel that we have a stack pointer.
51  setStackPointerRegisterToSaveRestore(
52  Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
53  // Set up the register classes.
54  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
55  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
56  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
57  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
58  if (Subtarget->hasSIMD128()) {
59  addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
60  addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
61  addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
62  addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
63  }
64  if (Subtarget->hasUnimplementedSIMD128()) {
65  addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
66  addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
67  }
68  // Compute derived properties from the register classes.
70 
76 
77  // Take the default expansion for va_arg, va_copy, and va_end. There is no
78  // default action for va_start, so we do that custom.
83 
84  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
85  // Don't expand the floating-point types to constant pools.
87  // Expand floating-point comparisons.
88  for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
91  // Expand floating-point library function operators.
92  for (auto Op :
95  // Note supported floating-point library function operators that otherwise
96  // default to expand.
97  for (auto Op :
100  // Support minimum and maximum, which otherwise default to expand.
103  // WebAssembly currently has no builtin f16 support.
108  }
109 
110  // Expand unavailable integer operations.
111  for (auto Op :
115  for (auto T : {MVT::i32, MVT::i64})
117  if (Subtarget->hasSIMD128())
118  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
120  if (Subtarget->hasUnimplementedSIMD128())
122  }
123 
124  // SIMD-specific configuration
125  if (Subtarget->hasSIMD128()) {
126  // Support saturating add for i8x16 and i16x8
127  for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
128  for (auto T : {MVT::v16i8, MVT::v8i16})
130 
131  // Custom lower BUILD_VECTORs to minimize number of replace_lanes
132  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
134  if (Subtarget->hasUnimplementedSIMD128())
135  for (auto T : {MVT::v2i64, MVT::v2f64})
137 
138  // We have custom shuffle lowering to expose the shuffle mask
139  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
141  if (Subtarget->hasUnimplementedSIMD128())
142  for (auto T: {MVT::v2i64, MVT::v2f64})
144 
145  // Custom lowering since wasm shifts must have a scalar shift amount
146  for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) {
147  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
149  if (Subtarget->hasUnimplementedSIMD128())
151  }
152 
153  // Custom lower lane accesses to expand out variable indices
155  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
157  if (Subtarget->hasUnimplementedSIMD128())
158  for (auto T : {MVT::v2i64, MVT::v2f64})
160  }
161 
162  // There is no i64x2.mul instruction
164 
165  // There are no vector select instructions
166  for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) {
167  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
169  if (Subtarget->hasUnimplementedSIMD128())
170  for (auto T : {MVT::v2i64, MVT::v2f64})
172  }
173 
174  // Expand integer operations supported for scalars but not SIMD
177  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
179  if (Subtarget->hasUnimplementedSIMD128())
181  }
182 
183  // Expand float operations supported for scalars but not SIMD
188  if (Subtarget->hasUnimplementedSIMD128())
190  }
191 
192  // Expand additional SIMD ops that V8 hasn't implemented yet
193  if (!Subtarget->hasUnimplementedSIMD128()) {
196  }
197  }
198 
199  // As a special case, these operators use the type to mean the type to
200  // sign-extend from.
202  if (!Subtarget->hasSignExt()) {
203  // Sign extends are legal only when extending a vector extract
204  auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
205  for (auto T : {MVT::i8, MVT::i16, MVT::i32})
207  }
208  for (auto T : MVT::integer_vector_valuetypes())
210 
211  // Dynamic stack allocation: use the default expansion.
215 
218 
219  // Expand these forms; we pattern-match the forms that we can handle in isel.
220  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
221  for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
223 
224  // We have custom switch handling.
226 
227  // WebAssembly doesn't have:
228  // - Floating-point extending loads.
229  // - Floating-point truncating stores.
230  // - i1 extending loads.
231  // - extending/truncating SIMD loads/stores
234  for (auto T : MVT::integer_valuetypes())
237  if (Subtarget->hasSIMD128()) {
239  MVT::v2f64}) {
240  for (auto MemT : MVT::vector_valuetypes()) {
241  if (MVT(T) != MemT) {
242  setTruncStoreAction(T, MemT, Expand);
244  setLoadExtAction(Ext, T, MemT, Expand);
245  }
246  }
247  }
248  }
249 
250  // Don't do anything clever with build_pairs
252 
253  // Trap lowers to wasm unreachable
255 
256  // Exception handling intrinsics
259 
261 
262  if (Subtarget->hasBulkMemory()) {
263  // Use memory.copy and friends over multiple loads and stores
264  MaxStoresPerMemcpy = 1;
268  MaxStoresPerMemset = 1;
270  }
271 
272  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
273  // consistent with the f64 and f128 names.
274  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
275  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
276 
277  // Define the emscripten name for return address helper.
278  // TODO: when implementing other WASM backends, make this generic or only do
279  // this on emscripten depending on what they end up doing.
280  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
281 
282  // Always convert switches to br_tables unless there is only one case, which
283  // is equivalent to a simple branch. This reduces code size for wasm, and we
284  // defer possible jump table optimizations to the VM.
286 }
287 
288 TargetLowering::AtomicExpansionKind
289 WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
290  // We have wasm instructions for these
291  switch (AI->getOperation()) {
292  case AtomicRMWInst::Add:
293  case AtomicRMWInst::Sub:
294  case AtomicRMWInst::And:
295  case AtomicRMWInst::Or:
296  case AtomicRMWInst::Xor:
297  case AtomicRMWInst::Xchg:
298  return AtomicExpansionKind::None;
299  default:
300  break;
301  }
302  return AtomicExpansionKind::CmpXChg;
303 }
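// As a concrete example of the policy above: an LLVM `atomicrmw nand` has no
// corresponding wasm instruction, so it takes the CmpXChg path and
// AtomicExpandPass rewrites it into a compare-exchange loop before
// instruction selection.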
304 
305 FastISel *WebAssemblyTargetLowering::createFastISel(
306  FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
307  return WebAssembly::createFastISel(FuncInfo, LibInfo);
308 }
309 
310 MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
311  EVT VT) const {
312  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
313  if (BitWidth > 1 && BitWidth < 8)
314  BitWidth = 8;
315 
316  if (BitWidth > 64) {
317  // The shift will be lowered to a libcall, and compiler-rt libcalls expect
318  // the count to be an i32.
319  BitWidth = 32;
320  assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
321  "32-bit shift counts ought to be enough for anyone");
322  }
323 
324  MVT Result = MVT::getIntegerVT(BitWidth);
325  assert(Result != MVT::iPTR &&
326  "Unable to represent scalar shift amount type");
327  return Result;
328 }
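// As a concrete example of the logic above: shifting an i64 value uses an i64
// shift-amount type, while an i128 shift is lowered to a libcall, so the
// amount type is clamped to the i32 count that compiler-rt expects.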
329 
330 // Lower an fp-to-int conversion operator from the LLVM opcode, which has an
331 // undefined result on invalid/overflow, to the WebAssembly opcode, which
332 // traps on invalid/overflow.
333 static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
334  MachineBasicBlock *BB,
335  const TargetInstrInfo &TII,
336  bool IsUnsigned, bool Int64,
337  bool Float64, unsigned LoweredOpcode) {
338  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
339 
340  Register OutReg = MI.getOperand(0).getReg();
341  Register InReg = MI.getOperand(1).getReg();
342 
343  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
344  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
345  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
346  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
347  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
348  unsigned Eqz = WebAssembly::EQZ_I32;
349  unsigned And = WebAssembly::AND_I32;
350  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
351  int64_t Substitute = IsUnsigned ? 0 : Limit;
352  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
353  auto &Context = BB->getParent()->getFunction().getContext();
354  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
355 
356  const BasicBlock *LLVMBB = BB->getBasicBlock();
357  MachineFunction *F = BB->getParent();
358  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
359  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
360  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
361 
362  MachineFunction::iterator It = ++BB->getIterator();
363  F->insert(It, FalseMBB);
364  F->insert(It, TrueMBB);
365  F->insert(It, DoneMBB);
366 
367  // Transfer the remainder of BB and its successor edges to DoneMBB.
368  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
369  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
370 
371  BB->addSuccessor(TrueMBB);
372  BB->addSuccessor(FalseMBB);
373  TrueMBB->addSuccessor(DoneMBB);
374  FalseMBB->addSuccessor(DoneMBB);
375 
376  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
377  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
378  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
379  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
380  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
381  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
382  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
383 
384  MI.eraseFromParent();
385  // For signed numbers, we can do a single comparison to determine whether
386  // fabs(x) is within range.
387  if (IsUnsigned) {
388  Tmp0 = InReg;
389  } else {
390  BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
391  }
392  BuildMI(BB, DL, TII.get(FConst), Tmp1)
393  .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
394  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
395 
396  // For unsigned numbers, we have to do a separate comparison with zero.
397  if (IsUnsigned) {
398  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
399  Register SecondCmpReg =
400  MRI.createVirtualRegister(&WebAssembly::I32RegClass);
401  Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
402  BuildMI(BB, DL, TII.get(FConst), Tmp1)
403  .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
404  BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
405  BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
406  CmpReg = AndReg;
407  }
408 
409  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
410 
411  // Create the CFG diamond to select between doing the conversion or using
412  // the substitute value.
413  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
414  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
415  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
416  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
417  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
418  .addReg(FalseReg)
419  .addMBB(FalseMBB)
420  .addReg(TrueReg)
421  .addMBB(TrueMBB);
422 
423  return DoneMBB;
424 }
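// Rough shape of what the diamond above computes for an unsigned f32 -> i32
// conversion (a sketch, not literal emitted code):
//
//   if (!(x < 4294967296.0 && x >= 0.0)) // range check via LT_F32/GE_F32/AND
//     result = 0;                        // substitute value in TrueMBB
//   else
//     result = I32_TRUNC_U_F32(x);       // trapping conversion is now safe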
425 
426 MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
427  MachineInstr &MI, MachineBasicBlock *BB) const {
428  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
429  DebugLoc DL = MI.getDebugLoc();
430 
431  switch (MI.getOpcode()) {
432  default:
433  llvm_unreachable("Unexpected instr type to insert");
434  case WebAssembly::FP_TO_SINT_I32_F32:
435  return LowerFPToInt(MI, DL, BB, TII, false, false, false,
436  WebAssembly::I32_TRUNC_S_F32);
437  case WebAssembly::FP_TO_UINT_I32_F32:
438  return LowerFPToInt(MI, DL, BB, TII, true, false, false,
439  WebAssembly::I32_TRUNC_U_F32);
440  case WebAssembly::FP_TO_SINT_I64_F32:
441  return LowerFPToInt(MI, DL, BB, TII, false, true, false,
442  WebAssembly::I64_TRUNC_S_F32);
443  case WebAssembly::FP_TO_UINT_I64_F32:
444  return LowerFPToInt(MI, DL, BB, TII, true, true, false,
445  WebAssembly::I64_TRUNC_U_F32);
446  case WebAssembly::FP_TO_SINT_I32_F64:
447  return LowerFPToInt(MI, DL, BB, TII, false, false, true,
448  WebAssembly::I32_TRUNC_S_F64);
449  case WebAssembly::FP_TO_UINT_I32_F64:
450  return LowerFPToInt(MI, DL, BB, TII, true, false, true,
451  WebAssembly::I32_TRUNC_U_F64);
452  case WebAssembly::FP_TO_SINT_I64_F64:
453  return LowerFPToInt(MI, DL, BB, TII, false, true, true,
454  WebAssembly::I64_TRUNC_S_F64);
455  case WebAssembly::FP_TO_UINT_I64_F64:
456  return LowerFPToInt(MI, DL, BB, TII, true, true, true,
457  WebAssembly::I64_TRUNC_U_F64);
458  llvm_unreachable("Unexpected instruction to emit with custom inserter");
459  }
460 }
461 
462 const char *
463 WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
464  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
465  case WebAssemblyISD::FIRST_NUMBER:
466  break;
467 #define HANDLE_NODETYPE(NODE) \
468  case WebAssemblyISD::NODE: \
469  return "WebAssemblyISD::" #NODE;
470 #include "WebAssemblyISD.def"
471 #undef HANDLE_NODETYPE
472  }
473  return nullptr;
474 }
475 
476 std::pair<unsigned, const TargetRegisterClass *>
477 WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
478  const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
479  // First, see if this is a constraint that directly corresponds to a
480  // WebAssembly register class.
481  if (Constraint.size() == 1) {
482  switch (Constraint[0]) {
483  case 'r':
484  assert(VT != MVT::iPTR && "Pointer MVT not expected here");
485  if (Subtarget->hasSIMD128() && VT.isVector()) {
486  if (VT.getSizeInBits() == 128)
487  return std::make_pair(0U, &WebAssembly::V128RegClass);
488  }
489  if (VT.isInteger() && !VT.isVector()) {
490  if (VT.getSizeInBits() <= 32)
491  return std::make_pair(0U, &WebAssembly::I32RegClass);
492  if (VT.getSizeInBits() <= 64)
493  return std::make_pair(0U, &WebAssembly::I64RegClass);
494  }
495  break;
496  default:
497  break;
498  }
499  }
500 
501  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
502 }
503 
504 bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
505  // Assume ctz is a relatively cheap operation.
506  return true;
507 }
508 
509 bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
510  // Assume clz is a relatively cheap operation.
511  return true;
512 }
513 
514 bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
515  const AddrMode &AM,
516  Type *Ty, unsigned AS,
517  Instruction *I) const {
518  // WebAssembly offsets are added as unsigned without wrapping. The
519  // isLegalAddressingMode gives us no way to determine if wrapping could be
520  // happening, so we approximate this by accepting only non-negative offsets.
521  if (AM.BaseOffs < 0)
522  return false;
523 
524  // WebAssembly has no scale register operands.
525  if (AM.Scale != 0)
526  return false;
527 
528  // Everything else is legal.
529  return true;
530 }
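// For example, an address of the form `base + 16` is accepted here and can be
// folded into the unsigned offset immediate of a wasm load/store, while
// `base - 8` and `base + 4*index` are rejected and get materialized with
// explicit address arithmetic instead.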
531 
532 bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
533  EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
534  MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
535  // WebAssembly supports unaligned accesses, though it should be declared
536  // with the p2align attribute on loads and stores which do so, and there
537  // may be a performance impact. We tell LLVM they're "fast" because
538  // for the kinds of things that LLVM uses this for (merging adjacent stores
539  // of constants, etc.), WebAssembly implementations will either want the
540  // unaligned access or they'll split anyway.
541  if (Fast)
542  *Fast = true;
543  return true;
544 }
545 
546 bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
547  AttributeList Attr) const {
548  // The current thinking is that wasm engines will perform this optimization,
549  // so we can save on code size.
550  return true;
551 }
552 
553 EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
554  LLVMContext &C,
555  EVT VT) const {
556  if (VT.isVector())
557  return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
558 
559  return TargetLowering::getSetCCResultType(DL, C, VT);
560 }
561 
562 bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
563  const CallInst &I,
564  MachineFunction &MF,
565  unsigned Intrinsic) const {
566  switch (Intrinsic) {
567  case Intrinsic::wasm_atomic_notify:
568  Info.opc = ISD::INTRINSIC_W_CHAIN;
569  Info.memVT = MVT::i32;
570  Info.ptrVal = I.getArgOperand(0);
571  Info.offset = 0;
572  Info.align = Align(4);
573  // atomic.notify instruction does not really load the memory specified with
574  // this argument, but MachineMemOperand should either be load or store, so
575  // we set this to a load.
576  // FIXME Volatile isn't really correct, but currently all LLVM atomic
577  // instructions are treated as volatiles in the backend, so we should be
578  // consistent. The same applies for wasm_atomic_wait intrinsics too.
579  Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
580  return true;
581  case Intrinsic::wasm_atomic_wait_i32:
582  Info.opc = ISD::INTRINSIC_W_CHAIN;
583  Info.memVT = MVT::i32;
584  Info.ptrVal = I.getArgOperand(0);
585  Info.offset = 0;
586  Info.align = Align(4);
587  Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
588  return true;
589  case Intrinsic::wasm_atomic_wait_i64:
590  Info.opc = ISD::INTRINSIC_W_CHAIN;
591  Info.memVT = MVT::i64;
592  Info.ptrVal = I.getArgOperand(0);
593  Info.offset = 0;
594  Info.align = Align(8);
595  Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
596  return true;
597  default:
598  return false;
599  }
600 }
601 
602 //===----------------------------------------------------------------------===//
603 // WebAssembly Lowering private implementation.
604 //===----------------------------------------------------------------------===//
605 
606 //===----------------------------------------------------------------------===//
607 // Lowering Code
608 //===----------------------------------------------------------------------===//
609 
610 static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
611  MachineFunction &MF = DAG.getMachineFunction();
612  DAG.getContext()->diagnose(
613  DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
614 }
615 
616 // Test whether the given calling convention is supported.
617 static bool callingConvSupported(CallingConv::ID CallConv) {
618  // We currently support the language-independent target-independent
619  // conventions. We don't yet have a way to annotate calls with properties like
620  // "cold", and we don't have any call-clobbered registers, so these are mostly
621  // all handled the same.
622  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
623  CallConv == CallingConv::Cold ||
624  CallConv == CallingConv::PreserveMost ||
625  CallConv == CallingConv::PreserveAll ||
626  CallConv == CallingConv::CXX_FAST_TLS ||
627  CallConv == CallingConv::WASM_EmscriptenInvoke;
628 }
629 
630 SDValue
631 WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
632  SmallVectorImpl<SDValue> &InVals) const {
633  SelectionDAG &DAG = CLI.DAG;
634  SDLoc DL = CLI.DL;
635  SDValue Chain = CLI.Chain;
636  SDValue Callee = CLI.Callee;
637  MachineFunction &MF = DAG.getMachineFunction();
638  auto Layout = MF.getDataLayout();
639 
640  CallingConv::ID CallConv = CLI.CallConv;
641  if (!callingConvSupported(CallConv))
642  fail(DL, DAG,
643  "WebAssembly doesn't support language-specific or target-specific "
644  "calling conventions yet");
645  if (CLI.IsPatchPoint)
646  fail(DL, DAG, "WebAssembly doesn't support patch point yet");
647 
648  if (CLI.IsTailCall) {
649  bool MustTail = CLI.CS && CLI.CS.isMustTailCall();
650  if (Subtarget->hasTailCall() && !CLI.IsVarArg) {
651  // Do not tail call unless caller and callee return types match
652  const Function &F = MF.getFunction();
653  const TargetMachine &TM = getTargetMachine();
654  Type *RetTy = F.getReturnType();
655  SmallVector<MVT, 4> CallerRetTys;
656  SmallVector<MVT, 4> CalleeRetTys;
657  computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
658  computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
659  bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
660  std::equal(CallerRetTys.begin(), CallerRetTys.end(),
661  CalleeRetTys.begin());
662  if (!TypesMatch) {
663  // musttail in this case would be an LLVM IR validation failure
664  assert(!MustTail);
665  CLI.IsTailCall = false;
666  }
667  } else {
668  CLI.IsTailCall = false;
669  if (MustTail) {
670  if (CLI.IsVarArg) {
671  // The return would pop the argument buffer
672  fail(DL, DAG, "WebAssembly does not support varargs tail calls");
673  } else {
674  fail(DL, DAG, "WebAssembly 'tail-call' feature not enabled");
675  }
676  }
677  }
678  }
679 
680  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
681  if (Ins.size() > 1)
682  fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");
683 
684  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
685  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
686 
687  // The generic code may have added an sret argument. If we're lowering an
688  // invoke function, the ABI requires that the function pointer be the first
689  // argument, so we may have to swap the arguments.
690  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
691  Outs[0].Flags.isSRet()) {
692  std::swap(Outs[0], Outs[1]);
693  std::swap(OutVals[0], OutVals[1]);
694  }
695 
696  unsigned NumFixedArgs = 0;
697  for (unsigned I = 0; I < Outs.size(); ++I) {
698  const ISD::OutputArg &Out = Outs[I];
699  SDValue &OutVal = OutVals[I];
700  if (Out.Flags.isNest())
701  fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
702  if (Out.Flags.isInAlloca())
703  fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
704  if (Out.Flags.isInConsecutiveRegs())
705  fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
706  if (Out.Flags.isInConsecutiveRegsLast())
707  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
708  if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
709  auto &MFI = MF.getFrameInfo();
710  int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
711  Out.Flags.getByValAlign(),
712  /*isSS=*/false);
713  SDValue SizeNode =
714  DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
715  SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
716  Chain = DAG.getMemcpy(
717  Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
718  /*isVolatile*/ false, /*AlwaysInline=*/false,
719  /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
720  OutVal = FINode;
721  }
722  // Count the number of fixed args *after* legalization.
723  NumFixedArgs += Out.IsFixed;
724  }
725 
726  bool IsVarArg = CLI.IsVarArg;
727  auto PtrVT = getPointerTy(Layout);
728 
729  // Analyze operands of the call, assigning locations to each operand.
730  SmallVector<CCValAssign, 16> ArgLocs;
731  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
732 
733  if (IsVarArg) {
734  // Outgoing non-fixed arguments are placed in a buffer. First
735  // compute their offsets and the total amount of buffer space needed.
736  for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
737  const ISD::OutputArg &Out = Outs[I];
738  SDValue &Arg = OutVals[I];
739  EVT VT = Arg.getValueType();
740  assert(VT != MVT::iPTR && "Legalized args should be concrete");
741  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
742  unsigned Align = std::max(Out.Flags.getOrigAlign(),
743  Layout.getABITypeAlignment(Ty));
744  unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
745  Align);
746  CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
747  Offset, VT.getSimpleVT(),
748  CCValAssign::Full));
749  }
750  }
751 
752  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
753 
754  SDValue FINode;
755  if (IsVarArg && NumBytes) {
756  // For non-fixed arguments, next emit stores to store the argument values
757  // to the stack buffer at the offsets computed above.
758  int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
759  Layout.getStackAlignment(),
760  /*isSS=*/false);
761  unsigned ValNo = 0;
762  SmallVector<SDValue, 8> Chains;
763  for (SDValue Arg :
764  make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
765  assert(ArgLocs[ValNo].getValNo() == ValNo &&
766  "ArgLocs should remain in order and only hold varargs args");
767  unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
768  FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
769  SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
770  DAG.getConstant(Offset, DL, PtrVT));
771  Chains.push_back(
772  DAG.getStore(Chain, DL, Arg, Add,
773  MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
774  }
775  if (!Chains.empty())
776  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
777  } else if (IsVarArg) {
778  FINode = DAG.getIntPtrConstant(0, DL);
779  }
780 
781  if (Callee->getOpcode() == ISD::GlobalAddress) {
782  // If the callee is a GlobalAddress node (quite common, every direct call
783  // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
784  // doesn't add MO_GOT, which is not needed for direct calls.
785  GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
786  Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
787  getPointerTy(DAG.getDataLayout()),
788  GA->getOffset());
789  Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
790  getPointerTy(DAG.getDataLayout()), Callee);
791  }
792 
793  // Compute the operands for the CALLn node.
794  SmallVector<SDValue, 16> Ops;
795  Ops.push_back(Chain);
796  Ops.push_back(Callee);
797 
798  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
799  // isn't reliable.
800  Ops.append(OutVals.begin(),
801  IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
802  // Add a pointer to the vararg buffer.
803  if (IsVarArg)
804  Ops.push_back(FINode);
805 
806  SmallVector<EVT, 8> InTys;
807  for (const auto &In : Ins) {
808  assert(!In.Flags.isByVal() && "byval is not valid for return values");
809  assert(!In.Flags.isNest() && "nest is not valid for return values");
810  if (In.Flags.isInAlloca())
811  fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
812  if (In.Flags.isInConsecutiveRegs())
813  fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
814  if (In.Flags.isInConsecutiveRegsLast())
815  fail(DL, DAG,
816  "WebAssembly hasn't implemented cons regs last return values");
817  // Ignore In.getOrigAlign() because all our arguments are passed in
818  // registers.
819  InTys.push_back(In.VT);
820  }
821 
822  if (CLI.IsTailCall) {
823  // ret_calls do not return values to the current frame
824  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
825  return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
826  }
827 
828  InTys.push_back(MVT::Other);
829  SDVTList InTyList = DAG.getVTList(InTys);
830  SDValue Res =
831  DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
832  DL, InTyList, Ops);
833  if (Ins.empty()) {
834  Chain = Res;
835  } else {
836  InVals.push_back(Res);
837  Chain = Res.getValue(1);
838  }
839 
840  return Chain;
841 }
842 
843 bool WebAssemblyTargetLowering::CanLowerReturn(
844  CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
845  const SmallVectorImpl<ISD::OutputArg> &Outs,
846  LLVMContext & /*Context*/) const {
847  // WebAssembly can't currently handle returning tuples.
848  return Outs.size() <= 1;
849 }
850 
851 SDValue WebAssemblyTargetLowering::LowerReturn(
852  SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
853  const SmallVectorImpl<ISD::OutputArg> &Outs,
854  const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
855  SelectionDAG &DAG) const {
856  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
857  if (!callingConvSupported(CallConv))
858  fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
859 
860  SmallVector<SDValue, 4> RetOps(1, Chain);
861  RetOps.append(OutVals.begin(), OutVals.end());
862  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
863 
864  // Record the number and types of the return values.
865  for (const ISD::OutputArg &Out : Outs) {
866  assert(!Out.Flags.isByVal() && "byval is not valid for return values");
867  assert(!Out.Flags.isNest() && "nest is not valid for return values");
868  assert(Out.IsFixed && "non-fixed return value is not valid");
869  if (Out.Flags.isInAlloca())
870  fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
871  if (Out.Flags.isInConsecutiveRegs())
872  fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
873  if (Out.Flags.isInConsecutiveRegsLast())
874  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
875  }
876 
877  return Chain;
878 }
879 
880 SDValue WebAssemblyTargetLowering::LowerFormalArguments(
881  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
882  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
883  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
884  if (!callingConvSupported(CallConv))
885  fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
886 
887  MachineFunction &MF = DAG.getMachineFunction();
888  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
889 
890  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
891  // of the incoming values before they're represented by virtual registers.
892  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
893 
894  for (const ISD::InputArg &In : Ins) {
895  if (In.Flags.isInAlloca())
896  fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
897  if (In.Flags.isNest())
898  fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
899  if (In.Flags.isInConsecutiveRegs())
900  fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
901  if (In.Flags.isInConsecutiveRegsLast())
902  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
903  // Ignore In.getOrigAlign() because all our arguments are passed in
904  // registers.
905  InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
906  DAG.getTargetConstant(InVals.size(),
907  DL, MVT::i32))
908  : DAG.getUNDEF(In.VT));
909 
910  // Record the number and types of arguments.
911  MFI->addParam(In.VT);
912  }
913 
914  // Varargs are copied into a buffer allocated by the caller, and a pointer to
915  // the buffer is passed as an argument.
916  if (IsVarArg) {
917  MVT PtrVT = getPointerTy(MF.getDataLayout());
918  Register VarargVreg =
919  MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
920  MFI->setVarargBufferVreg(VarargVreg);
921  Chain = DAG.getCopyToReg(
922  Chain, DL, VarargVreg,
923  DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
924  DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
925  MFI->addParam(PtrVT);
926  }
927 
928  // Record the number and types of arguments and results.
929  SmallVector<MVT, 4> Params;
930  SmallVector<MVT, 4> Results;
931  computeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
932  DAG.getTarget(), Params, Results);
933  for (MVT VT : Results)
934  MFI->addResult(VT);
935  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
936  // the param logic here with ComputeSignatureVTs
937  assert(MFI->getParams().size() == Params.size() &&
938  std::equal(MFI->getParams().begin(), MFI->getParams().end(),
939  Params.begin()));
940 
941  return Chain;
942 }
943 
944 void WebAssemblyTargetLowering::ReplaceNodeResults(
945  SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
946  switch (N->getOpcode()) {
947  case ISD::SIGN_EXTEND_INREG:
948  // Do not add any results, signifying that N should not be custom lowered
949  // after all. This happens because simd128 turns on custom lowering for
950  // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
951  // illegal type.
952  break;
953  default:
954  llvm_unreachable(
955  "ReplaceNodeResults not implemented for this op for WebAssembly!");
956  }
957 }
958 
959 //===----------------------------------------------------------------------===//
960 // Custom lowering hooks.
961 //===----------------------------------------------------------------------===//
962 
963 SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
964  SelectionDAG &DAG) const {
965  SDLoc DL(Op);
966  switch (Op.getOpcode()) {
967  default:
968  llvm_unreachable("unimplemented operation lowering");
969  return SDValue();
970  case ISD::FrameIndex:
971  return LowerFrameIndex(Op, DAG);
972  case ISD::GlobalAddress:
973  return LowerGlobalAddress(Op, DAG);
974  case ISD::ExternalSymbol:
975  return LowerExternalSymbol(Op, DAG);
976  case ISD::JumpTable:
977  return LowerJumpTable(Op, DAG);
978  case ISD::BR_JT:
979  return LowerBR_JT(Op, DAG);
980  case ISD::VASTART:
981  return LowerVASTART(Op, DAG);
982  case ISD::BlockAddress:
983  case ISD::BRIND:
984  fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
985  return SDValue();
986  case ISD::RETURNADDR:
987  return LowerRETURNADDR(Op, DAG);
988  case ISD::FRAMEADDR:
989  return LowerFRAMEADDR(Op, DAG);
990  case ISD::CopyToReg:
991  return LowerCopyToReg(Op, DAG);
992  case ISD::EXTRACT_VECTOR_ELT:
993  case ISD::INSERT_VECTOR_ELT:
994  return LowerAccessVectorElement(Op, DAG);
995  case ISD::INTRINSIC_VOID:
996  case ISD::INTRINSIC_WO_CHAIN:
997  case ISD::INTRINSIC_W_CHAIN:
998  return LowerIntrinsic(Op, DAG);
999  case ISD::SIGN_EXTEND_INREG:
1000  return LowerSIGN_EXTEND_INREG(Op, DAG);
1001  case ISD::BUILD_VECTOR:
1002  return LowerBUILD_VECTOR(Op, DAG);
1003  case ISD::VECTOR_SHUFFLE:
1004  return LowerVECTOR_SHUFFLE(Op, DAG);
1005  case ISD::SHL:
1006  case ISD::SRA:
1007  case ISD::SRL:
1008  return LowerShift(Op, DAG);
1009  }
1010 }
1011 
1012 SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1013  SelectionDAG &DAG) const {
1014  SDValue Src = Op.getOperand(2);
1015  if (isa<FrameIndexSDNode>(Src.getNode())) {
1016  // CopyToReg nodes don't support FrameIndex operands. Other targets select
1017  // the FI to some LEA-like instruction, but since we don't have that, we
1018  // need to insert some kind of instruction that can take an FI operand and
1019  // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1020  // local.copy between Op and its FI operand.
1021  SDValue Chain = Op.getOperand(0);
1022  SDLoc DL(Op);
1023  unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1024  EVT VT = Src.getValueType();
1025  SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1026  : WebAssembly::COPY_I64,
1027  DL, VT, Src),
1028  0);
1029  return Op.getNode()->getNumValues() == 1
1030  ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1031  : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1032  Op.getNumOperands() == 4 ? Op.getOperand(3)
1033  : SDValue());
1034  }
1035  return SDValue();
1036 }
1037 
1038 SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1039  SelectionDAG &DAG) const {
1040  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1041  return DAG.getTargetFrameIndex(FI, Op.getValueType());
1042 }
1043 
1044 SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1045  SelectionDAG &DAG) const {
1046  SDLoc DL(Op);
1047 
1048  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1049  fail(DL, DAG,
1050  "Non-Emscripten WebAssembly hasn't implemented "
1051  "__builtin_return_address");
1052  return SDValue();
1053  }
1054 
1055  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1056  return SDValue();
1057 
1058  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1059  MakeLibCallOptions CallOptions;
1060  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1061  {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1062  .first;
1063 }
1064 
1065 SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1066  SelectionDAG &DAG) const {
1067  // Non-zero depths are not supported by WebAssembly currently. Use the
1068  // legalizer's default expansion, which is to return 0 (what this function is
1069  // documented to do).
1070  if (Op.getConstantOperandVal(0) > 0)
1071  return SDValue();
1072 
1073  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1074  EVT VT = Op.getValueType();
1075  Register FP =
1076  Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1077  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1078 }
1079 
1080 SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1081  SelectionDAG &DAG) const {
1082  SDLoc DL(Op);
1083  const auto *GA = cast<GlobalAddressSDNode>(Op);
1084  EVT VT = Op.getValueType();
1085  assert(GA->getTargetFlags() == 0 &&
1086  "Unexpected target flags on generic GlobalAddressSDNode");
1087  if (GA->getAddressSpace() != 0)
1088  fail(DL, DAG, "WebAssembly only expects the 0 address space");
1089 
1090  unsigned OperandFlags = 0;
1091  if (isPositionIndependent()) {
1092  const GlobalValue *GV = GA->getGlobal();
1093  if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1094  MachineFunction &MF = DAG.getMachineFunction();
1095  MVT PtrVT = getPointerTy(MF.getDataLayout());
1096  const char *BaseName;
1097  if (GV->getValueType()->isFunctionTy()) {
1098  BaseName = MF.createExternalSymbolName("__table_base");
1099  OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1100  }
1101  else {
1102  BaseName = MF.createExternalSymbolName("__memory_base");
1103  OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1104  }
1105  SDValue BaseAddr =
1106  DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1107  DAG.getTargetExternalSymbol(BaseName, PtrVT));
1108 
1109  SDValue SymAddr = DAG.getNode(
1110  WebAssemblyISD::WrapperPIC, DL, VT,
1111  DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1112  OperandFlags));
1113 
1114  return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1115  } else {
1116  OperandFlags = WebAssemblyII::MO_GOT;
1117  }
1118  }
1119 
1120  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1121  DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1122  GA->getOffset(), OperandFlags));
1123 }
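// Concrete examples of the PIC cases above: a dso-local data symbol lowers to
// __memory_base plus a MO_MEMORY_BASE_REL offset, a dso-local function symbol
// lowers to __table_base plus a MO_TABLE_BASE_REL table index, and a
// non-dso-local symbol is referenced through its GOT entry via the MO_GOT
// operand flag.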
1124 
1125 SDValue
1126 WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1127  SelectionDAG &DAG) const {
1128  SDLoc DL(Op);
1129  const auto *ES = cast<ExternalSymbolSDNode>(Op);
1130  EVT VT = Op.getValueType();
1131  assert(ES->getTargetFlags() == 0 &&
1132  "Unexpected target flags on generic ExternalSymbolSDNode");
1133  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1134  DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1135 }
1136 
1137 SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1138  SelectionDAG &DAG) const {
1139  // There's no need for a Wrapper node because we always incorporate a jump
1140  // table operand into a BR_TABLE instruction, rather than ever
1141  // materializing it in a register.
1142  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1143  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1144  JT->getTargetFlags());
1145 }
1146 
1147 SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1148  SelectionDAG &DAG) const {
1149  SDLoc DL(Op);
1150  SDValue Chain = Op.getOperand(0);
1151  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1152  SDValue Index = Op.getOperand(2);
1153  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1154 
1155  SmallVector<SDValue, 8> Ops;
1156  Ops.push_back(Chain);
1157  Ops.push_back(Index);
1158 
1159  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1160  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1161 
1162  // Add an operand for each case.
1163  for (auto MBB : MBBs)
1164  Ops.push_back(DAG.getBasicBlock(MBB));
1165 
1166  // TODO: For now, we just pick something arbitrary for a default case.
1167  // We really want to sniff out the guard and put in the real default case (and
1168  // delete the guard).
1169  Ops.push_back(DAG.getBasicBlock(MBBs[0]));
1170 
1171  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1172 }
1173 
1174 SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1175  SelectionDAG &DAG) const {
1176  SDLoc DL(Op);
1177  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1178 
1179  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1180  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1181 
1182  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1183  MFI->getVarargBufferVreg(), PtrVT);
1184  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1185  MachinePointerInfo(SV), 0);
1186 }
1187 
1188 SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1189  SelectionDAG &DAG) const {
1190  MachineFunction &MF = DAG.getMachineFunction();
1191  unsigned IntNo;
1192  switch (Op.getOpcode()) {
1193  case ISD::INTRINSIC_VOID:
1194  case ISD::INTRINSIC_W_CHAIN:
1195  IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1196  break;
1197  case ISD::INTRINSIC_WO_CHAIN:
1198  IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1199  break;
1200  default:
1201  llvm_unreachable("Invalid intrinsic");
1202  }
1203  SDLoc DL(Op);
1204 
1205  switch (IntNo) {
1206  default:
1207  return SDValue(); // Don't custom lower most intrinsics.
1208 
1209  case Intrinsic::wasm_lsda: {
1210  EVT VT = Op.getValueType();
1211  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1212  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1213  auto &Context = MF.getMMI().getContext();
1214  MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1215  Twine(MF.getFunctionNumber()));
1216  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1217  DAG.getMCSymbol(S, PtrVT));
1218  }
1219 
1220  case Intrinsic::wasm_throw: {
1221  // We only support C++ exceptions for now
1222  int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1223  if (Tag != CPP_EXCEPTION)
1224  llvm_unreachable("Invalid tag!");
1225  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1226  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1227  const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1228  SDValue SymNode = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1229  DAG.getTargetExternalSymbol(SymName, PtrVT));
1230  return DAG.getNode(WebAssemblyISD::THROW, DL,
1231  MVT::Other, // outchain type
1232  {
1233  Op.getOperand(0), // inchain
1234  SymNode, // exception symbol
1235  Op.getOperand(3) // thrown value
1236  });
1237  }
1238  }
1239 }
1240 
1241 SDValue
1242 WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1243  SelectionDAG &DAG) const {
1244  SDLoc DL(Op);
1245  // If sign extension operations are disabled, allow sext_inreg only if operand
1246  // is a vector extract. SIMD does not depend on sign extension operations, but
1247  // allowing sext_inreg in this context lets us have simple patterns to select
1248  // extract_lane_s instructions. Expanding sext_inreg everywhere would be
1249  // simpler in this file, but would necessitate large and brittle patterns to
1250  // undo the expansion and select extract_lane_s instructions.
1251  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
1252  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
1253  const SDValue &Extract = Op.getOperand(0);
1254  MVT VecT = Extract.getOperand(0).getSimpleValueType();
1255  MVT ExtractedLaneT = static_cast<VTSDNode *>(Op.getOperand(1).getNode())
1256  ->getVT()
1257  .getSimpleVT();
1258  MVT ExtractedVecT =
1259  MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1260  if (ExtractedVecT == VecT)
1261  return Op;
1262  // Bitcast vector to appropriate type to ensure ISel pattern coverage
1263  const SDValue &Index = Extract.getOperand(1);
1264  unsigned IndexVal =
1265  static_cast<ConstantSDNode *>(Index.getNode())->getZExtValue();
1266  unsigned Scale =
1267  ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1268  assert(Scale > 1);
1269  SDValue NewIndex =
1270  DAG.getConstant(IndexVal * Scale, DL, Index.getValueType());
1271  SDValue NewExtract = DAG.getNode(
1272  ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1273  DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1274  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(),
1275  NewExtract, Op.getOperand(1));
1276  }
1277  // Otherwise expand
1278  return SDValue();
1279 }
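// Worked example of the rescaling above: sign-extending from i8 a lane
// obtained as (extract_vector_elt v4i32 %v, 2) bitcasts %v to v16i8 and
// extracts lane 2 * (16/4) = 8, so the whole pattern can be selected as a
// single extract_lane_s instruction.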
1280 
1281 SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1282  SelectionDAG &DAG) const {
1283  SDLoc DL(Op);
1284  const EVT VecT = Op.getValueType();
1285  const EVT LaneT = Op.getOperand(0).getValueType();
1286  const size_t Lanes = Op.getNumOperands();
1287  auto IsConstant = [](const SDValue &V) {
1288  return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1289  };
1290 
1291  // Find the most common operand, which is approximately the best to splat
1292  using Entry = std::pair<SDValue, size_t>;
1293  SmallVector<Entry, 16> ValueCounts;
1294  size_t NumConst = 0, NumDynamic = 0;
1295  for (const SDValue &Lane : Op->op_values()) {
1296  if (Lane.isUndef()) {
1297  continue;
1298  } else if (IsConstant(Lane)) {
1299  NumConst++;
1300  } else {
1301  NumDynamic++;
1302  }
1303  auto CountIt = std::find_if(ValueCounts.begin(), ValueCounts.end(),
1304  [&Lane](Entry A) { return A.first == Lane; });
1305  if (CountIt == ValueCounts.end()) {
1306  ValueCounts.emplace_back(Lane, 1);
1307  } else {
1308  CountIt->second++;
1309  }
1310  }
1311  auto CommonIt =
1312  std::max_element(ValueCounts.begin(), ValueCounts.end(),
1313  [](Entry A, Entry B) { return A.second < B.second; });
1314  assert(CommonIt != ValueCounts.end() && "Unexpected all-undef build_vector");
1315  SDValue SplatValue = CommonIt->first;
1316  size_t NumCommon = CommonIt->second;
1317 
1318  // If v128.const is available, consider using it instead of a splat
1319  if (Subtarget->hasUnimplementedSIMD128()) {
1320  // {i32,i64,f32,f64}.const opcode, and value
1321  const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes);
1322  // SIMD prefix and opcode
1323  const size_t SplatBytes = 2;
1324  const size_t SplatConstBytes = SplatBytes + ConstBytes;
1325  // SIMD prefix, opcode, and lane index
1326  const size_t ReplaceBytes = 3;
1327  const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes;
1328  // SIMD prefix, v128.const opcode, and 128-bit value
1329  const size_t VecConstBytes = 18;
1330  // Initial v128.const and a replace_lane for each non-const operand
1331  const size_t ConstInitBytes = VecConstBytes + NumDynamic * ReplaceBytes;
1332  // Initial splat and all necessary replace_lanes
1333  const size_t SplatInitBytes =
1334  IsConstant(SplatValue)
1335  // Initial constant splat
1336  ? (SplatConstBytes +
1337  // Constant replace_lanes
1338  (NumConst - NumCommon) * ReplaceConstBytes +
1339  // Dynamic replace_lanes
1340  (NumDynamic * ReplaceBytes))
1341  // Initial dynamic splat
1342  : (SplatBytes +
1343  // Constant replace_lanes
1344  (NumConst * ReplaceConstBytes) +
1345  // Dynamic replace_lanes
1346  (NumDynamic - NumCommon) * ReplaceBytes);
1347  if (ConstInitBytes < SplatInitBytes) {
1348  // Create build_vector that will lower to initial v128.const
1349  SmallVector<SDValue, 16> ConstLanes;
1350  for (const SDValue &Lane : Op->op_values()) {
1351  if (IsConstant(Lane)) {
1352  ConstLanes.push_back(Lane);
1353  } else if (LaneT.isFloatingPoint()) {
1354  ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1355  } else {
1356  ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1357  }
1358  }
1359  SDValue Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1360  // Add replace_lane instructions for non-const lanes
1361  for (size_t I = 0; I < Lanes; ++I) {
1362  const SDValue &Lane = Op->getOperand(I);
1363  if (!Lane.isUndef() && !IsConstant(Lane))
1364  Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1365  DAG.getConstant(I, DL, MVT::i32));
1366  }
1367  return Result;
1368  }
1369  }
1370  // Use a splat for the initial vector
1371  SDValue Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
1372  // Add replace_lane instructions for other values
1373  for (size_t I = 0; I < Lanes; ++I) {
1374  const SDValue &Lane = Op->getOperand(I);
1375  if (Lane != SplatValue)
1376  Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1377  DAG.getConstant(I, DL, MVT::i32));
1378  }
1379  return Result;
1380 }
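// Worked example of the size heuristic above, assuming v128.const is
// available: for a v4i32 build_vector {x, 1, 2, 3} where only x is
// non-constant, the v128.const route costs 18 bytes plus one 3-byte
// replace_lane for x (21 total), while splatting the most common value x
// costs 2 bytes plus three 8-byte constant replace_lanes (26 total), so the
// v128.const form is chosen.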
1381 
1382 SDValue
1383 WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1384  SelectionDAG &DAG) const {
1385  SDLoc DL(Op);
1386  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1387  MVT VecType = Op.getOperand(0).getSimpleValueType();
1388  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
1389  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1390 
1391  // Space for two vector args and sixteen mask indices
1392  SDValue Ops[18];
1393  size_t OpIdx = 0;
1394  Ops[OpIdx++] = Op.getOperand(0);
1395  Ops[OpIdx++] = Op.getOperand(1);
1396 
1397  // Expand mask indices to byte indices and materialize them as operands
1398  for (int M : Mask) {
1399  for (size_t J = 0; J < LaneBytes; ++J) {
1400  // Lower undefs (represented by -1 in mask) to zero
1401  uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1402  Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1403  }
1404  }
1405 
1406  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1407 }
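// Example of the mask expansion above: a v4i32 shuffle with mask <4, 0, -1, 5>
// becomes a WebAssemblyISD::SHUFFLE node whose sixteen byte-index operands are
// 16..19, 0..3, 0,0,0,0 (undef lanes map to byte 0), and 20..23.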
1408 
1409 SDValue
1410 WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1411  SelectionDAG &DAG) const {
1412  // Allow constant lane indices, expand variable lane indices
1413  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1414  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1415  return Op;
1416  else
1417  // Perform default expansion
1418  return SDValue();
1419 }
1420 
1421 static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1422  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1423  // 32-bit and 64-bit unrolled shifts will have proper semantics
1424  if (LaneT.bitsGE(MVT::i32))
1425  return DAG.UnrollVectorOp(Op.getNode());
1426  // Otherwise mask the shift value to get proper semantics from 32-bit shift
1427  SDLoc DL(Op);
1428  SDValue ShiftVal = Op.getOperand(1);
1429  uint64_t MaskVal = LaneT.getSizeInBits() - 1;
1430  SDValue MaskedShiftVal = DAG.getNode(
1431  ISD::AND, // mask opcode
1432  DL, ShiftVal.getValueType(), // masked value type
1433  ShiftVal, // original shift value operand
1434  DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
1435  );
1436 
1437  return DAG.UnrollVectorOp(
1438  DAG.getNode(Op.getOpcode(), // original shift opcode
1439  DL, Op.getValueType(), // original return type
1440  Op.getOperand(0), // original vector operand,
1441  MaskedShiftVal // new masked shift value operand
1442  )
1443  .getNode());
1444 }
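// The masking above exists because wasm vector shifts interpret the count
// modulo the lane width: shifting v16i8 lanes left by 8 must leave them
// unchanged (8 mod 8 == 0), but an unrolled, unmasked 32-bit shift by 8 would
// zero them, so the count is ANDed with (lane bits - 1) first.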
1445 
1446 SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
1447  SelectionDAG &DAG) const {
1448  SDLoc DL(Op);
1449 
1450  // Only manually lower vector shifts
1451  assert(Op.getSimpleValueType().isVector());
1452 
1453  // Unroll non-splat vector shifts
1454  BuildVectorSDNode *ShiftVec;
1455  SDValue SplatVal;
1456  if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
1457  !(SplatVal = ShiftVec->getSplatValue()))
1458  return unrollVectorShift(Op, DAG);
1459 
1460  // All splats except i64x2 const splats are handled by patterns
1461  auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
1462  if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
1463  return Op;
1464 
1465  // i64x2 const splats are custom lowered to avoid unnecessary wraps
1466  unsigned Opcode;
1467  switch (Op.getOpcode()) {
1468  case ISD::SHL:
1469  Opcode = WebAssemblyISD::VEC_SHL;
1470  break;
1471  case ISD::SRA:
1472  Opcode = WebAssemblyISD::VEC_SHR_S;
1473  break;
1474  case ISD::SRL:
1475  Opcode = WebAssemblyISD::VEC_SHR_U;
1476  break;
1477  default:
1478  llvm_unreachable("unexpected opcode");
1479  }
1480  APInt Shift = SplatConst->getAPIntValue().zextOrTrunc(32);
1481  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
1482  DAG.getConstant(Shift, DL, MVT::i32));
1483 }
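// For instance, (shl v2i64 %v, (splat 2)) reaches the custom path above and
// is emitted as a VEC_SHL whose shift amount is the splat constant truncated
// to an i32 operand, avoiding the i64-to-i32 wrap the generic patterns would
// otherwise introduce.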
1484 
1485 //===----------------------------------------------------------------------===//
1486 // WebAssembly Optimization Hooks
1487 //===----------------------------------------------------------------------===//
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
void setFrameAddressIsTaken(bool T)
uint64_t CallInst * C
static MVT getIntegerVT(unsigned BitWidth)
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:551
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG...
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:606
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:641
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
static Type * getDoubleTy(LLVMContext &C)
Definition: Type.cpp:164
bool isUndef() const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
const GlobalValue * getGlobal() const
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVMContext & Context
Diagnostic information for unsupported feature in backend.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:678
This class represents lattice values for constants.
Definition: AllocatorList.h:23
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
static MVT getVectorVT(MVT VT, unsigned NumElements)
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0...
Definition: ISDOpcodes.h:633
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:391
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool isVector() const
Return true if this is a vector value type.
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:222
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:385
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:730
This class represents a function call, abstracting a target machine&#39;s calling convention.
unsigned Reg
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:252
SDValue getBasicBlock(MachineBasicBlock *MBB)
unsigned getVectorNumElements() const
Function Alias Analysis Results
Calling convention for emscripten __invoke_* functions.
Definition: CallingConv.h:231
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
F(f)
MachineModuleInfo & getMMI() const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:878
An instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:693
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
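For illustration only, a custom-lowering hook usually builds replacement nodes through this factory; Op, DAG, and DL below are assumed to be the usual SDValue, SelectionDAG, and SDLoc values available in such a hook:

// Hypothetical sketch: rebuild an i32 addition from the operands of Op.
// getNode returns an existing identical node if one is already in the DAG.
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
SDValue Sum = DAG.getNode(ISD::ADD, DL, MVT::i32, LHS, RHS);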
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
void computeSignatureVTs(const FunctionType *Ty, const Function &F, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:459
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
Register getFrameRegister(const MachineFunction &MF) const override
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:158
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:209
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1241
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:135
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:480
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:80
const HexagonInstrInfo * TII
static Type * getFloatTy(LLVMContext &C)
Definition: Type.cpp:163
#define INT64_MIN
Definition: DataTypes.h:80
Shift and rotation operations.
Definition: ISDOpcodes.h:434
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:261
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BinOp getOperation() const
Definition: Instructions.h:752
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:190
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
CopyToReg - This node has three operands: a chain, a register number to set to this value...
Definition: ISDOpcodes.h:169
uint64_t getConstantOperandVal(unsigned i) const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:648
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn&#39;t supported on the target and indicate what to d...
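A hedged sketch of typical usage from a target constructor; the condition code, type, and action are chosen purely for illustration:

// Hypothetical: unordered-or-greater FP compares are not natively
// available, so ask the legalizer to rewrite SETUGT on f32 in terms of
// condition codes the target does support.
setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);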
bool isInConsecutiveRegs() const
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:411
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:473
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
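As a sketch of this central legalization knob (the opcode/type pairs below are hypothetical and not taken from this file):

// Hypothetical: no native 64-bit population count, expand it generically.
setOperationAction(ISD::CTPOP, MVT::i64, Expand);
// Hypothetical: handle llvm.frameaddress lowering in LowerOperation.
setOperationAction(ISD::FRAMEADDR, MVT::i32, Custom);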
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:413
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
This class is used to represent EVT's, which are used to parameterize some operations.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:72
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:726
unsigned getSizeInBits() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:291
This file declares the WebAssembly-specific subclass of TargetMachine.
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:410
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:643
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:414
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:453
const MCContext & getContext() const
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
LLVM_NODISCARD size_t size() const
size - Get the string size.
Definition: StringRef.h:130
unsigned getTargetFlags() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
Definition: SelectionDAG.h:874
static mvt_range integer_vector_valuetypes()
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:592
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements. ...
Definition: SelectionDAG.h:763
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl) const
Returns a pair of (return value, chain).
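A hedged sketch of calling this helper from a custom lowering routine in a TargetLowering subclass; Op, DAG, and DL are assumed to be the usual in-scope values, and the libcall chosen is purely illustrative:

// Hypothetical: lower an f128 addition through the runtime library.
SDValue Args[] = {Op.getOperand(0), Op.getOperand(1)};
MakeLibCallOptions CallOptions;
std::pair<SDValue, SDValue> Result =
    makeLibCall(DAG, RTLIB::ADD_F128, MVT::f128, Args, CallOptions, DL);
// Result.first is the returned value, Result.second is the output chain.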
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:150
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
MVT getVectorElementType() const
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
unsigned getByValSize() const
bool IsFixed
IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:351
TargetInstrInfo - Interface to description of machine instruction set.
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
bool is128BitVector() const
Return true if this is a 128-bit vector type.
The memory access is volatile.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
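A brief, hypothetical sketch of the builder in use during custom instruction emission; BB, MI, DL, TII, DstReg, and SrcReg are assumed to be in scope (insertion block, insertion point, debug location, instruction info, and registers):

// Emit a COPY of SrcReg into DstReg immediately before MI.
BuildMI(*BB, MI, DL, TII.get(TargetOpcode::COPY), DstReg)
    .addReg(SrcReg);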
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Definition: ValueTypes.h:234
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:168
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:165
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:657
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
unsigned const MachineRegisterInfo * MRI
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
unsigned getOrigAlign() const
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
const Triple & getTargetTriple() const
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
Definition: ISDOpcodes.h:759
iterator_range< value_op_iterator > op_values() const
const SDValue & getOperand(unsigned Num) const
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
Definition: ISDOpcodes.h:356
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:231
OperandFlags
These are flags set on operands, but should be considered private, all access should go through the M...
Definition: MCInstrDesc.h:40
This file provides WebAssembly-specific target descriptions.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
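A one-line hypothetical sketch of how a target constructor typically uses this hook; the choice below is illustrative, not a statement about this file:

// Scalar boolean results are represented as 0 or 1, so the upper bits of
// a widened boolean can be assumed to be zero by the DAG combiner.
setBooleanContents(ZeroOrOneBooleanContent);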
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:798
static mvt_range vector_valuetypes()
self_iterator getIterator()
Definition: ilist_node.h:81
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:750
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1193
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:205
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
static unsigned NumFixedArgs
Extended Value Type.
Definition: ValueTypes.h:33
uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition: MathExtras.h:644
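For example, NextPowerOf2(5) returns 8, and because the result is strictly greater than the argument, NextPowerOf2(8) returns 16 rather than 8; NextPowerOf2(0) returns 1.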
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isPositionIndependent() const
size_t size() const
Definition: SmallVector.h:52
const TargetMachine & getTargetMachine() const
This class contains a discriminated union of information about pointers in memory operands...
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:40
unsigned first
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition: ISDOpcodes.h:264
int CreateStackObject(uint64_t Size, llvm::Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
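A short hypothetical sketch; MFI is assumed to be the MachineFrameInfo of the current function, and the size and alignment are illustrative:

// Reserve an 8-byte slot with 8-byte alignment; the returned index can be
// turned into an address with SelectionDAG::getFrameIndex.
int FI = MFI.CreateStackObject(8, Align(8), /*isSpillSlot=*/false);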
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:49
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
This file declares the WebAssembly-specific subclass of TargetSubtarget.
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:416
Iterator for intrusive lists based on ilist_node.
CCState - This class holds information needed while lowering arguments and return values...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:638
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:363
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:746
Provides information about what library functions are available for the current target.
const DebugLoc & getDebugLoc() const
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:95
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:437
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:605
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:701
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:691
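A minimal sketch, assuming Chain, DL, and Val are an existing chain, debug location, and value, and DestReg is a virtual register created elsewhere:

// Copy the computed value into DestReg and thread the chain through it.
SDValue NewChain = DAG.getCopyToReg(Chain, DL, DestReg, Val);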
const Function & getFunction() const
Return the LLVM function that this machine code represents.
static mvt_range integer_valuetypes()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:940
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:163
Class for arbitrary precision integers.
Definition: APInt.h:69
unsigned getByValAlign() const
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:444
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:336
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:387
const WebAssemblyRegisterInfo * getRegisterInfo() const override
Flags
Flags values. These may be or'd together.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:666
Representation of each machine instruction.
Definition: MachineInstr.h:64
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:755
This class is derived from MachineFunctionInfo and contains private WebAssembly-specific information ...
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:150
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:214
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:411
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:717
static bool callingConvSupported(CallingConv::ID CallConv)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:205
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:510
bool isInConsecutiveRegsLast() const
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:44
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
This file declares WebAssembly-specific per-machine-function information.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
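A hypothetical one-line sketch from a target constructor; the runtime symbol name below is made up for illustration:

// Route the f32 fmin libcall to a differently named runtime routine.
setLibcallName(RTLIB::FMIN_F32, "my_fminf");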
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:332
Type * getValueType() const
Definition: GlobalValue.h:279
static CCValAssign getMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
unsigned getOpcode() const
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:636
SDValue getValue(unsigned R) const
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:575
LLVM Value Representation.
Definition: Value.h:73
void computeLegalValueVTs(const Function &F, const TargetMachine &TM, Type *Ty, SmallVectorImpl< MVT > &ValueVTs)
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:326
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
bool isUndef() const
Return true if the type of the node is undefined.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:65
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
const WebAssemblyInstrInfo * getInstrInfo() const override
unsigned getNumOperands() const
Register getReg() const
getReg - Returns the register number.
const SDValue & getOperand(unsigned i) const
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg)
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:416
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
LLVMContext * getContext() const
Definition: SelectionDAG.h:420
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:404
BRIND - Indirect branch.
Definition: ISDOpcodes.h:662
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Definition: ISDOpcodes.h:651