WebAssemblyISelLowering.cpp
1 //=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the WebAssemblyTargetLowering class.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "WebAssemblyISelLowering.h"
15 #include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16 #include "WebAssemblyMachineFunctionInfo.h"
17 #include "WebAssemblySubtarget.h"
18 #include "WebAssemblyTargetMachine.h"
19 #include "llvm/CodeGen/Analysis.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineJumpTableInfo.h"
23 #include "llvm/CodeGen/MachineModuleInfo.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/SelectionDAG.h"
26 #include "llvm/CodeGen/WasmEHFuncInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/DiagnosticPrinter.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/Intrinsics.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/Target/TargetOptions.h"
35 using namespace llvm;
36 
37 #define DEBUG_TYPE "wasm-lower"
38 
39 WebAssemblyTargetLowering::WebAssemblyTargetLowering(
40     const TargetMachine &TM, const WebAssemblySubtarget &STI)
41     : TargetLowering(TM), Subtarget(&STI) {
42   auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
43 
44   // Booleans always contain 0 or 1.
45   setBooleanContents(ZeroOrOneBooleanContent);
46   // Except in SIMD vectors
47   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
48   // WebAssembly does not produce floating-point exceptions on normal floating
49   // point operations.
50   setHasFloatingPointExceptions(false);
51   // We don't know the microarchitecture here, so just reduce register pressure.
52   setSchedulingPreference(Sched::RegPressure);
53   // Tell ISel that we have a stack pointer.
54   setStackPointerRegisterToSaveRestore(
55       Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
56  // Set up the register classes.
57  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
58  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
59  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
60  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
61  if (Subtarget->hasSIMD128()) {
62  addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
63  addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
64  addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
65  addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
66  }
67  if (Subtarget->hasUnimplementedSIMD128()) {
68  addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
69  addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
70  }
71   // Compute derived properties from the register classes.
72   computeRegisterProperties(Subtarget->getRegisterInfo());
73 
74   setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
75   setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
76   setOperationAction(ISD::JumpTable, MVTPtr, Custom);
77   setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
78   setOperationAction(ISD::BRIND, MVT::Other, Custom);
79 
80   // Take the default expansion for va_arg, va_copy, and va_end. There is no
81   // default action for va_start, so we do that custom.
82   setOperationAction(ISD::VASTART, MVT::Other, Custom);
83   setOperationAction(ISD::VAARG, MVT::Other, Expand);
84   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
85   setOperationAction(ISD::VAEND, MVT::Other, Expand);
86 
87   for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
88     // Don't expand the floating-point types to constant pools.
89     setOperationAction(ISD::ConstantFP, T, Legal);
90     // Expand floating-point comparisons.
91     for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
92                     ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
93       setCondCodeAction(CC, T, Expand);
94     // Expand floating-point library function operators.
95     for (auto Op :
96          {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
97       setOperationAction(Op, T, Expand);
98     // Note supported floating-point library function operators that otherwise
99     // default to expand.
100     for (auto Op :
101          {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
102       setOperationAction(Op, T, Legal);
103     // Support minimum and maximum, which otherwise default to expand.
104     setOperationAction(ISD::FMINIMUM, T, Legal);
105     setOperationAction(ISD::FMAXIMUM, T, Legal);
106     // WebAssembly currently has no builtin f16 support.
107     setOperationAction(ISD::FP16_TO_FP, T, Expand);
108     setOperationAction(ISD::FP_TO_FP16, T, Expand);
109     setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
110     setTruncStoreAction(T, MVT::f16, Expand);
111   }
112 
113  // Expand unavailable integer operations.
114   for (auto Op :
115        {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
116         ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
117         ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
118     for (auto T : {MVT::i32, MVT::i64})
119       setOperationAction(Op, T, Expand);
120     if (Subtarget->hasSIMD128())
121       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
122         setOperationAction(Op, T, Expand);
123     if (Subtarget->hasUnimplementedSIMD128())
124       setOperationAction(Op, MVT::v2i64, Expand);
125   }
126 
127   // SIMD-specific configuration
128   if (Subtarget->hasSIMD128()) {
129     // Support saturating add for i8x16 and i16x8
130     for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
131       for (auto T : {MVT::v16i8, MVT::v8i16})
132         setOperationAction(Op, T, Legal);
133 
134     // Custom lower BUILD_VECTORs to minimize number of replace_lanes
135     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
136       setOperationAction(ISD::BUILD_VECTOR, T, Custom);
137     if (Subtarget->hasUnimplementedSIMD128())
138       for (auto T : {MVT::v2i64, MVT::v2f64})
139         setOperationAction(ISD::BUILD_VECTOR, T, Custom);
140 
141     // We have custom shuffle lowering to expose the shuffle mask
142     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
143       setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
144     if (Subtarget->hasUnimplementedSIMD128())
145       for (auto T: {MVT::v2i64, MVT::v2f64})
146         setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
147 
148     // Custom lowering since wasm shifts must have a scalar shift amount
149     for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) {
150       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
151         setOperationAction(Op, T, Custom);
152       if (Subtarget->hasUnimplementedSIMD128())
153         setOperationAction(Op, MVT::v2i64, Custom);
154     }
155 
156     // Custom lower lane accesses to expand out variable indices
157     for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}) {
158       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
159         setOperationAction(Op, T, Custom);
160       if (Subtarget->hasUnimplementedSIMD128())
161         for (auto T : {MVT::v2i64, MVT::v2f64})
162           setOperationAction(Op, T, Custom);
163     }
164 
165     // There is no i64x2.mul instruction
166     setOperationAction(ISD::MUL, MVT::v2i64, Expand);
167 
168     // There are no vector select instructions
169     for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) {
170       for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
171         setOperationAction(Op, T, Expand);
172       if (Subtarget->hasUnimplementedSIMD128())
173         for (auto T : {MVT::v2i64, MVT::v2f64})
174           setOperationAction(Op, T, Expand);
175     }
176 
177     // Expand additional SIMD ops that V8 hasn't implemented yet
178     if (!Subtarget->hasUnimplementedSIMD128()) {
179       setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
180       setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
181     }
182   }
183 
184   // As a special case, these operators use the type to mean the type to
185   // sign-extend from.
186   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
187   if (!Subtarget->hasSignExt()) {
188     // Sign extends are legal only when extending a vector extract
189     auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
190     for (auto T : {MVT::i8, MVT::i16, MVT::i32})
191       setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
192   }
193   for (auto T : MVT::integer_vector_valuetypes())
194     setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
195 
196   // Dynamic stack allocation: use the default expansion.
197   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
198   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
199   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
200 
201   setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
202   setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
203 
204   // Expand these forms; we pattern-match the forms that we can handle in isel.
205   for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
206     for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
207       setOperationAction(Op, T, Expand);
208 
209   // We have custom switch handling.
210   setOperationAction(ISD::BR_JT, MVT::Other, Custom);
211 
212   // WebAssembly doesn't have:
213   //  - Floating-point extending loads.
214   //  - Floating-point truncating stores.
215   //  - i1 extending loads.
216   //  - extending/truncating SIMD loads/stores
217   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
218   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
219   for (auto T : MVT::integer_valuetypes())
220     for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
221       setLoadExtAction(Ext, T, MVT::i1, Promote);
222   if (Subtarget->hasSIMD128()) {
223     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
224                    MVT::v2f64}) {
225       for (auto MemT : MVT::vector_valuetypes()) {
226         if (MVT(T) != MemT) {
227           setTruncStoreAction(T, MemT, Expand);
228           for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
229             setLoadExtAction(Ext, T, MemT, Expand);
230         }
231       }
232     }
233   }
234 
235   // Don't do anything clever with build_pairs
236   setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
237 
238   // Trap lowers to wasm unreachable
239   setOperationAction(ISD::TRAP, MVT::Other, Legal);
240 
241   // Exception handling intrinsics
242   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
243   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
244 
245   setMaxAtomicSizeInBitsSupported(64);
246 
247   if (Subtarget->hasBulkMemory()) {
248     // Use memory.copy and friends over multiple loads and stores
249     MaxStoresPerMemcpy = 1;
250     MaxStoresPerMemcpyOptSize = 1;
251     MaxStoresPerMemmove = 1;
252     MaxStoresPerMemmoveOptSize = 1;
253     MaxStoresPerMemset = 1;
254     MaxStoresPerMemsetOptSize = 1;
255   }
256 }
257 
258 TargetLowering::AtomicExpansionKind
259 WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
260  // We have wasm instructions for these
261  switch (AI->getOperation()) {
262  case AtomicRMWInst::Add:
263  case AtomicRMWInst::Sub:
264  case AtomicRMWInst::And:
265  case AtomicRMWInst::Or:
266  case AtomicRMWInst::Xor:
267   case AtomicRMWInst::Xchg:
268     return AtomicExpansionKind::None;
269   default:
270     break;
271   }
272   return AtomicExpansionKind::CmpXChg;
273 }
274 
275 FastISel *WebAssemblyTargetLowering::createFastISel(
276  FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
277  return WebAssembly::createFastISel(FuncInfo, LibInfo);
278 }
279 
280 bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
281  const GlobalAddressSDNode * /*GA*/) const {
282  // All offsets can be folded.
283  return true;
284 }
285 
286 MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
287  EVT VT) const {
288  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
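// NextPowerOf2(N - 1) rounds the type's bit width N up to a power of two
// (e.g. 24 becomes 32, 32 stays 32), so BitWidth starts as the smallest
// power-of-two width that can hold the shifted type.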
289  if (BitWidth > 1 && BitWidth < 8)
290  BitWidth = 8;
291 
292  if (BitWidth > 64) {
293  // The shift will be lowered to a libcall, and compiler-rt libcalls expect
294  // the count to be an i32.
295  BitWidth = 32;
296  assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
297  "32-bit shift counts ought to be enough for anyone");
298  }
299 
300  MVT Result = MVT::getIntegerVT(BitWidth);
301   assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
302          "Unable to represent scalar shift amount type");
303  return Result;
304 }
305 
306 // Lower an fp-to-int conversion operator from the LLVM opcode, which has an
307 // undefined result on invalid/overflow, to the WebAssembly opcode, which
308 // traps on invalid/overflow.
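// An illustrative sketch of the expansion built below:
//   if (!(fabs(x) < CmpVal [&& x >= 0.0 in the unsigned case]))
//     result = Substitute;   // INT_MIN for signed, 0 for unsigned
//   else
//     result = trunc(x);     // the trapping wasm truncation opcode
// realized as a diamond of machine basic blocks joined by a PHI.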
309 static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
310                                        MachineBasicBlock *BB,
311                                        const TargetInstrInfo &TII,
312                                        bool IsUnsigned, bool Int64,
313                                        bool Float64, unsigned LoweredOpcode) {
314   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
315 
316  unsigned OutReg = MI.getOperand(0).getReg();
317  unsigned InReg = MI.getOperand(1).getReg();
318 
319  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
320  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
321  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
322  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
323  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
324  unsigned Eqz = WebAssembly::EQZ_I32;
325  unsigned And = WebAssembly::AND_I32;
326  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
327  int64_t Substitute = IsUnsigned ? 0 : Limit;
328  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
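// For example, for a signed i32 result CmpVal is 2^31, so the range check is
// fabs(x) < 2^31; for unsigned i32 it is 2^32 combined with x >= 0.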
329   auto &Context = BB->getParent()->getFunction().getContext();
330   Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
331 
332  const BasicBlock *LLVMBB = BB->getBasicBlock();
333  MachineFunction *F = BB->getParent();
334  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
335  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
336  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
337 
338   MachineFunction::iterator It = ++BB->getIterator();
339   F->insert(It, FalseMBB);
340  F->insert(It, TrueMBB);
341  F->insert(It, DoneMBB);
342 
343  // Transfer the remainder of BB and its successor edges to DoneMBB.
344  DoneMBB->splice(DoneMBB->begin(), BB,
345  std::next(MachineBasicBlock::iterator(MI)), BB->end());
346  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
347 
348  BB->addSuccessor(TrueMBB);
349  BB->addSuccessor(FalseMBB);
350  TrueMBB->addSuccessor(DoneMBB);
351  FalseMBB->addSuccessor(DoneMBB);
352 
353  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
354  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
355  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
356  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
357  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
358  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
359  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
360 
361  MI.eraseFromParent();
362  // For signed numbers, we can do a single comparison to determine whether
363  // fabs(x) is within range.
364  if (IsUnsigned) {
365  Tmp0 = InReg;
366  } else {
367  BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
368  }
369  BuildMI(BB, DL, TII.get(FConst), Tmp1)
370  .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
371  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
372 
373  // For unsigned numbers, we have to do a separate comparison with zero.
374  if (IsUnsigned) {
375  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
376  unsigned SecondCmpReg =
377  MRI.createVirtualRegister(&WebAssembly::I32RegClass);
378  unsigned AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
379  BuildMI(BB, DL, TII.get(FConst), Tmp1)
380  .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
381  BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
382  BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
383  CmpReg = AndReg;
384  }
385 
386  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
387 
388  // Create the CFG diamond to select between doing the conversion or using
389  // the substitute value.
390  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
391  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
392  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
393  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
394  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
395  .addReg(FalseReg)
396  .addMBB(FalseMBB)
397  .addReg(TrueReg)
398  .addMBB(TrueMBB);
399 
400  return DoneMBB;
401 }
402 
403 MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
404  MachineInstr &MI, MachineBasicBlock *BB) const {
405  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
406  DebugLoc DL = MI.getDebugLoc();
407 
408  switch (MI.getOpcode()) {
409  default:
410  llvm_unreachable("Unexpected instr type to insert");
411  case WebAssembly::FP_TO_SINT_I32_F32:
412  return LowerFPToInt(MI, DL, BB, TII, false, false, false,
413  WebAssembly::I32_TRUNC_S_F32);
414  case WebAssembly::FP_TO_UINT_I32_F32:
415  return LowerFPToInt(MI, DL, BB, TII, true, false, false,
416  WebAssembly::I32_TRUNC_U_F32);
417  case WebAssembly::FP_TO_SINT_I64_F32:
418  return LowerFPToInt(MI, DL, BB, TII, false, true, false,
419  WebAssembly::I64_TRUNC_S_F32);
420  case WebAssembly::FP_TO_UINT_I64_F32:
421  return LowerFPToInt(MI, DL, BB, TII, true, true, false,
422  WebAssembly::I64_TRUNC_U_F32);
423  case WebAssembly::FP_TO_SINT_I32_F64:
424  return LowerFPToInt(MI, DL, BB, TII, false, false, true,
425  WebAssembly::I32_TRUNC_S_F64);
426  case WebAssembly::FP_TO_UINT_I32_F64:
427  return LowerFPToInt(MI, DL, BB, TII, true, false, true,
428  WebAssembly::I32_TRUNC_U_F64);
429  case WebAssembly::FP_TO_SINT_I64_F64:
430  return LowerFPToInt(MI, DL, BB, TII, false, true, true,
431  WebAssembly::I64_TRUNC_S_F64);
432  case WebAssembly::FP_TO_UINT_I64_F64:
433  return LowerFPToInt(MI, DL, BB, TII, true, true, true,
434  WebAssembly::I64_TRUNC_U_F64);
435  llvm_unreachable("Unexpected instruction to emit with custom inserter");
436  }
437 }
438 
439 const char *
440 WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
441   switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
442   case WebAssemblyISD::FIRST_NUMBER:
443     break;
444 #define HANDLE_NODETYPE(NODE) \
445  case WebAssemblyISD::NODE: \
446  return "WebAssemblyISD::" #NODE;
447 #include "WebAssemblyISD.def"
448 #undef HANDLE_NODETYPE
449  }
450  return nullptr;
451 }
452 
453 std::pair<unsigned, const TargetRegisterClass *>
454 WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
455  const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
456  // First, see if this is a constraint that directly corresponds to a
457  // WebAssembly register class.
458  if (Constraint.size() == 1) {
459  switch (Constraint[0]) {
460  case 'r':
461  assert(VT != MVT::iPTR && "Pointer MVT not expected here");
462  if (Subtarget->hasSIMD128() && VT.isVector()) {
463  if (VT.getSizeInBits() == 128)
464  return std::make_pair(0U, &WebAssembly::V128RegClass);
465  }
466  if (VT.isInteger() && !VT.isVector()) {
467  if (VT.getSizeInBits() <= 32)
468  return std::make_pair(0U, &WebAssembly::I32RegClass);
469  if (VT.getSizeInBits() <= 64)
470  return std::make_pair(0U, &WebAssembly::I64RegClass);
471  }
472  break;
473  default:
474  break;
475  }
476  }
477 
478  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
479 }
480 
481 bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
482  // Assume ctz is a relatively cheap operation.
483  return true;
484 }
485 
486 bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
487  // Assume clz is a relatively cheap operation.
488  return true;
489 }
490 
491 bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
492  const AddrMode &AM,
493  Type *Ty, unsigned AS,
494  Instruction *I) const {
495  // WebAssembly offsets are added as unsigned without wrapping. The
496  // isLegalAddressingMode gives us no way to determine if wrapping could be
497  // happening, so we approximate this by accepting only non-negative offsets.
498  if (AM.BaseOffs < 0)
499  return false;
500 
501  // WebAssembly has no scale register operands.
502  if (AM.Scale != 0)
503  return false;
504 
505  // Everything else is legal.
506  return true;
507 }
508 
509 bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
510  EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, bool *Fast) const {
511  // WebAssembly supports unaligned accesses, though it should be declared
512  // with the p2align attribute on loads and stores which do so, and there
513  // may be a performance impact. We tell LLVM they're "fast" because
514  // for the kinds of things that LLVM uses this for (merging adjacent stores
515  // of constants, etc.), WebAssembly implementations will either want the
516  // unaligned access or they'll split anyway.
517  if (Fast)
518  *Fast = true;
519  return true;
520 }
521 
522 bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
523  AttributeList Attr) const {
524  // The current thinking is that wasm engines will perform this optimization,
525  // so we can save on code size.
526  return true;
527 }
528 
529 EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
530  LLVMContext &C,
531  EVT VT) const {
532   if (VT.isVector())
533     return VT.changeVectorElementTypeToInteger();
534 
535  return TargetLowering::getSetCCResultType(DL, C, VT);
536 }
537 
538 bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
539  const CallInst &I,
540  MachineFunction &MF,
541  unsigned Intrinsic) const {
542  switch (Intrinsic) {
543  case Intrinsic::wasm_atomic_notify:
544     Info.opc = ISD::INTRINSIC_W_CHAIN;
545     Info.memVT = MVT::i32;
546  Info.ptrVal = I.getArgOperand(0);
547  Info.offset = 0;
548  Info.align = 4;
549  // atomic.notify instruction does not really load the memory specified with
550  // this argument, but MachineMemOperand should either be load or store, so
551  // we set this to a load.
552  // FIXME Volatile isn't really correct, but currently all LLVM atomic
553  // instructions are treated as volatiles in the backend, so we should be
554  // consistent. The same applies for wasm_atomic_wait intrinsics too.
555     Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
556     return true;
557  case Intrinsic::wasm_atomic_wait_i32:
558     Info.opc = ISD::INTRINSIC_W_CHAIN;
559     Info.memVT = MVT::i32;
560  Info.ptrVal = I.getArgOperand(0);
561  Info.offset = 0;
562  Info.align = 4;
563     Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
564     return true;
565  case Intrinsic::wasm_atomic_wait_i64:
566     Info.opc = ISD::INTRINSIC_W_CHAIN;
567     Info.memVT = MVT::i64;
568  Info.ptrVal = I.getArgOperand(0);
569  Info.offset = 0;
570  Info.align = 8;
571     Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
572     return true;
573  default:
574  return false;
575  }
576 }
577 
578 //===----------------------------------------------------------------------===//
579 // WebAssembly Lowering private implementation.
580 //===----------------------------------------------------------------------===//
581 
582 //===----------------------------------------------------------------------===//
583 // Lowering Code
584 //===----------------------------------------------------------------------===//
585 
586 static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
587   MachineFunction &MF = DAG.getMachineFunction();
588   DAG.getContext()->diagnose(
589       DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
590 }
591 
592 // Test whether the given calling convention is supported.
593 static bool callingConvSupported(CallingConv::ID CallConv) {
594  // We currently support the language-independent target-independent
595  // conventions. We don't yet have a way to annotate calls with properties like
596  // "cold", and we don't have any call-clobbered registers, so these are mostly
597  // all handled the same.
598  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
599  CallConv == CallingConv::Cold ||
600  CallConv == CallingConv::PreserveMost ||
601  CallConv == CallingConv::PreserveAll ||
602  CallConv == CallingConv::CXX_FAST_TLS;
603 }
604 
605 SDValue
606 WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
607  SmallVectorImpl<SDValue> &InVals) const {
608  SelectionDAG &DAG = CLI.DAG;
609  SDLoc DL = CLI.DL;
610  SDValue Chain = CLI.Chain;
611  SDValue Callee = CLI.Callee;
612   MachineFunction &MF = DAG.getMachineFunction();
613   auto Layout = MF.getDataLayout();
614 
615  CallingConv::ID CallConv = CLI.CallConv;
616  if (!callingConvSupported(CallConv))
617  fail(DL, DAG,
618  "WebAssembly doesn't support language-specific or target-specific "
619  "calling conventions yet");
620  if (CLI.IsPatchPoint)
621  fail(DL, DAG, "WebAssembly doesn't support patch point yet");
622 
623  // WebAssembly doesn't currently support explicit tail calls. If they are
624  // required, fail. Otherwise, just disable them.
625  if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
626        MF.getTarget().Options.GuaranteedTailCallOpt) ||
627       (CLI.CS && CLI.CS.isMustTailCall()))
628  fail(DL, DAG, "WebAssembly doesn't support tail call yet");
629  CLI.IsTailCall = false;
630 
631   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
632   if (Ins.size() > 1)
633  fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");
634 
635  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
636  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
637  unsigned NumFixedArgs = 0;
638  for (unsigned I = 0; I < Outs.size(); ++I) {
639  const ISD::OutputArg &Out = Outs[I];
640  SDValue &OutVal = OutVals[I];
641  if (Out.Flags.isNest())
642  fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
643  if (Out.Flags.isInAlloca())
644  fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
645  if (Out.Flags.isInConsecutiveRegs())
646  fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
647  if (Out.Flags.isInConsecutiveRegsLast())
648  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
649  if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
650  auto &MFI = MF.getFrameInfo();
651  int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
652  Out.Flags.getByValAlign(),
653  /*isSS=*/false);
654  SDValue SizeNode =
655  DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
656  SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
657  Chain = DAG.getMemcpy(
658  Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
659  /*isVolatile*/ false, /*AlwaysInline=*/false,
660  /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
661  OutVal = FINode;
662  }
663  // Count the number of fixed args *after* legalization.
664  NumFixedArgs += Out.IsFixed;
665  }
666 
667  bool IsVarArg = CLI.IsVarArg;
668  auto PtrVT = getPointerTy(Layout);
669 
670  // Analyze operands of the call, assigning locations to each operand.
671   SmallVector<CCValAssign, 16> ArgLocs;
672   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
673 
674  if (IsVarArg) {
675  // Outgoing non-fixed arguments are placed in a buffer. First
676  // compute their offsets and the total amount of buffer space needed.
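// For example (illustrative), passing an i32 and an f64 through the varargs
// area would allocate offsets 0 and 8 here, for 16 bytes of buffer in total.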
677  for (SDValue Arg :
678  make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
679  EVT VT = Arg.getValueType();
680  assert(VT != MVT::iPTR && "Legalized args should be concrete");
681  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
682  unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
683  Layout.getABITypeAlignment(Ty));
684  CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
685  Offset, VT.getSimpleVT(),
686                                         CCValAssign::Full));
687     }
688  }
689 
690  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
691 
692  SDValue FINode;
693  if (IsVarArg && NumBytes) {
694  // For non-fixed arguments, next emit stores to store the argument values
695  // to the stack buffer at the offsets computed above.
696  int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
697  Layout.getStackAlignment(),
698  /*isSS=*/false);
699  unsigned ValNo = 0;
700   SmallVector<SDValue, 8> Chains;
701   for (SDValue Arg :
702  make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
703  assert(ArgLocs[ValNo].getValNo() == ValNo &&
704  "ArgLocs should remain in order and only hold varargs args");
705  unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
706  FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
707  SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
708  DAG.getConstant(Offset, DL, PtrVT));
709  Chains.push_back(
710  DAG.getStore(Chain, DL, Arg, Add,
711  MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
712  }
713  if (!Chains.empty())
714  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
715  } else if (IsVarArg) {
716  FINode = DAG.getIntPtrConstant(0, DL);
717  }
718 
719  // Compute the operands for the CALLn node.
720   SmallVector<SDValue, 16> Ops;
721   Ops.push_back(Chain);
722  Ops.push_back(Callee);
723 
724  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
725  // isn't reliable.
726  Ops.append(OutVals.begin(),
727  IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
728  // Add a pointer to the vararg buffer.
729  if (IsVarArg)
730  Ops.push_back(FINode);
731 
732  SmallVector<EVT, 8> InTys;
733  for (const auto &In : Ins) {
734  assert(!In.Flags.isByVal() && "byval is not valid for return values");
735  assert(!In.Flags.isNest() && "nest is not valid for return values");
736  if (In.Flags.isInAlloca())
737  fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
738  if (In.Flags.isInConsecutiveRegs())
739  fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
740  if (In.Flags.isInConsecutiveRegsLast())
741  fail(DL, DAG,
742  "WebAssembly hasn't implemented cons regs last return values");
743  // Ignore In.getOrigAlign() because all our arguments are passed in
744  // registers.
745  InTys.push_back(In.VT);
746  }
747  InTys.push_back(MVT::Other);
748  SDVTList InTyList = DAG.getVTList(InTys);
749  SDValue Res =
750  DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
751  DL, InTyList, Ops);
752  if (Ins.empty()) {
753  Chain = Res;
754  } else {
755  InVals.push_back(Res);
756  Chain = Res.getValue(1);
757  }
758 
759  return Chain;
760 }
761 
762 bool WebAssemblyTargetLowering::CanLowerReturn(
763  CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
764     const SmallVectorImpl<ISD::OutputArg> &Outs,
765     LLVMContext & /*Context*/) const {
766  // WebAssembly can't currently handle returning tuples.
767  return Outs.size() <= 1;
768 }
769 
770 SDValue WebAssemblyTargetLowering::LowerReturn(
771  SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
772     const SmallVectorImpl<ISD::OutputArg> &Outs,
773     const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
774  SelectionDAG &DAG) const {
775  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
776  if (!callingConvSupported(CallConv))
777  fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
778 
779  SmallVector<SDValue, 4> RetOps(1, Chain);
780  RetOps.append(OutVals.begin(), OutVals.end());
781  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
782 
783  // Record the number and types of the return values.
784  for (const ISD::OutputArg &Out : Outs) {
785  assert(!Out.Flags.isByVal() && "byval is not valid for return values");
786  assert(!Out.Flags.isNest() && "nest is not valid for return values");
787  assert(Out.IsFixed && "non-fixed return value is not valid");
788  if (Out.Flags.isInAlloca())
789  fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
790  if (Out.Flags.isInConsecutiveRegs())
791  fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
792  if (Out.Flags.isInConsecutiveRegsLast())
793  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
794  }
795 
796  return Chain;
797 }
798 
799 SDValue WebAssemblyTargetLowering::LowerFormalArguments(
800  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
801  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
802  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
803  if (!callingConvSupported(CallConv))
804  fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
805 
806   MachineFunction &MF = DAG.getMachineFunction();
807   auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
808 
809  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
810  // of the incoming values before they're represented by virtual registers.
811  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
812 
813  for (const ISD::InputArg &In : Ins) {
814  if (In.Flags.isInAlloca())
815  fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
816  if (In.Flags.isNest())
817  fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
818  if (In.Flags.isInConsecutiveRegs())
819  fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
820  if (In.Flags.isInConsecutiveRegsLast())
821  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
822  // Ignore In.getOrigAlign() because all our arguments are passed in
823  // registers.
824  InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
825  DAG.getTargetConstant(InVals.size(),
826  DL, MVT::i32))
827  : DAG.getUNDEF(In.VT));
828 
829  // Record the number and types of arguments.
830  MFI->addParam(In.VT);
831  }
832 
833  // Varargs are copied into a buffer allocated by the caller, and a pointer to
834  // the buffer is passed as an argument.
835  if (IsVarArg) {
836  MVT PtrVT = getPointerTy(MF.getDataLayout());
837  unsigned VarargVreg =
838       MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
839   MFI->setVarargBufferVreg(VarargVreg);
840  Chain = DAG.getCopyToReg(
841  Chain, DL, VarargVreg,
842  DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
843  DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
844  MFI->addParam(PtrVT);
845  }
846 
847  // Record the number and types of arguments and results.
848  SmallVector<MVT, 4> Params;
849   SmallVector<MVT, 4> Results;
850   computeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
851                       DAG.getTarget(), Params, Results);
852  for (MVT VT : Results)
853  MFI->addResult(VT);
854  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
855  // the param logic here with ComputeSignatureVTs
856  assert(MFI->getParams().size() == Params.size() &&
857  std::equal(MFI->getParams().begin(), MFI->getParams().end(),
858  Params.begin()));
859 
860  return Chain;
861 }
862 
863 //===----------------------------------------------------------------------===//
864 // Custom lowering hooks.
865 //===----------------------------------------------------------------------===//
866 
867 SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
868  SelectionDAG &DAG) const {
869  SDLoc DL(Op);
870  switch (Op.getOpcode()) {
871  default:
872  llvm_unreachable("unimplemented operation lowering");
873  return SDValue();
874  case ISD::FrameIndex:
875  return LowerFrameIndex(Op, DAG);
876  case ISD::GlobalAddress:
877  return LowerGlobalAddress(Op, DAG);
878  case ISD::ExternalSymbol:
879  return LowerExternalSymbol(Op, DAG);
880  case ISD::JumpTable:
881  return LowerJumpTable(Op, DAG);
882  case ISD::BR_JT:
883  return LowerBR_JT(Op, DAG);
884  case ISD::VASTART:
885  return LowerVASTART(Op, DAG);
886  case ISD::BlockAddress:
887  case ISD::BRIND:
888  fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
889  return SDValue();
890  case ISD::RETURNADDR: // Probably nothing meaningful can be returned here.
891  fail(DL, DAG, "WebAssembly hasn't implemented __builtin_return_address");
892  return SDValue();
893  case ISD::FRAMEADDR:
894  return LowerFRAMEADDR(Op, DAG);
895  case ISD::CopyToReg:
896  return LowerCopyToReg(Op, DAG);
897   case ISD::EXTRACT_VECTOR_ELT:
898   case ISD::INSERT_VECTOR_ELT:
899     return LowerAccessVectorElement(Op, DAG);
900   case ISD::INTRINSIC_VOID:
901   case ISD::INTRINSIC_WO_CHAIN:
902   case ISD::INTRINSIC_W_CHAIN:
903     return LowerIntrinsic(Op, DAG);
904   case ISD::SIGN_EXTEND_INREG:
905     return LowerSIGN_EXTEND_INREG(Op, DAG);
906  case ISD::BUILD_VECTOR:
907  return LowerBUILD_VECTOR(Op, DAG);
908  case ISD::VECTOR_SHUFFLE:
909  return LowerVECTOR_SHUFFLE(Op, DAG);
910  case ISD::SHL:
911  case ISD::SRA:
912  case ISD::SRL:
913  return LowerShift(Op, DAG);
914  }
915 }
916 
917 SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
918  SelectionDAG &DAG) const {
919  SDValue Src = Op.getOperand(2);
920  if (isa<FrameIndexSDNode>(Src.getNode())) {
921  // CopyToReg nodes don't support FrameIndex operands. Other targets select
922  // the FI to some LEA-like instruction, but since we don't have that, we
923  // need to insert some kind of instruction that can take an FI operand and
924  // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
925  // local.copy between Op and its FI operand.
926  SDValue Chain = Op.getOperand(0);
927  SDLoc DL(Op);
928  unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
929  EVT VT = Src.getValueType();
930  SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
931  : WebAssembly::COPY_I64,
932  DL, VT, Src),
933  0);
934  return Op.getNode()->getNumValues() == 1
935  ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
936  : DAG.getCopyToReg(Chain, DL, Reg, Copy,
937  Op.getNumOperands() == 4 ? Op.getOperand(3)
938  : SDValue());
939  }
940  return SDValue();
941 }
942 
943 SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
944  SelectionDAG &DAG) const {
945  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
946  return DAG.getTargetFrameIndex(FI, Op.getValueType());
947 }
948 
949 SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
950  SelectionDAG &DAG) const {
951  // Non-zero depths are not supported by WebAssembly currently. Use the
952  // legalizer's default expansion, which is to return 0 (what this function is
953  // documented to do).
954  if (Op.getConstantOperandVal(0) > 0)
955  return SDValue();
956 
957   DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
958   EVT VT = Op.getValueType();
959   unsigned FP =
960       Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
961   return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
962 }
963 
964 SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
965  SelectionDAG &DAG) const {
966  SDLoc DL(Op);
967  const auto *GA = cast<GlobalAddressSDNode>(Op);
968  EVT VT = Op.getValueType();
969  assert(GA->getTargetFlags() == 0 &&
970  "Unexpected target flags on generic GlobalAddressSDNode");
971  if (GA->getAddressSpace() != 0)
972  fail(DL, DAG, "WebAssembly only expects the 0 address space");
973  return DAG.getNode(
974  WebAssemblyISD::Wrapper, DL, VT,
975  DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset()));
976 }
977 
978 SDValue
979 WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
980  SelectionDAG &DAG) const {
981  SDLoc DL(Op);
982  const auto *ES = cast<ExternalSymbolSDNode>(Op);
983  EVT VT = Op.getValueType();
984  assert(ES->getTargetFlags() == 0 &&
985  "Unexpected target flags on generic ExternalSymbolSDNode");
986  // Set the TargetFlags to 0x1 which indicates that this is a "function"
987  // symbol rather than a data symbol. We do this unconditionally even though
988  // we don't know anything about the symbol other than its name, because all
989  // external symbols used in target-independent SelectionDAG code are for
990  // functions.
991  return DAG.getNode(
992  WebAssemblyISD::Wrapper, DL, VT,
993  DAG.getTargetExternalSymbol(ES->getSymbol(), VT,
994                                   WebAssemblyII::MO_SYMBOL_FUNCTION));
995 }
996 
997 SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
998  SelectionDAG &DAG) const {
999  // There's no need for a Wrapper node because we always incorporate a jump
1000  // table operand into a BR_TABLE instruction, rather than ever
1001  // materializing it in a register.
1002  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1003  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1004  JT->getTargetFlags());
1005 }
1006 
1007 SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1008  SelectionDAG &DAG) const {
1009  SDLoc DL(Op);
1010  SDValue Chain = Op.getOperand(0);
1011  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1012  SDValue Index = Op.getOperand(2);
1013  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1014 
1015   SmallVector<SDValue, 8> Ops;
1016   Ops.push_back(Chain);
1017  Ops.push_back(Index);
1018 
1019   const MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1020   const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1021 
1022  // Add an operand for each case.
1023  for (auto MBB : MBBs)
1024  Ops.push_back(DAG.getBasicBlock(MBB));
1025 
1026  // TODO: For now, we just pick something arbitrary for a default case for now.
1027  // We really want to sniff out the guard and put in the real default case (and
1028  // delete the guard).
1029  Ops.push_back(DAG.getBasicBlock(MBBs[0]));
1030 
1031  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1032 }
1033 
1034 SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1035  SelectionDAG &DAG) const {
1036  SDLoc DL(Op);
1037   EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1038 
1039  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1040  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1041 
1042  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1043  MFI->getVarargBufferVreg(), PtrVT);
1044  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1045  MachinePointerInfo(SV), 0);
1046 }
1047 
1048 SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1049  SelectionDAG &DAG) const {
1050  MachineFunction &MF = DAG.getMachineFunction();
1051  unsigned IntNo;
1052  switch (Op.getOpcode()) {
1053  case ISD::INTRINSIC_VOID:
1054   case ISD::INTRINSIC_W_CHAIN:
1055     IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1056     break;
1057   case ISD::INTRINSIC_WO_CHAIN:
1058     IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1059  break;
1060  default:
1061  llvm_unreachable("Invalid intrinsic");
1062  }
1063  SDLoc DL(Op);
1064 
1065  switch (IntNo) {
1066  default:
1067  return SDValue(); // Don't custom lower most intrinsics.
1068 
1069  case Intrinsic::wasm_lsda: {
1070  EVT VT = Op.getValueType();
1071  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1072  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1073  auto &Context = MF.getMMI().getContext();
1074  MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1075  Twine(MF.getFunctionNumber()));
1076  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1077  DAG.getMCSymbol(S, PtrVT));
1078  }
1079 
1080  case Intrinsic::wasm_throw: {
1081  // We only support C++ exceptions for now
1082  int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1083  if (Tag != CPP_EXCEPTION)
1084  llvm_unreachable("Invalid tag!");
1085  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1086  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1087  const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1088  SDValue SymNode =
1089  DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1090                     DAG.getTargetExternalSymbol(
1091                         SymName, PtrVT, WebAssemblyII::MO_SYMBOL_EVENT));
1092  return DAG.getNode(WebAssemblyISD::THROW, DL,
1093  MVT::Other, // outchain type
1094  {
1095  Op.getOperand(0), // inchain
1096  SymNode, // exception symbol
1097  Op.getOperand(3) // thrown value
1098  });
1099  }
1100  }
1101 }
1102 
1103 SDValue
1104 WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1105  SelectionDAG &DAG) const {
1106  // If sign extension operations are disabled, allow sext_inreg only if operand
1107  // is a vector extract. SIMD does not depend on sign extension operations, but
1108  // allowing sext_inreg in this context lets us have simple patterns to select
1109  // extract_lane_s instructions. Expanding sext_inreg everywhere would be
1110  // simpler in this file, but would necessitate large and brittle patterns to
1111  // undo the expansion and select extract_lane_s instructions.
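// For example, (sext_inreg (extract_vector_elt v16i8 $v, $i), i8) can then be
// selected as a single i8x16.extract_lane_s.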
1112  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
1113   if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT)
1114     return Op;
1115  // Otherwise expand
1116  return SDValue();
1117 }
1118 
1119 SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1120  SelectionDAG &DAG) const {
1121  SDLoc DL(Op);
1122  const EVT VecT = Op.getValueType();
1123  const EVT LaneT = Op.getOperand(0).getValueType();
1124  const size_t Lanes = Op.getNumOperands();
1125  auto IsConstant = [](const SDValue &V) {
1126  return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1127  };
1128 
1129  // Find the most common operand, which is approximately the best to splat
1130  using Entry = std::pair<SDValue, size_t>;
1131  SmallVector<Entry, 16> ValueCounts;
1132  size_t NumConst = 0, NumDynamic = 0;
1133  for (const SDValue &Lane : Op->op_values()) {
1134  if (Lane.isUndef()) {
1135  continue;
1136  } else if (IsConstant(Lane)) {
1137  NumConst++;
1138  } else {
1139  NumDynamic++;
1140  }
1141  auto CountIt = std::find_if(ValueCounts.begin(), ValueCounts.end(),
1142  [&Lane](Entry A) { return A.first == Lane; });
1143  if (CountIt == ValueCounts.end()) {
1144  ValueCounts.emplace_back(Lane, 1);
1145  } else {
1146  CountIt->second++;
1147  }
1148  }
1149  auto CommonIt =
1150  std::max_element(ValueCounts.begin(), ValueCounts.end(),
1151  [](Entry A, Entry B) { return A.second < B.second; });
1152  assert(CommonIt != ValueCounts.end() && "Unexpected all-undef build_vector");
1153  SDValue SplatValue = CommonIt->first;
1154  size_t NumCommon = CommonIt->second;
1155 
1156  // If v128.const is available, consider using it instead of a splat
1157  if (Subtarget->hasUnimplementedSIMD128()) {
1158  // {i32,i64,f32,f64}.const opcode, and value
1159  const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes);
1160  // SIMD prefix and opcode
1161  const size_t SplatBytes = 2;
1162  const size_t SplatConstBytes = SplatBytes + ConstBytes;
1163  // SIMD prefix, opcode, and lane index
1164  const size_t ReplaceBytes = 3;
1165  const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes;
1166  // SIMD prefix, v128.const opcode, and 128-bit value
1167  const size_t VecConstBytes = 18;
1168  // Initial v128.const and a replace_lane for each non-const operand
1169  const size_t ConstInitBytes = VecConstBytes + NumDynamic * ReplaceBytes;
1170  // Initial splat and all necessary replace_lanes
1171  const size_t SplatInitBytes =
1172  IsConstant(SplatValue)
1173  // Initial constant splat
1174  ? (SplatConstBytes +
1175  // Constant replace_lanes
1176  (NumConst - NumCommon) * ReplaceConstBytes +
1177  // Dynamic replace_lanes
1178  (NumDynamic * ReplaceBytes))
1179  // Initial dynamic splat
1180  : (SplatBytes +
1181  // Constant replace_lanes
1182  (NumConst * ReplaceConstBytes) +
1183  // Dynamic replace_lanes
1184  (NumDynamic - NumCommon) * ReplaceBytes);
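// For example (illustrative), a v4i32 with four distinct constant lanes costs
// ConstInitBytes = 18, while a splat plus three constant replace_lanes costs
// 7 + 3 * 8 = 31, so v128.const wins; with one repeated constant and a single
// dynamic lane the splat form is smaller instead.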
1185  if (ConstInitBytes < SplatInitBytes) {
1186  // Create build_vector that will lower to initial v128.const
1187  SmallVector<SDValue, 16> ConstLanes;
1188  for (const SDValue &Lane : Op->op_values()) {
1189  if (IsConstant(Lane)) {
1190  ConstLanes.push_back(Lane);
1191  } else if (LaneT.isFloatingPoint()) {
1192  ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1193  } else {
1194  ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1195  }
1196  }
1197  SDValue Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1198  // Add replace_lane instructions for non-const lanes
1199  for (size_t I = 0; I < Lanes; ++I) {
1200  const SDValue &Lane = Op->getOperand(I);
1201  if (!Lane.isUndef() && !IsConstant(Lane))
1202  Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1203  DAG.getConstant(I, DL, MVT::i32));
1204  }
1205  return Result;
1206  }
1207  }
1208  // Use a splat for the initial vector
1209  SDValue Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
1210  // Add replace_lane instructions for other values
1211  for (size_t I = 0; I < Lanes; ++I) {
1212  const SDValue &Lane = Op->getOperand(I);
1213  if (Lane != SplatValue)
1214  Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1215  DAG.getConstant(I, DL, MVT::i32));
1216  }
1217  return Result;
1218 }
1219 
1220 SDValue
1221 WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1222  SelectionDAG &DAG) const {
1223  SDLoc DL(Op);
1224  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1225   MVT VecType = Op.getOperand(0).getSimpleValueType();
1226   assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
1227  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1228 
1229  // Space for two vector args and sixteen mask indices
1230  SDValue Ops[18];
1231  size_t OpIdx = 0;
1232  Ops[OpIdx++] = Op.getOperand(0);
1233  Ops[OpIdx++] = Op.getOperand(1);
1234 
1235  // Expand mask indices to byte indices and materialize them as operands
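// For example, with v4i32 inputs (LaneBytes == 4) a mask entry of 2 expands to
// the byte indices 8, 9, 10, 11.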
1236  for (int M : Mask) {
1237  for (size_t J = 0; J < LaneBytes; ++J) {
1238  // Lower undefs (represented by -1 in mask) to zero
1239  uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1240  Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1241  }
1242  }
1243 
1244  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1245 }
1246 
1247 SDValue
1248 WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1249  SelectionDAG &DAG) const {
1250  // Allow constant lane indices, expand variable lane indices
1251  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1252  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1253  return Op;
1254  else
1255  // Perform default expansion
1256  return SDValue();
1257 }
1258 
1259 static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1260   EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1261   // 32-bit and 64-bit unrolled shifts will have proper semantics
1262  if (LaneT.bitsGE(MVT::i32))
1263  return DAG.UnrollVectorOp(Op.getNode());
1264  // Otherwise mask the shift value to get proper semantics from 32-bit shift
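// For example, an i8x16 shift by 10 must behave like a shift by 10 & 7 == 2,
// matching wasm's modulo-lane-width shift semantics.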
1265  SDLoc DL(Op);
1266  SDValue ShiftVal = Op.getOperand(1);
1267  uint64_t MaskVal = LaneT.getSizeInBits() - 1;
1268  SDValue MaskedShiftVal = DAG.getNode(
1269  ISD::AND, // mask opcode
1270  DL, ShiftVal.getValueType(), // masked value type
1271  ShiftVal, // original shift value operand
1272  DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
1273  );
1274 
1275  return DAG.UnrollVectorOp(
1276  DAG.getNode(Op.getOpcode(), // original shift opcode
1277  DL, Op.getValueType(), // original return type
1278  Op.getOperand(0), // original vector operand,
1279  MaskedShiftVal // new masked shift value operand
1280  )
1281  .getNode());
1282 }
1283 
1284 SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
1285  SelectionDAG &DAG) const {
1286  SDLoc DL(Op);
1287 
1288  // Only manually lower vector shifts
1289   assert(Op.getSimpleValueType().isVector());
1290 
1291  // Expand all vector shifts until V8 fixes its implementation
1292  // TODO: remove this once V8 is fixed
1293  if (!Subtarget->hasUnimplementedSIMD128())
1294  return unrollVectorShift(Op, DAG);
1295 
1296  // Unroll non-splat vector shifts
1297  BuildVectorSDNode *ShiftVec;
1298  SDValue SplatVal;
1299  if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
1300  !(SplatVal = ShiftVec->getSplatValue()))
1301  return unrollVectorShift(Op, DAG);
1302 
1303  // All splats except i64x2 const splats are handled by patterns
1304  auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
1305  if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
1306  return Op;
1307 
1308  // i64x2 const splats are custom lowered to avoid unnecessary wraps
1309  unsigned Opcode;
1310  switch (Op.getOpcode()) {
1311  case ISD::SHL:
1312  Opcode = WebAssemblyISD::VEC_SHL;
1313  break;
1314  case ISD::SRA:
1315  Opcode = WebAssemblyISD::VEC_SHR_S;
1316  break;
1317  case ISD::SRL:
1318  Opcode = WebAssemblyISD::VEC_SHR_U;
1319  break;
1320  default:
1321  llvm_unreachable("unexpected opcode");
1322  }
1323  APInt Shift = SplatConst->getAPIntValue().zextOrTrunc(32);
1324  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
1325  DAG.getConstant(Shift, DL, MVT::i32));
1326 }
1327 
1328 //===----------------------------------------------------------------------===//
1329 // WebAssembly Optimization Hooks
1330 //===----------------------------------------------------------------------===//
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
void setFrameAddressIsTaken(bool T)
uint64_t CallInst * C
static MVT getIntegerVT(unsigned BitWidth)
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:551
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG...
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
static Type * getDoubleTy(LLVMContext &C)
Definition: Type.cpp:164
bool isUndef() const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
LLVMContext & Context
Diagnostic information for unsupported feature in backend.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:651
This class represents lattice values for constants.
Definition: AllocatorList.h:23
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0...
Definition: ISDOpcodes.h:606
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:366
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
bool isVector() const
Return true if this is a vector value type.
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:222
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:382
unsigned getReg() const
getReg - Returns the register number.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:703
This class represents a function call, abstracting a target machine&#39;s calling convention.
unsigned Reg
void setHasFloatingPointExceptions(bool FPExceptions=true)
Tells the code generator that this target supports floating point exceptions and cares about preservi...
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:252
SDValue getBasicBlock(MachineBasicBlock *MBB)
Function Alias Analysis Results
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
F(f)
MachineModuleInfo & getMMI() const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:875
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:691
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void computeSignatureVTs(const FunctionType *Ty, const Function &F, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:434
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:158
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:209
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1155
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:135
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:455
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:80
const HexagonInstrInfo * TII
static Type * getFloatTy(LLVMContext &C)
Definition: Type.cpp:163
#define INT64_MIN
Definition: DataTypes.h:80
Shift and rotation operations.
Definition: ISDOpcodes.h:409
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:201
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BinOp getOperation() const
Definition: Instructions.h:750
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:190
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
CopyToReg - This node has three operands: a chain, a register number to set to this value...
Definition: ISDOpcodes.h:169
uint64_t getConstantOperandVal(unsigned i) const
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn&#39;t supported on the target and indicate what to d...
bool isInConsecutiveRegs() const
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:408
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:459
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:400
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:72
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
This represents a list of ValueType&#39;s that has been intern&#39;d by a SelectionDAG.
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:699
unsigned getSizeInBits() const
This is a fast-path instruction selection class that generates poor code and doesn&#39;t support illegal ...
Definition: FastISel.h:66
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:291
This file declares the WebAssembly-specific subclass of TargetMachine.
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:397
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:627
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:401
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:428
const MCContext & getContext() const
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
LLVM_NODISCARD size_t size() const
size - Get the string size.
Definition: StringRef.h:130
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
Definition: SelectionDAG.h:851
static mvt_range integer_vector_valuetypes()
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:575
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements. ...
Definition: SelectionDAG.h:750
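A minimal sketch of using this helper, assuming a SelectionDAG DAG and an SDLoc DL are in scope inside a lowering routine:

// Build a v4i32 vector with every lane set to the same 32-bit constant.
SDValue Lane = DAG.getConstant(1, DL, MVT::i32);
SDValue Splat = DAG.getSplatBuildVector(MVT::v4i32, DL, Lane);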
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:150
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
MVT getVectorElementType() const
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
unsigned getByValSize() const
bool IsFixed
IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:326
TargetInstrInfo - Interface to description of machine instruction set.
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
bool is128BitVector() const
Return true if this is a 128-bit vector type.
The memory access is volatile.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
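A minimal sketch of the builder pattern, assuming a MachineBasicBlock *BB, an insertion iterator InsertPt, a DebugLoc DL, the target's TargetInstrInfo TII, and its MachineRegisterInfo MRI are in scope; this uses one of the MachineBasicBlock-based BuildMI overloads rather than the MachineFunction one listed above, and the opcode choice is only illustrative.

// Create a fresh virtual register and emit an i32 constant definition into
// BB at InsertPt, chaining operand adders off the returned builder.
unsigned Reg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
BuildMI(*BB, InsertPt, DL, TII.get(WebAssembly::CONST_I32), Reg).addImm(0);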
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Definition: ValueTypes.h:234
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:165
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:630
const MachineRegisterInfo * MRI
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
Definition: ISDOpcodes.h:732
iterator_range< value_op_iterator > op_values() const
const SDValue & getOperand(unsigned Num) const
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
Definition: ISDOpcodes.h:331
SDValue getSplatValue(BitVector *UndefElements=nullptr) const
Returns the splatted value or a null value if this is not a splat.
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:231
This file provides WebAssembly-specific target descriptions.
unsigned char getTargetFlags() const
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:771
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
static mvt_range vector_valuetypes()
self_iterator getIterator()
Definition: ilist_node.h:81
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:723
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1213
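A minimal, self-contained sketch of the range form; the container and predicate are made up for illustration:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

static bool hasWideElement(const llvm::SmallVectorImpl<unsigned> &Sizes) {
  // Search the whole range without spelling out begin()/end().
  return llvm::find_if(Sizes, [](unsigned S) { return S > 64; }) != Sizes.end();
}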
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:192
static unsigned NumFixedArgs
Extended Value Type.
Definition: ValueTypes.h:33
uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition: MathExtras.h:639
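Because the result is strictly greater than the argument, a value that is already a power of two is bumped to the next one; a few illustrative values:

// NextPowerOf2(0) == 1, NextPowerOf2(3) == 4, NextPowerOf2(4) == 8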
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
size_t size() const
Definition: SmallVector.h:52
This class contains a discriminated union of information about pointers in memory operands...
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition: ISDOpcodes.h:264
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:632
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:49
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
This file declares the WebAssembly-specific subclass of TargetSubtarget.
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:403
Iterator for intrusive lists based on ilist_node.
CCState - This class holds information needed while lowering arguments and return values...
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:338
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:839
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:733
Provides information about what library functions are available for the current target.
const DebugLoc & getDebugLoc() const
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
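A minimal sketch of a call matching this signature, assuming Chain, DL, Dst, Src, and SizeNode SDValues are in scope inside a lowering routine; the alignment and pointer-info arguments are placeholder choices:

// Emit a non-volatile, non-tail-call memcpy in the DAG.
Chain = DAG.getMemcpy(Chain, DL, Dst, Src, SizeNode,
                      /*Align=*/4, /*isVol=*/false, /*AlwaysInline=*/false,
                      /*isTailCall=*/false, MachinePointerInfo(),
                      MachinePointerInfo());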
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:95
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:412
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:580
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:684
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:678
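A minimal sketch pairing this with getCopyFromReg (listed further below), assuming DAG, DL, Chain, a MachineFunction MF, and an SDValue Val are in scope; the register class is an illustrative choice:

// Stash Val in a fresh virtual register, then read it back as an i32.
unsigned VReg =
    MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
Chain = DAG.getCopyToReg(Chain, DL, VReg, Val);
SDValue Reloaded = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i32);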
const Function & getFunction() const
Return the LLVM function that this machine code represents.
static mvt_range integer_valuetypes()
unsigned getFrameRegister(const MachineFunction &MF) const override
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:163
Class for arbitrary precision integers.
Definition: APInt.h:69
unsigned getByValAlign() const
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:419
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:386
const WebAssemblyRegisterInfo * getRegisterInfo() const override
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
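A minimal sketch combining this with getFrameIndex and getLoad, assuming DAG, DL, Chain, a MachineFunction MF, a frame index FI, and a pointer type PtrVT are in scope:

// Describe a load from a stack slot so alias analysis knows which
// FrameIndex it touches.
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Slot = DAG.getLoad(MVT::i32, DL, Chain, FIN,
                           MachinePointerInfo::getFixedStack(MF, FI));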
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:639
Representation of each machine instruction.
Definition: MachineInstr.h:63
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:728
This class is derived from MachineFunctionInfo and contains private WebAssembly-specific information ...
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:150
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:386
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:704
void emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:644
static bool callingConvSupported(CallingConv::ID CallConv)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:205
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:485
bool isInConsecutiveRegsLast() const
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:44
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
TargetOptions Options
Definition: TargetMachine.h:96
#define I(x, y, z)
Definition: MD5.cpp:58
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
This file declares WebAssembly-specific per-machine-function information.
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:322
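A minimal sketch of the usual pattern on a DAG operand, assuming an SDValue Op is in scope:

// dyn_cast returns a typed pointer when the node is a ConstantSDNode and
// nullptr otherwise, so the branch doubles as the type test.
if (auto *C = dyn_cast<ConstantSDNode>(Op.getNode())) {
  uint64_t Imm = C->getZExtValue();
  (void)Imm; // use the immediate value here
}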
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
static CCValAssign getMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
unsigned getOpcode() const
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:609
SDValue getValue(unsigned R) const
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LLVM Value Representation.
Definition: Value.h:72
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:301
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
bool isUndef() const
Return true if the node is an UNDEF value.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:58
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for a call to memset, used for functions with OptSize attribute.
const WebAssemblyInstrInfo * getInstrInfo() const override
unsigned getNumOperands() const
const SDValue & getOperand(unsigned i) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg)
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:413
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
LLVMContext * getContext() const
Definition: SelectionDAG.h:406
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:621
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:379
BRIND - Indirect branch.
Definition: ISDOpcodes.h:635
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Definition: ISDOpcodes.h:624