WebAssemblyISelLowering.cpp
1 //=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the WebAssemblyTargetLowering class.
11 ///
12 //===----------------------------------------------------------------------===//
13 
17 #include "WebAssemblySubtarget.h"
19 #include "llvm/CodeGen/Analysis.h"
27 #include "llvm/IR/DiagnosticInfo.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/Intrinsics.h"
31 #include "llvm/IR/IntrinsicsWebAssembly.h"
32 #include "llvm/Support/Debug.h"
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "wasm-lower"
39 
40 WebAssemblyTargetLowering::WebAssemblyTargetLowering(
41  const TargetMachine &TM, const WebAssemblySubtarget &STI)
42  : TargetLowering(TM), Subtarget(&STI) {
43  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
44 
45  // Booleans always contain 0 or 1.
46  setBooleanContents(ZeroOrOneBooleanContent);
47  // Except in SIMD vectors
48  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
49  // We don't know the microarchitecture here, so just reduce register pressure.
50  setSchedulingPreference(Sched::RegPressure);
51  // Tell ISel that we have a stack pointer.
52  setStackPointerRegisterToSaveRestore(
53  Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
54  // Set up the register classes.
55  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
56  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
57  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
58  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
59  if (Subtarget->hasSIMD128()) {
60  addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
61  addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
62  addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
63  addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
64  addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
65  addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
66  }
67  // Compute derived properties from the register classes.
69 
75 
76  // Take the default expansion for va_arg, va_copy, and va_end. There is no
77  // default action for va_start, so we do that custom.
82 
83  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
84  // Don't expand the floating-point types to constant pools.
86  // Expand floating-point comparisons.
87  for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
90  // Expand floating-point library function operators.
91  for (auto Op :
94  // Note supported floating-point library function operators that otherwise
95  // default to expand.
96  for (auto Op :
99  // Support minimum and maximum, which otherwise default to expand.
102  // WebAssembly currently has no builtin f16 support.
107  }
108 
109  // Expand unavailable integer operations.
110  for (auto Op :
114  for (auto T : {MVT::i32, MVT::i64})
116  if (Subtarget->hasSIMD128())
117  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
119  }
120 
121  // SIMD-specific configuration
122  if (Subtarget->hasSIMD128()) {
123  // Hoist bitcasts out of shuffles
125 
126  // Support saturating add for i8x16 and i16x8
127  for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
128  for (auto T : {MVT::v16i8, MVT::v8i16})
130 
131  // Support integer abs
132  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
134 
135  // Custom lower BUILD_VECTORs to minimize number of replace_lanes
137  MVT::v2f64})
139 
140  // We have custom shuffle lowering to expose the shuffle mask
142  MVT::v2f64})
144 
145  // Custom lowering since wasm shifts must have a scalar shift amount
146  for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
147  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
149 
150  // Custom lower lane accesses to expand out variable indices
153  MVT::v2f64})
155 
156  // There is no i8x16.mul instruction
158 
159  // There are no vector select instructions
160  for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT})
162  MVT::v2f64})
164 
165  // Expand integer operations supported for scalars but not SIMD
168  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
170 
171  // But we do have integer min and max operations
172  for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
173  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
175 
176  // Expand float operations supported for scalars but not SIMD
180  for (auto T : {MVT::v4f32, MVT::v2f64})
182 
183  // Expand operations not supported for i64x2 vectors
184  for (unsigned CC = 0; CC < ISD::SETCC_INVALID; ++CC)
185  setCondCodeAction(static_cast<ISD::CondCode>(CC), MVT::v2i64, Custom);
186 
187  // 64x2 conversions are not in the spec
188  for (auto Op :
190  for (auto T : {MVT::v2i64, MVT::v2f64})
192  }
193 
194  // As a special case, these operators use the type to mean the type to
195  // sign-extend from.
197  if (!Subtarget->hasSignExt()) {
198  // Sign extends are legal only when extending a vector extract
199  auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
200  for (auto T : {MVT::i8, MVT::i16, MVT::i32})
202  }
205 
206  // Dynamic stack allocation: use the default expansion.
210 
214 
215  // Expand these forms; we pattern-match the forms that we can handle in isel.
216  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
217  for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
219 
220  // We have custom switch handling.
222 
223  // WebAssembly doesn't have:
224  // - Floating-point extending loads.
225  // - Floating-point truncating stores.
226  // - i1 extending loads.
227  // - truncating SIMD stores and most extending loads
230  for (auto T : MVT::integer_valuetypes())
233  if (Subtarget->hasSIMD128()) {
235  MVT::v2f64}) {
236  for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
237  if (MVT(T) != MemT) {
238  setTruncStoreAction(T, MemT, Expand);
240  setLoadExtAction(Ext, T, MemT, Expand);
241  }
242  }
243  }
244  // But some vector extending loads are legal
245  for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
249  }
250  }
251 
252  // Don't do anything clever with build_pairs
254 
255  // Trap lowers to wasm unreachable
258 
259  // Exception handling intrinsics
262 
264 
265  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
266  // consistent with the f64 and f128 names.
267  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
268  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
269 
270  // Define the emscripten name for return address helper.
271  // TODO: when implementing other WASM backends, make this generic or only do
272  // this on emscripten depending on what they end up doing.
273  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
274 
275  // Always convert switches to br_tables unless there is only one case, which
276  // is equivalent to a simple branch. This reduces code size for wasm, and we
277  // defer possible jump table optimizations to the VM.
278  setMinimumJumpTableEntries(2);
279 }
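// The calls throughout this constructor only populate TargetLowering's
// per-opcode action tables; the actual work happens later, when SelectionDAG
// legalization consults those tables. A minimal sketch of that hand-off (the
// DAG, Node, and Result names are assumptions for illustration, not code from
// this file):
//
//   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
//   if (TLI.getOperationAction(ISD::SHL, MVT::v4i32) == TargetLowering::Custom)
//     Result = TLI.LowerOperation(SDValue(Node, 0), DAG); // reaches LowerShift()
//
// Legal nodes are selected directly, while Expand nodes are rewritten by the
// legalizer into operations the target does support.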
280 
281 TargetLowering::AtomicExpansionKind
282 WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
283  // We have wasm instructions for these
284  switch (AI->getOperation()) {
285  case AtomicRMWInst::Add:
286  case AtomicRMWInst::Sub:
287  case AtomicRMWInst::And:
288  case AtomicRMWInst::Or:
289  case AtomicRMWInst::Xor:
290  case AtomicRMWInst::Xchg:
291  return AtomicExpansionKind::None;
292  default:
293  break;
294  }
295  return AtomicExpansionKind::CmpXChg;
296 }
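// Every other read-modify-write operation (nand, min/max, fadd, ...) is
// rewritten by the AtomicExpand pass into a compare-exchange loop built from
// the cmpxchg instructions wasm does provide. A stand-alone sketch of that
// shape in plain C++ (illustration only, assuming <atomic> is available; this
// helper is not used by the backend):
static inline int exampleExpandedAtomicNand(std::atomic<int> &Obj, int Val) {
  int Old = Obj.load();
  // compare_exchange_weak reloads Old on failure, so each retry recomputes
  // the update from the freshly observed value until the exchange succeeds.
  while (!Obj.compare_exchange_weak(Old, ~(Old & Val)))
    ;
  return Old; // atomicrmw yields the value previously in memory
}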
297 
298 FastISel *WebAssemblyTargetLowering::createFastISel(
299  FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
300  return WebAssembly::createFastISel(FuncInfo, LibInfo);
301 }
302 
303 MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
304  EVT VT) const {
305  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
306  if (BitWidth > 1 && BitWidth < 8)
307  BitWidth = 8;
308 
309  if (BitWidth > 64) {
310  // The shift will be lowered to a libcall, and compiler-rt libcalls expect
311  // the count to be an i32.
312  BitWidth = 32;
313  assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
314  "32-bit shift counts ought to be enough for anyone");
315  }
316 
317  MVT Result = MVT::getIntegerVT(BitWidth);
319  "Unable to represent scalar shift amount type");
320  return Result;
321 }
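// A stand-alone sketch of the width computation above (illustration only; the
// backend never calls this helper): the shift-amount type is the value width
// rounded up to a power of two, and drops back to an i32 count once the shift
// becomes a libcall. For example, scalarShiftAmountBits(8) == 8,
// scalarShiftAmountBits(48) == 64, and scalarShiftAmountBits(128) == 32.
static inline unsigned scalarShiftAmountBits(unsigned ValueBits) {
  unsigned Bits = NextPowerOf2(ValueBits - 1);
  if (Bits > 1 && Bits < 8)
    Bits = 8;
  return Bits > 64 ? 32 : Bits;
}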
322 
323 // Lower an fp-to-int conversion operator from the LLVM opcode, which has an
324 // undefined result on invalid/overflow, to the WebAssembly opcode, which
325 // traps on invalid/overflow.
326 static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
327  MachineBasicBlock *BB,
328  const TargetInstrInfo &TII,
329  bool IsUnsigned, bool Int64,
330  bool Float64, unsigned LoweredOpcode) {
331  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
332 
333  Register OutReg = MI.getOperand(0).getReg();
334  Register InReg = MI.getOperand(1).getReg();
335 
336  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
337  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
338  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
339  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
340  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
341  unsigned Eqz = WebAssembly::EQZ_I32;
342  unsigned And = WebAssembly::AND_I32;
343  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
344  int64_t Substitute = IsUnsigned ? 0 : Limit;
345  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
346  auto &Context = BB->getParent()->getFunction().getContext();
347  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
348 
349  const BasicBlock *LLVMBB = BB->getBasicBlock();
350  MachineFunction *F = BB->getParent();
351  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
352  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
353  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
354 
355  MachineFunction::iterator It = ++BB->getIterator();
356  F->insert(It, FalseMBB);
357  F->insert(It, TrueMBB);
358  F->insert(It, DoneMBB);
359 
360  // Transfer the remainder of BB and its successor edges to DoneMBB.
361  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
362  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
363 
364  BB->addSuccessor(TrueMBB);
365  BB->addSuccessor(FalseMBB);
366  TrueMBB->addSuccessor(DoneMBB);
367  FalseMBB->addSuccessor(DoneMBB);
368 
369  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
370  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
371  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
372  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
373  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
374  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
375  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
376 
377  MI.eraseFromParent();
378  // For signed numbers, we can do a single comparison to determine whether
379  // fabs(x) is within range.
380  if (IsUnsigned) {
381  Tmp0 = InReg;
382  } else {
383  BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
384  }
385  BuildMI(BB, DL, TII.get(FConst), Tmp1)
386  .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
387  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
388 
389  // For unsigned numbers, we have to do a separate comparison with zero.
390  if (IsUnsigned) {
391  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
392  Register SecondCmpReg =
393  MRI.createVirtualRegister(&WebAssembly::I32RegClass);
394  Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
395  BuildMI(BB, DL, TII.get(FConst), Tmp1)
396  .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
397  BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
398  BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
399  CmpReg = AndReg;
400  }
401 
402  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
403 
404  // Create the CFG diamond to select between doing the conversion or using
405  // the substitute value.
406  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
407  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
408  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
409  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
410  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
411  .addReg(FalseReg)
412  .addMBB(FalseMBB)
413  .addReg(TrueReg)
414  .addMBB(TrueMBB);
415 
416  return DoneMBB;
417 }
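// In scalar terms, the diamond built above computes the following. This is a
// sketch for the signed f32->i32 case only; the real code operates on virtual
// registers, and this helper is purely illustrative:
static inline int exampleSignedF32ToI32(float In) {
  double Abs = In < 0 ? -(double)In : (double)In; // absolute value (NaN propagates)
  if (!(Abs < 2147483648.0))  // NaN or out of range: the comparison is false
    return -2147483647 - 1;   // INT32_MIN, the substitute value
  return (int)In;             // in range: the trapping wasm opcode is safe
}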
418 
419 static MachineBasicBlock *LowerCallResults(MachineInstr &CallResults,
420  DebugLoc DL, MachineBasicBlock *BB,
421  const TargetInstrInfo &TII) {
422  MachineInstr &CallParams = *CallResults.getPrevNode();
423  assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
424  assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
425  CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);
426 
427  bool IsIndirect = CallParams.getOperand(0).isReg();
428  bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
429 
430  unsigned CallOp;
431  if (IsIndirect && IsRetCall) {
432  CallOp = WebAssembly::RET_CALL_INDIRECT;
433  } else if (IsIndirect) {
434  CallOp = WebAssembly::CALL_INDIRECT;
435  } else if (IsRetCall) {
436  CallOp = WebAssembly::RET_CALL;
437  } else {
438  CallOp = WebAssembly::CALL;
439  }
440 
441  MachineFunction &MF = *BB->getParent();
442  const MCInstrDesc &MCID = TII.get(CallOp);
443  MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
444 
445  // Move the function pointer to the end of the arguments for indirect calls
446  if (IsIndirect) {
447  auto FnPtr = CallParams.getOperand(0);
448  CallParams.RemoveOperand(0);
449  CallParams.addOperand(FnPtr);
450  }
451 
452  for (auto Def : CallResults.defs())
453  MIB.add(Def);
454 
455  // Add placeholders for the type index and immediate flags
456  if (IsIndirect) {
457  MIB.addImm(0);
458  MIB.addImm(0);
459  }
460 
461  for (auto Use : CallParams.uses())
462  MIB.add(Use);
463 
464  BB->insert(CallResults.getIterator(), MIB);
465  CallParams.eraseFromParent();
466  CallResults.eraseFromParent();
467 
468  return BB;
469 }
470 
471 MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
472  MachineInstr &MI, MachineBasicBlock *BB) const {
473  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
474  DebugLoc DL = MI.getDebugLoc();
475 
476  switch (MI.getOpcode()) {
477  default:
478  llvm_unreachable("Unexpected instr type to insert");
479  case WebAssembly::FP_TO_SINT_I32_F32:
480  return LowerFPToInt(MI, DL, BB, TII, false, false, false,
481  WebAssembly::I32_TRUNC_S_F32);
482  case WebAssembly::FP_TO_UINT_I32_F32:
483  return LowerFPToInt(MI, DL, BB, TII, true, false, false,
484  WebAssembly::I32_TRUNC_U_F32);
485  case WebAssembly::FP_TO_SINT_I64_F32:
486  return LowerFPToInt(MI, DL, BB, TII, false, true, false,
487  WebAssembly::I64_TRUNC_S_F32);
488  case WebAssembly::FP_TO_UINT_I64_F32:
489  return LowerFPToInt(MI, DL, BB, TII, true, true, false,
490  WebAssembly::I64_TRUNC_U_F32);
491  case WebAssembly::FP_TO_SINT_I32_F64:
492  return LowerFPToInt(MI, DL, BB, TII, false, false, true,
493  WebAssembly::I32_TRUNC_S_F64);
494  case WebAssembly::FP_TO_UINT_I32_F64:
495  return LowerFPToInt(MI, DL, BB, TII, true, false, true,
496  WebAssembly::I32_TRUNC_U_F64);
497  case WebAssembly::FP_TO_SINT_I64_F64:
498  return LowerFPToInt(MI, DL, BB, TII, false, true, true,
499  WebAssembly::I64_TRUNC_S_F64);
500  case WebAssembly::FP_TO_UINT_I64_F64:
501  return LowerFPToInt(MI, DL, BB, TII, true, true, true,
502  WebAssembly::I64_TRUNC_U_F64);
503  case WebAssembly::CALL_RESULTS:
504  case WebAssembly::RET_CALL_RESULTS:
505  return LowerCallResults(MI, DL, BB, TII);
506  }
507 }
508 
509 const char *
510 WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
511  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
514  break;
515 #define HANDLE_NODETYPE(NODE) \
516  case WebAssemblyISD::NODE: \
517  return "WebAssemblyISD::" #NODE;
518 #define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
519 #include "WebAssemblyISD.def"
520 #undef HANDLE_MEM_NODETYPE
521 #undef HANDLE_NODETYPE
522  }
523  return nullptr;
524 }
525 
526 std::pair<unsigned, const TargetRegisterClass *>
527 WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
528  const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
529  // First, see if this is a constraint that directly corresponds to a
530  // WebAssembly register class.
531  if (Constraint.size() == 1) {
532  switch (Constraint[0]) {
533  case 'r':
534  assert(VT != MVT::iPTR && "Pointer MVT not expected here");
535  if (Subtarget->hasSIMD128() && VT.isVector()) {
536  if (VT.getSizeInBits() == 128)
537  return std::make_pair(0U, &WebAssembly::V128RegClass);
538  }
539  if (VT.isInteger() && !VT.isVector()) {
540  if (VT.getSizeInBits() <= 32)
541  return std::make_pair(0U, &WebAssembly::I32RegClass);
542  if (VT.getSizeInBits() <= 64)
543  return std::make_pair(0U, &WebAssembly::I64RegClass);
544  }
545  break;
546  default:
547  break;
548  }
549  }
550 
551  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
552 }
553 
554 bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
555  // Assume ctz is a relatively cheap operation.
556  return true;
557 }
558 
559 bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
560  // Assume clz is a relatively cheap operation.
561  return true;
562 }
563 
564 bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
565  const AddrMode &AM,
566  Type *Ty, unsigned AS,
567  Instruction *I) const {
568  // WebAssembly offsets are added as unsigned without wrapping. The
569  // isLegalAddressingMode gives us no way to determine if wrapping could be
570  // happening, so we approximate this by accepting only non-negative offsets.
571  if (AM.BaseOffs < 0)
572  return false;
573 
574  // WebAssembly has no scale register operands.
575  if (AM.Scale != 0)
576  return false;
577 
578  // Everything else is legal.
579  return true;
580 }
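// Concretely, an access of the form (base + constant) such as p[4] on an i32
// is presented here roughly as (names are illustrative, not from this file):
//
//   TargetLowering::AddrMode AM;
//   AM.HasBaseReg = true;
//   AM.BaseOffs = 16;  // accepted: folds into the load/store offset immediate
//   AM.Scale = 0;
//
// whereas p[-1] (BaseOffs == -4) or p[i] (Scale != 0) are rejected and the
// address is materialized with explicit adds instead.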
581 
582 bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
583  EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
584  MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
585  // WebAssembly supports unaligned accesses, though it should be declared
586  // with the p2align attribute on loads and stores which do so, and there
587  // may be a performance impact. We tell LLVM they're "fast" because
588  // for the kinds of things that LLVM uses this for (merging adjacent stores
589  // of constants, etc.), WebAssembly implementations will either want the
590  // unaligned access or they'll split anyway.
591  if (Fast)
592  *Fast = true;
593  return true;
594 }
595 
596 bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
597  AttributeList Attr) const {
598  // The current thinking is that wasm engines will perform this optimization,
599  // so we can save on code size.
600  return true;
601 }
602 
603 bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
604  MVT ExtT = ExtVal.getSimpleValueType();
605  MVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getSimpleValueType(0);
606  return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
607  (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
608  (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
609 }
610 
611 EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
612  LLVMContext &C,
613  EVT VT) const {
614  if (VT.isVector())
615  return VT.changeVectorElementTypeToInteger();
616 
617  // So far, all branch instructions in Wasm take an I32 condition.
618  // The default TargetLowering::getSetCCResultType returns the pointer size,
619  // which would be useful to reduce instruction counts when testing
620  // against 64-bit pointers/values if at some point Wasm supports that.
621  return EVT::getIntegerVT(C, 32);
622 }
623 
624 bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
625  const CallInst &I,
626  MachineFunction &MF,
627  unsigned Intrinsic) const {
628  switch (Intrinsic) {
629  case Intrinsic::wasm_atomic_notify:
631  Info.memVT = MVT::i32;
632  Info.ptrVal = I.getArgOperand(0);
633  Info.offset = 0;
634  Info.align = Align(4);
635  // atomic.notify instruction does not really load the memory specified with
636  // this argument, but MachineMemOperand should either be load or store, so
637  // we set this to a load.
638  // FIXME Volatile isn't really correct, but currently all LLVM atomic
639  // instructions are treated as volatiles in the backend, so we should be
640  // consistent. The same applies for wasm_atomic_wait intrinsics too.
642  return true;
643  case Intrinsic::wasm_atomic_wait_i32:
645  Info.memVT = MVT::i32;
646  Info.ptrVal = I.getArgOperand(0);
647  Info.offset = 0;
648  Info.align = Align(4);
650  return true;
651  case Intrinsic::wasm_atomic_wait_i64:
653  Info.memVT = MVT::i64;
654  Info.ptrVal = I.getArgOperand(0);
655  Info.offset = 0;
656  Info.align = Align(8);
658  return true;
659  default:
660  return false;
661  }
662 }
663 
664 //===----------------------------------------------------------------------===//
665 // WebAssembly Lowering private implementation.
666 //===----------------------------------------------------------------------===//
667 
668 //===----------------------------------------------------------------------===//
669 // Lowering Code
670 //===----------------------------------------------------------------------===//
671 
672 static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
673  MachineFunction &MF = DAG.getMachineFunction();
674  DAG.getContext()->diagnose(
675  DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
676 }
677 
678 // Test whether the given calling convention is supported.
679 static bool callingConvSupported(CallingConv::ID CallConv) {
680  // We currently support the language-independent target-independent
681  // conventions. We don't yet have a way to annotate calls with properties like
682  // "cold", and we don't have any call-clobbered registers, so these are mostly
683  // all handled the same.
684  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
685  CallConv == CallingConv::Cold ||
686  CallConv == CallingConv::PreserveMost ||
687  CallConv == CallingConv::PreserveAll ||
688  CallConv == CallingConv::CXX_FAST_TLS ||
690  CallConv == CallingConv::Swift;
691 }
692 
693 SDValue
694 WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
695  SmallVectorImpl<SDValue> &InVals) const {
696  SelectionDAG &DAG = CLI.DAG;
697  SDLoc DL = CLI.DL;
698  SDValue Chain = CLI.Chain;
699  SDValue Callee = CLI.Callee;
701  auto Layout = MF.getDataLayout();
702 
703  CallingConv::ID CallConv = CLI.CallConv;
704  if (!callingConvSupported(CallConv))
705  fail(DL, DAG,
706  "WebAssembly doesn't support language-specific or target-specific "
707  "calling conventions yet");
708  if (CLI.IsPatchPoint)
709  fail(DL, DAG, "WebAssembly doesn't support patch point yet");
710 
711  if (CLI.IsTailCall) {
712  auto NoTail = [&](const char *Msg) {
713  if (CLI.CB && CLI.CB->isMustTailCall())
714  fail(DL, DAG, Msg);
715  CLI.IsTailCall = false;
716  };
717 
718  if (!Subtarget->hasTailCall())
719  NoTail("WebAssembly 'tail-call' feature not enabled");
720 
721  // Varargs calls cannot be tail calls because the buffer is on the stack
722  if (CLI.IsVarArg)
723  NoTail("WebAssembly does not support varargs tail calls");
724 
725  // Do not tail call unless caller and callee return types match
726  const Function &F = MF.getFunction();
727  const TargetMachine &TM = getTargetMachine();
728  Type *RetTy = F.getReturnType();
729  SmallVector<MVT, 4> CallerRetTys;
730  SmallVector<MVT, 4> CalleeRetTys;
731  computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
732  computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
733  bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
734  std::equal(CallerRetTys.begin(), CallerRetTys.end(),
735  CalleeRetTys.begin());
736  if (!TypesMatch)
737  NoTail("WebAssembly tail call requires caller and callee return types to "
738  "match");
739 
740  // If pointers to local stack values are passed, we cannot tail call
741  if (CLI.CB) {
742  for (auto &Arg : CLI.CB->args()) {
743  Value *Val = Arg.get();
744  // Trace the value back through pointer operations
745  while (true) {
746  Value *Src = Val->stripPointerCastsAndAliases();
747  if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
748  Src = GEP->getPointerOperand();
749  if (Val == Src)
750  break;
751  Val = Src;
752  }
753  if (isa<AllocaInst>(Val)) {
754  NoTail(
755  "WebAssembly does not support tail calling with stack arguments");
756  break;
757  }
758  }
759  }
760  }
761 
763  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
764  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
765 
766  // The generic code may have added an sret argument. If we're lowering an
767  // invoke function, the ABI requires that the function pointer be the first
768  // argument, so we may have to swap the arguments.
769  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
770  Outs[0].Flags.isSRet()) {
771  std::swap(Outs[0], Outs[1]);
772  std::swap(OutVals[0], OutVals[1]);
773  }
774 
775  bool HasSwiftSelfArg = false;
776  bool HasSwiftErrorArg = false;
777  unsigned NumFixedArgs = 0;
778  for (unsigned I = 0; I < Outs.size(); ++I) {
779  const ISD::OutputArg &Out = Outs[I];
780  SDValue &OutVal = OutVals[I];
781  HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
782  HasSwiftErrorArg |= Out.Flags.isSwiftError();
783  if (Out.Flags.isNest())
784  fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
785  if (Out.Flags.isInAlloca())
786  fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
787  if (Out.Flags.isInConsecutiveRegs())
788  fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
789  if (Out.Flags.isInConsecutiveRegsLast())
790  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
791  if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
792  auto &MFI = MF.getFrameInfo();
793  int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
795  /*isSS=*/false);
796  SDValue SizeNode =
798  SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
799  Chain = DAG.getMemcpy(
800  Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
801  /*isVolatile*/ false, /*AlwaysInline=*/false,
802  /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
803  OutVal = FINode;
804  }
805  // Count the number of fixed args *after* legalization.
806  NumFixedArgs += Out.IsFixed;
807  }
808 
809  bool IsVarArg = CLI.IsVarArg;
810  auto PtrVT = getPointerTy(Layout);
811 
812  // For swiftcc, emit additional swiftself and swifterror arguments if they
813  // are not present. These additional arguments are also added to the callee
814  // signature; they are necessary to match the callee and caller signatures
815  // for indirect calls.
816  if (CallConv == CallingConv::Swift) {
817  if (!HasSwiftSelfArg) {
818  NumFixedArgs++;
820  Arg.Flags.setSwiftSelf();
821  CLI.Outs.push_back(Arg);
822  SDValue ArgVal = DAG.getUNDEF(PtrVT);
823  CLI.OutVals.push_back(ArgVal);
824  }
825  if (!HasSwiftErrorArg) {
826  NumFixedArgs++;
828  Arg.Flags.setSwiftError();
829  CLI.Outs.push_back(Arg);
830  SDValue ArgVal = DAG.getUNDEF(PtrVT);
831  CLI.OutVals.push_back(ArgVal);
832  }
833  }
834 
835  // Analyze operands of the call, assigning locations to each operand.
837  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
838 
839  if (IsVarArg) {
840  // Outgoing non-fixed arguments are placed in a buffer. First
841  // compute their offsets and the total amount of buffer space needed.
842  for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
843  const ISD::OutputArg &Out = Outs[I];
844  SDValue &Arg = OutVals[I];
845  EVT VT = Arg.getValueType();
846  assert(VT != MVT::iPTR && "Legalized args should be concrete");
847  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
848  Align Alignment =
849  std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
850  unsigned Offset =
851  CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
852  CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
853  Offset, VT.getSimpleVT(),
855  }
856  }
857 
858  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
859 
860  SDValue FINode;
861  if (IsVarArg && NumBytes) {
862  // For non-fixed arguments, next emit stores to store the argument values
863  // to the stack buffer at the offsets computed above.
864  int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
865  Layout.getStackAlignment(),
866  /*isSS=*/false);
867  unsigned ValNo = 0;
869  for (SDValue Arg :
870  make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
871  assert(ArgLocs[ValNo].getValNo() == ValNo &&
872  "ArgLocs should remain in order and only hold varargs args");
873  unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
874  FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
875  SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
876  DAG.getConstant(Offset, DL, PtrVT));
877  Chains.push_back(
878  DAG.getStore(Chain, DL, Arg, Add,
879  MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
880  }
881  if (!Chains.empty())
882  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
883  } else if (IsVarArg) {
884  FINode = DAG.getIntPtrConstant(0, DL);
885  }
886 
887  if (Callee->getOpcode() == ISD::GlobalAddress) {
888  // If the callee is a GlobalAddress node (quite common, every direct call
889  // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
890  // doesn't add MO_GOT, which is not needed for direct calls.
891  GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
892  Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
894  GA->getOffset());
895  Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
896  getPointerTy(DAG.getDataLayout()), Callee);
897  }
898 
899  // Compute the operands for the CALLn node.
901  Ops.push_back(Chain);
902  Ops.push_back(Callee);
903 
904  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
905  // isn't reliable.
906  Ops.append(OutVals.begin(),
907  IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
908  // Add a pointer to the vararg buffer.
909  if (IsVarArg)
910  Ops.push_back(FINode);
911 
912  SmallVector<EVT, 8> InTys;
913  for (const auto &In : Ins) {
914  assert(!In.Flags.isByVal() && "byval is not valid for return values");
915  assert(!In.Flags.isNest() && "nest is not valid for return values");
916  if (In.Flags.isInAlloca())
917  fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
918  if (In.Flags.isInConsecutiveRegs())
919  fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
920  if (In.Flags.isInConsecutiveRegsLast())
921  fail(DL, DAG,
922  "WebAssembly hasn't implemented cons regs last return values");
923  // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
924  // registers.
925  InTys.push_back(In.VT);
926  }
927 
928  if (CLI.IsTailCall) {
929  // ret_calls do not return values to the current frame
930  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
931  return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
932  }
933 
934  InTys.push_back(MVT::Other);
935  SDVTList InTyList = DAG.getVTList(InTys);
936  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);
937 
938  for (size_t I = 0; I < Ins.size(); ++I)
939  InVals.push_back(Res.getValue(I));
940 
941  // Return the chain
942  return Res.getValue(Ins.size());
943 }
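// To summarize the varargs handling above with a sketch (illustrative
// pseudo-C++, names assumed): for a callee declared `void f(int, ...)` and a
// call `f(a, b, c)`, the fixed argument travels as a normal call operand and
// the remaining arguments are spilled to a caller-allocated stack object
// whose address becomes the final operand:
//
//   char Buf[/* size and alignment computed by CCInfo above */];
//   *(TypeOfB *)(Buf + OffsetOfB) = b;
//   *(TypeOfC *)(Buf + OffsetOfC) = c;
//   f(a, Buf);   // the CALL node gets: chain, callee, a, pointer to Buf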
944 
945 bool WebAssemblyTargetLowering::CanLowerReturn(
946  CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
948  LLVMContext & /*Context*/) const {
949  // WebAssembly can only handle returning tuples with multivalue enabled
950  return Subtarget->hasMultivalue() || Outs.size() <= 1;
951 }
952 
953 SDValue WebAssemblyTargetLowering::LowerReturn(
954  SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
956  const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
957  SelectionDAG &DAG) const {
958  assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
959  "MVP WebAssembly can only return up to one value");
960  if (!callingConvSupported(CallConv))
961  fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
962 
963  SmallVector<SDValue, 4> RetOps(1, Chain);
964  RetOps.append(OutVals.begin(), OutVals.end());
965  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
966 
967  // Record the number and types of the return values.
968  for (const ISD::OutputArg &Out : Outs) {
969  assert(!Out.Flags.isByVal() && "byval is not valid for return values");
970  assert(!Out.Flags.isNest() && "nest is not valid for return values");
971  assert(Out.IsFixed && "non-fixed return value is not valid");
972  if (Out.Flags.isInAlloca())
973  fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
974  if (Out.Flags.isInConsecutiveRegs())
975  fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
976  if (Out.Flags.isInConsecutiveRegsLast())
977  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
978  }
979 
980  return Chain;
981 }
982 
983 SDValue WebAssemblyTargetLowering::LowerFormalArguments(
984  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
985  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
986  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
987  if (!callingConvSupported(CallConv))
988  fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
989 
991  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
992 
993  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
994  // of the incoming values before they're represented by virtual registers.
995  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
996 
997  bool HasSwiftErrorArg = false;
998  bool HasSwiftSelfArg = false;
999  for (const ISD::InputArg &In : Ins) {
1000  HasSwiftSelfArg |= In.Flags.isSwiftSelf();
1001  HasSwiftErrorArg |= In.Flags.isSwiftError();
1002  if (In.Flags.isInAlloca())
1003  fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
1004  if (In.Flags.isNest())
1005  fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
1006  if (In.Flags.isInConsecutiveRegs())
1007  fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
1008  if (In.Flags.isInConsecutiveRegsLast())
1009  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
1010  // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1011  // registers.
1012  InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
1013  DAG.getTargetConstant(InVals.size(),
1014  DL, MVT::i32))
1015  : DAG.getUNDEF(In.VT));
1016 
1017  // Record the number and types of arguments.
1018  MFI->addParam(In.VT);
1019  }
1020 
1021  // For swiftcc, emit additional swiftself and swifterror arguments if they
1022  // are not present. These additional arguments are also added to the callee
1023  // signature; they are necessary to match the callee and caller signatures
1024  // for indirect calls.
1025  auto PtrVT = getPointerTy(MF.getDataLayout());
1026  if (CallConv == CallingConv::Swift) {
1027  if (!HasSwiftSelfArg) {
1028  MFI->addParam(PtrVT);
1029  }
1030  if (!HasSwiftErrorArg) {
1031  MFI->addParam(PtrVT);
1032  }
1033  }
1034  // Varargs are copied into a buffer allocated by the caller, and a pointer to
1035  // the buffer is passed as an argument.
1036  if (IsVarArg) {
1037  MVT PtrVT = getPointerTy(MF.getDataLayout());
1038  Register VarargVreg =
1040  MFI->setVarargBufferVreg(VarargVreg);
1041  Chain = DAG.getCopyToReg(
1042  Chain, DL, VarargVreg,
1043  DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
1044  DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
1045  MFI->addParam(PtrVT);
1046  }
1047 
1048  // Record the number and types of arguments and results.
1049  SmallVector<MVT, 4> Params;
1052  MF.getFunction(), DAG.getTarget(), Params, Results);
1053  for (MVT VT : Results)
1054  MFI->addResult(VT);
1055  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
1056  // the param logic here with ComputeSignatureVTs
1057  assert(MFI->getParams().size() == Params.size() &&
1058  std::equal(MFI->getParams().begin(), MFI->getParams().end(),
1059  Params.begin()));
1060 
1061  return Chain;
1062 }
1063 
1064 void WebAssemblyTargetLowering::ReplaceNodeResults(
1065  SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
1066  switch (N->getOpcode()) {
1067  case ISD::SIGN_EXTEND_INREG:
1068  // Do not add any results, signifying that N should not be custom lowered
1069  // after all. This happens because simd128 turns on custom lowering for
1070  // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
1071  // illegal type.
1072  break;
1073  default:
1075  "ReplaceNodeResults not implemented for this op for WebAssembly!");
1076  }
1077 }
1078 
1079 //===----------------------------------------------------------------------===//
1080 // Custom lowering hooks.
1081 //===----------------------------------------------------------------------===//
1082 
1083 SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
1084  SelectionDAG &DAG) const {
1085  SDLoc DL(Op);
1086  switch (Op.getOpcode()) {
1087  default:
1088  llvm_unreachable("unimplemented operation lowering");
1089  return SDValue();
1090  case ISD::FrameIndex:
1091  return LowerFrameIndex(Op, DAG);
1092  case ISD::GlobalAddress:
1093  return LowerGlobalAddress(Op, DAG);
1094  case ISD::ExternalSymbol:
1095  return LowerExternalSymbol(Op, DAG);
1096  case ISD::JumpTable:
1097  return LowerJumpTable(Op, DAG);
1098  case ISD::BR_JT:
1099  return LowerBR_JT(Op, DAG);
1100  case ISD::VASTART:
1101  return LowerVASTART(Op, DAG);
1102  case ISD::BlockAddress:
1103  case ISD::BRIND:
1104  fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
1105  return SDValue();
1106  case ISD::RETURNADDR:
1107  return LowerRETURNADDR(Op, DAG);
1108  case ISD::FRAMEADDR:
1109  return LowerFRAMEADDR(Op, DAG);
1110  case ISD::CopyToReg:
1111  return LowerCopyToReg(Op, DAG);
1114  return LowerAccessVectorElement(Op, DAG);
1115  case ISD::INTRINSIC_VOID:
1118  return LowerIntrinsic(Op, DAG);
1120  return LowerSIGN_EXTEND_INREG(Op, DAG);
1121  case ISD::BUILD_VECTOR:
1122  return LowerBUILD_VECTOR(Op, DAG);
1123  case ISD::VECTOR_SHUFFLE:
1124  return LowerVECTOR_SHUFFLE(Op, DAG);
1125  case ISD::SETCC:
1126  return LowerSETCC(Op, DAG);
1127  case ISD::SHL:
1128  case ISD::SRA:
1129  case ISD::SRL:
1130  return LowerShift(Op, DAG);
1131  }
1132 }
1133 
1134 SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1135  SelectionDAG &DAG) const {
1136  SDValue Src = Op.getOperand(2);
1137  if (isa<FrameIndexSDNode>(Src.getNode())) {
1138  // CopyToReg nodes don't support FrameIndex operands. Other targets select
1139  // the FI to some LEA-like instruction, but since we don't have that, we
1140  // need to insert some kind of instruction that can take an FI operand and
1141  // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1142  // local.copy between Op and its FI operand.
1143  SDValue Chain = Op.getOperand(0);
1144  SDLoc DL(Op);
1145  unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1146  EVT VT = Src.getValueType();
1147  SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1148  : WebAssembly::COPY_I64,
1149  DL, VT, Src),
1150  0);
1151  return Op.getNode()->getNumValues() == 1
1152  ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1153  : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1154  Op.getNumOperands() == 4 ? Op.getOperand(3)
1155  : SDValue());
1156  }
1157  return SDValue();
1158 }
1159 
1160 SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1161  SelectionDAG &DAG) const {
1162  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1163  return DAG.getTargetFrameIndex(FI, Op.getValueType());
1164 }
1165 
1166 SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1167  SelectionDAG &DAG) const {
1168  SDLoc DL(Op);
1169 
1170  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1171  fail(DL, DAG,
1172  "Non-Emscripten WebAssembly hasn't implemented "
1173  "__builtin_return_address");
1174  return SDValue();
1175  }
1176 
1177  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1178  return SDValue();
1179 
1180  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1181  MakeLibCallOptions CallOptions;
1182  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1183  {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1184  .first;
1185 }
1186 
1187 SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1188  SelectionDAG &DAG) const {
1189  // Non-zero depths are not supported by WebAssembly currently. Use the
1190  // legalizer's default expansion, which is to return 0 (what this function is
1191  // documented to do).
1192  if (Op.getConstantOperandVal(0) > 0)
1193  return SDValue();
1194 
1196  EVT VT = Op.getValueType();
1197  Register FP =
1199  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1200 }
1201 
1202 SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1203  SelectionDAG &DAG) const {
1204  SDLoc DL(Op);
1205  const auto *GA = cast<GlobalAddressSDNode>(Op);
1206  EVT VT = Op.getValueType();
1207  assert(GA->getTargetFlags() == 0 &&
1208  "Unexpected target flags on generic GlobalAddressSDNode");
1209  if (GA->getAddressSpace() != 0)
1210  fail(DL, DAG, "WebAssembly only expects the 0 address space");
1211 
1212  unsigned OperandFlags = 0;
1213  if (isPositionIndependent()) {
1214  const GlobalValue *GV = GA->getGlobal();
1215  if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1216  MachineFunction &MF = DAG.getMachineFunction();
1217  MVT PtrVT = getPointerTy(MF.getDataLayout());
1218  const char *BaseName;
1219  if (GV->getValueType()->isFunctionTy()) {
1220  BaseName = MF.createExternalSymbolName("__table_base");
1221  OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1222  }
1223  else {
1224  BaseName = MF.createExternalSymbolName("__memory_base");
1225  OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1226  }
1227  SDValue BaseAddr =
1228  DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1229  DAG.getTargetExternalSymbol(BaseName, PtrVT));
1230 
1231  SDValue SymAddr = DAG.getNode(
1233  DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1234  OperandFlags));
1235 
1236  return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1237  } else {
1238  OperandFlags = WebAssemblyII::MO_GOT;
1239  }
1240  }
1241 
1242  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1243  DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1244  GA->getOffset(), OperandFlags));
1245 }
1246 
1247 SDValue
1248 WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1249  SelectionDAG &DAG) const {
1250  SDLoc DL(Op);
1251  const auto *ES = cast<ExternalSymbolSDNode>(Op);
1252  EVT VT = Op.getValueType();
1253  assert(ES->getTargetFlags() == 0 &&
1254  "Unexpected target flags on generic ExternalSymbolSDNode");
1255  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1256  DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1257 }
1258 
1259 SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1260  SelectionDAG &DAG) const {
1261  // There's no need for a Wrapper node because we always incorporate a jump
1262  // table operand into a BR_TABLE instruction, rather than ever
1263  // materializing it in a register.
1264  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1265  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1266  JT->getTargetFlags());
1267 }
1268 
1269 SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1270  SelectionDAG &DAG) const {
1271  SDLoc DL(Op);
1272  SDValue Chain = Op.getOperand(0);
1273  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1274  SDValue Index = Op.getOperand(2);
1275  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1276 
1278  Ops.push_back(Chain);
1279  Ops.push_back(Index);
1280 
1282  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1283 
1284  // Add an operand for each case.
1285  for (auto MBB : MBBs)
1286  Ops.push_back(DAG.getBasicBlock(MBB));
1287 
1288  // Add the first MBB as a dummy default target for now. This will be replaced
1289  // with the proper default target (and the preceding range check eliminated)
1290  // if possible by WebAssemblyFixBrTableDefaults.
1291  Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1292  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1293 }
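// For example (illustration of the operand order only), a jump table whose
// cases are %bb.1, %bb.2, and %bb.3 produces
//
//   BR_TABLE chain, index, %bb.1, %bb.2, %bb.3, %bb.1
//
// where the trailing %bb.1 is the dummy default mentioned above, later fixed
// up (or removed together with the range check) by
// WebAssemblyFixBrTableDefaults.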
1294 
1295 SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1296  SelectionDAG &DAG) const {
1297  SDLoc DL(Op);
1299 
1300  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1301  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1302 
1303  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1304  MFI->getVarargBufferVreg(), PtrVT);
1305  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1306  MachinePointerInfo(SV), 0);
1307 }
1308 
1309 SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1310  SelectionDAG &DAG) const {
1311  MachineFunction &MF = DAG.getMachineFunction();
1312  unsigned IntNo;
1313  switch (Op.getOpcode()) {
1314  case ISD::INTRINSIC_VOID:
1316  IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1317  break;
1319  IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1320  break;
1321  default:
1322  llvm_unreachable("Invalid intrinsic");
1323  }
1324  SDLoc DL(Op);
1325 
1326  switch (IntNo) {
1327  default:
1328  return SDValue(); // Don't custom lower most intrinsics.
1329 
1330  case Intrinsic::wasm_lsda: {
1331  EVT VT = Op.getValueType();
1332  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1333  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1334  auto &Context = MF.getMMI().getContext();
1335  MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1336  Twine(MF.getFunctionNumber()));
1337  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1338  DAG.getMCSymbol(S, PtrVT));
1339  }
1340 
1341  case Intrinsic::wasm_throw: {
1342  // We only support C++ exceptions for now
1343  int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1344  if (Tag != CPP_EXCEPTION)
1345  llvm_unreachable("Invalid tag!");
1346  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1347  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1348  const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1349  SDValue SymNode = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1350  DAG.getTargetExternalSymbol(SymName, PtrVT));
1351  return DAG.getNode(WebAssemblyISD::THROW, DL,
1352  MVT::Other, // outchain type
1353  {
1354  Op.getOperand(0), // inchain
1355  SymNode, // exception symbol
1356  Op.getOperand(3) // thrown value
1357  });
1358  }
1359 
1360  case Intrinsic::wasm_shuffle: {
1361  // Drop in-chain and replace undefs, but otherwise pass through unchanged
1362  SDValue Ops[18];
1363  size_t OpIdx = 0;
1364  Ops[OpIdx++] = Op.getOperand(1);
1365  Ops[OpIdx++] = Op.getOperand(2);
1366  while (OpIdx < 18) {
1367  const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1368  if (MaskIdx.isUndef() ||
1369  cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1370  Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1371  } else {
1372  Ops[OpIdx++] = MaskIdx;
1373  }
1374  }
1375  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1376  }
1377  }
1378 }
1379 
1380 SDValue
1381 WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1382  SelectionDAG &DAG) const {
1383  SDLoc DL(Op);
1384  // If sign extension operations are disabled, allow sext_inreg only if operand
1385  // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1386  // extension operations, but allowing sext_inreg in this context lets us have
1387  // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1388  // everywhere would be simpler in this file, but would necessitate large and
1389  // brittle patterns to undo the expansion and select extract_lane_s
1390  // instructions.
1391  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
1393  return SDValue();
1394 
1395  const SDValue &Extract = Op.getOperand(0);
1396  MVT VecT = Extract.getOperand(0).getSimpleValueType();
1397  if (VecT.getVectorElementType().getSizeInBits() > 32)
1398  return SDValue();
1399  MVT ExtractedLaneT =
1400  cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1401  MVT ExtractedVecT =
1402  MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1403  if (ExtractedVecT == VecT)
1404  return Op;
1405 
1406  // Bitcast vector to appropriate type to ensure ISel pattern coverage
1407  const SDNode *Index = Extract.getOperand(1).getNode();
1408  if (!isa<ConstantSDNode>(Index))
1409  return SDValue();
1410  unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1411  unsigned Scale =
1412  ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1413  assert(Scale > 1);
1414  SDValue NewIndex =
1415  DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1416  SDValue NewExtract = DAG.getNode(
1417  ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1418  DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1419  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1420  Op.getOperand(1));
1421 }
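// Worked example (illustration only): for
//   (sign_extend_inreg (extract_vector_elt v4i32:$v, 2), i8)
// ExtractedVecT is v16i8, Scale is 16 / 4 == 4, and the node is rewritten as
//   (sign_extend_inreg (extract_vector_elt (bitcast v16i8:$v), 8), i8)
// so the extract lines up with the i8x16.extract_lane_s selection pattern.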
1422 
1423 SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1424  SelectionDAG &DAG) const {
1425  SDLoc DL(Op);
1426  const EVT VecT = Op.getValueType();
1427  const EVT LaneT = Op.getOperand(0).getValueType();
1428  const size_t Lanes = Op.getNumOperands();
1429  bool CanSwizzle = VecT == MVT::v16i8;
1430 
1431  // BUILD_VECTORs are lowered to the instruction that initializes the highest
1432  // possible number of lanes at once followed by a sequence of replace_lane
1433  // instructions to individually initialize any remaining lanes.
1434 
1435  // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1436  // swizzled lanes should be given greater weight.
1437 
1438  // TODO: Investigate building vectors by shuffling together vectors built by
1439  // separately specialized means.
1440 
1441  auto IsConstant = [](const SDValue &V) {
1442  return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1443  };
1444 
1445  // Returns the source vector and index vector pair if they exist. Checks for:
1446  // (extract_vector_elt
1447  // $src,
1448  // (sign_extend_inreg (extract_vector_elt $indices, $i))
1449  // )
1450  auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1451  auto Bail = std::make_pair(SDValue(), SDValue());
1452  if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1453  return Bail;
1454  const SDValue &SwizzleSrc = Lane->getOperand(0);
1455  const SDValue &IndexExt = Lane->getOperand(1);
1456  if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1457  return Bail;
1458  const SDValue &Index = IndexExt->getOperand(0);
1459  if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1460  return Bail;
1461  const SDValue &SwizzleIndices = Index->getOperand(0);
1462  if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1463  SwizzleIndices.getValueType() != MVT::v16i8 ||
1464  Index->getOperand(1)->getOpcode() != ISD::Constant ||
1465  Index->getConstantOperandVal(1) != I)
1466  return Bail;
1467  return std::make_pair(SwizzleSrc, SwizzleIndices);
1468  };
1469 
1470  using ValueEntry = std::pair<SDValue, size_t>;
1471  SmallVector<ValueEntry, 16> SplatValueCounts;
1472 
1473  using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1474  SmallVector<SwizzleEntry, 16> SwizzleCounts;
1475 
1476  auto AddCount = [](auto &Counts, const auto &Val) {
1477  auto CountIt = std::find_if(Counts.begin(), Counts.end(),
1478  [&Val](auto E) { return E.first == Val; });
1479  if (CountIt == Counts.end()) {
1480  Counts.emplace_back(Val, 1);
1481  } else {
1482  CountIt->second++;
1483  }
1484  };
1485 
1486  auto GetMostCommon = [](auto &Counts) {
1487  auto CommonIt =
1488  std::max_element(Counts.begin(), Counts.end(),
1489  [](auto A, auto B) { return A.second < B.second; });
1490  assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
1491  return *CommonIt;
1492  };
1493 
1494  size_t NumConstantLanes = 0;
1495 
1496  // Count eligible lanes for each type of vector creation op
1497  for (size_t I = 0; I < Lanes; ++I) {
1498  const SDValue &Lane = Op->getOperand(I);
1499  if (Lane.isUndef())
1500  continue;
1501 
1502  AddCount(SplatValueCounts, Lane);
1503 
1504  if (IsConstant(Lane)) {
1505  NumConstantLanes++;
1506  } else if (CanSwizzle) {
1507  auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
1508  if (SwizzleSrcs.first)
1509  AddCount(SwizzleCounts, SwizzleSrcs);
1510  }
1511  }
1512 
1513  SDValue SplatValue;
1514  size_t NumSplatLanes;
1515  std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
1516 
1517  SDValue SwizzleSrc;
1518  SDValue SwizzleIndices;
1519  size_t NumSwizzleLanes = 0;
1520  if (SwizzleCounts.size())
1521  std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
1522  NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
1523 
1524  // Predicate returning true if the lane is properly initialized by the
1525  // original instruction
1526  std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
1527  SDValue Result;
1528  // Prefer swizzles over vector consts over splats
1529  if (NumSwizzleLanes >= NumSplatLanes &&
1530  (!Subtarget->hasUnimplementedSIMD128() ||
1531  NumSwizzleLanes >= NumConstantLanes)) {
1532  Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
1533  SwizzleIndices);
1534  auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
1535  IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
1536  return Swizzled == GetSwizzleSrcs(I, Lane);
1537  };
1538  } else if (NumConstantLanes >= NumSplatLanes &&
1539  Subtarget->hasUnimplementedSIMD128()) {
1540  SmallVector<SDValue, 16> ConstLanes;
1541  for (const SDValue &Lane : Op->op_values()) {
1542  if (IsConstant(Lane)) {
1543  ConstLanes.push_back(Lane);
1544  } else if (LaneT.isFloatingPoint()) {
1545  ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1546  } else {
1547  ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1548  }
1549  }
1550  Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1551  IsLaneConstructed = [&](size_t _, const SDValue &Lane) {
1552  return IsConstant(Lane);
1553  };
1554  }
1555  if (!Result) {
1556  // Use a splat, but possibly a load_splat
1557  LoadSDNode *SplattedLoad;
1558  if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
1559  SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
1560  Result = DAG.getMemIntrinsicNode(
1561  WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
1562  {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
1563  SplattedLoad->getOffset()},
1564  SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
1565  } else {
1566  Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
1567  }
1568  IsLaneConstructed = [&](size_t _, const SDValue &Lane) {
1569  return Lane == SplatValue;
1570  };
1571  }
1572 
1573  // Add replace_lane instructions for any unhandled values
1574  for (size_t I = 0; I < Lanes; ++I) {
1575  const SDValue &Lane = Op->getOperand(I);
1576  if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
1577  Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1578  DAG.getConstant(I, DL, MVT::i32));
1579  }
1580 
1581  return Result;
1582 }
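// For example (illustration only), a v4i32 build_vector with lanes
// (x, x, x, y) is lowered to an i32x4.splat of x followed by a single
// i32x4.replace_lane inserting y into lane 3, while a build_vector whose
// lanes are all copies of one scalar load can instead become a single
// LOAD_SPLAT node (a splatting load).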
1583 
1584 SDValue
1585 WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1586  SelectionDAG &DAG) const {
1587  SDLoc DL(Op);
1588  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1590  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
1591  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1592 
1593  // Space for two vector args and sixteen mask indices
1594  SDValue Ops[18];
1595  size_t OpIdx = 0;
1596  Ops[OpIdx++] = Op.getOperand(0);
1597  Ops[OpIdx++] = Op.getOperand(1);
1598 
1599  // Expand mask indices to byte indices and materialize them as operands
1600  for (int M : Mask) {
1601  for (size_t J = 0; J < LaneBytes; ++J) {
1602  // Lower undefs (represented by -1 in mask) to zero
1603  uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1604  Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1605  }
1606  }
1607 
1608  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1609 }
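// Worked example (illustration only): a v4i32 shuffle with mask <0, 4, 1, 5>
// has LaneBytes == 4, so the 16 byte-index operands emitted above are
//   0,1,2,3, 16,17,18,19, 4,5,6,7, 20,21,22,23
// and any -1 (undef) lane contributes four zero bytes instead.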
1610 
1611 SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
1612  SelectionDAG &DAG) const {
1613  SDLoc DL(Op);
1614  // The legalizer does not know how to expand the comparison modes of i64x2
1615  // vectors because no comparison modes are supported. We could solve this by
1616  // expanding all i64x2 SETCC nodes, but that seems to expand f64x2 SETCC nodes
1617  // (which return i64x2 results) as well. So instead we manually unroll i64x2
1618  // comparisons here.
1620  SmallVector<SDValue, 2> LHS, RHS;
1621  DAG.ExtractVectorElements(Op->getOperand(0), LHS);
1622  DAG.ExtractVectorElements(Op->getOperand(1), RHS);
1623  const SDValue &CC = Op->getOperand(2);
1624  auto MakeLane = [&](unsigned I) {
1625  return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
1626  DAG.getConstant(uint64_t(-1), DL, MVT::i64),
1627  DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
1628  };
1629  return DAG.getBuildVector(Op->getValueType(0), DL,
1630  {MakeLane(0), MakeLane(1)});
1631 }
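// For illustration, a hypothetical (v2i64 setcc <a0, a1>, <b0, b1>, setgt)
// unrolls into two scalar selects and a build_vector:
//
//   lane0  = select_cc a0, b0, -1, 0, setgt
//   lane1  = select_cc a1, b1, -1, 0, setgt
//   result = build_vector lane0, lane1
//
// so each result lane is all-ones when the comparison holds and zero
// otherwise, matching vector setcc semantics.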
1632 
1633 SDValue
1634 WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1635  SelectionDAG &DAG) const {
1636  // Allow constant lane indices, expand variable lane indices
1637  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1638  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1639  return Op;
1640  else
1641  // Perform default expansion
1642  return SDValue();
1643 }
1644 
1645 static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1646  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1647  // 32-bit and 64-bit unrolled shifts will have proper semantics
1648  if (LaneT.bitsGE(MVT::i32))
1649  return DAG.UnrollVectorOp(Op.getNode());
1650  // Otherwise mask the shift value to get proper semantics from 32-bit shift
1651  SDLoc DL(Op);
1652  size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
1653  SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
1654  unsigned ShiftOpcode = Op.getOpcode();
1655  SmallVector<SDValue, 16> ShiftedElements;
1656  DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
1657  SmallVector<SDValue, 16> ShiftElements;
1658  DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
1659  SmallVector<SDValue, 16> UnrolledOps;
1660  for (size_t i = 0; i < NumLanes; ++i) {
1661  SDValue MaskedShiftValue =
1662  DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
1663  SDValue ShiftedValue = ShiftedElements[i];
1664  if (ShiftOpcode == ISD::SRA)
1665  ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
1666  ShiftedValue, DAG.getValueType(LaneT));
1667  UnrolledOps.push_back(
1668  DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
1669  }
1670  return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
1671 }
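// For illustration, take a hypothetical v8i16 arithmetic shift right whose
// per-lane shift amounts include 20. LaneT is i16 (narrower than i32), so the
// amount is masked to 20 & 15 == 4, each extracted lane is sign-extended
// within its i32 scalar via SIGN_EXTEND_INREG, shifted by 4, and the eight
// results are reassembled with getBuildVector.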
1672 
1673 SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
1674  SelectionDAG &DAG) const {
1675  SDLoc DL(Op);
1676 
1677  // Only manually lower vector shifts
1678  assert(Op.getSimpleValueType().isVector());
1679 
1680  auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
1681  if (!ShiftVal)
1682  return unrollVectorShift(Op, DAG);
1683 
1684  // Use anyext because none of the high bits can affect the shift
1685  ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
1686 
1687  unsigned Opcode;
1688  switch (Op.getOpcode()) {
1689  case ISD::SHL:
1690  Opcode = WebAssemblyISD::VEC_SHL;
1691  break;
1692  case ISD::SRA:
1693  Opcode = WebAssemblyISD::VEC_SHR_S;
1694  break;
1695  case ISD::SRL:
1696  Opcode = WebAssemblyISD::VEC_SHR_U;
1697  break;
1698  default:
1699  llvm_unreachable("unexpected opcode");
1700  }
1701 
1702  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
1703 }
1704 
1705 //===----------------------------------------------------------------------===//
1706 // Custom DAG combine hooks
1707 //===----------------------------------------------------------------------===//
1708 static SDValue
1709 performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
1710  auto &DAG = DCI.DAG;
1711  auto Shuffle = cast<ShuffleVectorSDNode>(N);
1712 
1713  // Hoist vector bitcasts that don't change the number of lanes out of unary
1714  // shuffles, where they are less likely to get in the way of other combines.
1715  // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
1716  // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
1717  SDValue Bitcast = N->getOperand(0);
1718  if (Bitcast.getOpcode() != ISD::BITCAST)
1719  return SDValue();
1720  if (!N->getOperand(1).isUndef())
1721  return SDValue();
1722  SDValue CastOp = Bitcast.getOperand(0);
1723  MVT SrcType = CastOp.getSimpleValueType();
1724  MVT DstType = Bitcast.getSimpleValueType();
1725  if (!SrcType.is128BitVector() ||
1726  SrcType.getVectorNumElements() != DstType.getVectorNumElements())
1727  return SDValue();
1728  SDValue NewShuffle = DAG.getVectorShuffle(
1729  SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
1730  return DAG.getBitcast(DstType, NewShuffle);
1731 }
1732 
1733 SDValue
1734 WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
1735  DAGCombinerInfo &DCI) const {
1736  switch (N->getOpcode()) {
1737  default:
1738  return SDValue();
1739  case ISD::VECTOR_SHUFFLE:
1740  return performVECTOR_SHUFFLECombine(N, DCI);
1741  }
1742 }