1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief Custom DAG lowering for SI
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifdef _MSC_VER
16 // Provide M_PI.
17 #define _USE_MATH_DEFINES
18 #endif
19 
20 #include "SIISelLowering.h"
21 #include "AMDGPU.h"
22 #include "AMDGPUIntrinsicInfo.h"
23 #include "AMDGPUSubtarget.h"
24 #include "AMDGPUTargetMachine.h"
25 #include "SIDefines.h"
26 #include "SIInstrInfo.h"
27 #include "SIMachineFunctionInfo.h"
28 #include "SIRegisterInfo.h"
29 #include "Utils/AMDGPUBaseInfo.h"
30 #include "llvm/ADT/APFloat.h"
31 #include "llvm/ADT/APInt.h"
32 #include "llvm/ADT/ArrayRef.h"
33 #include "llvm/ADT/BitVector.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/StringSwitch.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/CodeGen/Analysis.h"
58 #include "llvm/IR/Constants.h"
59 #include "llvm/IR/DataLayout.h"
60 #include "llvm/IR/DebugLoc.h"
61 #include "llvm/IR/DerivedTypes.h"
62 #include "llvm/IR/DiagnosticInfo.h"
63 #include "llvm/IR/Function.h"
64 #include "llvm/IR/GlobalValue.h"
65 #include "llvm/IR/InstrTypes.h"
66 #include "llvm/IR/Instruction.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/IntrinsicInst.h"
69 #include "llvm/IR/Type.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CodeGen.h"
73 #include "llvm/Support/Compiler.h"
75 #include "llvm/Support/KnownBits.h"
78 #include <cassert>
79 #include <cmath>
80 #include <cstdint>
81 #include <iterator>
82 #include <tuple>
83 #include <utility>
84 #include <vector>
85 
86 using namespace llvm;
87 
88 #define DEBUG_TYPE "si-lower"
89 
90 STATISTIC(NumTailCalls, "Number of tail calls");
91 
93  "amdgpu-vgpr-index-mode",
94  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
95  cl::init(false));
96 
98  "amdgpu-frame-index-zero-bits",
99  cl::desc("High bits of frame index assumed to be zero"),
100  cl::init(5),
102 
103 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
104  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
105  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
106  if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
107  return AMDGPU::SGPR0 + Reg;
108  }
109  }
110  llvm_unreachable("Cannot allocate sgpr");
111 }
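// For example, if the calling convention has already claimed SGPR0..SGPR3
// (say, for the private segment buffer), this returns AMDGPU::SGPR4. It is
// used below as a fallback when the scratch wave byte offset has no fixed
// register assignment.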
112 
114  const SISubtarget &STI)
115  : AMDGPUTargetLowering(TM, STI) {
116  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
117  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
118 
119  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
120  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
121 
122  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
123  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
124  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
125 
126  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
127  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);
128 
129  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
130  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
131 
132  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
133  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
134 
135  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
136  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
137 
138  if (Subtarget->has16BitInsts()) {
139  addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
140  addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
141  }
142 
143  if (Subtarget->hasVOP3PInsts()) {
144  addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
145  addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
146  }
147 
149 
150  // We need to custom lower vector stores from local memory
156 
162 
173 
177 
182 
188 
193 
196 
204 
210 
214 
219 
226 
229 
232 
233 #if 0
236 #endif
237 
238  //setOperationAction(ISD::ADDC, MVT::i64, Expand);
239  //setOperationAction(ISD::SUBC, MVT::i64, Expand);
240 
241  // We only support LOAD/STORE and vector manipulation ops for vectors
242  // with > 4 elements.
245  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
246  switch (Op) {
247  case ISD::LOAD:
248  case ISD::STORE:
249  case ISD::BUILD_VECTOR:
250  case ISD::BITCAST:
256  break;
257  case ISD::CONCAT_VECTORS:
259  break;
260  default:
262  break;
263  }
264  }
265  }
266 
267  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
268  // is expanded to avoid having two separate loops in case the index is a VGPR.
269 
270  // Most operations are naturally 32-bit vector operations. We only support
271  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
272  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
275 
278 
281 
284  }
285 
290 
291  // Avoid stack access for these.
292  // TODO: Generalize to more vector types.
297 
298  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
299  // and output demarshalling
302 
303  // We can't return success/failure, only the old value,
304  // let LLVM add the comparison
307 
308  if (getSubtarget()->hasFlatAddressSpace()) {
311  }
312 
315 
 316  // On SI this is s_memtime; on VI it is s_memrealtime.
320 
323 
328  }
329 
331 
336 
337  if (Subtarget->has16BitInsts()) {
339 
342 
345 
348 
351 
356 
359 
364 
366 
368 
370 
372 
377 
382 
383  // F16 - Constant Actions.
385 
386  // F16 - Load/Store Actions.
391 
392  // F16 - VOP1 Actions.
401 
402  // F16 - VOP2 Actions.
408 
409  // F16 - VOP3 Actions.
411  if (!Subtarget->hasFP16Denormals())
413  }
414 
415  if (Subtarget->hasVOP3PInsts()) {
416  for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
417  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
418  switch (Op) {
419  case ISD::LOAD:
420  case ISD::STORE:
421  case ISD::BUILD_VECTOR:
422  case ISD::BITCAST:
428  break;
429  case ISD::CONCAT_VECTORS:
431  break;
432  default:
434  break;
435  }
436  }
437  }
438 
439  // XXX - Do these do anything? Vector constants turn into build_vector.
442 
447 
452 
463 
474 
481 
482  // This isn't really legal, but this avoids the legalizer unrolling it (and
483  // allows matching fneg (fabs x) patterns)
485 
488 
493  } else {
496  }
497 
500  }
501 
525 
526  // All memory operations. Some folding on the pointer operand is done to help
 527  // match the constant offsets in the addressing modes.
545 
547 }
548 
550  return static_cast<const SISubtarget *>(Subtarget);
551 }
552 
553 //===----------------------------------------------------------------------===//
554 // TargetLowering queries
555 //===----------------------------------------------------------------------===//
556 
558  // SI has some legal vector types, but no legal vector operations. Say no
559  // shuffles are legal in order to prefer scalarizing some vector operations.
560  return false;
561 }
562 
564  const CallInst &CI,
565  MachineFunction &MF,
566  unsigned IntrID) const {
567  switch (IntrID) {
568  case Intrinsic::amdgcn_atomic_inc:
569  case Intrinsic::amdgcn_atomic_dec:
570  case Intrinsic::amdgcn_ds_fadd:
571  case Intrinsic::amdgcn_ds_fmin:
572  case Intrinsic::amdgcn_ds_fmax: {
574  Info.memVT = MVT::getVT(CI.getType());
575  Info.ptrVal = CI.getOperand(0);
576  Info.align = 0;
578 
579  const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
580  if (!Vol || !Vol->isZero())
582 
583  return true;
584  }
585 
586  // Image load.
587  case Intrinsic::amdgcn_image_load:
588  case Intrinsic::amdgcn_image_load_mip:
589 
590  // Sample.
591  case Intrinsic::amdgcn_image_sample:
592  case Intrinsic::amdgcn_image_sample_cl:
593  case Intrinsic::amdgcn_image_sample_d:
594  case Intrinsic::amdgcn_image_sample_d_cl:
595  case Intrinsic::amdgcn_image_sample_l:
596  case Intrinsic::amdgcn_image_sample_b:
597  case Intrinsic::amdgcn_image_sample_b_cl:
598  case Intrinsic::amdgcn_image_sample_lz:
599  case Intrinsic::amdgcn_image_sample_cd:
600  case Intrinsic::amdgcn_image_sample_cd_cl:
601 
602  // Sample with comparison.
603  case Intrinsic::amdgcn_image_sample_c:
604  case Intrinsic::amdgcn_image_sample_c_cl:
605  case Intrinsic::amdgcn_image_sample_c_d:
606  case Intrinsic::amdgcn_image_sample_c_d_cl:
607  case Intrinsic::amdgcn_image_sample_c_l:
608  case Intrinsic::amdgcn_image_sample_c_b:
609  case Intrinsic::amdgcn_image_sample_c_b_cl:
610  case Intrinsic::amdgcn_image_sample_c_lz:
611  case Intrinsic::amdgcn_image_sample_c_cd:
612  case Intrinsic::amdgcn_image_sample_c_cd_cl:
613 
614  // Sample with offsets.
615  case Intrinsic::amdgcn_image_sample_o:
616  case Intrinsic::amdgcn_image_sample_cl_o:
617  case Intrinsic::amdgcn_image_sample_d_o:
618  case Intrinsic::amdgcn_image_sample_d_cl_o:
619  case Intrinsic::amdgcn_image_sample_l_o:
620  case Intrinsic::amdgcn_image_sample_b_o:
621  case Intrinsic::amdgcn_image_sample_b_cl_o:
622  case Intrinsic::amdgcn_image_sample_lz_o:
623  case Intrinsic::amdgcn_image_sample_cd_o:
624  case Intrinsic::amdgcn_image_sample_cd_cl_o:
625 
626  // Sample with comparison and offsets.
627  case Intrinsic::amdgcn_image_sample_c_o:
628  case Intrinsic::amdgcn_image_sample_c_cl_o:
629  case Intrinsic::amdgcn_image_sample_c_d_o:
630  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
631  case Intrinsic::amdgcn_image_sample_c_l_o:
632  case Intrinsic::amdgcn_image_sample_c_b_o:
633  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
634  case Intrinsic::amdgcn_image_sample_c_lz_o:
635  case Intrinsic::amdgcn_image_sample_c_cd_o:
636  case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
637 
638  // Basic gather4
639  case Intrinsic::amdgcn_image_gather4:
640  case Intrinsic::amdgcn_image_gather4_cl:
641  case Intrinsic::amdgcn_image_gather4_l:
642  case Intrinsic::amdgcn_image_gather4_b:
643  case Intrinsic::amdgcn_image_gather4_b_cl:
644  case Intrinsic::amdgcn_image_gather4_lz:
645 
646  // Gather4 with comparison
647  case Intrinsic::amdgcn_image_gather4_c:
648  case Intrinsic::amdgcn_image_gather4_c_cl:
649  case Intrinsic::amdgcn_image_gather4_c_l:
650  case Intrinsic::amdgcn_image_gather4_c_b:
651  case Intrinsic::amdgcn_image_gather4_c_b_cl:
652  case Intrinsic::amdgcn_image_gather4_c_lz:
653 
654  // Gather4 with offsets
655  case Intrinsic::amdgcn_image_gather4_o:
656  case Intrinsic::amdgcn_image_gather4_cl_o:
657  case Intrinsic::amdgcn_image_gather4_l_o:
658  case Intrinsic::amdgcn_image_gather4_b_o:
659  case Intrinsic::amdgcn_image_gather4_b_cl_o:
660  case Intrinsic::amdgcn_image_gather4_lz_o:
661 
662  // Gather4 with comparison and offsets
663  case Intrinsic::amdgcn_image_gather4_c_o:
664  case Intrinsic::amdgcn_image_gather4_c_cl_o:
665  case Intrinsic::amdgcn_image_gather4_c_l_o:
666  case Intrinsic::amdgcn_image_gather4_c_b_o:
667  case Intrinsic::amdgcn_image_gather4_c_b_cl_o:
668  case Intrinsic::amdgcn_image_gather4_c_lz_o: {
671  Info.memVT = MVT::getVT(CI.getType());
672  Info.ptrVal = MFI->getImagePSV(
674  CI.getArgOperand(1));
675  Info.align = 0;
678  return true;
679  }
680  case Intrinsic::amdgcn_image_store:
681  case Intrinsic::amdgcn_image_store_mip: {
683  Info.opc = ISD::INTRINSIC_VOID;
684  Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
685  Info.ptrVal = MFI->getImagePSV(
687  CI.getArgOperand(2));
690  Info.align = 0;
691  return true;
692  }
693  case Intrinsic::amdgcn_image_atomic_swap:
694  case Intrinsic::amdgcn_image_atomic_add:
695  case Intrinsic::amdgcn_image_atomic_sub:
696  case Intrinsic::amdgcn_image_atomic_smin:
697  case Intrinsic::amdgcn_image_atomic_umin:
698  case Intrinsic::amdgcn_image_atomic_smax:
699  case Intrinsic::amdgcn_image_atomic_umax:
700  case Intrinsic::amdgcn_image_atomic_and:
701  case Intrinsic::amdgcn_image_atomic_or:
702  case Intrinsic::amdgcn_image_atomic_xor:
703  case Intrinsic::amdgcn_image_atomic_inc:
704  case Intrinsic::amdgcn_image_atomic_dec: {
707  Info.memVT = MVT::getVT(CI.getType());
708  Info.ptrVal = MFI->getImagePSV(
710  CI.getArgOperand(2));
711 
715 
716  // XXX - Should this be volatile without known ordering?
718  return true;
719  }
720  case Intrinsic::amdgcn_image_atomic_cmpswap: {
723  Info.memVT = MVT::getVT(CI.getType());
724  Info.ptrVal = MFI->getImagePSV(
726  CI.getArgOperand(3));
727 
731 
732  // XXX - Should this be volatile without known ordering?
734  return true;
735  }
736  case Intrinsic::amdgcn_tbuffer_load:
737  case Intrinsic::amdgcn_buffer_load:
738  case Intrinsic::amdgcn_buffer_load_format: {
741  Info.ptrVal = MFI->getBufferPSV(
743  CI.getArgOperand(0));
744  Info.memVT = MVT::getVT(CI.getType());
747 
748  // There is a constant offset component, but there are additional register
749  // offsets which could break AA if we set the offset to anything non-0.
750  return true;
751  }
752  case Intrinsic::amdgcn_tbuffer_store:
753  case Intrinsic::amdgcn_buffer_store:
754  case Intrinsic::amdgcn_buffer_store_format: {
756  Info.opc = ISD::INTRINSIC_VOID;
757  Info.ptrVal = MFI->getBufferPSV(
759  CI.getArgOperand(1));
760  Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
763  return true;
764  }
765  case Intrinsic::amdgcn_buffer_atomic_swap:
766  case Intrinsic::amdgcn_buffer_atomic_add:
767  case Intrinsic::amdgcn_buffer_atomic_sub:
768  case Intrinsic::amdgcn_buffer_atomic_smin:
769  case Intrinsic::amdgcn_buffer_atomic_umin:
770  case Intrinsic::amdgcn_buffer_atomic_smax:
771  case Intrinsic::amdgcn_buffer_atomic_umax:
772  case Intrinsic::amdgcn_buffer_atomic_and:
773  case Intrinsic::amdgcn_buffer_atomic_or:
774  case Intrinsic::amdgcn_buffer_atomic_xor: {
777  Info.ptrVal = MFI->getBufferPSV(
779  CI.getArgOperand(1));
780  Info.memVT = MVT::getVT(CI.getType());
785  return true;
786  }
787  case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
790  Info.ptrVal = MFI->getBufferPSV(
792  CI.getArgOperand(2));
793  Info.memVT = MVT::getVT(CI.getType());
798  return true;
799  }
800  default:
801  return false;
802  }
803 }
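// Each case above describes a memory intrinsic to the SelectionDAG: the value
// type actually accessed (memVT), the pointer operand (or a buffer/image
// pseudo source value from the machine function info), the alignment, and
// flags such as whether the access may need to be treated as volatile (see
// the XXX notes above). This lets the DAG schedule and fold offsets for these
// nodes like ordinary loads and stores.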
804 
807  Type *&AccessTy) const {
808  switch (II->getIntrinsicID()) {
809  case Intrinsic::amdgcn_atomic_inc:
810  case Intrinsic::amdgcn_atomic_dec:
811  case Intrinsic::amdgcn_ds_fadd:
812  case Intrinsic::amdgcn_ds_fmin:
813  case Intrinsic::amdgcn_ds_fmax: {
814  Value *Ptr = II->getArgOperand(0);
815  AccessTy = II->getType();
816  Ops.push_back(Ptr);
817  return true;
818  }
819  default:
820  return false;
821  }
822 }
823 
824 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
825  if (!Subtarget->hasFlatInstOffsets()) {
826  // Flat instructions do not have offsets, and only have the register
827  // address.
828  return AM.BaseOffs == 0 && AM.Scale == 0;
829  }
830 
831  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
832  // the sign bit is ignored and is treated as a 12-bit unsigned offset.
833 
834  // Just r + i
835  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
836 }
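// Worked example, assuming a target with flat instruction offsets (e.g. GFX9):
//   {BaseOffs = 4095, Scale = 0}  -> legal   (fits in 12 unsigned bits)
//   {BaseOffs = 4096, Scale = 0}  -> illegal (offset too large)
//   {BaseOffs = 0,    Scale = 1}  -> illegal (no scaled addressing on flat)
// Without flat offsets, only the plain register form (BaseOffs == 0,
// Scale == 0) is accepted.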
837 
838 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
840  return isInt<13>(AM.BaseOffs) && AM.Scale == 0;
841 
 843  // Assume that we will use FLAT for all global memory accesses
844  // on VI.
845  // FIXME: This assumption is currently wrong. On VI we still use
846  // MUBUF instructions for the r + i addressing mode. As currently
847  // implemented, the MUBUF instructions only work on buffer < 4GB.
848  // It may be possible to support > 4GB buffers with MUBUF instructions,
849  // by setting the stride value in the resource descriptor which would
850  // increase the size limit to (stride * 4GB). However, this is risky,
851  // because it has never been validated.
852  return isLegalFlatAddressingMode(AM);
853  }
854 
855  return isLegalMUBUFAddressingMode(AM);
856 }
857 
858 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
859  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
860  // additionally can do r + r + i with addr64. 32-bit has more addressing
861  // mode options. Depending on the resource constant, it can also do
862  // (i64 r0) + (i32 r1) * (i14 i).
863  //
864  // Private arrays end up using a scratch buffer most of the time, so also
865  // assume those use MUBUF instructions. Scratch loads / stores are currently
866  // implemented as mubuf instructions with offen bit set, so slightly
867  // different than the normal addr64.
868  if (!isUInt<12>(AM.BaseOffs))
869  return false;
870 
871  // FIXME: Since we can split immediate into soffset and immediate offset,
872  // would it make sense to allow any immediate?
873 
874  switch (AM.Scale) {
875  case 0: // r + i or just i, depending on HasBaseReg.
876  return true;
877  case 1:
878  return true; // We have r + r or r + i.
879  case 2:
880  if (AM.HasBaseReg) {
881  // Reject 2 * r + r.
882  return false;
883  }
884 
885  // Allow 2 * r as r + r
886  // Or 2 * r + i is allowed as r + r + i.
887  return true;
888  default: // Don't allow n * r
889  return false;
890  }
891 }
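// In summary, for MUBUF/MTBUF the immediate must fit in 12 unsigned bits
// (0..4095) and the scale may be:
//   0 -> r + i or just i
//   1 -> r + r or r + r + i
//   2 -> only without an extra base register, since 2 * r is re-expressed as
//        r + r (or r + r + i)
// Any larger scale (n * r) is rejected.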
892 
894  const AddrMode &AM, Type *Ty,
895  unsigned AS, Instruction *I) const {
896  // No global is ever allowed as a base.
897  if (AM.BaseGV)
898  return false;
899 
900  if (AS == AMDGPUASI.GLOBAL_ADDRESS)
901  return isLegalGlobalAddressingMode(AM);
902 
903  if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
905  // If the offset isn't a multiple of 4, it probably isn't going to be
906  // correctly aligned.
907  // FIXME: Can we get the real alignment here?
908  if (AM.BaseOffs % 4 != 0)
909  return isLegalMUBUFAddressingMode(AM);
910 
911  // There are no SMRD extloads, so if we have to do a small type access we
912  // will use a MUBUF load.
913  // FIXME?: We also need to do this if unaligned, but we don't know the
914  // alignment here.
915  if (DL.getTypeStoreSize(Ty) < 4)
916  return isLegalGlobalAddressingMode(AM);
917 
919  // SMRD instructions have an 8-bit, dword offset on SI.
920  if (!isUInt<8>(AM.BaseOffs / 4))
921  return false;
923  // On CI+, this can also be a 32-bit literal constant offset. If it fits
924  // in 8-bits, it can use a smaller encoding.
925  if (!isUInt<32>(AM.BaseOffs / 4))
926  return false;
928  // On VI, these use the SMEM format and the offset is 20-bit in bytes.
929  if (!isUInt<20>(AM.BaseOffs))
930  return false;
931  } else
932  llvm_unreachable("unhandled generation");
933 
934  if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
935  return true;
936 
937  if (AM.Scale == 1 && AM.HasBaseReg)
938  return true;
939 
940  return false;
941 
942  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
943  return isLegalMUBUFAddressingMode(AM);
944  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
945  AS == AMDGPUASI.REGION_ADDRESS) {
946  // Basic, single offset DS instructions allow a 16-bit unsigned immediate
947  // field.
948  // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
949  // an 8-bit dword offset but we don't know the alignment here.
950  if (!isUInt<16>(AM.BaseOffs))
951  return false;
952 
953  if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
954  return true;
955 
956  if (AM.Scale == 1 && AM.HasBaseReg)
957  return true;
958 
959  return false;
960  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
962  // For an unknown address space, this usually means that this is for some
963  // reason being used for pure arithmetic, and not based on some addressing
964  // computation. We don't have instructions that compute pointers with any
965  // addressing modes, so treat them as having no offset like flat
966  // instructions.
967  return isLegalFlatAddressingMode(AM);
968  } else {
969  llvm_unreachable("unhandled address space");
970  }
971 }
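// Overall dispatch by address space: global uses the global/FLAT rules,
// constant uses the generation-specific SMRD offset limits (falling back to
// the MUBUF or global rules for misaligned offsets or sub-dword types),
// private uses MUBUF, local/region allows a 16-bit unsigned DS immediate, and
// flat or unknown address spaces fold no offset at all.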
972 
974  const SelectionDAG &DAG) const {
975  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
976  return (MemVT.getSizeInBits() <= 4 * 32);
977  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
978  unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
979  return (MemVT.getSizeInBits() <= MaxPrivateBits);
980  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
981  return (MemVT.getSizeInBits() <= 2 * 32);
982  }
983  return true;
984 }
985 
987  unsigned AddrSpace,
988  unsigned Align,
989  bool *IsFast) const {
990  if (IsFast)
991  *IsFast = false;
992 
993  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
994  // which isn't a simple VT.
995  // Until MVT is extended to handle this, simply check for the size and
996  // rely on the condition below: allow accesses if the size is a multiple of 4.
997  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
998  VT.getStoreSize() > 16)) {
999  return false;
1000  }
1001 
1002  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
1003  AddrSpace == AMDGPUASI.REGION_ADDRESS) {
1004  // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1005  // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1006  // with adjacent offsets.
1007  bool AlignedBy4 = (Align % 4 == 0);
1008  if (IsFast)
1009  *IsFast = AlignedBy4;
1010 
1011  return AlignedBy4;
1012  }
1013 
1014  // FIXME: We have to be conservative here and assume that flat operations
1015  // will access scratch. If we had access to the IR function, then we
1016  // could determine if any private memory was used in the function.
1018  (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
1019  AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
1020  return false;
1021  }
1022 
 1024  // If we have a uniform constant load, it still requires using a slow
1025  // buffer instruction if unaligned.
1026  if (IsFast) {
1027  *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS ||
1028  AddrSpace == AMDGPUASI.CONSTANT_ADDRESS_32BIT) ?
1029  (Align % 4 == 0) : true;
1030  }
1031 
1032  return true;
1033  }
1034 
 1035  // Values smaller than a dword must be aligned.
1036  if (VT.bitsLT(MVT::i32))
1037  return false;
1038 
1039  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1040  // byte-address are ignored, thus forcing Dword alignment.
1041  // This applies to private, global, and constant memory.
1042  if (IsFast)
1043  *IsFast = true;
1044 
1045  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
1046 }
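// Examples: an 8-byte LDS access with only 4-byte alignment is allowed and
// reported as fast, since it can be done with ds_read2/write2_b32 and
// adjacent offsets; a misaligned sub-dword access is rejected; and a
// dword-aligned access wider than 32 bits reaches the final check and is
// allowed because the hardware ignores the two low address bits.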
1047 
1048 EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
1049  unsigned SrcAlign, bool IsMemset,
1050  bool ZeroMemset,
1051  bool MemcpyStrSrc,
1052  MachineFunction &MF) const {
1053  // FIXME: Should account for address space here.
1054 
1055  // The default fallback uses the private pointer size as a guess for a type to
1056  // use. Make sure we switch these to 64-bit accesses.
1057 
1058  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1059  return MVT::v4i32;
1060 
1061  if (Size >= 8 && DstAlign >= 4)
1062  return MVT::v2i32;
1063 
1064  // Use the default.
1065  return MVT::Other;
1066 }
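// Example: a 32-byte memcpy with a destination aligned to at least 4 bytes is
// widened to v4i32 (dwordx4) accesses; an 8..15 byte copy gets v2i32; anything
// smaller or less aligned falls back to the generic type choice (MVT::Other).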
1067 
1068 static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
1069  return AS == AMDGPUASI.GLOBAL_ADDRESS ||
1070  AS == AMDGPUASI.FLAT_ADDRESS ||
1071  AS == AMDGPUASI.CONSTANT_ADDRESS ||
1072  AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT;
1073 }
1074 
1076  unsigned DestAS) const {
1077  return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
1079 }
1080 
1082  const MemSDNode *MemNode = cast<MemSDNode>(N);
1083  const Value *Ptr = MemNode->getMemOperand()->getValue();
1084  const Instruction *I = dyn_cast<Instruction>(Ptr);
1085  return I && I->getMetadata("amdgpu.noclobber");
1086 }
1087 
1089  unsigned DestAS) const {
1090  // Flat -> private/local is a simple truncate.
1091  // Flat -> global is no-op
1092  if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
1093  return true;
1094 
1095  return isNoopAddrSpaceCast(SrcAS, DestAS);
1096 }
1097 
1099  const MemSDNode *MemNode = cast<MemSDNode>(N);
1100 
1101  return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
1102 }
1103 
1106  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1107  return TypeSplitVector;
1108 
1110 }
1111 
1113  Type *Ty) const {
1114  // FIXME: Could be smarter if called for vector constants.
1115  return true;
1116 }
1117 
1119  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1120  switch (Op) {
1121  case ISD::LOAD:
1122  case ISD::STORE:
1123 
1124  // These operations are done with 32-bit instructions anyway.
1125  case ISD::AND:
1126  case ISD::OR:
1127  case ISD::XOR:
1128  case ISD::SELECT:
1129  // TODO: Extensions?
1130  return true;
1131  default:
1132  return false;
1133  }
1134  }
1135 
1136  // SimplifySetCC uses this function to determine whether or not it should
1137  // create setcc with i1 operands. We don't have instructions for i1 setcc.
1138  if (VT == MVT::i1 && Op == ISD::SETCC)
1139  return false;
1140 
1141  return TargetLowering::isTypeDesirableForOp(Op, VT);
1142 }
1143 
1144 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1145  const SDLoc &SL,
1146  SDValue Chain,
1147  uint64_t Offset) const {
1148  const DataLayout &DL = DAG.getDataLayout();
1149  MachineFunction &MF = DAG.getMachineFunction();
1151 
1152  const ArgDescriptor *InputPtrReg;
1153  const TargetRegisterClass *RC;
1154 
1155  std::tie(InputPtrReg, RC)
1157 
1160  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1161  MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1162 
1163  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
1164  DAG.getConstant(Offset, SL, PtrVT));
1165 }
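// The kernel argument address is formed as "kernarg segment base + byte
// offset": the base is copied out of the live-in virtual register holding the
// preloaded kernarg segment pointer, and the constant Offset selects the
// individual argument within that segment.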
1166 
1167 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1168  const SDLoc &SL) const {
1169  auto MFI = DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
1170  uint64_t Offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
1171  return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1172 }
1173 
1174 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1175  const SDLoc &SL, SDValue Val,
1176  bool Signed,
1177  const ISD::InputArg *Arg) const {
1178  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1179  VT.bitsLT(MemVT)) {
1180  unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1181  Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1182  }
1183 
1184  if (MemVT.isFloatingPoint())
1185  Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
1186  else if (Signed)
1187  Val = DAG.getSExtOrTrunc(Val, SL, VT);
1188  else
1189  Val = DAG.getZExtOrTrunc(Val, SL, VT);
1190 
1191  return Val;
1192 }
1193 
1194 SDValue SITargetLowering::lowerKernargMemParameter(
1195  SelectionDAG &DAG, EVT VT, EVT MemVT,
1196  const SDLoc &SL, SDValue Chain,
1197  uint64_t Offset, bool Signed,
1198  const ISD::InputArg *Arg) const {
1199  const DataLayout &DL = DAG.getDataLayout();
1200  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1202  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1203 
1204  unsigned Align = DL.getABITypeAlignment(Ty);
1205 
1206  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1207  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
1210 
1211  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1212  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1213 }
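// The argument value itself is then a load through that pointer at the type's
// ABI alignment, and convertArgType widens or narrows the loaded MemVT (via
// AssertZext/AssertSext plus extension or truncation, or FP conversion) to the
// VT the rest of lowering expects.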
1214 
1215 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1216  const SDLoc &SL, SDValue Chain,
1217  const ISD::InputArg &Arg) const {
1218  MachineFunction &MF = DAG.getMachineFunction();
1219  MachineFrameInfo &MFI = MF.getFrameInfo();
1220 
1221  if (Arg.Flags.isByVal()) {
1222  unsigned Size = Arg.Flags.getByValSize();
1223  int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1224  return DAG.getFrameIndex(FrameIdx, MVT::i32);
1225  }
1226 
1227  unsigned ArgOffset = VA.getLocMemOffset();
1228  unsigned ArgSize = VA.getValVT().getStoreSize();
1229 
1230  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1231 
1232  // Create load nodes to retrieve arguments from the stack.
1233  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1234  SDValue ArgValue;
1235 
 1236  // For NON_EXTLOAD, the generic code in getLoad asserts that ValVT == MemVT.
1238  MVT MemVT = VA.getValVT();
1239 
1240  switch (VA.getLocInfo()) {
1241  default:
1242  break;
1243  case CCValAssign::BCvt:
1244  MemVT = VA.getLocVT();
1245  break;
1246  case CCValAssign::SExt:
1247  ExtType = ISD::SEXTLOAD;
1248  break;
1249  case CCValAssign::ZExt:
1250  ExtType = ISD::ZEXTLOAD;
1251  break;
1252  case CCValAssign::AExt:
1253  ExtType = ISD::EXTLOAD;
1254  break;
1255  }
1256 
1257  ArgValue = DAG.getExtLoad(
1258  ExtType, SL, VA.getLocVT(), Chain, FIN,
1260  MemVT);
1261  return ArgValue;
1262 }
1263 
1264 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1265  const SIMachineFunctionInfo &MFI,
1266  EVT VT,
1268  const ArgDescriptor *Reg;
1269  const TargetRegisterClass *RC;
1270 
1271  std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1272  return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1273 }
1274 
1276  CallingConv::ID CallConv,
1278  BitVector &Skipped,
1279  FunctionType *FType,
1280  SIMachineFunctionInfo *Info) {
1281  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1282  const ISD::InputArg &Arg = Ins[I];
1283 
1284  // First check if it's a PS input addr.
1285  if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
1286  !Arg.Flags.isByVal() && PSInputNum <= 15) {
1287 
1288  if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
1289  // We can safely skip PS inputs.
1290  Skipped.set(I);
1291  ++PSInputNum;
1292  continue;
1293  }
1294 
1295  Info->markPSInputAllocated(PSInputNum);
1296  if (Arg.Used)
1297  Info->markPSInputEnabled(PSInputNum);
1298 
1299  ++PSInputNum;
1300  }
1301 
 1302  // Second, split vertices into their elements.
1303  if (Arg.VT.isVector()) {
1304  ISD::InputArg NewArg = Arg;
1305  NewArg.Flags.setSplit();
1306  NewArg.VT = Arg.VT.getVectorElementType();
1307 
1308  // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
1309  // three or five element vertex only needs three or five registers,
1310  // NOT four or eight.
1311  Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
1312  unsigned NumElements = ParamType->getVectorNumElements();
1313 
1314  for (unsigned J = 0; J != NumElements; ++J) {
1315  Splits.push_back(NewArg);
1316  NewArg.PartOffset += NewArg.VT.getStoreSize();
1317  }
1318  } else {
1319  Splits.push_back(Arg);
1320  }
1321  }
1322 }
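// Example: for an AMDGPU_PS function, an input that is neither used nor
// already allocated is skipped entirely (though its PSInputNum is still
// consumed), and a v3f32 input is split into three scalar f32 arguments rather
// than being rounded up to four.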
1323 
1324 // Allocate special inputs passed in VGPRs.
1326  MachineFunction &MF,
1327  const SIRegisterInfo &TRI,
1328  SIMachineFunctionInfo &Info) {
1329  if (Info.hasWorkItemIDX()) {
1330  unsigned Reg = AMDGPU::VGPR0;
1331  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1332 
1333  CCInfo.AllocateReg(Reg);
1335  }
1336 
1337  if (Info.hasWorkItemIDY()) {
1338  unsigned Reg = AMDGPU::VGPR1;
1339  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1340 
1341  CCInfo.AllocateReg(Reg);
1343  }
1344 
1345  if (Info.hasWorkItemIDZ()) {
1346  unsigned Reg = AMDGPU::VGPR2;
1347  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1348 
1349  CCInfo.AllocateReg(Reg);
1351  }
1352 }
1353 
 1354 // Try to allocate a VGPR at the end of the argument list, or, if no argument
 1355 // VGPRs are left, allocate a stack slot.
1357  ArrayRef<MCPhysReg> ArgVGPRs
1358  = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1359  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1360  if (RegIdx == ArgVGPRs.size()) {
1361  // Spill to stack required.
1362  int64_t Offset = CCInfo.AllocateStack(4, 4);
1363 
1364  return ArgDescriptor::createStack(Offset);
1365  }
1366 
1367  unsigned Reg = ArgVGPRs[RegIdx];
1368  Reg = CCInfo.AllocateReg(Reg);
1369  assert(Reg != AMDGPU::NoRegister);
1370 
1371  MachineFunction &MF = CCInfo.getMachineFunction();
1372  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1373  return ArgDescriptor::createRegister(Reg);
1374 }
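// Example: once all 32 argument VGPRs are taken, a remaining workitem ID input
// is passed through a 4-byte, 4-byte-aligned stack slot instead of a register.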
1375 
1377  const TargetRegisterClass *RC,
1378  unsigned NumArgRegs) {
1379  ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1380  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1381  if (RegIdx == ArgSGPRs.size())
1382  report_fatal_error("ran out of SGPRs for arguments");
1383 
1384  unsigned Reg = ArgSGPRs[RegIdx];
1385  Reg = CCInfo.AllocateReg(Reg);
1386  assert(Reg != AMDGPU::NoRegister);
1387 
1388  MachineFunction &MF = CCInfo.getMachineFunction();
1389  MF.addLiveIn(Reg, RC);
1390  return ArgDescriptor::createRegister(Reg);
1391 }
1392 
1394  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1395 }
1396 
1398  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1399 }
1400 
1402  MachineFunction &MF,
1403  const SIRegisterInfo &TRI,
1404  SIMachineFunctionInfo &Info) {
1405  if (Info.hasWorkItemIDX())
1406  Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
1407 
1408  if (Info.hasWorkItemIDY())
1409  Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
1410 
1411  if (Info.hasWorkItemIDZ())
1412  Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1413 }
1414 
1416  MachineFunction &MF,
1417  const SIRegisterInfo &TRI,
1418  SIMachineFunctionInfo &Info) {
1419  auto &ArgInfo = Info.getArgInfo();
1420 
1421  // TODO: Unify handling with private memory pointers.
1422 
1423  if (Info.hasDispatchPtr())
1424  ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1425 
1426  if (Info.hasQueuePtr())
1427  ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1428 
1429  if (Info.hasKernargSegmentPtr())
1430  ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1431 
1432  if (Info.hasDispatchID())
1433  ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1434 
1435  // flat_scratch_init is not applicable for non-kernel functions.
1436 
1437  if (Info.hasWorkGroupIDX())
1438  ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1439 
1440  if (Info.hasWorkGroupIDY())
1441  ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1442 
1443  if (Info.hasWorkGroupIDZ())
1444  ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1445 
1446  if (Info.hasImplicitArgPtr())
1447  ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1448 }
1449 
1450 // Allocate special inputs passed in user SGPRs.
1451 static void allocateHSAUserSGPRs(CCState &CCInfo,
1452  MachineFunction &MF,
1453  const SIRegisterInfo &TRI,
1454  SIMachineFunctionInfo &Info) {
1455  if (Info.hasImplicitBufferPtr()) {
1456  unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1457  MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1458  CCInfo.AllocateReg(ImplicitBufferPtrReg);
1459  }
1460 
1461  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1462  if (Info.hasPrivateSegmentBuffer()) {
1463  unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1464  MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1465  CCInfo.AllocateReg(PrivateSegmentBufferReg);
1466  }
1467 
1468  if (Info.hasDispatchPtr()) {
1469  unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1470  MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1471  CCInfo.AllocateReg(DispatchPtrReg);
1472  }
1473 
1474  if (Info.hasQueuePtr()) {
1475  unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1476  MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1477  CCInfo.AllocateReg(QueuePtrReg);
1478  }
1479 
1480  if (Info.hasKernargSegmentPtr()) {
1481  unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1482  MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1483  CCInfo.AllocateReg(InputPtrReg);
1484  }
1485 
1486  if (Info.hasDispatchID()) {
1487  unsigned DispatchIDReg = Info.addDispatchID(TRI);
1488  MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1489  CCInfo.AllocateReg(DispatchIDReg);
1490  }
1491 
1492  if (Info.hasFlatScratchInit()) {
1493  unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1494  MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1495  CCInfo.AllocateReg(FlatScratchInitReg);
1496  }
1497 
1498  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1499  // these from the dispatch pointer.
1500 }
1501 
1502 // Allocate special input registers that are initialized per-wave.
1503 static void allocateSystemSGPRs(CCState &CCInfo,
1504  MachineFunction &MF,
1505  SIMachineFunctionInfo &Info,
1506  CallingConv::ID CallConv,
1507  bool IsShader) {
1508  if (Info.hasWorkGroupIDX()) {
1509  unsigned Reg = Info.addWorkGroupIDX();
1510  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1511  CCInfo.AllocateReg(Reg);
1512  }
1513 
1514  if (Info.hasWorkGroupIDY()) {
1515  unsigned Reg = Info.addWorkGroupIDY();
1516  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1517  CCInfo.AllocateReg(Reg);
1518  }
1519 
1520  if (Info.hasWorkGroupIDZ()) {
1521  unsigned Reg = Info.addWorkGroupIDZ();
1522  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1523  CCInfo.AllocateReg(Reg);
1524  }
1525 
1526  if (Info.hasWorkGroupInfo()) {
1527  unsigned Reg = Info.addWorkGroupInfo();
1528  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1529  CCInfo.AllocateReg(Reg);
1530  }
1531 
1532  if (Info.hasPrivateSegmentWaveByteOffset()) {
1533  // Scratch wave offset passed in system SGPR.
1534  unsigned PrivateSegmentWaveByteOffsetReg;
1535 
1536  if (IsShader) {
1537  PrivateSegmentWaveByteOffsetReg =
1539 
1540  // This is true if the scratch wave byte offset doesn't have a fixed
1541  // location.
1542  if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1543  PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1544  Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1545  }
1546  } else
1547  PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1548 
1549  MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1550  CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1551  }
1552 }
1553 
1555  MachineFunction &MF,
1556  const SIRegisterInfo &TRI,
1557  SIMachineFunctionInfo &Info) {
1558  // Now that we've figured out where the scratch register inputs are, see if
 1559  // we should reserve the arguments and use them directly.
1560  MachineFrameInfo &MFI = MF.getFrameInfo();
1561  bool HasStackObjects = MFI.hasStackObjects();
1562 
1563  // Record that we know we have non-spill stack objects so we don't need to
1564  // check all stack objects later.
1565  if (HasStackObjects)
1566  Info.setHasNonSpillStackObjects(true);
1567 
1568  // Everything live out of a block is spilled with fast regalloc, so it's
1569  // almost certain that spilling will be required.
1570  if (TM.getOptLevel() == CodeGenOpt::None)
1571  HasStackObjects = true;
1572 
1573  // For now assume stack access is needed in any callee functions, so we need
1574  // the scratch registers to pass in.
1575  bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1576 
1577  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
1578  if (ST.isAmdCodeObjectV2(MF)) {
1579  if (RequiresStackAccess) {
1580  // If we have stack objects, we unquestionably need the private buffer
1581  // resource. For the Code Object V2 ABI, this will be the first 4 user
1582  // SGPR inputs. We can reserve those and use them directly.
1583 
1584  unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
1586  Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1587 
1588  if (MFI.hasCalls()) {
1589  // If we have calls, we need to keep the frame register in a register
1590  // that won't be clobbered by a call, so ensure it is copied somewhere.
1591 
1592  // This is not a problem for the scratch wave offset, because the same
1593  // registers are reserved in all functions.
1594 
1595  // FIXME: Nothing is really ensuring this is a call preserved register,
1596  // it's just selected from the end so it happens to be.
1597  unsigned ReservedOffsetReg
1599  Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1600  } else {
1601  unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
1603  Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
1604  }
1605  } else {
1606  unsigned ReservedBufferReg
1608  unsigned ReservedOffsetReg
1610 
1611  // We tentatively reserve the last registers (skipping the last two
1612  // which may contain VCC). After register allocation, we'll replace
1613  // these with the ones immediately after those which were really
 1614  // allocated. In the prologue, copies will be inserted from the argument
1615  // to these reserved registers.
1616  Info.setScratchRSrcReg(ReservedBufferReg);
1617  Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1618  }
1619  } else {
1620  unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
1621 
1622  // Without HSA, relocations are used for the scratch pointer and the
1623  // buffer resource setup is always inserted in the prologue. Scratch wave
1624  // offset is still in an input SGPR.
1625  Info.setScratchRSrcReg(ReservedBufferReg);
1626 
1627  if (HasStackObjects && !MFI.hasCalls()) {
1628  unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
1630  Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
1631  } else {
1632  unsigned ReservedOffsetReg
1634  Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1635  }
1636  }
1637 }
1638 
1641  return !Info->isEntryFunction();
1642 }
1643 
1645 
1646 }
1647 
1649  MachineBasicBlock *Entry,
1650  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1651  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1652 
1653  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1654  if (!IStart)
1655  return;
1656 
1658  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1659  MachineBasicBlock::iterator MBBI = Entry->begin();
1660  for (const MCPhysReg *I = IStart; *I; ++I) {
1661  const TargetRegisterClass *RC = nullptr;
1662  if (AMDGPU::SReg_64RegClass.contains(*I))
1663  RC = &AMDGPU::SGPR_64RegClass;
1664  else if (AMDGPU::SReg_32RegClass.contains(*I))
1665  RC = &AMDGPU::SGPR_32RegClass;
1666  else
1667  llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1668 
1669  unsigned NewVR = MRI->createVirtualRegister(RC);
1670  // Create copy from CSR to a virtual register.
1671  Entry->addLiveIn(*I);
1672  BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1673  .addReg(*I);
1674 
1675  // Insert the copy-back instructions right before the terminator.
1676  for (auto *Exit : Exits)
1677  BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1678  TII->get(TargetOpcode::COPY), *I)
1679  .addReg(NewVR);
1680  }
1681 }
1682 
1684  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1685  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1686  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1687  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1688 
1689  MachineFunction &MF = DAG.getMachineFunction();
1690  FunctionType *FType = MF.getFunction().getFunctionType();
1692  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
1693 
1694  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
1695  const Function &Fn = MF.getFunction();
1696  DiagnosticInfoUnsupported NoGraphicsHSA(
1697  Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
1698  DAG.getContext()->diagnose(NoGraphicsHSA);
1699  return DAG.getEntryNode();
1700  }
1701 
1702  // Create stack objects that are used for emitting debugger prologue if
1703  // "amdgpu-debugger-emit-prologue" attribute was specified.
1704  if (ST.debuggerEmitPrologue())
1705  createDebuggerPrologueStackObjects(MF);
1706 
1709  BitVector Skipped(Ins.size());
1710  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1711  *DAG.getContext());
1712 
1713  bool IsShader = AMDGPU::isShader(CallConv);
1714  bool IsKernel = AMDGPU::isKernel(CallConv);
1715  bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
1716 
1717  if (!IsEntryFunc) {
1718  // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1719  // this when allocating argument fixed offsets.
1720  CCInfo.AllocateStack(4, 4);
1721  }
1722 
1723  if (IsShader) {
1724  processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1725 
1726  // At least one interpolation mode must be enabled or else the GPU will
1727  // hang.
1728  //
1729  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1730  // set PSInputAddr, the user wants to enable some bits after the compilation
1731  // based on run-time states. Since we can't know what the final PSInputEna
1732  // will look like, so we shouldn't do anything here and the user should take
1733  // responsibility for the correct programming.
1734  //
1735  // Otherwise, the following restrictions apply:
1736  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1737  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1738  // enabled too.
1739  if (CallConv == CallingConv::AMDGPU_PS) {
1740  if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1741  ((Info->getPSInputAddr() & 0xF) == 0 &&
1742  Info->isPSInputAllocated(11))) {
1743  CCInfo.AllocateReg(AMDGPU::VGPR0);
1744  CCInfo.AllocateReg(AMDGPU::VGPR1);
1745  Info->markPSInputAllocated(0);
1746  Info->markPSInputEnabled(0);
1747  }
1748  if (Subtarget->isAmdPalOS()) {
1749  // For isAmdPalOS, the user does not enable some bits after compilation
1750  // based on run-time states; the register values being generated here are
1751  // the final ones set in hardware. Therefore we need to apply the
1752  // workaround to PSInputAddr and PSInputEnable together. (The case where
1753  // a bit is set in PSInputAddr but not PSInputEnable is where the
1754  // frontend set up an input arg for a particular interpolation mode, but
1755  // nothing uses that input arg. Really we should have an earlier pass
1756  // that removes such an arg.)
1757  unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1758  if ((PsInputBits & 0x7F) == 0 ||
1759  ((PsInputBits & 0xF) == 0 &&
1760  (PsInputBits >> 11 & 1)))
1761  Info->markPSInputEnabled(
1763  }
1764  }
1765 
1766  assert(!Info->hasDispatchPtr() &&
1767  !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1768  !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1769  !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1770  !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1771  !Info->hasWorkItemIDZ());
1772  } else if (IsKernel) {
1773  assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
1774  } else {
1775  Splits.append(Ins.begin(), Ins.end());
1776  }
1777 
1778  if (IsEntryFunc) {
1779  allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
1780  allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
1781  }
1782 
1783  if (IsKernel) {
1784  analyzeFormalArgumentsCompute(CCInfo, Ins);
1785  } else {
1786  CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
1787  CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
1788  }
1789 
1790  SmallVector<SDValue, 16> Chains;
1791 
1792  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
1793  const ISD::InputArg &Arg = Ins[i];
1794  if (Skipped[i]) {
1795  InVals.push_back(DAG.getUNDEF(Arg.VT));
1796  continue;
1797  }
1798 
1799  CCValAssign &VA = ArgLocs[ArgIdx++];
1800  MVT VT = VA.getLocVT();
1801 
1802  if (IsEntryFunc && VA.isMemLoc()) {
1803  VT = Ins[i].VT;
1804  EVT MemVT = VA.getLocVT();
1805 
1806  const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) +
1807  VA.getLocMemOffset();
1808  Info->setABIArgOffset(Offset + MemVT.getStoreSize());
1809 
 1810  // The first 36 bytes of the input buffer contain information about
1811  // thread group and global sizes.
1812  SDValue Arg = lowerKernargMemParameter(
1813  DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]);
1814  Chains.push_back(Arg.getValue(1));
1815 
1816  auto *ParamTy =
1817  dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
1819  ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
1820  // On SI local pointers are just offsets into LDS, so they are always
 1822  // less than 16 bits. On CI and newer they could potentially be
1822  // real pointers, so we can't guarantee their size.
1823  Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
1824  DAG.getValueType(MVT::i16));
1825  }
1826 
1827  InVals.push_back(Arg);
1828  continue;
1829  } else if (!IsEntryFunc && VA.isMemLoc()) {
1830  SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
1831  InVals.push_back(Val);
1832  if (!Arg.Flags.isByVal())
1833  Chains.push_back(Val.getValue(1));
1834  continue;
1835  }
1836 
1837  assert(VA.isRegLoc() && "Parameter must be in a register!");
1838 
1839  unsigned Reg = VA.getLocReg();
1840  const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
1841  EVT ValVT = VA.getValVT();
1842 
1843  Reg = MF.addLiveIn(Reg, RC);
1844  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1845 
1846  if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) {
1847  // The return object should be reasonably addressable.
1848 
 1849  // FIXME: This helps when the return is a real sret. If it is an
1850  // automatically inserted sret (i.e. CanLowerReturn returns false), an
1851  // extra copy is inserted in SelectionDAGBuilder which obscures this.
1852  unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits;
1853  Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1854  DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
1855  }
1856 
1857  // If this is an 8 or 16-bit value, it is really passed promoted
1858  // to 32 bits. Insert an assert[sz]ext to capture this, then
1859  // truncate to the right size.
1860  switch (VA.getLocInfo()) {
1861  case CCValAssign::Full:
1862  break;
1863  case CCValAssign::BCvt:
1864  Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
1865  break;
1866  case CCValAssign::SExt:
1867  Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
1868  DAG.getValueType(ValVT));
1869  Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1870  break;
1871  case CCValAssign::ZExt:
1872  Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1873  DAG.getValueType(ValVT));
1874  Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1875  break;
1876  case CCValAssign::AExt:
1877  Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1878  break;
1879  default:
1880  llvm_unreachable("Unknown loc info!");
1881  }
1882 
1883  if (IsShader && Arg.VT.isVector()) {
1884  // Build a vector from the registers
1885  Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
1886  unsigned NumElements = ParamType->getVectorNumElements();
1887 
1889  Regs.push_back(Val);
1890  for (unsigned j = 1; j != NumElements; ++j) {
1891  Reg = ArgLocs[ArgIdx++].getLocReg();
1892  Reg = MF.addLiveIn(Reg, RC);
1893 
1894  SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1895  Regs.push_back(Copy);
1896  }
1897 
1898  // Fill up the missing vector elements
1899  NumElements = Arg.VT.getVectorNumElements() - NumElements;
1900  Regs.append(NumElements, DAG.getUNDEF(VT));
1901 
1902  InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
1903  continue;
1904  }
1905 
1906  InVals.push_back(Val);
1907  }
1908 
1909  if (!IsEntryFunc) {
1910  // Special inputs come after user arguments.
1911  allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
1912  }
1913 
1914  // Start adding system SGPRs.
1915  if (IsEntryFunc) {
1916  allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
1917  } else {
1918  CCInfo.AllocateReg(Info->getScratchRSrcReg());
1919  CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
1920  CCInfo.AllocateReg(Info->getFrameOffsetReg());
1921  allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
1922  }
1923 
1924  auto &ArgUsageInfo =
1926  ArgUsageInfo.setFuncArgInfo(MF.getFunction(), Info->getArgInfo());
1927 
1928  unsigned StackArgSize = CCInfo.getNextStackOffset();
1929  Info->setBytesInStackArgArea(StackArgSize);
1930 
1931  return Chains.empty() ? Chain :
1932  DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
1933 }
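// Summary of the flow above: unused pixel-shader inputs are skipped, special
// entry inputs (workitem ID VGPRs and HSA user SGPRs) are reserved first,
// kernel arguments are loaded from the kernarg segment while other arguments
// arrive in registers or fixed stack objects, and finally the per-wave system
// SGPRs (entry functions) or the scratch/frame registers plus special input
// SGPRs (callable functions) are allocated before recording argument usage and
// the stack argument area size.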
1934 
1935 // TODO: If return values can't fit in registers, we should return as many as
1936 // possible in registers before passing on stack.
1938  CallingConv::ID CallConv,
1939  MachineFunction &MF, bool IsVarArg,
1940  const SmallVectorImpl<ISD::OutputArg> &Outs,
1941  LLVMContext &Context) const {
1942  // Replacing returns with sret/stack usage doesn't make sense for shaders.
1943  // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
1944  // for shaders. Vector types should be explicitly handled by CC.
1945  if (AMDGPU::isEntryFunctionCC(CallConv))
1946  return true;
1947 
1949  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1950  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
1951 }
1952 
1953 SDValue
1955  bool isVarArg,
1956  const SmallVectorImpl<ISD::OutputArg> &Outs,
1957  const SmallVectorImpl<SDValue> &OutVals,
1958  const SDLoc &DL, SelectionDAG &DAG) const {
1959  MachineFunction &MF = DAG.getMachineFunction();
1961 
1962  if (AMDGPU::isKernel(CallConv)) {
1963  return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
1964  OutVals, DL, DAG);
1965  }
1966 
1967  bool IsShader = AMDGPU::isShader(CallConv);
1968 
1969  Info->setIfReturnsVoid(Outs.size() == 0);
1970  bool IsWaveEnd = Info->returnsVoid() && IsShader;
1971 
1973  SmallVector<SDValue, 48> SplitVals;
1974 
1975  // Split vectors into their elements.
1976  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1977  const ISD::OutputArg &Out = Outs[i];
1978 
1979  if (IsShader && Out.VT.isVector()) {
1980  MVT VT = Out.VT.getVectorElementType();
1981  ISD::OutputArg NewOut = Out;
1982  NewOut.Flags.setSplit();
1983  NewOut.VT = VT;
1984 
1985  // We want the original number of vector elements here, e.g.
1986  // three or five, not four or eight.
1987  unsigned NumElements = Out.ArgVT.getVectorNumElements();
1988 
1989  for (unsigned j = 0; j != NumElements; ++j) {
1990  SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
1991  DAG.getConstant(j, DL, MVT::i32));
1992  SplitVals.push_back(Elem);
1993  Splits.push_back(NewOut);
1994  NewOut.PartOffset += NewOut.VT.getStoreSize();
1995  }
1996  } else {
1997  SplitVals.push_back(OutVals[i]);
1998  Splits.push_back(Out);
1999  }
2000  }
2001 
2002  // CCValAssign - represent the assignment of the return value to a location.
2004 
2005  // CCState - Info about the registers and stack slots.
2006  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2007  *DAG.getContext());
2008 
2009  // Analyze outgoing return values.
2010  CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg));
2011 
2012  SDValue Flag;
2013  SmallVector<SDValue, 48> RetOps;
2014  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2015 
2016  // Add return address for callable functions.
2017  if (!Info->isEntryFunction()) {
2018  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2019  SDValue ReturnAddrReg = CreateLiveInRegister(
2020  DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2021 
2022  // FIXME: Should be able to use a vreg here, but need a way to prevent it
 2023  // from being allocated to a CSR.
2024 
2025  SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2026  MVT::i64);
2027 
2028  Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2029  Flag = Chain.getValue(1);
2030 
2031  RetOps.push_back(PhysReturnAddrReg);
2032  }
2033 
2034  // Copy the result values into the output registers.
2035  for (unsigned i = 0, realRVLocIdx = 0;
2036  i != RVLocs.size();
2037  ++i, ++realRVLocIdx) {
2038  CCValAssign &VA = RVLocs[i];
2039  assert(VA.isRegLoc() && "Can only return in registers!");
2040  // TODO: Partially return in registers if return values don't fit.
2041 
2042  SDValue Arg = SplitVals[realRVLocIdx];
2043 
2044  // Copied from other backends.
2045  switch (VA.getLocInfo()) {
2046  case CCValAssign::Full:
2047  break;
2048  case CCValAssign::BCvt:
2049  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2050  break;
2051  case CCValAssign::SExt:
2052  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2053  break;
2054  case CCValAssign::ZExt:
2055  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2056  break;
2057  case CCValAssign::AExt:
2058  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2059  break;
2060  default:
2061  llvm_unreachable("Unknown loc info!");
2062  }
2063 
2064  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2065  Flag = Chain.getValue(1);
2066  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2067  }
2068 
2069  // FIXME: Does sret work properly?
2070  if (!Info->isEntryFunction()) {
2071  const SIRegisterInfo *TRI
2072  = static_cast<const SISubtarget *>(Subtarget)->getRegisterInfo();
2073  const MCPhysReg *I =
2074  TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2075  if (I) {
2076  for (; *I; ++I) {
2077  if (AMDGPU::SReg_64RegClass.contains(*I))
2078  RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2079  else if (AMDGPU::SReg_32RegClass.contains(*I))
2080  RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2081  else
2082  llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2083  }
2084  }
2085  }
2086 
2087  // Update chain and glue.
2088  RetOps[0] = Chain;
2089  if (Flag.getNode())
2090  RetOps.push_back(Flag);
2091 
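  // Shaders that return no values simply end the wave here; all other returns
  // (including returns from callable functions) go through the normal return
  // opcode selected below.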
2092  unsigned Opc = AMDGPUISD::ENDPGM;
2093  if (!IsWaveEnd)
2094  Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2095  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2096 }
2097 
2098 SDValue SITargetLowering::LowerCallResult(
2099  SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2100  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2101  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2102  SDValue ThisVal) const {
2103  CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2104 
2105  // Assign locations to each value returned by this call.
2106  SmallVector<CCValAssign, 16> RVLocs;
2107  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2108  *DAG.getContext());
2109  CCInfo.AnalyzeCallResult(Ins, RetCC);
2110 
2111  // Copy all of the result registers out of their specified physreg.
2112  for (unsigned i = 0; i != RVLocs.size(); ++i) {
2113  CCValAssign VA = RVLocs[i];
2114  SDValue Val;
2115 
2116  if (VA.isRegLoc()) {
2117  Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2118  Chain = Val.getValue(1);
2119  InFlag = Val.getValue(2);
2120  } else if (VA.isMemLoc()) {
2121  report_fatal_error("TODO: return values in memory");
2122  } else
2123  llvm_unreachable("unknown argument location type");
2124 
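  // The callee widened each value to its location type; record the known
  // extension with an AssertZext/AssertSext node and truncate back to the
  // original value type so later combines can make use of it.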
2125  switch (VA.getLocInfo()) {
2126  case CCValAssign::Full:
2127  break;
2128  case CCValAssign::BCvt:
2129  Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2130  break;
2131  case CCValAssign::ZExt:
2132  Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2133  DAG.getValueType(VA.getValVT()));
2134  Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2135  break;
2136  case CCValAssign::SExt:
2137  Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2138  DAG.getValueType(VA.getValVT()));
2139  Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2140  break;
2141  case CCValAssign::AExt:
2142  Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2143  break;
2144  default:
2145  llvm_unreachable("Unknown loc info!");
2146  }
2147 
2148  InVals.push_back(Val);
2149  }
2150 
2151  return Chain;
2152 }
2153 
2154 // Add code to pass special inputs required depending on used features separate
2155 // from the explicit user arguments present in the IR.
2156 void SITargetLowering::passSpecialInputs(
2157  CallLoweringInfo &CLI,
2158  const SIMachineFunctionInfo &Info,
2159  SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2160  SmallVectorImpl<SDValue> &MemOpChains,
2161  SDValue Chain,
2162  SDValue StackPtr) const {
2163  // If we don't have a call site, this was a call inserted by
2164  // legalization. These can never use special inputs.
2165  if (!CLI.CS)
2166  return;
2167 
2168  const Function *CalleeFunc = CLI.CS.getCalledFunction();
2169  assert(CalleeFunc);
2170 
2171  SelectionDAG &DAG = CLI.DAG;
2172  const SDLoc &DL = CLI.DL;
2173 
2174  const SISubtarget *ST = getSubtarget();
2175  const SIRegisterInfo *TRI = ST->getRegisterInfo();
2176 
2177  auto &ArgUsageInfo =
2178  DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2179  const AMDGPUFunctionArgInfo &CalleeArgInfo
2180  = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2181 
2182  const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2183 
2184  // TODO: Unify with private memory register handling. This is complicated by
2185  // the fact that at least in kernels, the input argument is not necessarily
2186  // in the same location as the input.
2199  };
2200 
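  // For each special input the callee needs, forward the caller's incoming
  // value when one exists (the implicit argument pointer is recomputed
  // instead), placing it either in the register the callee expects or in the
  // callee's stack argument area.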
2201  for (auto InputID : InputRegs) {
2202  const ArgDescriptor *OutgoingArg;
2203  const TargetRegisterClass *ArgRC;
2204 
2205  std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2206  if (!OutgoingArg)
2207  continue;
2208 
2209  const ArgDescriptor *IncomingArg;
2210  const TargetRegisterClass *IncomingArgRC;
2211  std::tie(IncomingArg, IncomingArgRC)
2212  = CallerArgInfo.getPreloadedValue(InputID);
2213  assert(IncomingArgRC == ArgRC);
2214 
2215  // All special arguments are ints for now.
2216  EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
2217  SDValue InputReg;
2218 
2219  if (IncomingArg) {
2220  InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2221  } else {
2222  // The implicit arg ptr is special because it doesn't have a corresponding
2223  // input for kernels, and is computed from the kernarg segment pointer.
2224  assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2225  InputReg = getImplicitArgPtr(DAG, DL);
2226  }
2227 
2228  if (OutgoingArg->isRegister()) {
2229  RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2230  } else {
2231  SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, StackPtr,
2232  InputReg,
2233  OutgoingArg->getStackOffset());
2234  MemOpChains.push_back(ArgStore);
2235  }
2236  }
2237 }
2238 
2239 static bool canGuaranteeTCO(CallingConv::ID CC) {
2240  return CC == CallingConv::Fast;
2241 }
2242 
2243 /// Return true if we might ever do TCO for calls with this calling convention.
2244 static bool mayTailCallThisCC(CallingConv::ID CC) {
2245  switch (CC) {
2246  case CallingConv::C:
2247  return true;
2248  default:
2249  return canGuaranteeTCO(CC);
2250  }
2251 }
2252 
2253 bool SITargetLowering::isEligibleForTailCallOptimization(
2254  SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2255  const SmallVectorImpl<ISD::OutputArg> &Outs,
2256  const SmallVectorImpl<SDValue> &OutVals,
2257  const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2258  if (!mayTailCallThisCC(CalleeCC))
2259  return false;
2260 
2261  MachineFunction &MF = DAG.getMachineFunction();
2262  const Function &CallerF = MF.getFunction();
2263  CallingConv::ID CallerCC = CallerF.getCallingConv();
2264  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2265  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2266 
2267  // Kernels aren't callable, and don't have a live-in return address, so it
2268  // doesn't make sense to do a tail call with entry functions.
2269  if (!CallerPreserved)
2270  return false;
2271 
2272  bool CCMatch = CallerCC == CalleeCC;
2273 
2274  if (MF.getTarget().Options.GuaranteedTailCallOpt) {
2275  if (canGuaranteeTCO(CalleeCC) && CCMatch)
2276  return true;
2277  return false;
2278  }
2279 
2280  // TODO: Can we handle var args?
2281  if (IsVarArg)
2282  return false;
2283 
2284  for (const Argument &Arg : CallerF.args()) {
2285  if (Arg.hasByValAttr())
2286  return false;
2287  }
2288 
2289  LLVMContext &Ctx = *DAG.getContext();
2290 
2291  // Check that the call results are passed in the same way.
2292  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2293  CCAssignFnForCall(CalleeCC, IsVarArg),
2294  CCAssignFnForCall(CallerCC, IsVarArg)))
2295  return false;
2296 
2297  // The callee has to preserve all registers the caller needs to preserve.
2298  if (!CCMatch) {
2299  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2300  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2301  return false;
2302  }
2303 
2304  // Nothing more to check if the callee is taking no arguments.
2305  if (Outs.empty())
2306  return true;
2307 
2308  SmallVector<CCValAssign, 16> ArgLocs;
2309  CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2310 
2311  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2312 
2313  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2314  // If the stack arguments for this call do not fit into our own save area then
2315  // the call cannot be made tail.
2316  // TODO: Is this really necessary?
2317  if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2318  return false;
2319 
2320  const MachineRegisterInfo &MRI = MF.getRegInfo();
2321  return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2322 }
2323 
2324 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2325  if (!CI->isTailCall())
2326  return false;
2327 
2328  const Function *ParentFn = CI->getParent()->getParent();
2329  if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2330  return false;
2331 
2332  auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2333  return (Attr.getValueAsString() != "true");
2334 }
2335 
2336 // The wave scratch offset register is used as the global base pointer.
2337 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2338  SmallVectorImpl<SDValue> &InVals) const {
2339  SelectionDAG &DAG = CLI.DAG;
2340  const SDLoc &DL = CLI.DL;
2341  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2342  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2343  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2344  SDValue Chain = CLI.Chain;
2345  SDValue Callee = CLI.Callee;
2346  bool &IsTailCall = CLI.IsTailCall;
2347  CallingConv::ID CallConv = CLI.CallConv;
2348  bool IsVarArg = CLI.IsVarArg;
2349  bool IsSibCall = false;
2350  bool IsThisReturn = false;
2351  MachineFunction &MF = DAG.getMachineFunction();
2352 
2353  if (IsVarArg) {
2354  return lowerUnhandledCall(CLI, InVals,
2355  "unsupported call to variadic function ");
2356  }
2357 
2358  if (!CLI.CS.getCalledFunction()) {
2359  return lowerUnhandledCall(CLI, InVals,
2360  "unsupported indirect call to function ");
2361  }
2362 
2363  if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2364  return lowerUnhandledCall(CLI, InVals,
2365  "unsupported required tail call to function ");
2366  }
2367 
2368  // The first 4 bytes are reserved for the callee's emergency stack slot.
2369  const unsigned CalleeUsableStackOffset = 4;
2370 
2371  if (IsTailCall) {
2372  IsTailCall = isEligibleForTailCallOptimization(
2373  Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2374  if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2375  report_fatal_error("failed to perform tail call elimination on a call "
2376  "site marked musttail");
2377  }
2378 
2379  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2380 
2381  // A sibling call is one where we're under the usual C ABI and not planning
2382  // to change that but can still do a tail call:
2383  if (!TailCallOpt && IsTailCall)
2384  IsSibCall = true;
2385 
2386  if (IsTailCall)
2387  ++NumTailCalls;
2388  }
2389 
2390  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) {
2391  // FIXME: Remove this hack for function pointer types after removing
2392  // support of old address space mapping. In the new address space
2393  // mapping the pointer in default address space is 64 bit, therefore
2394  // does not need this hack.
2395  if (Callee.getValueType() == MVT::i32) {
2396  const GlobalValue *GV = GA->getGlobal();
2397  Callee = DAG.getGlobalAddress(GV, DL, MVT::i64, GA->getOffset(), false,
2398  GA->getTargetFlags());
2399  }
2400  }
2401  assert(Callee.getValueType() == MVT::i64);
2402 
2403  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2404 
2405  // Analyze operands of the call, assigning locations to each operand.
2406  SmallVector<CCValAssign, 16> ArgLocs;
2407  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2408  CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2409  CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2410 
2411  // Get a count of how many bytes are to be pushed on the stack.
2412  unsigned NumBytes = CCInfo.getNextStackOffset();
2413 
2414  if (IsSibCall) {
2415  // Since we're not changing the ABI to make this a tail call, the memory
2416  // operands are already available in the caller's incoming argument space.
2417  NumBytes = 0;
2418  }
2419 
2420  // FPDiff is the byte offset of the call's argument area from the callee's.
2421  // Stores to callee stack arguments will be placed in FixedStackSlots offset
2422  // by this amount for a tail call. In a sibling call it must be 0 because the
2423  // caller will deallocate the entire stack and the callee still expects its
2424  // arguments to begin at SP+0. Completely unused for non-tail calls.
2425  int32_t FPDiff = 0;
2426  MachineFrameInfo &MFI = MF.getFrameInfo();
2427  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2428 
2429  SDValue CallerSavedFP;
2430 
2431  // Adjust the stack pointer for the new arguments...
2432  // These operations are automatically eliminated by the prolog/epilog pass
2433  if (!IsSibCall) {
2434  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2435 
2436  unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2437 
2438  // In the HSA case, this should be an identity copy.
2439  SDValue ScratchRSrcReg
2440  = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2441  RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2442 
2443  // TODO: Don't hardcode these registers and get from the callee function.
2444  SDValue ScratchWaveOffsetReg
2445  = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2446  RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
2447 
2448  if (!Info->isEntryFunction()) {
2449  // Avoid clobbering this function's FP value. In the current convention
2450  // the callee will overwrite it, so save and restore it around the call site.
2451  CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2452  Info->getFrameOffsetReg(), MVT::i32);
2453  }
2454  }
2455 
2456  // Stack pointer relative accesses are done by changing the offset SGPR. This
2457  // is just the VGPR offset component.
2458  SDValue StackPtr = DAG.getConstant(CalleeUsableStackOffset, DL, MVT::i32);
2459 
2460  SmallVector<SDValue, 8> MemOpChains;
2461  MVT PtrVT = MVT::i32;
2462 
2463  // Walk the register/memloc assignments, inserting copies/loads.
2464  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2465  ++i, ++realArgIdx) {
2466  CCValAssign &VA = ArgLocs[i];
2467  SDValue Arg = OutVals[realArgIdx];
2468 
2469  // Promote the value if needed.
2470  switch (VA.getLocInfo()) {
2471  case CCValAssign::Full:
2472  break;
2473  case CCValAssign::BCvt:
2474  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2475  break;
2476  case CCValAssign::ZExt:
2477  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2478  break;
2479  case CCValAssign::SExt:
2480  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2481  break;
2482  case CCValAssign::AExt:
2483  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2484  break;
2485  case CCValAssign::FPExt:
2486  Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2487  break;
2488  default:
2489  llvm_unreachable("Unknown loc info!");
2490  }
2491 
2492  if (VA.isRegLoc()) {
2493  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2494  } else {
2495  assert(VA.isMemLoc());
2496 
2497  SDValue DstAddr;
2498  MachinePointerInfo DstInfo;
2499 
2500  unsigned LocMemOffset = VA.getLocMemOffset();
2501  int32_t Offset = LocMemOffset;
2502 
2503  SDValue PtrOff = DAG.getObjectPtrOffset(DL, StackPtr, Offset);
2504 
2505  if (IsTailCall) {
2506  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2507  unsigned OpSize = Flags.isByVal() ?
2508  Flags.getByValSize() : VA.getValVT().getStoreSize();
2509 
2510  Offset = Offset + FPDiff;
2511  int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2512 
2513  DstAddr = DAG.getObjectPtrOffset(DL, DAG.getFrameIndex(FI, PtrVT),
2514  StackPtr);
2515  DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2516 
2517  // Make sure any stack arguments overlapping with where we're storing
2518  // are loaded before this eventual operation. Otherwise they'll be
2519  // clobbered.
2520 
2521  // FIXME: Why is this really necessary? This seems to just result in a
2522  // lot of code to copy the stack and write them back to the same
2523  // locations, which are supposed to be immutable?
2524  Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2525  } else {
2526  DstAddr = PtrOff;
2527  DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2528  }
2529 
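  // Byval arguments are copied into the callee's argument area with an inline
  // memcpy; all other stack arguments are emitted as plain stores to the
  // address computed above.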
2530  if (Outs[i].Flags.isByVal()) {
2531  SDValue SizeNode =
2532  DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2533  SDValue Cpy = DAG.getMemcpy(
2534  Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2535  /*isVol = */ false, /*AlwaysInline = */ true,
2536  /*isTailCall = */ false, DstInfo,
2537  MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2538  *DAG.getContext(), AMDGPUASI.PRIVATE_ADDRESS))));
2539 
2540  MemOpChains.push_back(Cpy);
2541  } else {
2542  SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
2543  MemOpChains.push_back(Store);
2544  }
2545  }
2546  }
2547 
2548  // Copy special input registers after user input arguments.
2549  passSpecialInputs(CLI, *Info, RegsToPass, MemOpChains, Chain, StackPtr);
2550 
2551  if (!MemOpChains.empty())
2552  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2553 
2554  // Build a sequence of copy-to-reg nodes chained together with token chain
2555  // and flag operands which copy the outgoing args into the appropriate regs.
2556  SDValue InFlag;
2557  for (auto &RegToPass : RegsToPass) {
2558  Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2559  RegToPass.second, InFlag);
2560  InFlag = Chain.getValue(1);
2561  }
2562 
2563 
2564  SDValue PhysReturnAddrReg;
2565  if (IsTailCall) {
2566  // Since the return is being combined with the call, we need to pass on the
2567  // return address.
2568 
2569  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2570  SDValue ReturnAddrReg = CreateLiveInRegister(
2571  DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2572 
2573  PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2574  MVT::i64);
2575  Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2576  InFlag = Chain.getValue(1);
2577  }
2578 
2579  // We don't usually want to end the call-sequence here because we would tidy
2580  // the frame up *after* the call; however, in the ABI-changing tail-call case
2581  // we've carefully laid out the parameters so that when sp is reset they'll be
2582  // in the correct location.
2583  if (IsTailCall && !IsSibCall) {
2584  Chain = DAG.getCALLSEQ_END(Chain,
2585  DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2586  DAG.getTargetConstant(0, DL, MVT::i32),
2587  InFlag, DL);
2588  InFlag = Chain.getValue(1);
2589  }
2590 
2591  std::vector<SDValue> Ops;
2592  Ops.push_back(Chain);
2593  Ops.push_back(Callee);
2594 
2595  if (IsTailCall) {
2596  // Each tail call may have to adjust the stack by a different amount, so
2597  // this information must travel along with the operation for eventual
2598  // consumption by emitEpilogue.
2599  Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2600 
2601  Ops.push_back(PhysReturnAddrReg);
2602  }
2603 
2604  // Add argument registers to the end of the list so that they are known live
2605  // into the call.
2606  for (auto &RegToPass : RegsToPass) {
2607  Ops.push_back(DAG.getRegister(RegToPass.first,
2608  RegToPass.second.getValueType()));
2609  }
2610 
2611  // Add a register mask operand representing the call-preserved registers.
2612 
2614  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2615  assert(Mask && "Missing call preserved mask for calling convention");
2616  Ops.push_back(DAG.getRegisterMask(Mask));
2617 
2618  if (InFlag.getNode())
2619  Ops.push_back(InFlag);
2620 
2621  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2622 
2623  // If we're doing a tail call, use a TC_RETURN here rather than an
2624  // actual call instruction.
2625  if (IsTailCall) {
2626  MFI.setHasTailCall();
2627  return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2628  }
2629 
2630  // Returns a chain and a flag for retval copy to use.
2631  SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2632  Chain = Call.getValue(0);
2633  InFlag = Call.getValue(1);
2634 
2635  if (CallerSavedFP) {
2636  SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2637  Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2638  InFlag = Chain.getValue(1);
2639  }
2640 
2641  uint64_t CalleePopBytes = NumBytes;
2642  Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2643  DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2644  InFlag, DL);
2645  if (!Ins.empty())
2646  InFlag = Chain.getValue(1);
2647 
2648  // Handle result values, copying them out of physregs into vregs that we
2649  // return.
2650  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2651  InVals, IsThisReturn,
2652  IsThisReturn ? OutVals[0] : SDValue());
2653 }
2654 
2655 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2656  SelectionDAG &DAG) const {
2657  unsigned Reg = StringSwitch<unsigned>(RegName)
2658  .Case("m0", AMDGPU::M0)
2659  .Case("exec", AMDGPU::EXEC)
2660  .Case("exec_lo", AMDGPU::EXEC_LO)
2661  .Case("exec_hi", AMDGPU::EXEC_HI)
2662  .Case("flat_scratch", AMDGPU::FLAT_SCR)
2663  .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2664  .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2665  .Default(AMDGPU::NoRegister);
2666 
2667  if (Reg == AMDGPU::NoRegister) {
2668  report_fatal_error(Twine("invalid register name \""
2669  + StringRef(RegName) + "\"."));
2670 
2671  }
2672 
2674  Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2675  report_fatal_error(Twine("invalid register \""
2676  + StringRef(RegName) + "\" for subtarget."));
2677  }
2678 
2679  switch (Reg) {
2680  case AMDGPU::M0:
2681  case AMDGPU::EXEC_LO:
2682  case AMDGPU::EXEC_HI:
2683  case AMDGPU::FLAT_SCR_LO:
2684  case AMDGPU::FLAT_SCR_HI:
2685  if (VT.getSizeInBits() == 32)
2686  return Reg;
2687  break;
2688  case AMDGPU::EXEC:
2689  case AMDGPU::FLAT_SCR:
2690  if (VT.getSizeInBits() == 64)
2691  return Reg;
2692  break;
2693  default:
2694  llvm_unreachable("missing register type checking");
2695  }
2696 
2697  report_fatal_error(Twine("invalid type for register \""
2698  + StringRef(RegName) + "\"."));
2699 }
2700 
2701 // If kill is not the last instruction, split the block so kill is always a
2702 // proper terminator.
2704  MachineBasicBlock *BB) const {
2705  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2706 
2707  MachineBasicBlock::iterator SplitPoint(&MI);
2708  ++SplitPoint;
2709 
2710  if (SplitPoint == BB->end()) {
2711  // Don't bother with a new block.
2712  MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2713  return BB;
2714  }
2715 
2716  MachineFunction *MF = BB->getParent();
2717  MachineBasicBlock *SplitBB
2718  = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2719 
2720  MF->insert(++MachineFunction::iterator(BB), SplitBB);
2721  SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2722 
2723  SplitBB->transferSuccessorsAndUpdatePHIs(BB);
2724  BB->addSuccessor(SplitBB);
2725  MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2726 
2727  return SplitBB;
2728 }
2729 
2730 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2731 // wavefront. If the value is uniform and just happens to be in a VGPR, this
2732 // will only do one iteration. In the worst case, this will loop 64 times.
2733 //
2734 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
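// Each iteration reads one lane's index with V_READFIRSTLANE_B32, enables all
// lanes that hold the same index, performs the indexed access for them, then
// clears those lanes from EXEC and loops until EXEC is empty.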
2735 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2736  const SIInstrInfo *TII,
2737  MachineRegisterInfo &MRI,
2738  MachineBasicBlock &OrigBB,
2739  MachineBasicBlock &LoopBB,
2740  const DebugLoc &DL,
2741  const MachineOperand &IdxReg,
2742  unsigned InitReg,
2743  unsigned ResultReg,
2744  unsigned PhiReg,
2745  unsigned InitSaveExecReg,
2746  int Offset,
2747  bool UseGPRIdxMode,
2748  bool IsIndirectSrc) {
2749  MachineBasicBlock::iterator I = LoopBB.begin();
2750 
2751  unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2752  unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2753  unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2754  unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2755 
2756  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2757  .addReg(InitReg)
2758  .addMBB(&OrigBB)
2759  .addReg(ResultReg)
2760  .addMBB(&LoopBB);
2761 
2762  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2763  .addReg(InitSaveExecReg)
2764  .addMBB(&OrigBB)
2765  .addReg(NewExec)
2766  .addMBB(&LoopBB);
2767 
2768  // Read the next variant <- also loop target.
2769  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2770  .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2771 
2772  // Compare the just read M0 value to all possible Idx values.
2773  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2774  .addReg(CurrentIdxReg)
2775  .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
2776 
2777  // Update EXEC, save the original EXEC value to VCC.
2778  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2779  .addReg(CondReg, RegState::Kill);
2780 
2781  MRI.setSimpleHint(NewExec, CondReg);
2782 
2783  if (UseGPRIdxMode) {
2784  unsigned IdxReg;
2785  if (Offset == 0) {
2786  IdxReg = CurrentIdxReg;
2787  } else {
2788  IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2789  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2790  .addReg(CurrentIdxReg, RegState::Kill)
2791  .addImm(Offset);
2792  }
2793  unsigned IdxMode = IsIndirectSrc ?
2794  AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
2795  MachineInstr *SetOn =
2796  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2797  .addReg(IdxReg, RegState::Kill)
2798  .addImm(IdxMode);
2799  SetOn->getOperand(3).setIsUndef();
2800  } else {
2801  // Move index from VCC into M0
2802  if (Offset == 0) {
2803  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2804  .addReg(CurrentIdxReg, RegState::Kill);
2805  } else {
2806  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2807  .addReg(CurrentIdxReg, RegState::Kill)
2808  .addImm(Offset);
2809  }
2810  }
2811 
2812  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
2813  MachineInstr *InsertPt =
2814  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
2815  .addReg(AMDGPU::EXEC)
2816  .addReg(NewExec);
2817 
2818  // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2819  // s_cbranch_scc0?
2820 
2821  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2822  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2823  .addMBB(&LoopBB);
2824 
2825  return InsertPt->getIterator();
2826 }
2827 
2828 // This has slightly sub-optimal regalloc when the source vector is killed by
2829 // the read. The register allocator does not understand that the kill is
2830  // per-workitem, so the vector is kept alive for the whole loop and we end up
2831  // not re-using a subregister from it, using 1 more VGPR than necessary. This
2832  // extra register was avoided when this was expanded after register allocation.
2833 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
2834  MachineBasicBlock &MBB,
2835  MachineInstr &MI,
2836  unsigned InitResultReg,
2837  unsigned PhiReg,
2838  int Offset,
2839  bool UseGPRIdxMode,
2840  bool IsIndirectSrc) {
2841  MachineFunction *MF = MBB.getParent();
2842  MachineRegisterInfo &MRI = MF->getRegInfo();
2843  const DebugLoc &DL = MI.getDebugLoc();
2844  MachineBasicBlock::iterator I(&MI);
2845 
2846  unsigned DstReg = MI.getOperand(0).getReg();
2847  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2848  unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2849 
2850  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
2851 
2852  // Save the EXEC mask
2853  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
2854  .addReg(AMDGPU::EXEC);
2855 
2856  // To insert the loop we need to split the block. Move everything after this
2857  // point to a new block, and insert a new empty block between the two.
2858  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
2859  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
2860  MachineFunction::iterator MBBI(MBB);
2861  ++MBBI;
2862 
2863  MF->insert(MBBI, LoopBB);
2864  MF->insert(MBBI, RemainderBB);
2865 
2866  LoopBB->addSuccessor(LoopBB);
2867  LoopBB->addSuccessor(RemainderBB);
2868 
2869  // Move the rest of the block into a new block.
2870  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
2871  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
2872 
2873  MBB.addSuccessor(LoopBB);
2874 
2875  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2876 
2877  auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
2878  InitResultReg, DstReg, PhiReg, TmpExec,
2879  Offset, UseGPRIdxMode, IsIndirectSrc);
2880 
2881  MachineBasicBlock::iterator First = RemainderBB->begin();
2882  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
2883  .addReg(SaveExec);
2884 
2885  return InsPt;
2886 }
2887 
2888 // Returns subreg index, offset
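// An in-bounds constant offset is folded into the subregister index (e.g.
// element 2 of a 128-bit vector becomes sub2 with a residual offset of 0); an
// out-of-bounds or negative offset is returned unchanged so it can be applied
// at run time through M0 or GPR indexing mode.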
2889 static std::pair<unsigned, int>
2890 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
2891  const TargetRegisterClass *SuperRC,
2892  unsigned VecReg,
2893  int Offset) {
2894  int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
2895 
2896  // Skip out of bounds offsets, or else we would end up using an undefined
2897  // register.
2898  if (Offset >= NumElts || Offset < 0)
2899  return std::make_pair(AMDGPU::sub0, Offset);
2900 
2901  return std::make_pair(AMDGPU::sub0 + Offset, 0);
2902 }
2903 
2904 // Return true if the index is an SGPR and was set.
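// With GPR indexing mode the index (plus any constant offset) is installed
// via S_SET_GPR_IDX_ON; otherwise it is written, or added, into M0 for the
// MOVREL pattern.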
2905 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
2906  MachineRegisterInfo &MRI,
2907  MachineInstr &MI,
2908  int Offset,
2909  bool UseGPRIdxMode,
2910  bool IsIndirectSrc) {
2911  MachineBasicBlock *MBB = MI.getParent();
2912  const DebugLoc &DL = MI.getDebugLoc();
2913  MachineBasicBlock::iterator I(&MI);
2914 
2915  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2916  const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
2917 
2918  assert(Idx->getReg() != AMDGPU::NoRegister);
2919 
2920  if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
2921  return false;
2922 
2923  if (UseGPRIdxMode) {
2924  unsigned IdxMode = IsIndirectSrc ?
2925  AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
2926  if (Offset == 0) {
2927  MachineInstr *SetOn =
2928  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2929  .add(*Idx)
2930  .addImm(IdxMode);
2931 
2932  SetOn->getOperand(3).setIsUndef();
2933  } else {
2934  unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
2935  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
2936  .add(*Idx)
2937  .addImm(Offset);
2938  MachineInstr *SetOn =
2939  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2940  .addReg(Tmp, RegState::Kill)
2941  .addImm(IdxMode);
2942 
2943  SetOn->getOperand(3).setIsUndef();
2944  }
2945 
2946  return true;
2947  }
2948 
2949  if (Offset == 0) {
2950  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2951  .add(*Idx);
2952  } else {
2953  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2954  .add(*Idx)
2955  .addImm(Offset);
2956  }
2957 
2958  return true;
2959 }
2960 
2961 // Control flow needs to be inserted if indexing with a VGPR.
2962 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
2963  MachineBasicBlock &MBB,
2964  const SISubtarget &ST) {
2965  const SIInstrInfo *TII = ST.getInstrInfo();
2966  const SIRegisterInfo &TRI = TII->getRegisterInfo();
2967  MachineFunction *MF = MBB.getParent();
2968  MachineRegisterInfo &MRI = MF->getRegInfo();
2969 
2970  unsigned Dst = MI.getOperand(0).getReg();
2971  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
2972  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
2973 
2974  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
2975 
2976  unsigned SubReg;
2977  std::tie(SubReg, Offset)
2978  = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
2979 
2980  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
2981 
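  // Fast path: a uniform (SGPR) index can be written straight to M0 or the
  // GPR index register; a divergent (VGPR) index falls through to the
  // waterfall loop below.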
2982  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
2983  MachineBasicBlock::iterator I(&MI);
2984  const DebugLoc &DL = MI.getDebugLoc();
2985 
2986  if (UseGPRIdxMode) {
2987  // TODO: Look at the uses to avoid the copy. This may require rescheduling
2988  // to avoid interfering with other uses, so probably requires a new
2989  // optimization pass.
2990  BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
2991  .addReg(SrcReg, RegState::Undef, SubReg)
2992  .addReg(SrcReg, RegState::Implicit)
2993  .addReg(AMDGPU::M0, RegState::Implicit);
2994  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
2995  } else {
2996  BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
2997  .addReg(SrcReg, RegState::Undef, SubReg)
2998  .addReg(SrcReg, RegState::Implicit);
2999  }
3000 
3001  MI.eraseFromParent();
3002 
3003  return &MBB;
3004  }
3005 
3006  const DebugLoc &DL = MI.getDebugLoc();
3007  MachineBasicBlock::iterator I(&MI);
3008 
3009  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3010  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3011 
3012  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3013 
3014  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3015  Offset, UseGPRIdxMode, true);
3016  MachineBasicBlock *LoopBB = InsPt->getParent();
3017 
3018  if (UseGPRIdxMode) {
3019  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3020  .addReg(SrcReg, RegState::Undef, SubReg)
3021  .addReg(SrcReg, RegState::Implicit)
3022  .addReg(AMDGPU::M0, RegState::Implicit);
3023  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3024  } else {
3025  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3026  .addReg(SrcReg, RegState::Undef, SubReg)
3027  .addReg(SrcReg, RegState::Implicit);
3028  }
3029 
3030  MI.eraseFromParent();
3031 
3032  return LoopBB;
3033 }
3034 
3035 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3036  const TargetRegisterClass *VecRC) {
3037  switch (TRI.getRegSizeInBits(*VecRC)) {
3038  case 32: // 4 bytes
3039  return AMDGPU::V_MOVRELD_B32_V1;
3040  case 64: // 8 bytes
3041  return AMDGPU::V_MOVRELD_B32_V2;
3042  case 128: // 16 bytes
3043  return AMDGPU::V_MOVRELD_B32_V4;
3044  case 256: // 32 bytes
3045  return AMDGPU::V_MOVRELD_B32_V8;
3046  case 512: // 64 bytes
3047  return AMDGPU::V_MOVRELD_B32_V16;
3048  default:
3049  llvm_unreachable("unsupported size for MOVRELD pseudos");
3050  }
3051 }
3052 
3053 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3054  MachineBasicBlock &MBB,
3055  const SISubtarget &ST) {
3056  const SIInstrInfo *TII = ST.getInstrInfo();
3057  const SIRegisterInfo &TRI = TII->getRegisterInfo();
3058  MachineFunction *MF = MBB.getParent();
3059  MachineRegisterInfo &MRI = MF->getRegInfo();
3060 
3061  unsigned Dst = MI.getOperand(0).getReg();
3062  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3063  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3064  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3065  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3066  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3067 
3068  // This can be an immediate, but will be folded later.
3069  assert(Val->getReg());
3070 
3071  unsigned SubReg;
3072  std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3073  SrcVec->getReg(),
3074  Offset);
3075  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3076 
3077  if (Idx->getReg() == AMDGPU::NoRegister) {
3078  MachineBasicBlock::iterator I(&MI);
3079  const DebugLoc &DL = MI.getDebugLoc();
3080 
3081  assert(Offset == 0);
3082 
3083  BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3084  .add(*SrcVec)
3085  .add(*Val)
3086  .addImm(SubReg);
3087 
3088  MI.eraseFromParent();
3089  return &MBB;
3090  }
3091 
3092  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3093  MachineBasicBlock::iterator I(&MI);
3094  const DebugLoc &DL = MI.getDebugLoc();
3095 
3096  if (UseGPRIdxMode) {
3097  BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3098  .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3099  .add(*Val)
3100  .addReg(Dst, RegState::ImplicitDefine)
3101  .addReg(SrcVec->getReg(), RegState::Implicit)
3102  .addReg(AMDGPU::M0, RegState::Implicit);
3103 
3104  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3105  } else {
3106  const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3107 
3108  BuildMI(MBB, I, DL, MovRelDesc)
3109  .addReg(Dst, RegState::Define)
3110  .addReg(SrcVec->getReg())
3111  .add(*Val)
3112  .addImm(SubReg - AMDGPU::sub0);
3113  }
3114 
3115  MI.eraseFromParent();
3116  return &MBB;
3117  }
3118 
3119  if (Val->isReg())
3120  MRI.clearKillFlags(Val->getReg());
3121 
3122  const DebugLoc &DL = MI.getDebugLoc();
3123 
3124  unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3125 
3126  auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3127  Offset, UseGPRIdxMode, false);
3128  MachineBasicBlock *LoopBB = InsPt->getParent();
3129 
3130  if (UseGPRIdxMode) {
3131  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3132  .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3133  .add(*Val) // src0
3134  .addReg(Dst, RegState::ImplicitDefine)
3135  .addReg(PhiReg, RegState::Implicit)
3136  .addReg(AMDGPU::M0, RegState::Implicit);
3137  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3138  } else {
3139  const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3140 
3141  BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3142  .addReg(Dst, RegState::Define)
3143  .addReg(PhiReg)
3144  .add(*Val)
3145  .addImm(SubReg - AMDGPU::sub0);
3146  }
3147 
3148  MI.eraseFromParent();
3149 
3150  return LoopBB;
3151 }
3152 
3153 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3154  MachineInstr &MI, MachineBasicBlock *BB) const {
3155 
3156  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3157  MachineFunction *MF = BB->getParent();
3158  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3159 
3160  if (TII->isMIMG(MI)) {
3161  if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3162  report_fatal_error("missing mem operand from MIMG instruction");
3163  }
3164  // Add a memoperand for mimg instructions so that they aren't assumed to
3165  // be ordered memory instructions.
3166 
3167  return BB;
3168  }
3169 
3170  switch (MI.getOpcode()) {
3171  case AMDGPU::S_ADD_U64_PSEUDO:
3172  case AMDGPU::S_SUB_U64_PSEUDO: {
3173  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3174  const DebugLoc &DL = MI.getDebugLoc();
3175 
3176  MachineOperand &Dest = MI.getOperand(0);
3177  MachineOperand &Src0 = MI.getOperand(1);
3178  MachineOperand &Src1 = MI.getOperand(2);
3179 
3180  unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3181  unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3182 
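  // Expand the 64-bit operation into a 32-bit add/sub of the low halves plus
  // an add/sub-with-carry of the high halves, then recombine the two results
  // with a REG_SEQUENCE.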
3183  MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3184  Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3185  &AMDGPU::SReg_32_XM0RegClass);
3186  MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3187  Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3188  &AMDGPU::SReg_32_XM0RegClass);
3189 
3190  MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3191  Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3192  &AMDGPU::SReg_32_XM0RegClass);
3193  MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3194  Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3195  &AMDGPU::SReg_32_XM0RegClass);
3196 
3197  bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3198 
3199  unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3200  unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3201  BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3202  .add(Src0Sub0)
3203  .add(Src1Sub0);
3204  BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3205  .add(Src0Sub1)
3206  .add(Src1Sub1);
3207  BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3208  .addReg(DestSub0)
3209  .addImm(AMDGPU::sub0)
3210  .addReg(DestSub1)
3211  .addImm(AMDGPU::sub1);
3212  MI.eraseFromParent();
3213  return BB;
3214  }
3215  case AMDGPU::SI_INIT_M0: {
3216  BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3217  TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3218  .add(MI.getOperand(0));
3219  MI.eraseFromParent();
3220  return BB;
3221  }
3222  case AMDGPU::SI_INIT_EXEC:
3223  // This should be before all vector instructions.
3224  BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3225  AMDGPU::EXEC)
3226  .addImm(MI.getOperand(0).getImm());
3227  MI.eraseFromParent();
3228  return BB;
3229 
3230  case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3231  // Extract the thread count from an SGPR input and set EXEC accordingly.
3232  // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3233  //
3234  // S_BFE_U32 count, input, {shift, 7}
3235  // S_BFM_B64 exec, count, 0
3236  // S_CMP_EQ_U32 count, 64
3237  // S_CMOV_B64 exec, -1
3238  MachineInstr *FirstMI = &*BB->begin();
3239  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3240  unsigned InputReg = MI.getOperand(0).getReg();
3241  unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3242  bool Found = false;
3243 
3244  // Move the COPY of the input reg to the beginning, so that we can use it.
3245  for (auto I = BB->begin(); I != &MI; I++) {
3246  if (I->getOpcode() != TargetOpcode::COPY ||
3247  I->getOperand(0).getReg() != InputReg)
3248  continue;
3249 
3250  if (I == FirstMI) {
3251  FirstMI = &*++BB->begin();
3252  } else {
3253  I->removeFromParent();
3254  BB->insert(FirstMI, &*I);
3255  }
3256  Found = true;
3257  break;
3258  }
3259  assert(Found);
3260  (void)Found;
3261 
3262  // This should be before all vector instructions.
3263  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3264  .addReg(InputReg)
3265  .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3266  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3267  AMDGPU::EXEC)
3268  .addReg(CountReg)
3269  .addImm(0);
3270  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3271  .addReg(CountReg, RegState::Kill)
3272  .addImm(64);
3273  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3274  AMDGPU::EXEC)
3275  .addImm(-1);
3276  MI.eraseFromParent();
3277  return BB;
3278  }
3279 
3280  case AMDGPU::GET_GROUPSTATICSIZE: {
3281  DebugLoc DL = MI.getDebugLoc();
3282  BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3283  .add(MI.getOperand(0))
3284  .addImm(MFI->getLDSSize());
3285  MI.eraseFromParent();
3286  return BB;
3287  }
3288  case AMDGPU::SI_INDIRECT_SRC_V1:
3289  case AMDGPU::SI_INDIRECT_SRC_V2:
3290  case AMDGPU::SI_INDIRECT_SRC_V4:
3291  case AMDGPU::SI_INDIRECT_SRC_V8:
3292  case AMDGPU::SI_INDIRECT_SRC_V16:
3293  return emitIndirectSrc(MI, *BB, *getSubtarget());
3294  case AMDGPU::SI_INDIRECT_DST_V1:
3295  case AMDGPU::SI_INDIRECT_DST_V2:
3296  case AMDGPU::SI_INDIRECT_DST_V4:
3297  case AMDGPU::SI_INDIRECT_DST_V8:
3298  case AMDGPU::SI_INDIRECT_DST_V16:
3299  return emitIndirectDst(MI, *BB, *getSubtarget());
3300  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3301  case AMDGPU::SI_KILL_I1_PSEUDO:
3302  return splitKillBlock(MI, BB);
3303  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3304  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3305 
3306  unsigned Dst = MI.getOperand(0).getReg();
3307  unsigned Src0 = MI.getOperand(1).getReg();
3308  unsigned Src1 = MI.getOperand(2).getReg();
3309  const DebugLoc &DL = MI.getDebugLoc();
3310  unsigned SrcCond = MI.getOperand(3).getReg();
3311 
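  // Expand the 64-bit select into two 32-bit V_CNDMASK_B32s on the sub0 and
  // sub1 halves sharing a single condition copy, then rebuild the 64-bit
  // result with a REG_SEQUENCE.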
3312  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3313  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3314  unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3315 
3316  BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3317  .addReg(SrcCond);
3318  BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3319  .addReg(Src0, 0, AMDGPU::sub0)
3320  .addReg(Src1, 0, AMDGPU::sub0)
3321  .addReg(SrcCondCopy);
3322  BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3323  .addReg(Src0, 0, AMDGPU::sub1)
3324  .addReg(Src1, 0, AMDGPU::sub1)
3325  .addReg(SrcCondCopy);
3326 
3327  BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3328  .addReg(DstLo)
3329  .addImm(AMDGPU::sub0)
3330  .addReg(DstHi)
3331  .addImm(AMDGPU::sub1);
3332  MI.eraseFromParent();
3333  return BB;
3334  }
3335  case AMDGPU::SI_BR_UNDEF: {
3336  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3337  const DebugLoc &DL = MI.getDebugLoc();
3338  MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3339  .add(MI.getOperand(0));
3340  Br->getOperand(1).setIsUndef(true); // read undef SCC
3341  MI.eraseFromParent();
3342  return BB;
3343  }
3344  case AMDGPU::ADJCALLSTACKUP:
3345  case AMDGPU::ADJCALLSTACKDOWN: {
3346  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3347  MachineInstrBuilder MIB(*MF, &MI);
3348  MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3349  .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit);
3350  return BB;
3351  }
3352  case AMDGPU::SI_CALL_ISEL:
3353  case AMDGPU::SI_TCRETURN_ISEL: {
3354  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3355  const DebugLoc &DL = MI.getDebugLoc();
3356  unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3357 
3358  MachineRegisterInfo &MRI = MF->getRegInfo();
3359  unsigned GlobalAddrReg = MI.getOperand(0).getReg();
3360  MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg);
3361  assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET);
3362 
3363  const GlobalValue *G = PCRel->getOperand(1).getGlobal();
3364 
3365  MachineInstrBuilder MIB;
3366  if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
3367  MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg)
3368  .add(MI.getOperand(0))
3369  .addGlobalAddress(G);
3370  } else {
3371  MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN))
3372  .add(MI.getOperand(0))
3373  .addGlobalAddress(G);
3374 
3375  // There is an additional imm operand for tcreturn, but it should be in the
3376  // right place already.
3377  }
3378 
3379  for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
3380  MIB.add(MI.getOperand(I));
3381 
3383  MI.eraseFromParent();
3384  return BB;
3385  }
3386  default:
3387  return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3388  }
3389 }
3390 
3391 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3392  return isTypeLegal(VT.getScalarType());
3393 }
3394 
3395 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3396  // This currently forces unfolding various combinations of fsub into fma with
3397  // free fneg'd operands. As long as we have fast FMA (controlled by
3398  // isFMAFasterThanFMulAndFAdd), we should perform these.
3399 
3400  // When fma is quarter rate, for f64 where add / sub are at best half rate,
3401  // most of these combines appear to be cycle neutral but save on instruction
3402  // count / code size.
3403  return true;
3404 }
3405 
3406 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3407  EVT VT) const {
3408  if (!VT.isVector()) {
3409  return MVT::i1;
3410  }
3411  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3412 }
3413 
3414 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3415  // TODO: Should i16 be used always if legal? For now it would force VALU
3416  // shifts.
3417  return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3418 }
3419 
3420 // Answering this is somewhat tricky and depends on the specific device, since
3421 // different devices have different rates for fma and for all f64 operations.
3422 //
3423 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3424 // regardless of which device (although the number of cycles differs between
3425 // devices), so it is always profitable for f64.
3426 //
3427 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3428 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3429 // which we can always do even without fused FP ops since it returns the same
3430 // result as the separate operations and since it is always full
3431 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3432 // however does not support denormals, so we do report fma as faster if we have
3433 // a fast fma device and require denormals.
3434 //
3435 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3436  VT = VT.getScalarType();
3437 
3438  switch (VT.getSimpleVT().SimpleTy) {
3439  case MVT::f32:
3440  // This is as fast on some subtargets. However, we always have full rate f32
3441  // mad available which returns the same result as the separate operations
3442  // which we should prefer over fma. We can't use this if we want to support
3443  // denormals, so only report this in these cases.
3444  return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
3445  case MVT::f64:
3446  return true;
3447  case MVT::f16:
3448  return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
3449  default:
3450  break;
3451  }
3452 
3453  return false;
3454 }
3455 
3456 //===----------------------------------------------------------------------===//
3457 // Custom DAG Lowering Operations
3458 //===----------------------------------------------------------------------===//
3459 
3461  switch (Op.getOpcode()) {
3462  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
3463  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3464  case ISD::LOAD: {
3465  SDValue Result = LowerLOAD(Op, DAG);
3466  assert((!Result.getNode() ||
3467  Result.getNode()->getNumValues() == 2) &&
3468  "Load should return a value and a chain");
3469  return Result;
3470  }
3471 
3472  case ISD::FSIN:
3473  case ISD::FCOS:
3474  return LowerTrig(Op, DAG);
3475  case ISD::SELECT: return LowerSELECT(Op, DAG);
3476  case ISD::FDIV: return LowerFDIV(Op, DAG);
3477  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
3478  case ISD::STORE: return LowerSTORE(Op, DAG);
3479  case ISD::GlobalAddress: {
3480  MachineFunction &MF = DAG.getMachineFunction();
3481  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3482  return LowerGlobalAddress(MFI, Op, DAG);
3483  }
3484  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3485  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
3486  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3487  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
3488  case ISD::INSERT_VECTOR_ELT:
3489  return lowerINSERT_VECTOR_ELT(Op, DAG);
3490  case ISD::EXTRACT_VECTOR_ELT:
3491  return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3492  case ISD::FP_ROUND:
3493  return lowerFP_ROUND(Op, DAG);
3494  case ISD::TRAP:
3495  case ISD::DEBUGTRAP:
3496  return lowerTRAP(Op, DAG);
3497  }
3498  return SDValue();
3499 }
3500 
3501 static unsigned getImageOpcode(unsigned IID) {
3502  switch (IID) {
3503  case Intrinsic::amdgcn_image_load:
3504  return AMDGPUISD::IMAGE_LOAD;
3505  case Intrinsic::amdgcn_image_load_mip:
3506  return AMDGPUISD::IMAGE_LOAD_MIP;
3507 
3508  // Basic sample.
3509  case Intrinsic::amdgcn_image_sample:
3510  return AMDGPUISD::IMAGE_SAMPLE;
3511  case Intrinsic::amdgcn_image_sample_cl:
3512  return AMDGPUISD::IMAGE_SAMPLE_CL;
3513  case Intrinsic::amdgcn_image_sample_d:
3514  return AMDGPUISD::IMAGE_SAMPLE_D;
3515  case Intrinsic::amdgcn_image_sample_d_cl:
3516  return AMDGPUISD::IMAGE_SAMPLE_D_CL;
3517  case Intrinsic::amdgcn_image_sample_l:
3518  return AMDGPUISD::IMAGE_SAMPLE_L;
3519  case Intrinsic::amdgcn_image_sample_b:
3520  return AMDGPUISD::IMAGE_SAMPLE_B;
3521  case Intrinsic::amdgcn_image_sample_b_cl:
3522  return AMDGPUISD::IMAGE_SAMPLE_B_CL;
3523  case Intrinsic::amdgcn_image_sample_lz:
3524  return AMDGPUISD::IMAGE_SAMPLE_LZ;
3525  case Intrinsic::amdgcn_image_sample_cd:
3526  return AMDGPUISD::IMAGE_SAMPLE_CD;
3527  case Intrinsic::amdgcn_image_sample_cd_cl:
3528  return AMDGPUISD::IMAGE_SAMPLE_CD_CL;
3529 
3530  // Sample with comparison.
3531  case Intrinsic::amdgcn_image_sample_c:
3532  return AMDGPUISD::IMAGE_SAMPLE_C;
3533  case Intrinsic::amdgcn_image_sample_c_cl:
3534  return AMDGPUISD::IMAGE_SAMPLE_C_CL;
3535  case Intrinsic::amdgcn_image_sample_c_d:
3536  return AMDGPUISD::IMAGE_SAMPLE_C_D;
3537  case Intrinsic::amdgcn_image_sample_c_d_cl:
3538  return AMDGPUISD::IMAGE_SAMPLE_C_D_CL;
3539  case Intrinsic::amdgcn_image_sample_c_l:
3540  return AMDGPUISD::IMAGE_SAMPLE_C_L;
3541  case Intrinsic::amdgcn_image_sample_c_b:
3542  return AMDGPUISD::IMAGE_SAMPLE_C_B;
3543  case Intrinsic::amdgcn_image_sample_c_b_cl:
3544  return AMDGPUISD::IMAGE_SAMPLE_C_B_CL;
3545  case Intrinsic::amdgcn_image_sample_c_lz:
3546  return AMDGPUISD::IMAGE_SAMPLE_C_LZ;
3547  case Intrinsic::amdgcn_image_sample_c_cd:
3548  return AMDGPUISD::IMAGE_SAMPLE_C_CD;
3549  case Intrinsic::amdgcn_image_sample_c_cd_cl:
3550  return AMDGPUISD::IMAGE_SAMPLE_C_CD_CL;
3551 
3552  // Sample with offsets.
3553  case Intrinsic::amdgcn_image_sample_o:
3554  return AMDGPUISD::IMAGE_SAMPLE_O;
3555  case Intrinsic::amdgcn_image_sample_cl_o:
3556  return AMDGPUISD::IMAGE_SAMPLE_CL_O;
3557  case Intrinsic::amdgcn_image_sample_d_o:
3558  return AMDGPUISD::IMAGE_SAMPLE_D_O;
3559  case Intrinsic::amdgcn_image_sample_d_cl_o:
3560  return AMDGPUISD::IMAGE_SAMPLE_D_CL_O;
3561  case Intrinsic::amdgcn_image_sample_l_o:
3562  return AMDGPUISD::IMAGE_SAMPLE_L_O;
3563  case Intrinsic::amdgcn_image_sample_b_o:
3564  return AMDGPUISD::IMAGE_SAMPLE_B_O;
3565  case Intrinsic::amdgcn_image_sample_b_cl_o:
3566  return AMDGPUISD::IMAGE_SAMPLE_B_CL_O;
3567  case Intrinsic::amdgcn_image_sample_lz_o:
3568  return AMDGPUISD::IMAGE_SAMPLE_LZ_O;
3569  case Intrinsic::amdgcn_image_sample_cd_o:
3570  return AMDGPUISD::IMAGE_SAMPLE_CD_O;
3571  case Intrinsic::amdgcn_image_sample_cd_cl_o:
3572  return AMDGPUISD::IMAGE_SAMPLE_CD_CL_O;
3573 
3574  // Sample with comparison and offsets.
3575  case Intrinsic::amdgcn_image_sample_c_o:
3576  return AMDGPUISD::IMAGE_SAMPLE_C_O;
3577  case Intrinsic::amdgcn_image_sample_c_cl_o:
3578  return AMDGPUISD::IMAGE_SAMPLE_C_CL_O;
3579  case Intrinsic::amdgcn_image_sample_c_d_o:
3580  return AMDGPUISD::IMAGE_SAMPLE_C_D_O;
3581  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
3582  return AMDGPUISD::IMAGE_SAMPLE_C_D_CL_O;
3583  case Intrinsic::amdgcn_image_sample_c_l_o:
3584  return AMDGPUISD::IMAGE_SAMPLE_C_L_O;
3585  case Intrinsic::amdgcn_image_sample_c_b_o:
3586  return AMDGPUISD::IMAGE_SAMPLE_C_B_O;
3587  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
3588  return AMDGPUISD::IMAGE_SAMPLE_C_B_CL_O;
3589  case Intrinsic::amdgcn_image_sample_c_lz_o:
3590  return AMDGPUISD::IMAGE_SAMPLE_C_LZ_O;
3591  case Intrinsic::amdgcn_image_sample_c_cd_o:
3592  return AMDGPUISD::IMAGE_SAMPLE_C_CD_O;
3593  case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
3594  return AMDGPUISD::IMAGE_SAMPLE_C_CD_CL_O;
3595 
3596  // Basic gather4.
3597  case Intrinsic::amdgcn_image_gather4:
3598  return AMDGPUISD::IMAGE_GATHER4;
3599  case Intrinsic::amdgcn_image_gather4_cl:
3600  return AMDGPUISD::IMAGE_GATHER4_CL;
3601  case Intrinsic::amdgcn_image_gather4_l:
3602  return AMDGPUISD::IMAGE_GATHER4_L;
3603  case Intrinsic::amdgcn_image_gather4_b:
3604  return AMDGPUISD::IMAGE_GATHER4_B;
3605  case Intrinsic::amdgcn_image_gather4_b_cl:
3606  return AMDGPUISD::IMAGE_GATHER4_B_CL;
3607  case Intrinsic::amdgcn_image_gather4_lz:
3608  return AMDGPUISD::IMAGE_GATHER4_LZ;
3609 
3610  // Gather4 with comparison.
3611  case Intrinsic::amdgcn_image_gather4_c:
3612  return AMDGPUISD::IMAGE_GATHER4_C;
3613  case Intrinsic::amdgcn_image_gather4_c_cl:
3614  return AMDGPUISD::IMAGE_GATHER4_C_CL;
3615  case Intrinsic::amdgcn_image_gather4_c_l:
3616  return AMDGPUISD::IMAGE_GATHER4_C_L;
3617  case Intrinsic::amdgcn_image_gather4_c_b:
3618  return AMDGPUISD::IMAGE_GATHER4_C_B;
3619  case Intrinsic::amdgcn_image_gather4_c_b_cl:
3620  return AMDGPUISD::IMAGE_GATHER4_C_B_CL;
3621  case Intrinsic::amdgcn_image_gather4_c_lz:
3622  return AMDGPUISD::IMAGE_GATHER4_C_LZ;
3623 
3624  // Gather4 with offsets.
3625  case Intrinsic::amdgcn_image_gather4_o:
3626  return AMDGPUISD::IMAGE_GATHER4_O;
3627  case Intrinsic::amdgcn_image_gather4_cl_o:
3628  return AMDGPUISD::IMAGE_GATHER4_CL_O;
3629  case Intrinsic::amdgcn_image_gather4_l_o:
3630  return AMDGPUISD::IMAGE_GATHER4_L_O;
3631  case Intrinsic::amdgcn_image_gather4_b_o:
3632  return AMDGPUISD::IMAGE_GATHER4_B_O;
3633  case Intrinsic::amdgcn_image_gather4_b_cl_o:
3634  return AMDGPUISD::IMAGE_GATHER4_B_CL_O;
3635  case Intrinsic::amdgcn_image_gather4_lz_o:
3636  return AMDGPUISD::IMAGE_GATHER4_LZ_O;
3637 
3638  // Gather4 with comparison and offsets.
3639  case Intrinsic::amdgcn_image_gather4_c_o:
3640  return AMDGPUISD::IMAGE_GATHER4_C_O;
3641  case Intrinsic::amdgcn_image_gather4_c_cl_o:
3642  return AMDGPUISD::IMAGE_GATHER4_C_CL_O;
3643  case Intrinsic::amdgcn_image_gather4_c_l_o:
3644  return AMDGPUISD::IMAGE_GATHER4_C_L_O;
3645  case Intrinsic::amdgcn_image_gather4_c_b_o:
3646  return AMDGPUISD::IMAGE_GATHER4_C_B_O;
3647  case Intrinsic::amdgcn_image_gather4_c_b_cl_o:
3648  return AMDGPUISD::IMAGE_GATHER4_C_B_CL_O;
3649  case Intrinsic::amdgcn_image_gather4_c_lz_o:
3650  return AMDGPUISD::IMAGE_GATHER4_C_LZ_O;
3651 
3652  default:
3653  break;
3654  }
3655  return 0;
3656 }
3657 
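// Convert a D16 load result back to the requested packed f16 vector type. On
// subtargets with unpacked D16 memory operations each f16 element comes back
// in its own 32-bit lane, so the result is first truncated to 16-bit lanes;
// packed subtargets only need the final bitcast.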
3658 static SDValue adjustLoadValueType(SDValue Result, EVT LoadVT, SDLoc DL,
3659  SelectionDAG &DAG, bool Unpacked) {
3660  if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3661  // Truncate to v2i16/v4i16.
3662  EVT IntLoadVT = LoadVT.changeTypeToInteger();
3663  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, IntLoadVT, Result);
3664  // Bitcast to original type (v2f16/v4f16).
3665  return DAG.getNode(ISD::BITCAST, DL, LoadVT, Trunc);
3666  }
3667  // Cast back to the original packed type.
3668  return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3669 }
3670 
3671 // This is to lower INTRINSIC_W_CHAIN with illegal result types.
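// v2f16/v4f16 results are loaded as an equivalent legal integer (or unpacked
// i32) type and converted back afterwards with adjustLoadValueType.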
3672 SDValue SITargetLowering::lowerIntrinsicWChain_IllegalReturnType(SDValue Op,
3673  SDValue &Chain, SelectionDAG &DAG) const {
3674  EVT LoadVT = Op.getValueType();
3675  // TODO: handle v3f16.
3676  if (LoadVT != MVT::v2f16 && LoadVT != MVT::v4f16)
3677  return SDValue();
3678 
3679  bool Unpacked = Subtarget->hasUnpackedD16VMem();
3680  EVT UnpackedLoadVT = (LoadVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32;
3681  EVT EquivLoadVT = Unpacked ? UnpackedLoadVT :
3682  getEquivalentMemType(*DAG.getContext(), LoadVT);
3683  // Change from v4f16/v2f16 to EquivLoadVT.
3684  SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3685 
3686  SDValue Res;
3687  SDLoc DL(Op);
3688  MemSDNode *M = cast<MemSDNode>(Op);
3689  unsigned IID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
3690  switch (IID) {
3691  case Intrinsic::amdgcn_tbuffer_load: {
3692  SDValue Ops[] = {
3693  Op.getOperand(0), // Chain
3694  Op.getOperand(2), // rsrc
3695  Op.getOperand(3), // vindex
3696  Op.getOperand(4), // voffset
3697  Op.getOperand(5), // soffset
3698  Op.getOperand(6), // offset
3699  Op.getOperand(7), // dfmt
3700  Op.getOperand(8), // nfmt
3701  Op.getOperand(9), // glc
3702  Op.getOperand(10) // slc
3703  };
3704  Res = DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, DL,
3705  VTList, Ops, M->getMemoryVT(),
3706  M->getMemOperand());
3707  Chain = Res.getValue(1);
3708  return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
3709  }
3710  case Intrinsic::amdgcn_buffer_load_format: {
3711  SDValue Ops[] = {
3712  Op.getOperand(0), // Chain
3713  Op.getOperand(2), // rsrc
3714  Op.getOperand(3), // vindex
3715  Op.getOperand(4), // offset
3716  Op.getOperand(5), // glc
3717  Op.getOperand(6) // slc
3718  };
3719  Res = DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
3720  DL, VTList, Ops, M->getMemoryVT(),
3721  M->getMemOperand());
3722  Chain = Res.getValue(1);
3723  return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
3724  }
3725  case Intrinsic::amdgcn_image_load:
3726  case Intrinsic::amdgcn_image_load_mip: {
3727  SDValue Ops[] = {
3728  Op.getOperand(0), // Chain
3729  Op.getOperand(2), // vaddr
3730  Op.getOperand(3), // rsrc
3731  Op.getOperand(4), // dmask
3732  Op.getOperand(5), // glc
3733  Op.getOperand(6), // slc
3734  Op.getOperand(7), // lwe
3735  Op.getOperand(8) // da
3736  };
3737  unsigned Opc = getImageOpcode(IID);
3738  Res = DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, M->getMemoryVT(),
3739  M->getMemOperand());
3740  Chain = Res.getValue(1);
3741  return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
3742  }
3743  // Basic sample.
3744  case Intrinsic::amdgcn_image_sample:
3745  case Intrinsic::amdgcn_image_sample_cl:
3746  case Intrinsic::amdgcn_image_sample_d:
3747  case Intrinsic::amdgcn_image_sample_d_cl:
3748  case Intrinsic::amdgcn_image_sample_l:
3749  case Intrinsic::amdgcn_image_sample_b:
3750  case Intrinsic::amdgcn_image_sample_b_cl:
3751  case Intrinsic::amdgcn_image_sample_lz:
3752  case Intrinsic::amdgcn_image_sample_cd:
3753  case Intrinsic::amdgcn_image_sample_cd_cl:
3754 
3755  // Sample with comparison.
3756  case Intrinsic::amdgcn_image_sample_c:
3757  case Intrinsic::amdgcn_image_sample_c_cl:
3758  case Intrinsic::amdgcn_image_sample_c_d:
3759  case Intrinsic::amdgcn_image_sample_c_d_cl:
3760  case Intrinsic::amdgcn_image_sample_c_l:
3761  case Intrinsic::amdgcn_image_sample_c_b:
3762  case Intrinsic::amdgcn_image_sample_c_b_cl:
3763  case Intrinsic::amdgcn_image_sample_c_lz:
3764  case Intrinsic::amdgcn_image_sample_c_cd:
3765  case Intrinsic::amdgcn_image_sample_c_cd_cl:
3766 
3767  // Sample with offsets.
3768  case Intrinsic::amdgcn_image_sample_o:
3769  case Intrinsic::amdgcn_image_sample_cl_o:
3770  case Intrinsic::amdgcn_image_sample_d_o:
3771  case Intrinsic::amdgcn_image_sample_d_cl_o:
3772  case Intrinsic::amdgcn_image_sample_l_o:
3773  case Intrinsic::amdgcn_image_sample_b_o:
3774  case Intrinsic::amdgcn_image_sample_b_cl_o:
3775  case Intrinsic::amdgcn_image_sample_lz_o:
3776  case Intrinsic::amdgcn_image_sample_cd_o:
3777  case Intrinsic::amdgcn_image_sample_cd_cl_o:
3778 
3779  // Sample with comparison and offsets.
3780  case Intrinsic::amdgcn_image_sample_c_o:
3781  case Intrinsic::amdgcn_image_sample_c_cl_o:
3782  case Intrinsic::amdgcn_image_sample_c_d_o:
3783  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
3784  case Intrinsic::amdgcn_image_sample_c_l_o:
3785  case Intrinsic::amdgcn_image_sample_c_b_o:
3786  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
3787  case Intrinsic::amdgcn_image_sample_c_lz_o:
3788  case Intrinsic::amdgcn_image_sample_c_cd_o:
3789  case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
3790 
3791  // Basic gather4
3792  case Intrinsic::amdgcn_image_gather4:
3793  case Intrinsic::amdgcn_image_gather4_cl:
3794  case Intrinsic::amdgcn_image_gather4_l:
3795  case Intrinsic::amdgcn_image_gather4_b:
3796  case Intrinsic::amdgcn_image_gather4_b_cl:
3797  case Intrinsic::amdgcn_image_gather4_lz:
3798 
3799  // Gather4 with comparison
3800  case Intrinsic::amdgcn_image_gather4_c:
3801  case Intrinsic::amdgcn_image_gather4_c_cl:
3802  case Intrinsic::amdgcn_image_gather4_c_l:
3803  case Intrinsic::amdgcn_image_gather4_c_b:
3804  case Intrinsic::amdgcn_image_gather4_c_b_cl:
3805  case Intrinsic::amdgcn_image_gather4_c_lz:
3806 
3807  // Gather4 with offsets
3808  case Intrinsic::amdgcn_image_gather4_o:
3809  case Intrinsic::amdgcn_image_gather4_cl_o:
3810  case Intrinsic::amdgcn_image_gather4_l_o:
3811  case Intrinsic::amdgcn_image_gather4_b_o:
3812  case Intrinsic::amdgcn_image_gather4_b_cl_o:
3813  case Intrinsic::amdgcn_image_gather4_lz_o:
3814 
3815  // Gather4 with comparison and offsets
3816  case Intrinsic::amdgcn_image_gather4_c_o:
3817  case Intrinsic::amdgcn_image_gather4_c_cl_o:
3818  case Intrinsic::amdgcn_image_gather4_c_l_o:
3819  case Intrinsic::amdgcn_image_gather4_c_b_o:
3820  case Intrinsic::amdgcn_image_gather4_c_b_cl_o:
3821  case Intrinsic::amdgcn_image_gather4_c_lz_o: {
3822  SDValue Ops[] = {
3823  Op.getOperand(0), // Chain
3824  Op.getOperand(2), // vaddr
3825  Op.getOperand(3), // rsrc
3826  Op.getOperand(4), // sampler
3827  Op.getOperand(5), // dmask
3828  Op.getOperand(6), // unorm
3829  Op.getOperand(7), // glc
3830  Op.getOperand(8), // slc
3831  Op.getOperand(9), // lwe
3832  Op.getOperand(10) // da
3833  };
3834  unsigned Opc = getImageOpcode(IID);
3835  Res = DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, M->getMemoryVT(),
3836  M->getMemOperand());
3837  Chain = Res.getValue(1);
3838  return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
3839  }
3840  default:
3841  return SDValue();
3842  }
3843 }
3844 
3845 void SITargetLowering::ReplaceNodeResults(SDNode *N,
3846  SmallVectorImpl<SDValue> &Results,
3847  SelectionDAG &DAG) const {
3848  switch (N->getOpcode()) {
3849  case ISD::INSERT_VECTOR_ELT: {
3850  if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3851  Results.push_back(Res);
3852  return;
3853  }
3854  case ISD::EXTRACT_VECTOR_ELT: {
3855  if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3856  Results.push_back(Res);
3857  return;
3858  }
3859  case ISD::INTRINSIC_WO_CHAIN: {
3860  unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3861  switch (IID) {
3862  case Intrinsic::amdgcn_cvt_pkrtz: {
3863  SDValue Src0 = N->getOperand(1);
3864  SDValue Src1 = N->getOperand(2);
3865  SDLoc SL(N);
3866  SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3867  Src0, Src1);
3868  Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3869  return;
3870  }
3871  case Intrinsic::amdgcn_cvt_pknorm_i16:
3872  case Intrinsic::amdgcn_cvt_pknorm_u16:
3873  case Intrinsic::amdgcn_cvt_pk_i16:
3874  case Intrinsic::amdgcn_cvt_pk_u16: {
3875  SDValue Src0 = N->getOperand(1);
3876  SDValue Src1 = N->getOperand(2);
3877  SDLoc SL(N);
3878  unsigned Opcode;
3879 
3880  if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
3881  Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
3882  else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
3883  Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
3884  else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
3885  Opcode = AMDGPUISD::CVT_PK_I16_I32;
3886  else
3887  Opcode = AMDGPUISD::CVT_PK_U16_U32;
3888 
3889  SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
3890  Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
3891  return;
3892  }
3893  }
3894  break;
3895  }
3896  case ISD::INTRINSIC_W_CHAIN: {
3897  SDValue Chain;
3898  if (SDValue Res = lowerIntrinsicWChain_IllegalReturnType(SDValue(N, 0),
3899  Chain, DAG)) {
3900  Results.push_back(Res);
3901  Results.push_back(Chain);
3902  return;
3903  }
3904  break;
3905  }
3906  case ISD::SELECT: {
3907  SDLoc SL(N);
3908  EVT VT = N->getValueType(0);
3909  EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3910  SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3911  SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3912 
3913  EVT SelectVT = NewVT;
3914  if (NewVT.bitsLT(MVT::i32)) {
3915  LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3916  RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3917  SelectVT = MVT::i32;
3918  }
3919 
3920  SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3921  N->getOperand(0), LHS, RHS);
3922 
3923  if (NewVT != SelectVT)
3924  NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3925  Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3926  return;
3927  }
3928  default:
3929  break;
3930  }
3931 }
3932 
3933 /// \brief Helper function for LowerBRCOND
3934 static SDNode *findUser(SDValue Value, unsigned Opcode) {
3935 
3936  SDNode *Parent = Value.getNode();
3937  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
3938  I != E; ++I) {
3939 
3940  if (I.getUse().get() != Value)
3941  continue;
3942 
3943  if (I->getOpcode() == Opcode)
3944  return *I;
3945  }
3946  return nullptr;
3947 }
3948 
3949 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
3950  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
3951  switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
3952  case Intrinsic::amdgcn_if:
3953  return AMDGPUISD::IF;
3954  case Intrinsic::amdgcn_else:
3955  return AMDGPUISD::ELSE;
3956  case Intrinsic::amdgcn_loop:
3957  return AMDGPUISD::LOOP;
3958  case Intrinsic::amdgcn_end_cf:
3959  llvm_unreachable("should not occur");
3960  default:
3961  return 0;
3962  }
3963  }
3964 
3965  // break, if_break, else_break are all only used as inputs to loop, not
3966  // directly as branch conditions.
3967  return 0;
3968 }
3969 
3970 void SITargetLowering::createDebuggerPrologueStackObjects(
3971  MachineFunction &MF) const {
3972  // Create stack objects that are used for emitting debugger prologue.
3973  //
3974  // Debugger prologue writes work group IDs and work item IDs to scratch memory
3975  // at a fixed location in the following format:
3976  // offset 0: work group ID x
3977  // offset 4: work group ID y
3978  // offset 8: work group ID z
3979  // offset 16: work item ID x
3980  // offset 20: work item ID y
3981  // offset 24: work item ID z
3982  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3983  int ObjectIdx = 0;
3984 
3985  // For each dimension:
3986  for (unsigned i = 0; i < 3; ++i) {
3987  // Create fixed stack object for work group ID.
3988  ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
3989  Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
3990  // Create fixed stack object for work item ID.
3991  ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
3992  Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
3993  }
3994 }
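// Illustrative sketch (not from the original file): with 4-byte slots, the
// fixed offsets created by the loop above reduce to the arithmetic below and
// match the documented layout. The helper names are invented for this example.
constexpr unsigned debuggerWorkGroupIDOffset(unsigned Dim) { return Dim * 4; }
constexpr unsigned debuggerWorkItemIDOffset(unsigned Dim) { return Dim * 4 + 16; }
static_assert(debuggerWorkGroupIDOffset(2) == 8, "work group ID z lives at offset 8");
static_assert(debuggerWorkItemIDOffset(1) == 20, "work item ID y lives at offset 20");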
3995 
3996 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
3997  const Triple &TT = getTargetMachine().getTargetTriple();
3998  return (GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
3999  GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
4000  AMDGPU::shouldEmitConstantsToTextSection(TT);
4001 }
4002 
4003 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
4004  return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
4005  GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
4006  GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
4007  !shouldEmitFixup(GV) &&
4008  !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4009 }
4010 
4011 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4012  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4013 }
4014 
4015 /// This transforms the control flow intrinsics to get the branch destination
4016 /// as the last parameter, and also switches the branch target with BR if the need arises.
4017 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4018  SelectionDAG &DAG) const {
4019  SDLoc DL(BRCOND);
4020 
4021  SDNode *Intr = BRCOND.getOperand(1).getNode();
4022  SDValue Target = BRCOND.getOperand(2);
4023  SDNode *BR = nullptr;
4024  SDNode *SetCC = nullptr;
4025 
4026  if (Intr->getOpcode() == ISD::SETCC) {
4027  // As long as we negate the condition everything is fine
4028  SetCC = Intr;
4029  Intr = SetCC->getOperand(0).getNode();
4030 
4031  } else {
4032  // Get the target from BR if we don't negate the condition
4033  BR = findUser(BRCOND, ISD::BR);
4034  Target = BR->getOperand(1);
4035  }
4036 
4037  // FIXME: This changes the types of the intrinsics instead of introducing new
4038  // nodes with the correct types.
4039  // e.g. llvm.amdgcn.loop
4040 
4041  // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
4042  // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
4043 
4044  unsigned CFNode = isCFIntrinsic(Intr);
4045  if (CFNode == 0) {
4046  // This is a uniform branch so we don't need to legalize.
4047  return BRCOND;
4048  }
4049 
4050  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4051  Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4052 
4053  assert(!SetCC ||
4054  (SetCC->getConstantOperandVal(1) == 1 &&
4055  cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4056  ISD::SETNE));
4057 
4058  // operands of the new intrinsic call
4059  SmallVector<SDValue, 8> Ops;
4060  if (HaveChain)
4061  Ops.push_back(BRCOND.getOperand(0));
4062 
4063  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
4064  Ops.push_back(Target);
4065 
4066  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4067 
4068  // build the new intrinsic call
4069  SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
4070 
4071  if (!HaveChain) {
4072  SDValue Ops[] = {
4073  SDValue(Result, 0),
4074  BRCOND.getOperand(0)
4075  };
4076 
4077  Result = DAG.getMergeValues(Ops, DL).getNode();
4078  }
4079 
4080  if (BR) {
4081  // Give the branch instruction our target
4082  SDValue Ops[] = {
4083  BR->getOperand(0),
4084  BRCOND.getOperand(2)
4085  };
4086  SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4087  DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4088  BR = NewBR.getNode();
4089  }
4090 
4091  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4092 
4093  // Copy the intrinsic results to registers
4094  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4095  SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4096  if (!CopyToReg)
4097  continue;
4098 
4099  Chain = DAG.getCopyToReg(
4100  Chain, DL,
4101  CopyToReg->getOperand(1),
4102  SDValue(Result, i - 1),
4103  SDValue());
4104 
4105  DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4106  }
4107 
4108  // Remove the old intrinsic from the chain
4109  DAG.ReplaceAllUsesOfValueWith(
4110  SDValue(Intr, Intr->getNumValues() - 1),
4111  Intr->getOperand(0));
4112 
4113  return Chain;
4114 }
4115 
4116 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4117  SDValue Op,
4118  const SDLoc &DL,
4119  EVT VT) const {
4120  return Op.getValueType().bitsLE(VT) ?
4121  DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
4122  DAG.getNode(ISD::FTRUNC, DL, VT, Op);
4123 }
4124 
4125 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
4126  assert(Op.getValueType() == MVT::f16 &&
4127  "Do not know how to custom lower FP_ROUND for non-f16 type");
4128 
4129  SDValue Src = Op.getOperand(0);
4130  EVT SrcVT = Src.getValueType();
4131  if (SrcVT != MVT::f64)
4132  return Op;
4133 
4134  SDLoc DL(Op);
4135 
4136  SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4137  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
4138  return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
4139 }
4140 
4141 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4142  SDLoc SL(Op);
4143  MachineFunction &MF = DAG.getMachineFunction();
4144  SDValue Chain = Op.getOperand(0);
4145 
4146  unsigned TrapID = Op.getOpcode() == ISD::DEBUGTRAP ?
4147  SISubtarget::TrapIDLLVMDebugTrap : SISubtarget::TrapIDLLVMTrap;
4148 
4149  if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa &&
4150  Subtarget->isTrapHandlerEnabled()) {
4151  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4152  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4153  assert(UserSGPR != AMDGPU::NoRegister);
4154 
4155  SDValue QueuePtr = CreateLiveInRegister(
4156  DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4157 
4158  SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4159 
4160  SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4161  QueuePtr, SDValue());
4162 
4163  SDValue Ops[] = {
4164  ToReg,
4165  DAG.getTargetConstant(TrapID, SL, MVT::i16),
4166  SGPR01,
4167  ToReg.getValue(1)
4168  };
4169 
4170  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4171  }
4172 
4173  switch (TrapID) {
4174  case SISubtarget::TrapIDLLVMTrap:
4175  return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
4176  case SISubtarget::TrapIDLLVMDebugTrap: {
4177  DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
4178  "debugtrap handler not supported",
4179  Op.getDebugLoc(),
4180  DS_Warning);
4181  LLVMContext &Ctx = MF.getFunction().getContext();
4182  Ctx.diagnose(NoTrap);
4183  return Chain;
4184  }
4185  default:
4186  llvm_unreachable("unsupported trap handler type!");
4187  }
4188 
4189  return Chain;
4190 }
4191 
4192 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
4193  SelectionDAG &DAG) const {
4194  // FIXME: Use inline constants (src_{shared, private}_base) instead.
4195  if (Subtarget->hasApertureRegs()) {
4196  unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
4197  AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4198  AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4199  unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
4200  AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4201  AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4202  unsigned Encoding =
4203  AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4204  Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4205  WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
4206 
4207  SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4208  SDValue ApertureReg = SDValue(
4209  DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4210  SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4211  return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
4212  }
4213 
4214  MachineFunction &MF = DAG.getMachineFunction();
4216  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4217  assert(UserSGPR != AMDGPU::NoRegister);
4218 
4219  SDValue QueuePtr = CreateLiveInRegister(
4220  DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4221 
4222  // Offset into amd_queue_t for group_segment_aperture_base_hi /
4223  // private_segment_aperture_base_hi.
4224  uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;
4225 
4226  SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
4227 
4228  // TODO: Use custom target PseudoSourceValue.
4229  // TODO: We should use the value from the IR intrinsic call, but it might not
4230  // be available and how do we get it?
4231  Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
4232  AMDGPUASI.CONSTANT_ADDRESS));
4233 
4234  MachinePointerInfo PtrInfo(V, StructOffset);
4235  return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
4236  MinAlign(64, StructOffset),
4237  MachineMemOperand::MODereferenceable |
4238  MachineMemOperand::MOInvariant);
4239 }
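// Illustrative sketch (not from the original file): the fallback path above
// boils down to a 4-byte load from the queue descriptor at the hard-coded
// 0x40/0x44 offsets. QueuePtr stands for the raw amd_queue_t pointer held in
// the user SGPR pair; the helper name is invented for this example.
#include <cstdint>
#include <cstring>
static uint32_t loadApertureBaseHi(const unsigned char *QueuePtr, bool IsLocalAS) {
  const uint32_t StructOffset = IsLocalAS ? 0x40 : 0x44; // group vs. private aperture
  uint32_t BaseHi;
  std::memcpy(&BaseHi, QueuePtr + StructOffset, sizeof(BaseHi));
  return BaseHi; // high 32 bits of the segment aperture
}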
4240 
4241 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4242  SelectionDAG &DAG) const {
4243  SDLoc SL(Op);
4244  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4245 
4246  SDValue Src = ASC->getOperand(0);
4247  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4248 
4249  const AMDGPUTargetMachine &TM =
4250  static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4251 
4252  // flat -> local/private
4253  if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
4254  unsigned DestAS = ASC->getDestAddressSpace();
4255 
4256  if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
4257  DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
4258  unsigned NullVal = TM.getNullPointerValue(DestAS);
4259  SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4260  SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4261  SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4262 
4263  return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4264  NonNull, Ptr, SegmentNullPtr);
4265  }
4266  }
4267 
4268  // local/private -> flat
4269  if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
4270  unsigned SrcAS = ASC->getSrcAddressSpace();
4271 
4272  if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
4273  SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
4274  unsigned NullVal = TM.getNullPointerValue(SrcAS);
4275  SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4276 
4277  SDValue NonNull
4278  = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4279 
4280  SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
4281  SDValue CvtPtr
4282  = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4283 
4284  return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4285  DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4286  FlatNullPtr);
4287  }
4288  }
4289 
4290  // global <-> flat are no-ops and never emitted.
4291 
4292  const MachineFunction &MF = DAG.getMachineFunction();
4293  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
4294  MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
4295  DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4296 
4297  return DAG.getUNDEF(ASC->getValueType(0));
4298 }
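// Illustrative sketch (not from the original file): a scalar model of the
// local/private -> flat case above. 'ApertureHi' is the 32-bit value produced
// by getSegmentAperture, and the null values correspond to what the function
// queries from AMDGPUTargetMachine. The helper name is invented for this example.
#include <cstdint>
static uint64_t segmentToFlat(uint32_t SegPtr, uint32_t ApertureHi,
                              uint32_t SegmentNull, uint64_t FlatNull) {
  if (SegPtr == SegmentNull)
    return FlatNull;                            // null stays null across the cast
  return (uint64_t(ApertureHi) << 32) | SegPtr; // {ApertureHi, SegPtr} as one i64
}
// The flat -> local/private direction is the reverse: compare against the flat
// null value, then simply truncate the 64-bit pointer to its low 32 bits.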
4299 
4300 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4301  SelectionDAG &DAG) const {
4302  SDValue Idx = Op.getOperand(2);
4303  if (isa<ConstantSDNode>(Idx))
4304  return SDValue();
4305 
4306  // Avoid stack access for dynamic indexing.
4307  SDLoc SL(Op);
4308  SDValue Vec = Op.getOperand(0);
4309  SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1));
4310 
4311  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
4312  SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val);
4313 
4314  // Convert vector index to bit-index.
4315  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
4316  DAG.getConstant(16, SL, MVT::i32));
4317 
4318  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
4319 
4320  SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
4321  DAG.getConstant(0xffff, SL, MVT::i32),
4322  ScaledIdx);
4323 
4324  SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, DAG.getNode(ISD::SHL, SL, MVT::i32, ExtVal, ScaledIdx));
4325  SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
4326  DAG.getNOT(SL, BFM, MVT::i32), BCVec);
4327 
4328  SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
4329  return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
4330 }
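// Illustrative sketch (not from the original file): the scalar equivalent of
// the mask/merge DAG built above for a v2i16/v2f16 insert at a dynamic index
// (Idx is 0 or 1 for a two-element vector). The helper name is invented for
// this example.
#include <cstdint>
static uint32_t insertElt16(uint32_t VecBits, uint16_t Val, uint32_t Idx) {
  uint32_t ScaledIdx = Idx * 16;                     // vector index -> bit index
  uint32_t BFM = 0xffffu << ScaledIdx;               // mask for the selected lane
  uint32_t LHS = BFM & (uint32_t(Val) << ScaledIdx); // value shifted into the lane
  uint32_t RHS = ~BFM & VecBits;                     // the untouched lane
  return LHS | RHS;                                  // v_bfi_b32-style merge
}
// e.g. insertElt16(0xAAAABBBB, 0x1234, 1) == 0x1234BBBBu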
4331 
4332 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4333  SelectionDAG &DAG) const {
4334  SDLoc SL(Op);
4335 
4336  EVT ResultVT = Op.getValueType();
4337  SDValue Vec = Op.getOperand(0);
4338  SDValue Idx = Op.getOperand(1);
4339 
4340  DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4341 
4342  // Make sure we do any optimizations that will make it easier to fold
4343  // source modifiers before obscuring it with bit operations.
4344 
4345  // XXX - Why doesn't this get called when vector_shuffle is expanded?
4346  if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4347  return Combined;
4348 
4349  if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
4350  SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
4351 
4352  if (CIdx->getZExtValue() == 1) {
4353  Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
4354  DAG.getConstant(16, SL, MVT::i32));
4355  } else {
4356  assert(CIdx->getZExtValue() == 0);
4357  }
4358 
4359  if (ResultVT.bitsLT(MVT::i32))
4360  Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
4361  return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4362  }
4363 
4364  SDValue Sixteen = DAG.getConstant(16, SL, MVT::i32);
4365 
4366  // Convert vector index to bit-index.
4367  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Sixteen);
4368 
4369  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
4370  SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);
4371 
4372  SDValue Result = Elt;
4373  if (ResultVT.bitsLT(MVT::i32))
4374  Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
4375 
4376  return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4377 }
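// Illustrative sketch (not from the original file): the dynamic-extract path
// above is the scalar shift-and-truncate below. The helper name is invented
// for this example.
#include <cstdint>
static uint16_t extractElt16(uint32_t VecBits, uint32_t Idx) {
  uint32_t ScaledIdx = Idx * 16;         // vector index -> bit index
  return uint16_t(VecBits >> ScaledIdx); // shift the lane down, then truncate
}
// e.g. extractElt16(0x1234BBBB, 1) == 0x1234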
4378 
4379 bool
4380 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4381  // We can fold offsets for anything that doesn't require a GOT relocation.
4382  return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
4383  GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
4384  GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
4385  !shouldEmitGOTReloc(GA->getGlobal());
4386 }
4387 
4388 static SDValue
4390  const SDLoc &DL, unsigned Offset, EVT PtrVT,
4391  unsigned GAFlags = SIInstrInfo::MO_NONE) {
4392  // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4393  // lowered to the following code sequence:
4394  //
4395  // For constant address space:
4396  // s_getpc_b64 s[0:1]
4397  // s_add_u32 s0, s0, $symbol
4398  // s_addc_u32 s1, s1, 0
4399  //
4400  // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4401  // a fixup or relocation is emitted to replace $symbol with a literal
4402  // constant, which is a pc-relative offset from the encoding of the $symbol
4403  // operand to the global variable.
4404  //
4405  // For global address space:
4406  // s_getpc_b64 s[0:1]
4407  // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4408  // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4409  //
4410  // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4411  // fixups or relocations are emitted to replace $symbol@*@lo and
4412  // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4413  // which is a 64-bit pc-relative offset from the encoding of the $symbol
4414  // operand to the global variable.
4415  //
4416  // What we want here is an offset from the value returned by s_getpc
4417  // (which is the address of the s_add_u32 instruction) to the global
4418  // variable, but since the encoding of $symbol starts 4 bytes after the start
4419  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4420  // small. This requires us to add 4 to the global variable offset in order to
4421  // compute the correct address.
4422  SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4423  GAFlags);
4424  SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4425  GAFlags == SIInstrInfo::MO_NONE ?
4426  GAFlags : GAFlags + 1);
4427  return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
4428 }
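// Illustrative sketch (not from the original file): why the '+ 4' above is
// needed. The fixup is applied to the $symbol operand, which is encoded 4
// bytes past the address returned by s_getpc_b64, so the resolved literal is
// GV + Offset + 4 - (PC + 4); adding it back to PC yields GV + Offset exactly.
// The helper name and addresses are invented for this example.
#include <cstdint>
static uint64_t modelPCRelAddress(uint64_t PC, uint64_t GV, uint64_t Offset) {
  uint64_t Fixup = (GV + Offset + 4) - (PC + 4); // pc-relative literal emitted
  return PC + Fixup;                             // what s_add_u32/s_addc_u32 compute
}
// e.g. modelPCRelAddress(0x1000, 0x2040, 0) == 0x2040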
4429 
4430 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
4431  SDValue Op,
4432  SelectionDAG &DAG) const {
4433  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
4434  const GlobalValue *GV = GSD->getGlobal();
4435 
4436  if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
4437  GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT &&
4438  GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS &&
4439  // FIXME: It isn't correct to rely on the type of the pointer. This should
4440  // be removed when address space 0 is 64-bit.
4441  !GV->getType()->getElementType()->isFunctionTy())
4442  return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
4443 
4444  SDLoc DL(GSD);
4445  EVT PtrVT = Op.getValueType();
4446 
4447  if (shouldEmitFixup(GV))
4448  return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
4449  else if (shouldEmitPCReloc(GV))
4450  return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
4451  SIInstrInfo::MO_REL32);
4452 
4453  SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
4454  SIInstrInfo::MO_GOTPCREL32);
4455 
4456  Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
4457  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
4458  const DataLayout &DataLayout = DAG.getDataLayout();
4459  unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
4460  // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
4461  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
4462 
4463  return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
4464  MachineMemOperand::MODereferenceable |
4465  MachineMemOperand::MOInvariant);
4466 }
4467 
4468 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
4469  const SDLoc &DL, SDValue V) const {
4470  // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
4471  // the destination register.
4472  //
4473  // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
4474  // so we will end up with redundant moves to m0.
4475  //
4476  // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
4477 
4478  // A Null SDValue creates a glue result.
4479  SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
4480  V, Chain);
4481  return SDValue(M0, 0);
4482 }
4483 
4484 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
4485  SDValue Op,
4486  MVT VT,
4487  unsigned Offset) const {
4488  SDLoc SL(Op);
4489  SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
4490  DAG.getEntryNode(), Offset, false);
4491  // The local size values will have the high 16 bits as zero.
4492  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
4493  DAG.getValueType(VT));
4494 }
4495 
4496 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4497  EVT VT) {
4498  DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
4499  "non-hsa intrinsic with hsa target",
4500  DL.getDebugLoc());
4501  DAG.getContext()->diagnose(BadIntrin);
4502  return DAG.getUNDEF(VT);
4503 }
4504 
4505 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4506  EVT VT) {
4507  DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
4508  "intrinsic not supported on subtarget",
4509  DL.getDebugLoc());
4510  DAG.getContext()->diagnose(BadIntrin);
4511  return DAG.getUNDEF(VT);
4512 }
4513 
4514 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4515  SelectionDAG &DAG) const {
4516  MachineFunction &MF = DAG.getMachineFunction();
4517  auto MFI = MF.getInfo<SIMachineFunctionInfo>();
4518 
4519  EVT VT = Op.getValueType();
4520  SDLoc DL(Op);
4521  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4522 
4523  // TODO: Should this propagate fast-math-flags?
4524 
4525  switch (IntrinsicID) {
4526  case Intrinsic::amdgcn_implicit_buffer_ptr: {
4527  if (getSubtarget()->isAmdCodeObjectV2(MF))
4528  return emitNonHSAIntrinsicError(DAG, DL, VT);
4529  return getPreloadedValue(DAG, *MFI, VT,
4530  AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
4531  }
4532  case Intrinsic::amdgcn_dispatch_ptr:
4533  case Intrinsic::amdgcn_queue_ptr: {
4534  if (!Subtarget->isAmdCodeObjectV2(MF)) {
4535  DiagnosticInfoUnsupported BadIntrin(
4536  MF.getFunction(), "unsupported hsa intrinsic without hsa target",
4537  DL.getDebugLoc());
4538  DAG.getContext()->diagnose(BadIntrin);
4539  return DAG.getUNDEF(VT);
4540  }
4541 
4542  auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
4543  AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
4544  return getPreloadedValue(DAG, *MFI, VT, RegID);
4545  }
4546  case Intrinsic::amdgcn_implicitarg_ptr: {
4547  if (MFI->isEntryFunction())
4548  return getImplicitArgPtr(DAG, DL);
4549  return getPreloadedValue(DAG, *MFI, VT,
4550  AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
4551  }
4552  case Intrinsic::amdgcn_kernarg_segment_ptr: {
4553  return getPreloadedValue(DAG, *MFI, VT,
4554  AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
4555  }
4556  case Intrinsic::amdgcn_dispatch_id: {
4557  return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
4558  }
4559  case Intrinsic::amdgcn_rcp:
4560  return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
4561  case Intrinsic::amdgcn_rsq:
4562  return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
4563  case Intrinsic::amdgcn_rsq_legacy:
4564  if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
4565  return emitRemovedIntrinsicError(DAG, DL, VT);
4566 
4567  return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
4568  case Intrinsic::amdgcn_rcp_legacy:
4569  if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
4570  return emitRemovedIntrinsicError(DAG, DL, VT);
4571  return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
4572  case Intrinsic::amdgcn_rsq_clamp: {
4573  if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
4574  return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
4575 
4576  Type *Type = VT.getTypeForEVT(*DAG.getContext());
4577  APFloat Max = APFloat::getLargest(Type->getFltSemantics());
4578  APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
4579 
4580  SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
4581  SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
4582  DAG.getConstantFP(Max, DL, VT));
4583  return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
4584  DAG.getConstantFP(Min, DL, VT));
4585  }
4586  case Intrinsic::r600_read_ngroups_x:
4587  if (Subtarget->isAmdHsaOS())
4588  return emitNonHSAIntrinsicError(DAG, DL, VT);
4589 
4590  return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4591  SI::KernelInputOffsets::NGROUPS_X, false);
4592  case Intrinsic::r600_read_ngroups_y:
4593  if (Subtarget->isAmdHsaOS())
4594  return emitNonHSAIntrinsicError(DAG, DL, VT);
4595 
4596  return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4597  SI::KernelInputOffsets::NGROUPS_Y, false);
4598  case Intrinsic::r600_read_ngroups_z:
4599  if (Subtarget->isAmdHsaOS())
4600  return emitNonHSAIntrinsicError(DAG, DL, VT);
4601 
4602  return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4603  SI::KernelInputOffsets::NGROUPS_Z, false);
4604  case Intrinsic::r600_read_global_size_x:
4605  if (Subtarget->isAmdHsaOS())
4606  return emitNonHSAIntrinsicError(DAG, DL, VT);
4607 
4608  return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
4609  SI::KernelInputOffsets::GLOBAL_SIZE_X, false);