LLVM  9.0.0svn
SIISelLowering.cpp
1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Custom DAG lowering for SI
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #if defined(_MSC_VER) || defined(__MINGW32__)
15 // Provide M_PI.
16 #define _USE_MATH_DEFINES
17 #endif
18 
19 #include "SIISelLowering.h"
20 #include "AMDGPU.h"
21 #include "AMDGPUSubtarget.h"
22 #include "AMDGPUTargetMachine.h"
23 #include "SIDefines.h"
24 #include "SIInstrInfo.h"
25 #include "SIMachineFunctionInfo.h"
26 #include "SIRegisterInfo.h"
28 #include "Utils/AMDGPUBaseInfo.h"
29 #include "llvm/ADT/APFloat.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/BitVector.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/CodeGen/Analysis.h"
56 #include "llvm/IR/Constants.h"
57 #include "llvm/IR/DataLayout.h"
58 #include "llvm/IR/DebugLoc.h"
59 #include "llvm/IR/DerivedTypes.h"
60 #include "llvm/IR/DiagnosticInfo.h"
61 #include "llvm/IR/Function.h"
62 #include "llvm/IR/GlobalValue.h"
63 #include "llvm/IR/InstrTypes.h"
64 #include "llvm/IR/Instruction.h"
65 #include "llvm/IR/Instructions.h"
66 #include "llvm/IR/IntrinsicInst.h"
67 #include "llvm/IR/Type.h"
68 #include "llvm/Support/Casting.h"
69 #include "llvm/Support/CodeGen.h"
71 #include "llvm/Support/Compiler.h"
73 #include "llvm/Support/KnownBits.h"
77 #include <cassert>
78 #include <cmath>
79 #include <cstdint>
80 #include <iterator>
81 #include <tuple>
82 #include <utility>
83 #include <vector>
84 
85 using namespace llvm;
86 
87 #define DEBUG_TYPE "si-lower"
88 
89 STATISTIC(NumTailCalls, "Number of tail calls");
90 
92  "amdgpu-vgpr-index-mode",
93  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
94  cl::init(false));
95 
97  "amdgpu-disable-loop-alignment",
98  cl::desc("Do not align and prefetch loops"),
99  cl::init(false));
100 
101 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
102  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
103  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
104  if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
105  return AMDGPU::SGPR0 + Reg;
106  }
107  }
108  llvm_unreachable("Cannot allocate sgpr");
109 }
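// Illustrative use (assumed allocation state, not from this file): if SGPR0
// through SGPR3 were already claimed, e.g. for the private segment buffer,
// findFirstFreeSGPR(CCInfo) returns AMDGPU::SGPR0 + 4, i.e. SGPR4, the first
// register CCInfo has not yet allocated.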
110 
112  const GCNSubtarget &STI)
113  : AMDGPUTargetLowering(TM, STI),
114  Subtarget(&STI) {
115  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
116  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
117 
118  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
119  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
120 
121  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
122  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
123  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
124 
125  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
126  addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);
127 
128  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
129  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);
130 
131  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
132  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
133 
134  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
135  addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);
136 
137  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
138  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
139 
140  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
141  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
142 
143  if (Subtarget->has16BitInsts()) {
144  addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
145  addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
146 
147  // Unless there are also VOP3P operations, no operations are really legal.
148  addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
149  addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
150  addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
151  addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
152  }
153 
155 
156  // We need to custom lower vector stores from local memory
165 
174 
185 
188 
193 
199 
204 
207 
215 
223 
230 
237 
244 
247 
250 
254 
255 #if 0
258 #endif
259 
260  // We only support LOAD/STORE and vector manipulation ops for vectors
261  // with > 4 elements.
264  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
265  switch (Op) {
266  case ISD::LOAD:
267  case ISD::STORE:
268  case ISD::BUILD_VECTOR:
269  case ISD::BITCAST:
275  break;
276  case ISD::CONCAT_VECTORS:
278  break;
279  default:
281  break;
282  }
283  }
284  }
285 
287 
288  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
289  // is expanded to avoid having two separate loops in case the index is a VGPR.
290 
291  // Most operations are naturally 32-bit vector operations. We only support
292  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
293  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
296 
299 
302 
305  }
306 
311 
314 
315  // Avoid stack access for these.
316  // TODO: Generalize to more vector types.
321 
327 
331 
336 
337  // Deal with vec3 vector operations when widened to vec4.
342 
343  // Deal with vec5 vector operations when widened to vec8.
348 
349  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
350  // and output demarshalling
353 
354  // We can't return success/failure, only the old value,
355  // so let LLVM add the comparison
358 
359  if (Subtarget->hasFlatAddressSpace()) {
362  }
363 
366 
367  // This is s_memtime on SI and s_memrealtime on VI.
371 
372  if (Subtarget->has16BitInsts()) {
376  }
377 
378  // v_mad_f32 does not support denormals according to some sources.
379  if (!Subtarget->hasFP32Denormals())
381 
382  if (!Subtarget->hasBFI()) {
383  // fcopysign can be done in a single instruction with BFI.
386  }
387 
388  if (!Subtarget->hasBCNT(32))
390 
391  if (!Subtarget->hasBCNT(64))
393 
394  if (Subtarget->hasFFBH())
396 
397  if (Subtarget->hasFFBL())
399 
400  // We only really have 32-bit BFE instructions (and 16-bit on VI).
401  //
402  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
403  // effort to match them now. We want this to be false for i64 cases when the
404  // extraction isn't restricted to the upper or lower half. Ideally we would
405  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
406  // span the midpoint are probably relatively rare, so don't worry about them
407  // for now.
408  if (Subtarget->hasBFE())
409  setHasExtractBitsInsn(true);
410 
415 
416 
417  // These are really only legal for ieee_mode functions. We should be avoiding
418  // them for functions that don't have ieee_mode enabled, so just say they are
419  // legal.
424 
425 
426  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
430  } else {
435  }
436 
438 
443 
444  if (Subtarget->has16BitInsts()) {
446 
449 
452 
455 
458 
463 
466 
472 
474 
476 
478 
480 
485 
490 
491  // F16 - Constant Actions.
493 
494  // F16 - Load/Store Actions.
499 
500  // F16 - VOP1 Actions.
509 
510  // F16 - VOP2 Actions.
513 
515 
516  // F16 - VOP3 Actions.
518  if (!Subtarget->hasFP16Denormals() && STI.hasMadF16())
520 
521  for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
522  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
523  switch (Op) {
524  case ISD::LOAD:
525  case ISD::STORE:
526  case ISD::BUILD_VECTOR:
527  case ISD::BITCAST:
533  break;
534  case ISD::CONCAT_VECTORS:
536  break;
537  default:
539  break;
540  }
541  }
542  }
543 
544  // XXX - Do these do anything? Vector constants turn into build_vector.
547 
550 
555 
560 
567 
572 
577 
582 
586 
587  if (!Subtarget->hasVOP3PInsts()) {
590  }
591 
593  // This isn't really legal, but this avoids the legalizer unrolling it (and
594  // allows matching fneg (fabs x) patterns)
596 
601 
604 
607  }
608 
609  if (Subtarget->hasVOP3PInsts()) {
620 
624 
627 
629 
632 
639 
644 
647 
650 
654 
658  }
659 
662 
663  if (Subtarget->has16BitInsts()) {
668  } else {
669  // Legalization hack.
672 
675  }
676 
679  }
680 
708 
709  // All memory operations. Some folding on the pointer operand is done to help
710  // matching the constant offsets in the addressing modes.
729 
731 }
732 
734  return Subtarget;
735 }
736 
737 //===----------------------------------------------------------------------===//
738 // TargetLowering queries
739 //===----------------------------------------------------------------------===//
740 
741 // v_mad_mix* support a conversion from f16 to f32.
742 //
743 // There is one special case where this would also be OK to use with denormals
744 // enabled, but we don't currently handle it.
746  EVT DestVT, EVT SrcVT) const {
747  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
748  (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
749  DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
750  SrcVT.getScalarType() == MVT::f16;
751 }
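// A rough illustration of what this enables (instruction names assumed from the
// mad-mix/fma-mix features checked above): with mixed-precision instructions
// available and f32 denormals disabled, a DAG such as
//   (fmad (fpext f16:$a), f32:$b, f32:$c)
// can keep the fpext folded into a single mixed-precision multiply-add like
// v_mad_mix_f32, instead of emitting an explicit f16-to-f32 conversion first.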
752 
754  // SI has some legal vector types, but no legal vector operations. Say no
755  // shuffles are legal in order to prefer scalarizing some vector operations.
756  return false;
757 }
758 
760  CallingConv::ID CC,
761  EVT VT) const {
762  // TODO: Consider splitting all arguments into 32-bit pieces.
763  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
764  EVT ScalarVT = VT.getScalarType();
765  unsigned Size = ScalarVT.getSizeInBits();
766  if (Size == 32)
767  return ScalarVT.getSimpleVT();
768 
769  if (Size == 64)
770  return MVT::i32;
771 
772  if (Size == 16 && Subtarget->has16BitInsts())
773  return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
774  }
775 
776  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
777 }
778 
780  CallingConv::ID CC,
781  EVT VT) const {
782  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
783  unsigned NumElts = VT.getVectorNumElements();
784  EVT ScalarVT = VT.getScalarType();
785  unsigned Size = ScalarVT.getSizeInBits();
786 
787  if (Size == 32)
788  return NumElts;
789 
790  if (Size == 64)
791  return 2 * NumElts;
792 
793  if (Size == 16 && Subtarget->has16BitInsts())
794  return (VT.getVectorNumElements() + 1) / 2;
795  }
796 
797  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
798 }
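// Worked examples for a non-kernel calling convention: v2i64 is broken into
// 2 * 2 = 4 i32 registers, and v3f16 (when 16-bit instructions are available)
// is broken into (3 + 1) / 2 = 2 v2f16 registers.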
799 
802  EVT VT, EVT &IntermediateVT,
803  unsigned &NumIntermediates, MVT &RegisterVT) const {
804  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
805  unsigned NumElts = VT.getVectorNumElements();
806  EVT ScalarVT = VT.getScalarType();
807  unsigned Size = ScalarVT.getSizeInBits();
808  if (Size == 32) {
809  RegisterVT = ScalarVT.getSimpleVT();
810  IntermediateVT = RegisterVT;
811  NumIntermediates = NumElts;
812  return NumIntermediates;
813  }
814 
815  if (Size == 64) {
816  RegisterVT = MVT::i32;
817  IntermediateVT = RegisterVT;
818  NumIntermediates = 2 * NumElts;
819  return NumIntermediates;
820  }
821 
822  // FIXME: We should fix the ABI to be the same on targets without 16-bit
823  // support, but unless we can properly handle 3-vectors, it will still be
824  // inconsistent.
825  if (Size == 16 && Subtarget->has16BitInsts()) {
826  RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
827  IntermediateVT = RegisterVT;
828  NumIntermediates = (NumElts + 1) / 2;
829  return NumIntermediates;
830  }
831  }
832 
834  Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
835 }
836 
838  // Only limited forms of aggregate type currently expected.
839  assert(Ty->isStructTy() && "Expected struct type");
840 
841 
842  Type *ElementType = nullptr;
843  unsigned NumElts;
844  if (Ty->getContainedType(0)->isVectorTy()) {
845  VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
846  ElementType = VecComponent->getElementType();
847  NumElts = VecComponent->getNumElements();
848  } else {
849  ElementType = Ty->getContainedType(0);
850  NumElts = 1;
851  }
852 
853  assert((Ty->getContainedType(1) && Ty->getContainedType(1)->isIntegerTy(32)) && "Expected int32 type");
854 
855  // Calculate the size of the memVT type from the aggregate
856  unsigned Pow2Elts = 0;
857  unsigned ElementSize;
858  switch (ElementType->getTypeID()) {
859  default:
860  llvm_unreachable("Unknown type!");
861  case Type::IntegerTyID:
862  ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
863  break;
864  case Type::HalfTyID:
865  ElementSize = 16;
866  break;
867  case Type::FloatTyID:
868  ElementSize = 32;
869  break;
870  }
871  unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
872  Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);
873 
874  return MVT::getVectorVT(MVT::getVT(ElementType, false),
875  Pow2Elts);
876 }
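// Worked example: for a returned aggregate such as { <4 x float>, i32 } (the
// trailing i32 being the status dword some buffer/image intrinsics append,
// e.g. when TFE is used), ElementType is float, NumElts is 4 and
// AdditionalElts is 1, so Pow2Elts = 1 << Log2_32_Ceil(5) = 8 and the reported
// memVT is v8f32.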
877 
879  const CallInst &CI,
880  MachineFunction &MF,
881  unsigned IntrID) const {
882  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
883  AMDGPU::lookupRsrcIntrinsic(IntrID)) {
885  (Intrinsic::ID)IntrID);
886  if (Attr.hasFnAttribute(Attribute::ReadNone))
887  return false;
888 
890 
891  if (RsrcIntr->IsImage) {
892  Info.ptrVal = MFI->getImagePSV(
894  CI.getArgOperand(RsrcIntr->RsrcArg));
895  Info.align = 0;
896  } else {
897  Info.ptrVal = MFI->getBufferPSV(
899  CI.getArgOperand(RsrcIntr->RsrcArg));
900  }
901 
903  if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
905  Info.memVT = MVT::getVT(CI.getType(), true);
906  if (Info.memVT == MVT::Other) {
907  // Some intrinsics return an aggregate type - special case to work out
908  // the correct memVT
909  Info.memVT = memVTFromAggregate(CI.getType());
910  }
912  } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
913  Info.opc = ISD::INTRINSIC_VOID;
914  Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
916  } else {
917  // Atomic
919  Info.memVT = MVT::getVT(CI.getType());
923 
924  // XXX - Should this be volatile without known ordering?
926  }
927  return true;
928  }
929 
930  switch (IntrID) {
931  case Intrinsic::amdgcn_atomic_inc:
932  case Intrinsic::amdgcn_atomic_dec:
933  case Intrinsic::amdgcn_ds_ordered_add:
934  case Intrinsic::amdgcn_ds_ordered_swap:
935  case Intrinsic::amdgcn_ds_fadd:
936  case Intrinsic::amdgcn_ds_fmin:
937  case Intrinsic::amdgcn_ds_fmax: {
939  Info.memVT = MVT::getVT(CI.getType());
940  Info.ptrVal = CI.getOperand(0);
941  Info.align = 0;
943 
944  const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
945  if (!Vol->isZero())
947 
948  return true;
949  }
950  case Intrinsic::amdgcn_ds_append:
951  case Intrinsic::amdgcn_ds_consume: {
953  Info.memVT = MVT::getVT(CI.getType());
954  Info.ptrVal = CI.getOperand(0);
955  Info.align = 0;
957 
958  const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
959  if (!Vol->isZero())
961 
962  return true;
963  }
964  default:
965  return false;
966  }
967 }
968 
971  Type *&AccessTy) const {
972  switch (II->getIntrinsicID()) {
973  case Intrinsic::amdgcn_atomic_inc:
974  case Intrinsic::amdgcn_atomic_dec:
975  case Intrinsic::amdgcn_ds_ordered_add:
976  case Intrinsic::amdgcn_ds_ordered_swap:
977  case Intrinsic::amdgcn_ds_fadd:
978  case Intrinsic::amdgcn_ds_fmin:
979  case Intrinsic::amdgcn_ds_fmax: {
980  Value *Ptr = II->getArgOperand(0);
981  AccessTy = II->getType();
982  Ops.push_back(Ptr);
983  return true;
984  }
985  default:
986  return false;
987  }
988 }
989 
990 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
991  if (!Subtarget->hasFlatInstOffsets()) {
992  // Flat instructions do not have offsets, and only have the register
993  // address.
994  return AM.BaseOffs == 0 && AM.Scale == 0;
995  }
996 
997  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
998  // the sign bit is ignored and is treated as a 12-bit unsigned offset.
999 
1000  // GFX10 shrank the signed offset to 12 bits. When using regular flat
1001  // instructions, the sign bit is also ignored and the offset is treated as an
1002  // 11-bit unsigned offset.
1003 
1004  if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
1005  return isUInt<11>(AM.BaseOffs) && AM.Scale == 0;
1006 
1007  // Just r + i
1008  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
1009 }
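// For example, on GFX9 (flat instruction offsets available) an addressing mode
// of (reg + 2048) with Scale == 0 is legal since 2048 fits in 12 unsigned bits,
// while (reg + 8192) is not and the offset has to be added into the address
// register first.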
1010 
1012  if (Subtarget->hasFlatGlobalInsts())
1013  return isInt<13>(AM.BaseOffs) && AM.Scale == 0;
1014 
1015  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
1016  // Assume that we will use FLAT for all global memory accesses
1017  // on VI.
1018  // FIXME: This assumption is currently wrong. On VI we still use
1019  // MUBUF instructions for the r + i addressing mode. As currently
1020  // implemented, the MUBUF instructions only work on buffer < 4GB.
1021  // It may be possible to support > 4GB buffers with MUBUF instructions,
1022  // by setting the stride value in the resource descriptor which would
1023  // increase the size limit to (stride * 4GB). However, this is risky,
1024  // because it has never been validated.
1025  return isLegalFlatAddressingMode(AM);
1026  }
1027 
1028  return isLegalMUBUFAddressingMode(AM);
1029 }
1030 
1031 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1032  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1033  // additionally can do r + r + i with addr64. 32-bit has more addressing
1034  // mode options. Depending on the resource constant, it can also do
1035  // (i64 r0) + (i32 r1) * (i14 i).
1036  //
1037  // Private arrays end up using a scratch buffer most of the time, so also
1038  // assume those use MUBUF instructions. Scratch loads / stores are currently
1039  // implemented as mubuf instructions with offen bit set, so they are slightly
1040  // different from the normal addr64.
1041  if (!isUInt<12>(AM.BaseOffs))
1042  return false;
1043 
1044  // FIXME: Since we can split immediate into soffset and immediate offset,
1045  // would it make sense to allow any immediate?
1046 
1047  switch (AM.Scale) {
1048  case 0: // r + i or just i, depending on HasBaseReg.
1049  return true;
1050  case 1:
1051  return true; // We have r + r or r + i.
1052  case 2:
1053  if (AM.HasBaseReg) {
1054  // Reject 2 * r + r.
1055  return false;
1056  }
1057 
1058  // Allow 2 * r as r + r
1059  // Or 2 * r + i is allowed as r + r + i.
1060  return true;
1061  default: // Don't allow n * r
1062  return false;
1063  }
1064 }
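// In AddrMode terms, {BaseReg, BaseOffs = 4092, Scale = 0} and
// {BaseReg, BaseOffs = 16, Scale = 1, ScaledReg} are accepted here, while
// {BaseOffs = 4096} is rejected (it no longer fits in 12 unsigned bits) and
// {BaseReg, Scale = 2, ScaledReg} is rejected as 2 * r + r.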
1065 
1067  const AddrMode &AM, Type *Ty,
1068  unsigned AS, Instruction *I) const {
1069  // No global is ever allowed as a base.
1070  if (AM.BaseGV)
1071  return false;
1072 
1073  if (AS == AMDGPUAS::GLOBAL_ADDRESS)
1074  return isLegalGlobalAddressingMode(AM);
1075 
1076  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
1079  // If the offset isn't a multiple of 4, it probably isn't going to be
1080  // correctly aligned.
1081  // FIXME: Can we get the real alignment here?
1082  if (AM.BaseOffs % 4 != 0)
1083  return isLegalMUBUFAddressingMode(AM);
1084 
1085  // There are no SMRD extloads, so if we have to do a small type access we
1086  // will use a MUBUF load.
1087  // FIXME?: We also need to do this if unaligned, but we don't know the
1088  // alignment here.
1089  if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
1090  return isLegalGlobalAddressingMode(AM);
1091 
1092  if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
1093  // SMRD instructions have an 8-bit, dword offset on SI.
1094  if (!isUInt<8>(AM.BaseOffs / 4))
1095  return false;
1096  } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
1097  // On CI+, this can also be a 32-bit literal constant offset. If it fits
1098  // in 8-bits, it can use a smaller encoding.
1099  if (!isUInt<32>(AM.BaseOffs / 4))
1100  return false;
1101  } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1102  // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1103  if (!isUInt<20>(AM.BaseOffs))
1104  return false;
1105  } else
1106  llvm_unreachable("unhandled generation");
1107 
1108  if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1109  return true;
1110 
1111  if (AM.Scale == 1 && AM.HasBaseReg)
1112  return true;
1113 
1114  return false;
1115 
1116  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1117  return isLegalMUBUFAddressingMode(AM);
1118  } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1119  AS == AMDGPUAS::REGION_ADDRESS) {
1120  // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1121  // field.
1122  // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1123  // an 8-bit dword offset but we don't know the alignment here.
1124  if (!isUInt<16>(AM.BaseOffs))
1125  return false;
1126 
1127  if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1128  return true;
1129 
1130  if (AM.Scale == 1 && AM.HasBaseReg)
1131  return true;
1132 
1133  return false;
1134  } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1136  // For an unknown address space, this usually means that this is for some
1137  // reason being used for pure arithmetic, and not based on some addressing
1138  // computation. We don't have instructions that compute pointers with any
1139  // addressing modes, so treat them as having no offset like flat
1140  // instructions.
1141  return isLegalFlatAddressingMode(AM);
1142  } else {
1143  llvm_unreachable("unhandled address space");
1144  }
1145 }
1146 
1147 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1148  const SelectionDAG &DAG) const {
1149  if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
1150  return (MemVT.getSizeInBits() <= 4 * 32);
1151  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1152  unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1153  return (MemVT.getSizeInBits() <= MaxPrivateBits);
1154  } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
1155  return (MemVT.getSizeInBits() <= 2 * 32);
1156  }
1157  return true;
1158 }
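// e.g. four adjacent i32 stores to global or flat memory may be merged into a
// single 128-bit (v4i32) store, while LDS stores are only merged up to 64 bits
// and private stores up to 8 * getMaxPrivateElementSize() bits.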
1159 
1161  unsigned AddrSpace,
1162  unsigned Align,
1163  bool *IsFast) const {
1164  if (IsFast)
1165  *IsFast = false;
1166 
1167  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
1168  // which isn't a simple VT.
1169  // Until MVT is extended to handle this, simply check for the size and
1170  // rely on the condition below: allow accesses if the size is a multiple of 4.
1171  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
1172  VT.getStoreSize() > 16)) {
1173  return false;
1174  }
1175 
1176  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1177  AddrSpace == AMDGPUAS::REGION_ADDRESS) {
1178  // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1179  // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1180  // with adjacent offsets.
1181  bool AlignedBy4 = (Align % 4 == 0);
1182  if (IsFast)
1183  *IsFast = AlignedBy4;
1184 
1185  return AlignedBy4;
1186  }
1187 
1188  // FIXME: We have to be conservative here and assume that flat operations
1189  // will access scratch. If we had access to the IR function, then we
1190  // could determine if any private memory was used in the function.
1191  if (!Subtarget->hasUnalignedScratchAccess() &&
1192  (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1193  AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
1194  bool AlignedBy4 = Align >= 4;
1195  if (IsFast)
1196  *IsFast = AlignedBy4;
1197 
1198  return AlignedBy4;
1199  }
1200 
1201  if (Subtarget->hasUnalignedBufferAccess()) {
1202  // If we have a uniform constant load, it still requires using a slow
1203  // buffer instruction if unaligned.
1204  if (IsFast) {
1205  *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1206  AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
1207  (Align % 4 == 0) : true;
1208  }
1209 
1210  return true;
1211  }
1212 
1213  // Values smaller than a dword must be aligned.
1214  if (VT.bitsLT(MVT::i32))
1215  return false;
1216 
1217  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1218  // byte-address are ignored, thus forcing Dword alignment.
1219  // This applies to private, global, and constant memory.
1220  if (IsFast)
1221  *IsFast = true;
1222 
1223  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
1224 }
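// For example, a 4-byte-aligned 64-bit LDS access is reported as fast since it
// can be done with ds_read2_b32 / ds_write2_b32 at adjacent offsets, while a
// 2-byte-aligned i32 access to scratch is rejected unless the subtarget
// supports unaligned scratch access.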
1225 
1227  uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
1228  bool ZeroMemset, bool MemcpyStrSrc,
1229  const AttributeList &FuncAttributes) const {
1230  // FIXME: Should account for address space here.
1231 
1232  // The default fallback uses the private pointer size as a guess for a type to
1233  // use. Make sure we switch these to 64-bit accesses.
1234 
1235  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1236  return MVT::v4i32;
1237 
1238  if (Size >= 8 && DstAlign >= 4)
1239  return MVT::v2i32;
1240 
1241  // Use the default.
1242  return MVT::Other;
1243 }
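// e.g. a memcpy of 64 bytes to a 4-byte-aligned destination is lowered with
// v4i32 (16-byte) accesses, an 8-byte copy uses v2i32, and smaller or less
// aligned copies fall back to the generic choice via MVT::Other.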
1244 
1245 static bool isFlatGlobalAddrSpace(unsigned AS) {
1246  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
1247  AS == AMDGPUAS::FLAT_ADDRESS ||
1250 }
1251 
1253  unsigned DestAS) const {
1254  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
1255 }
1256 
1258  const MemSDNode *MemNode = cast<MemSDNode>(N);
1259  const Value *Ptr = MemNode->getMemOperand()->getValue();
1260  const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
1261  return I && I->getMetadata("amdgpu.noclobber");
1262 }
1263 
1265  unsigned DestAS) const {
1266  // Flat -> private/local is a simple truncate.
1267  // Flat -> global is no-op
1268  if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
1269  return true;
1270 
1271  return isNoopAddrSpaceCast(SrcAS, DestAS);
1272 }
1273 
1275  const MemSDNode *MemNode = cast<MemSDNode>(N);
1276 
1277  return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
1278 }
1279 
1282  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1283  return TypeSplitVector;
1284 
1286 }
1287 
1289  Type *Ty) const {
1290  // FIXME: Could be smarter if called for vector constants.
1291  return true;
1292 }
1293 
1295  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1296  switch (Op) {
1297  case ISD::LOAD:
1298  case ISD::STORE:
1299 
1300  // These operations are done with 32-bit instructions anyway.
1301  case ISD::AND:
1302  case ISD::OR:
1303  case ISD::XOR:
1304  case ISD::SELECT:
1305  // TODO: Extensions?
1306  return true;
1307  default:
1308  return false;
1309  }
1310  }
1311 
1312  // SimplifySetCC uses this function to determine whether or not it should
1313  // create setcc with i1 operands. We don't have instructions for i1 setcc.
1314  if (VT == MVT::i1 && Op == ISD::SETCC)
1315  return false;
1316 
1317  return TargetLowering::isTypeDesirableForOp(Op, VT);
1318 }
1319 
1320 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1321  const SDLoc &SL,
1322  SDValue Chain,
1323  uint64_t Offset) const {
1324  const DataLayout &DL = DAG.getDataLayout();
1325  MachineFunction &MF = DAG.getMachineFunction();
1327 
1328  const ArgDescriptor *InputPtrReg;
1329  const TargetRegisterClass *RC;
1330 
1331  std::tie(InputPtrReg, RC)
1333 
1336  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1337  MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1338 
1339  return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
1340 }
1341 
1342 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1343  const SDLoc &SL) const {
1344  uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1345  FIRST_IMPLICIT);
1346  return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1347 }
1348 
1349 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1350  const SDLoc &SL, SDValue Val,
1351  bool Signed,
1352  const ISD::InputArg *Arg) const {
1353  // First, if it is a widened vector, narrow it.
1354  if (VT.isVector() &&
1355  VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1356  EVT NarrowedVT =
1358  VT.getVectorNumElements());
1359  Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1360  DAG.getConstant(0, SL, MVT::i32));
1361  }
1362 
1363  // Then convert the vector elements or scalar value.
1364  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1365  VT.bitsLT(MemVT)) {
1366  unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1367  Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1368  }
1369 
1370  if (MemVT.isFloatingPoint())
1371  Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
1372  else if (Signed)
1373  Val = DAG.getSExtOrTrunc(Val, SL, VT);
1374  else
1375  Val = DAG.getZExtOrTrunc(Val, SL, VT);
1376 
1377  return Val;
1378 }
1379 
1380 SDValue SITargetLowering::lowerKernargMemParameter(
1381  SelectionDAG &DAG, EVT VT, EVT MemVT,
1382  const SDLoc &SL, SDValue Chain,
1383  uint64_t Offset, unsigned Align, bool Signed,
1384  const ISD::InputArg *Arg) const {
1385  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1387  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1388 
1389  // Try to avoid using an extload by loading earlier than the argument address,
1390  // and extracting the relevant bits. The load should hopefully be merged with
1391  // the previous argument.
1392  if (MemVT.getStoreSize() < 4 && Align < 4) {
1393  // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
1394  int64_t AlignDownOffset = alignDown(Offset, 4);
1395  int64_t OffsetDiff = Offset - AlignDownOffset;
1396 
1397  EVT IntVT = MemVT.changeTypeToInteger();
1398 
1399  // TODO: If we passed in the base kernel offset we could have a better
1400  // alignment than 4, but we don't really need it.
1401  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1402  SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1405 
1406  SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1407  SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1408 
1409  SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1410  ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1411  ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1412 
1413 
1414  return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1415  }
1416 
1417  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1418  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
1421 
1422  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1423  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1424 }
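// Worked example: an i16 kernel argument at byte offset 6 has
// MemVT.getStoreSize() == 2 and Align == 2, so it takes the small-type path
// above with AlignDownOffset = 4 and OffsetDiff = 2: a full dword is loaded
// from offset 4, shifted right by 16 and truncated, letting the load merge
// with whatever argument lives at offset 4.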
1425 
1426 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1427  const SDLoc &SL, SDValue Chain,
1428  const ISD::InputArg &Arg) const {
1429  MachineFunction &MF = DAG.getMachineFunction();
1430  MachineFrameInfo &MFI = MF.getFrameInfo();
1431 
1432  if (Arg.Flags.isByVal()) {
1433  unsigned Size = Arg.Flags.getByValSize();
1434  int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1435  return DAG.getFrameIndex(FrameIdx, MVT::i32);
1436  }
1437 
1438  unsigned ArgOffset = VA.getLocMemOffset();
1439  unsigned ArgSize = VA.getValVT().getStoreSize();
1440 
1441  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1442 
1443  // Create load nodes to retrieve arguments from the stack.
1444  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1445  SDValue ArgValue;
1446 
1447  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1449  MVT MemVT = VA.getValVT();
1450 
1451  switch (VA.getLocInfo()) {
1452  default:
1453  break;
1454  case CCValAssign::BCvt:
1455  MemVT = VA.getLocVT();
1456  break;
1457  case CCValAssign::SExt:
1458  ExtType = ISD::SEXTLOAD;
1459  break;
1460  case CCValAssign::ZExt:
1461  ExtType = ISD::ZEXTLOAD;
1462  break;
1463  case CCValAssign::AExt:
1464  ExtType = ISD::EXTLOAD;
1465  break;
1466  }
1467 
1468  ArgValue = DAG.getExtLoad(
1469  ExtType, SL, VA.getLocVT(), Chain, FIN,
1471  MemVT);
1472  return ArgValue;
1473 }
1474 
1475 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1476  const SIMachineFunctionInfo &MFI,
1477  EVT VT,
1479  const ArgDescriptor *Reg;
1480  const TargetRegisterClass *RC;
1481 
1482  std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1483  return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1484 }
1485 
1487  CallingConv::ID CallConv,
1489  BitVector &Skipped,
1490  FunctionType *FType,
1492  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1493  const ISD::InputArg *Arg = &Ins[I];
1494 
1495  assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1496  "vector type argument should have been split");
1497 
1498  // First check if it's a PS input addr.
1499  if (CallConv == CallingConv::AMDGPU_PS &&
1500  !Arg->Flags.isInReg() && !Arg->Flags.isByVal() && PSInputNum <= 15) {
1501 
1502  bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1503 
1504  // Inconveniently only the first part of the split is marked as isSplit,
1505  // so skip to the end. We only want to increment PSInputNum once for the
1506  // entire split argument.
1507  if (Arg->Flags.isSplit()) {
1508  while (!Arg->Flags.isSplitEnd()) {
1509  assert(!Arg->VT.isVector() &&
1510  "unexpected vector split in ps argument type");
1511  if (!SkipArg)
1512  Splits.push_back(*Arg);
1513  Arg = &Ins[++I];
1514  }
1515  }
1516 
1517  if (SkipArg) {
1518  // We can safely skip PS inputs.
1519  Skipped.set(Arg->getOrigArgIndex());
1520  ++PSInputNum;
1521  continue;
1522  }
1523 
1524  Info->markPSInputAllocated(PSInputNum);
1525  if (Arg->Used)
1526  Info->markPSInputEnabled(PSInputNum);
1527 
1528  ++PSInputNum;
1529  }
1530 
1531  Splits.push_back(*Arg);
1532  }
1533 }
1534 
1535 // Allocate special inputs passed in VGPRs.
1537  MachineFunction &MF,
1538  const SIRegisterInfo &TRI,
1540  if (Info.hasWorkItemIDX()) {
1541  unsigned Reg = AMDGPU::VGPR0;
1542  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1543 
1544  CCInfo.AllocateReg(Reg);
1546  }
1547 
1548  if (Info.hasWorkItemIDY()) {
1549  unsigned Reg = AMDGPU::VGPR1;
1550  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1551 
1552  CCInfo.AllocateReg(Reg);
1554  }
1555 
1556  if (Info.hasWorkItemIDZ()) {
1557  unsigned Reg = AMDGPU::VGPR2;
1558  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1559 
1560  CCInfo.AllocateReg(Reg);
1562  }
1563 }
1564 
1565 // Try to allocate a VGPR at the end of the argument list, or if no argument
1566 // VGPRs are left, allocate a stack slot.
1568  ArrayRef<MCPhysReg> ArgVGPRs
1569  = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1570  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1571  if (RegIdx == ArgVGPRs.size()) {
1572  // Spill to stack required.
1573  int64_t Offset = CCInfo.AllocateStack(4, 4);
1574 
1575  return ArgDescriptor::createStack(Offset);
1576  }
1577 
1578  unsigned Reg = ArgVGPRs[RegIdx];
1579  Reg = CCInfo.AllocateReg(Reg);
1580  assert(Reg != AMDGPU::NoRegister);
1581 
1582  MachineFunction &MF = CCInfo.getMachineFunction();
1583  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1584  return ArgDescriptor::createRegister(Reg);
1585 }
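// e.g. if formal arguments already occupy VGPR0..VGPR31, the next workitem ID
// input is given a 4-byte, 4-aligned stack slot instead of a register.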
1586 
1588  const TargetRegisterClass *RC,
1589  unsigned NumArgRegs) {
1590  ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), NumArgRegs);
1591  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1592  if (RegIdx == ArgSGPRs.size())
1593  report_fatal_error("ran out of SGPRs for arguments");
1594 
1595  unsigned Reg = ArgSGPRs[RegIdx];
1596  Reg = CCInfo.AllocateReg(Reg);
1597  assert(Reg != AMDGPU::NoRegister);
1598 
1599  MachineFunction &MF = CCInfo.getMachineFunction();
1600  MF.addLiveIn(Reg, RC);
1601  return ArgDescriptor::createRegister(Reg);
1602 }
1603 
1605  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1606 }
1607 
1609  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1610 }
1611 
1613  MachineFunction &MF,
1614  const SIRegisterInfo &TRI,
1616  if (Info.hasWorkItemIDX())
1617  Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
1618 
1619  if (Info.hasWorkItemIDY())
1620  Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
1621 
1622  if (Info.hasWorkItemIDZ())
1623  Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1624 }
1625 
1627  MachineFunction &MF,
1628  const SIRegisterInfo &TRI,
1630  auto &ArgInfo = Info.getArgInfo();
1631 
1632  // TODO: Unify handling with private memory pointers.
1633 
1634  if (Info.hasDispatchPtr())
1635  ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1636 
1637  if (Info.hasQueuePtr())
1638  ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1639 
1640  if (Info.hasKernargSegmentPtr())
1641  ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1642 
1643  if (Info.hasDispatchID())
1644  ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1645 
1646  // flat_scratch_init is not applicable for non-kernel functions.
1647 
1648  if (Info.hasWorkGroupIDX())
1649  ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1650 
1651  if (Info.hasWorkGroupIDY())
1652  ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1653 
1654  if (Info.hasWorkGroupIDZ())
1655  ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1656 
1657  if (Info.hasImplicitArgPtr())
1658  ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1659 }
1660 
1661 // Allocate special inputs passed in user SGPRs.
1662 static void allocateHSAUserSGPRs(CCState &CCInfo,
1663  MachineFunction &MF,
1664  const SIRegisterInfo &TRI,
1666  if (Info.hasImplicitBufferPtr()) {
1667  unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1668  MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1669  CCInfo.AllocateReg(ImplicitBufferPtrReg);
1670  }
1671 
1672  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1673  if (Info.hasPrivateSegmentBuffer()) {
1674  unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1675  MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1676  CCInfo.AllocateReg(PrivateSegmentBufferReg);
1677  }
1678 
1679  if (Info.hasDispatchPtr()) {
1680  unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1681  MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1682  CCInfo.AllocateReg(DispatchPtrReg);
1683  }
1684 
1685  if (Info.hasQueuePtr()) {
1686  unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1687  MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1688  CCInfo.AllocateReg(QueuePtrReg);
1689  }
1690 
1691  if (Info.hasKernargSegmentPtr()) {
1692  unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1693  MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1694  CCInfo.AllocateReg(InputPtrReg);
1695  }
1696 
1697  if (Info.hasDispatchID()) {
1698  unsigned DispatchIDReg = Info.addDispatchID(TRI);
1699  MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1700  CCInfo.AllocateReg(DispatchIDReg);
1701  }
1702 
1703  if (Info.hasFlatScratchInit()) {
1704  unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1705  MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1706  CCInfo.AllocateReg(FlatScratchInitReg);
1707  }
1708 
1709  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1710  // these from the dispatch pointer.
1711 }
1712 
1713 // Allocate special input registers that are initialized per-wave.
1714 static void allocateSystemSGPRs(CCState &CCInfo,
1715  MachineFunction &MF,
1717  CallingConv::ID CallConv,
1718  bool IsShader) {
1719  if (Info.hasWorkGroupIDX()) {
1720  unsigned Reg = Info.addWorkGroupIDX();
1721  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1722  CCInfo.AllocateReg(Reg);
1723  }
1724 
1725  if (Info.hasWorkGroupIDY()) {
1726  unsigned Reg = Info.addWorkGroupIDY();
1727  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1728  CCInfo.AllocateReg(Reg);
1729  }
1730 
1731  if (Info.hasWorkGroupIDZ()) {
1732  unsigned Reg = Info.addWorkGroupIDZ();
1733  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1734  CCInfo.AllocateReg(Reg);
1735  }
1736 
1737  if (Info.hasWorkGroupInfo()) {
1738  unsigned Reg = Info.addWorkGroupInfo();
1739  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1740  CCInfo.AllocateReg(Reg);
1741  }
1742 
1743  if (Info.hasPrivateSegmentWaveByteOffset()) {
1744  // Scratch wave offset passed in system SGPR.
1745  unsigned PrivateSegmentWaveByteOffsetReg;
1746 
1747  if (IsShader) {
1748  PrivateSegmentWaveByteOffsetReg =
1750 
1751  // This is true if the scratch wave byte offset doesn't have a fixed
1752  // location.
1753  if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1754  PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1755  Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1756  }
1757  } else
1758  PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1759 
1760  MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1761  CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1762  }
1763 }
1764 
1766  MachineFunction &MF,
1767  const SIRegisterInfo &TRI,
1769  // Now that we've figured out where the scratch register inputs are, see if
1770  // we should reserve the arguments and use them directly.
1771  MachineFrameInfo &MFI = MF.getFrameInfo();
1772  bool HasStackObjects = MFI.hasStackObjects();
1773 
1774  // Record that we know we have non-spill stack objects so we don't need to
1775  // check all stack objects later.
1776  if (HasStackObjects)
1777  Info.setHasNonSpillStackObjects(true);
1778 
1779  // Everything live out of a block is spilled with fast regalloc, so it's
1780  // almost certain that spilling will be required.
1781  if (TM.getOptLevel() == CodeGenOpt::None)
1782  HasStackObjects = true;
1783 
1784  // For now assume stack access is needed in any callee function, so we need
1785  // to pass in the scratch registers.
1786  bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1787 
1788  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1789  if (ST.isAmdHsaOrMesa(MF.getFunction())) {
1790  if (RequiresStackAccess) {
1791  // If we have stack objects, we unquestionably need the private buffer
1792  // resource. For the Code Object V2 ABI, this will be the first 4 user
1793  // SGPR inputs. We can reserve those and use them directly.
1794 
1795  unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
1797  Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1798 
1799  if (MFI.hasCalls()) {
1800  // If we have calls, we need to keep the frame register in a register
1801  // that won't be clobbered by a call, so ensure it is copied somewhere.
1802 
1803  // This is not a problem for the scratch wave offset, because the same
1804  // registers are reserved in all functions.
1805 
1806  // FIXME: Nothing is really ensuring this is a call preserved register,
1807  // it's just selected from the end so it happens to be.
1808  unsigned ReservedOffsetReg
1810  Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1811  } else {
1812  unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
1814  Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
1815  }
1816  } else {
1817  unsigned ReservedBufferReg
1819  unsigned ReservedOffsetReg
1821 
1822  // We tentatively reserve the last registers (skipping the last two
1823  // which may contain VCC). After register allocation, we'll replace
1824  // these with the ones immediately after those which were really
1825  // allocated. In the prologue copies will be inserted from the argument
1826  // to these reserved registers.
1827  Info.setScratchRSrcReg(ReservedBufferReg);
1828  Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1829  }
1830  } else {
1831  unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
1832 
1833  // Without HSA, relocations are used for the scratch pointer and the
1834  // buffer resource setup is always inserted in the prologue. Scratch wave
1835  // offset is still in an input SGPR.
1836  Info.setScratchRSrcReg(ReservedBufferReg);
1837 
1838  if (HasStackObjects && !MFI.hasCalls()) {
1839  unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
1841  Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
1842  } else {
1843  unsigned ReservedOffsetReg
1845  Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1846  }
1847  }
1848 }
1849 
1852  return !Info->isEntryFunction();
1853 }
1854 
1856 
1857 }
1858 
1861  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1863 
1864  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1865  if (!IStart)
1866  return;
1867 
1868  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1869  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1870  MachineBasicBlock::iterator MBBI = Entry->begin();
1871  for (const MCPhysReg *I = IStart; *I; ++I) {
1872  const TargetRegisterClass *RC = nullptr;
1873  if (AMDGPU::SReg_64RegClass.contains(*I))
1874  RC = &AMDGPU::SGPR_64RegClass;
1875  else if (AMDGPU::SReg_32RegClass.contains(*I))
1876  RC = &AMDGPU::SGPR_32RegClass;
1877  else
1878  llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1879 
1880  unsigned NewVR = MRI->createVirtualRegister(RC);
1881  // Create copy from CSR to a virtual register.
1882  Entry->addLiveIn(*I);
1883  BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1884  .addReg(*I);
1885 
1886  // Insert the copy-back instructions right before the terminator.
1887  for (auto *Exit : Exits)
1888  BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1889  TII->get(TargetOpcode::COPY), *I)
1890  .addReg(NewVR);
1891  }
1892 }
1893 
1895  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1896  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1897  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1899 
1900  MachineFunction &MF = DAG.getMachineFunction();
1901  const Function &Fn = MF.getFunction();
1902  FunctionType *FType = MF.getFunction().getFunctionType();
1904 
1905  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
1906  DiagnosticInfoUnsupported NoGraphicsHSA(
1907  Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
1908  DAG.getContext()->diagnose(NoGraphicsHSA);
1909  return DAG.getEntryNode();
1910  }
1911 
1914  BitVector Skipped(Ins.size());
1915  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1916  *DAG.getContext());
1917 
1918  bool IsShader = AMDGPU::isShader(CallConv);
1919  bool IsKernel = AMDGPU::isKernel(CallConv);
1920  bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
1921 
1922  if (!IsEntryFunc) {
1923  // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1924  // this when allocating argument fixed offsets.
1925  CCInfo.AllocateStack(4, 4);
1926  }
1927 
1928  if (IsShader) {
1929  processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1930 
1931  // At least one interpolation mode must be enabled or else the GPU will
1932  // hang.
1933  //
1934  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1935  // set PSInputAddr, the user wants to enable some bits after the compilation
1936  // based on run-time states. Since we can't know what the final PSInputEna
1937  // will look like, so we shouldn't do anything here and the user should take
1938  // responsibility for the correct programming.
1939  //
1940  // Otherwise, the following restrictions apply:
1941  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1942  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1943  // enabled too.
1944  if (CallConv == CallingConv::AMDGPU_PS) {
1945  if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1946  ((Info->getPSInputAddr() & 0xF) == 0 &&
1947  Info->isPSInputAllocated(11))) {
1948  CCInfo.AllocateReg(AMDGPU::VGPR0);
1949  CCInfo.AllocateReg(AMDGPU::VGPR1);
1950  Info->markPSInputAllocated(0);
1951  Info->markPSInputEnabled(0);
1952  }
1953  if (Subtarget->isAmdPalOS()) {
1954  // For isAmdPalOS, the user does not enable some bits after compilation
1955  // based on run-time states; the register values being generated here are
1956  // the final ones set in hardware. Therefore we need to apply the
1957  // workaround to PSInputAddr and PSInputEnable together. (The case where
1958  // a bit is set in PSInputAddr but not PSInputEnable is where the
1959  // frontend set up an input arg for a particular interpolation mode, but
1960  // nothing uses that input arg. Really we should have an earlier pass
1961  // that removes such an arg.)
1962  unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1963  if ((PsInputBits & 0x7F) == 0 ||
1964  ((PsInputBits & 0xF) == 0 &&
1965  (PsInputBits >> 11 & 1)))
1966  Info->markPSInputEnabled(
1968  }
1969  }
1970 
1971  assert(!Info->hasDispatchPtr() &&
1972  !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1973  !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1974  !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1975  !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1976  !Info->hasWorkItemIDZ());
1977  } else if (IsKernel) {
1978  assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
1979  } else {
1980  Splits.append(Ins.begin(), Ins.end());
1981  }
1982 
1983  if (IsEntryFunc) {
1984  allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
1985  allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
1986  }
1987 
1988  if (IsKernel) {
1989  analyzeFormalArgumentsCompute(CCInfo, Ins);
1990  } else {
1991  CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
1992  CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
1993  }
1994 
1995  SmallVector<SDValue, 16> Chains;
1996 
1997  // FIXME: This is the minimum kernel argument alignment. We should improve
1998  // this to the maximum alignment of the arguments.
1999  //
2000  // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
2001  // kern arg offset.
2002  const unsigned KernelArgBaseAlign = 16;
2003 
2004  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
2005  const ISD::InputArg &Arg = Ins[i];
2006  if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
2007  InVals.push_back(DAG.getUNDEF(Arg.VT));
2008  continue;
2009  }
2010 
2011  CCValAssign &VA = ArgLocs[ArgIdx++];
2012  MVT VT = VA.getLocVT();
2013 
2014  if (IsEntryFunc && VA.isMemLoc()) {
2015  VT = Ins[i].VT;
2016  EVT MemVT = VA.getLocVT();
2017 
2018  const uint64_t Offset = VA.getLocMemOffset();
2019  unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
2020 
2021  SDValue Arg = lowerKernargMemParameter(
2022  DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
2023  Chains.push_back(Arg.getValue(1));
2024 
2025  auto *ParamTy =
2026  dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
2027  if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
2028  ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2029  ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
2030  // On SI local pointers are just offsets into LDS, so they are always
2031  // less than 16 bits. On CI and newer they could potentially be
2032  // real pointers, so we can't guarantee their size.
2033  Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2034  DAG.getValueType(MVT::i16));
2035  }
2036 
2037  InVals.push_back(Arg);
2038  continue;
2039  } else if (!IsEntryFunc && VA.isMemLoc()) {
2040  SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2041  InVals.push_back(Val);
2042  if (!Arg.Flags.isByVal())
2043  Chains.push_back(Val.getValue(1));
2044  continue;
2045  }
2046 
2047  assert(VA.isRegLoc() && "Parameter must be in a register!");
2048 
2049  unsigned Reg = VA.getLocReg();
2050  const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2051  EVT ValVT = VA.getValVT();
2052 
2053  Reg = MF.addLiveIn(Reg, RC);
2054  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2055 
2056  if (Arg.Flags.isSRet()) {
2057  // The return object should be reasonably addressable.
2058 
2059  // FIXME: This helps when the return is a real sret. If it is an
2060  // automatically inserted sret (i.e. CanLowerReturn returns false), an
2061  // extra copy is inserted in SelectionDAGBuilder which obscures this.
2062  unsigned NumBits
2064  Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2065  DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2066  }
2067 
2068  // If this is an 8 or 16-bit value, it is really passed promoted
2069  // to 32 bits. Insert an assert[sz]ext to capture this, then
2070  // truncate to the right size.
2071  switch (VA.getLocInfo()) {
2072  case CCValAssign::Full:
2073  break;
2074  case CCValAssign::BCvt:
2075  Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2076  break;
2077  case CCValAssign::SExt:
2078  Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2079  DAG.getValueType(ValVT));
2080  Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2081  break;
2082  case CCValAssign::ZExt:
2083  Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2084  DAG.getValueType(ValVT));
2085  Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2086  break;
2087  case CCValAssign::AExt:
2088  Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2089  break;
2090  default:
2091  llvm_unreachable("Unknown loc info!");
2092  }
2093 
2094  InVals.push_back(Val);
2095  }
2096 
2097  if (!IsEntryFunc) {
2098  // Special inputs come after user arguments.
2099  allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2100  }
2101 
2102  // Start adding system SGPRs.
2103  if (IsEntryFunc) {
2104  allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
2105  } else {
2106  CCInfo.AllocateReg(Info->getScratchRSrcReg());
2107  CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2108  CCInfo.AllocateReg(Info->getFrameOffsetReg());
2109  allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
2110  }
2111 
2112  auto &ArgUsageInfo =
2114  ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
2115 
2116  unsigned StackArgSize = CCInfo.getNextStackOffset();
2117  Info->setBytesInStackArgArea(StackArgSize);
2118 
2119  return Chains.empty() ? Chain :
2120  DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2121 }
2122 
2123 // TODO: If return values can't fit in registers, we should return as many as
2124 // possible in registers before passing on stack.
2126  CallingConv::ID CallConv,
2127  MachineFunction &MF, bool IsVarArg,
2128  const SmallVectorImpl<ISD::OutputArg> &Outs,
2129  LLVMContext &Context) const {
2130  // Replacing returns with sret/stack usage doesn't make sense for shaders.
2131  // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2132  // for shaders. Vector types should be explicitly handled by CC.
2133  if (AMDGPU::isEntryFunctionCC(CallConv))
2134  return true;
2135 
2137  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2138  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2139 }
2140 
2141 SDValue
2143  bool isVarArg,
2144  const SmallVectorImpl<ISD::OutputArg> &Outs,
2145  const SmallVectorImpl<SDValue> &OutVals,
2146  const SDLoc &DL, SelectionDAG &DAG) const {
2147  MachineFunction &MF = DAG.getMachineFunction();
2149 
2150  if (AMDGPU::isKernel(CallConv)) {
2151  return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2152  OutVals, DL, DAG);
2153  }
2154 
2155  bool IsShader = AMDGPU::isShader(CallConv);
2156 
2157  Info->setIfReturnsVoid(Outs.empty());
2158  bool IsWaveEnd = Info->returnsVoid() && IsShader;
2159 
2160  // CCValAssign - represent the assignment of the return value to a location.
2163 
2164  // CCState - Info about the registers and stack slots.
2165  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2166  *DAG.getContext());
2167 
2168  // Analyze outgoing return values.
2169  CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2170 
2171  SDValue Flag;
2172  SmallVector<SDValue, 48> RetOps;
2173  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2174 
2175  // Add return address for callable functions.
2176  if (!Info->isEntryFunction()) {
2178  SDValue ReturnAddrReg = CreateLiveInRegister(
2179  DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2180 
2181  // FIXME: Should be able to use a vreg here, but need a way to prevent it
2182  // from being allocated to a CSR.
2183 
2184  SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2185  MVT::i64);
2186 
2187  Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2188  Flag = Chain.getValue(1);
2189 
2190  RetOps.push_back(PhysReturnAddrReg);
2191  }
2192 
2193  // Copy the result values into the output registers.
2194  for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2195  ++I, ++RealRVLocIdx) {
2196  CCValAssign &VA = RVLocs[I];
2197  assert(VA.isRegLoc() && "Can only return in registers!");
2198  // TODO: Partially return in registers if return values don't fit.
2199  SDValue Arg = OutVals[RealRVLocIdx];
2200 
2201  // Copied from other backends.
2202  switch (VA.getLocInfo()) {
2203  case CCValAssign::Full:
2204  break;
2205  case CCValAssign::BCvt:
2206  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2207  break;
2208  case CCValAssign::SExt:
2209  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2210  break;
2211  case CCValAssign::ZExt:
2212  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2213  break;
2214  case CCValAssign::AExt:
2215  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2216  break;
2217  default:
2218  llvm_unreachable("Unknown loc info!");
2219  }
2220 
2221  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2222  Flag = Chain.getValue(1);
2223  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2224  }
2225 
2226  // FIXME: Does sret work properly?
2227  if (!Info->isEntryFunction()) {
2228  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2229  const MCPhysReg *I =
2230  TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2231  if (I) {
2232  for (; *I; ++I) {
2233  if (AMDGPU::SReg_64RegClass.contains(*I))
2234  RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2235  else if (AMDGPU::SReg_32RegClass.contains(*I))
2236  RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2237  else
2238  llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2239  }
2240  }
2241  }
2242 
2243  // Update chain and glue.
2244  RetOps[0] = Chain;
2245  if (Flag.getNode())
2246  RetOps.push_back(Flag);
2247 
2248  unsigned Opc = AMDGPUISD::ENDPGM;
2249  if (!IsWaveEnd)
2250  Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2251  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2252 }
2253 
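// Copy the return values of a call out of the physical registers assigned by
// the calling convention, undo any promotion (bitcast/sext/zext) that was
// applied, and append the results to InVals.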
2254 SDValue SITargetLowering::LowerCallResult(
2255  SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2256  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2257  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2258  SDValue ThisVal) const {
2259  CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2260 
2261  // Assign locations to each value returned by this call.
2262  SmallVector<CCValAssign, 16> RVLocs;
2263  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2264  *DAG.getContext());
2265  CCInfo.AnalyzeCallResult(Ins, RetCC);
2266 
2267  // Copy all of the result registers out of their specified physreg.
2268  for (unsigned i = 0; i != RVLocs.size(); ++i) {
2269  CCValAssign VA = RVLocs[i];
2270  SDValue Val;
2271 
2272  if (VA.isRegLoc()) {
2273  Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2274  Chain = Val.getValue(1);
2275  InFlag = Val.getValue(2);
2276  } else if (VA.isMemLoc()) {
2277  report_fatal_error("TODO: return values in memory");
2278  } else
2279  llvm_unreachable("unknown argument location type");
2280 
2281  switch (VA.getLocInfo()) {
2282  case CCValAssign::Full:
2283  break;
2284  case CCValAssign::BCvt:
2285  Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2286  break;
2287  case CCValAssign::ZExt:
2288  Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2289  DAG.getValueType(VA.getValVT()));
2290  Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2291  break;
2292  case CCValAssign::SExt:
2293  Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2294  DAG.getValueType(VA.getValVT()));
2295  Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2296  break;
2297  case CCValAssign::AExt:
2298  Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2299  break;
2300  default:
2301  llvm_unreachable("Unknown loc info!");
2302  }
2303 
2304  InVals.push_back(Val);
2305  }
2306 
2307  return Chain;
2308 }
2309 
2310 // Add code to pass special inputs required depending on used features separate
2311 // from the explicit user arguments present in the IR.
2312 void SITargetLowering::passSpecialInputs(
2313  CallLoweringInfo &CLI,
2314  CCState &CCInfo,
2315  const SIMachineFunctionInfo &Info,
2316  SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2317  SmallVectorImpl<SDValue> &MemOpChains,
2318  SDValue Chain) const {
2319  // If we don't have a call site, this was a call inserted by
2320  // legalization. These can never use special inputs.
2321  if (!CLI.CS)
2322  return;
2323 
2324  const Function *CalleeFunc = CLI.CS.getCalledFunction();
2325  assert(CalleeFunc);
2326 
2327  SelectionDAG &DAG = CLI.DAG;
2328  const SDLoc &DL = CLI.DL;
2329 
2330  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2331 
2332  auto &ArgUsageInfo =
2333  DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2334  const AMDGPUFunctionArgInfo &CalleeArgInfo
2335  = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2336 
2337  const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2338 
2339  // TODO: Unify with private memory register handling. This is complicated by
2340  // the fact that at least in kernels, the input argument is not necessarily
2341  // in the same location as the input.
2342  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2343  AMDGPUFunctionArgInfo::DISPATCH_PTR,
2344  AMDGPUFunctionArgInfo::QUEUE_PTR,
2345  AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2346  AMDGPUFunctionArgInfo::DISPATCH_ID,
2347  AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2348  AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2349  AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2350  AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
2354  };
2355 
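  // For each special input the callee needs, forward the caller's copy (or
  // recompute it from the kernarg segment pointer in the case of the implicit
  // arg pointer), either in the callee's expected register or in a stack slot.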
2356  for (auto InputID : InputRegs) {
2357  const ArgDescriptor *OutgoingArg;
2358  const TargetRegisterClass *ArgRC;
2359 
2360  std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2361  if (!OutgoingArg)
2362  continue;
2363 
2364  const ArgDescriptor *IncomingArg;
2365  const TargetRegisterClass *IncomingArgRC;
2366  std::tie(IncomingArg, IncomingArgRC)
2367  = CallerArgInfo.getPreloadedValue(InputID);
2368  assert(IncomingArgRC == ArgRC);
2369 
2370  // All special arguments are ints for now.
2371  EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
2372  SDValue InputReg;
2373 
2374  if (IncomingArg) {
2375  InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2376  } else {
2377  // The implicit arg ptr is special because it doesn't have a corresponding
2378  // input for kernels, and is computed from the kernarg segment pointer.
2379  assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2380  InputReg = getImplicitArgPtr(DAG, DL);
2381  }
2382 
2383  if (OutgoingArg->isRegister()) {
2384  RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2385  } else {
2386  unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2387  SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2388  SpecialArgOffset);
2389  MemOpChains.push_back(ArgStore);
2390  }
2391  }
2392 }
2393 
2394 static bool canGuaranteeTCO(CallingConv::ID CC) {
2395  return CC == CallingConv::Fast;
2396 }
2397 
2398 /// Return true if we might ever do TCO for calls with this calling convention.
2399 static bool mayTailCallThisCC(CallingConv::ID CC) {
2400  switch (CC) {
2401  case CallingConv::C:
2402  return true;
2403  default:
2404  return canGuaranteeTCO(CC);
2405  }
2406 }
2407 
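// A call is eligible for tail-call optimization only if the calling
// conventions are compatible, the caller has no byval arguments, the call is
// not variadic, the callee preserves every register the caller must preserve,
// and the callee's stack arguments fit within the caller's incoming argument
// area.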
2408 bool SITargetLowering::isEligibleForTailCallOptimization(
2409  SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2410  const SmallVectorImpl<ISD::OutputArg> &Outs,
2411  const SmallVectorImpl<SDValue> &OutVals,
2412  const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2413  if (!mayTailCallThisCC(CalleeCC))
2414  return false;
2415 
2416  MachineFunction &MF = DAG.getMachineFunction();
2417  const Function &CallerF = MF.getFunction();
2418  CallingConv::ID CallerCC = CallerF.getCallingConv();
2419  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2420  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2421 
2422  // Kernels aren't callable, and don't have a live-in return address, so it
2423  // doesn't make sense to do a tail call with entry functions.
2424  if (!CallerPreserved)
2425  return false;
2426 
2427  bool CCMatch = CallerCC == CalleeCC;
2428 
2429  if (MF.getTarget().Options.GuaranteedTailCallOpt) {
2430  if (canGuaranteeTCO(CalleeCC) && CCMatch)
2431  return true;
2432  return false;
2433  }
2434 
2435  // TODO: Can we handle var args?
2436  if (IsVarArg)
2437  return false;
2438 
2439  for (const Argument &Arg : CallerF.args()) {
2440  if (Arg.hasByValAttr())
2441  return false;
2442  }
2443 
2444  LLVMContext &Ctx = *DAG.getContext();
2445 
2446  // Check that the call results are passed in the same way.
2447  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2448  CCAssignFnForCall(CalleeCC, IsVarArg),
2449  CCAssignFnForCall(CallerCC, IsVarArg)))
2450  return false;
2451 
2452  // The callee has to preserve all registers the caller needs to preserve.
2453  if (!CCMatch) {
2454  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2455  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2456  return false;
2457  }
2458 
2459  // Nothing more to check if the callee is taking no arguments.
2460  if (Outs.empty())
2461  return true;
2462 
2463  SmallVector<CCValAssign, 16> ArgLocs;
2464  CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2465 
2466  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2467 
2468  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2469  // If the stack arguments for this call do not fit into our own save area then
2470  // the call cannot be made tail.
2471  // TODO: Is this really necessary?
2472  if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2473  return false;
2474 
2475  const MachineRegisterInfo &MRI = MF.getRegInfo();
2476  return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2477 }
2478 
2479 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2480  if (!CI->isTailCall())
2481  return false;
2482 
2483  const Function *ParentFn = CI->getParent()->getParent();
2484  if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2485  return false;
2486 
2487  auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2488  return (Attr.getValueAsString() != "true");
2489 }
2490 
2491 // The wave scratch offset register is used as the global base pointer.
2492 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2493  SmallVectorImpl<SDValue> &InVals) const {
2494  SelectionDAG &DAG = CLI.DAG;
2495  const SDLoc &DL = CLI.DL;
2496  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2497  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2498  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2499  SDValue Chain = CLI.Chain;
2500  SDValue Callee = CLI.Callee;
2501  bool &IsTailCall = CLI.IsTailCall;
2502  CallingConv::ID CallConv = CLI.CallConv;
2503  bool IsVarArg = CLI.IsVarArg;
2504  bool IsSibCall = false;
2505  bool IsThisReturn = false;
2506  MachineFunction &MF = DAG.getMachineFunction();
2507 
2508  if (IsVarArg) {
2509  return lowerUnhandledCall(CLI, InVals,
2510  "unsupported call to variadic function ");
2511  }
2512 
2513  if (!CLI.CS.getInstruction())
2514  report_fatal_error("unsupported libcall legalization");
2515 
2516  if (!CLI.CS.getCalledFunction()) {
2517  return lowerUnhandledCall(CLI, InVals,
2518  "unsupported indirect call to function ");
2519  }
2520 
2521  if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2522  return lowerUnhandledCall(CLI, InVals,
2523  "unsupported required tail call to function ");
2524  }
2525 
2526  if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2527  // Note the issue is with the CC of the calling function, not of the call
2528  // itself.
2529  return lowerUnhandledCall(CLI, InVals,
2530  "unsupported call from graphics shader of function ");
2531  }
2532 
2533  // The first 4 bytes are reserved for the callee's emergency stack slot.
2534  if (IsTailCall) {
2535  IsTailCall = isEligibleForTailCallOptimization(
2536  Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2537  if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2538  report_fatal_error("failed to perform tail call elimination on a call "
2539  "site marked musttail");
2540  }
2541 
2542  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2543 
2544  // A sibling call is one where we're under the usual C ABI and not planning
2545  // to change that but can still do a tail call:
2546  if (!TailCallOpt && IsTailCall)
2547  IsSibCall = true;
2548 
2549  if (IsTailCall)
2550  ++NumTailCalls;
2551  }
2552 
2553  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2554 
2555  // Analyze operands of the call, assigning locations to each operand.
2556  SmallVector<CCValAssign, 16> ArgLocs;
2557  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2558  CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2559 
2560  // The first 4 bytes are reserved for the callee's emergency stack slot.
2561  CCInfo.AllocateStack(4, 4);
2562 
2563  CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2564 
2565  // Get a count of how many bytes are to be pushed on the stack.
2566  unsigned NumBytes = CCInfo.getNextStackOffset();
2567 
2568  if (IsSibCall) {
2569  // Since we're not changing the ABI to make this a tail call, the memory
2570  // operands are already available in the caller's incoming argument space.
2571  NumBytes = 0;
2572  }
2573 
2574  // FPDiff is the byte offset of the call's argument area from the callee's.
2575  // Stores to callee stack arguments will be placed in FixedStackSlots offset
2576  // by this amount for a tail call. In a sibling call it must be 0 because the
2577  // caller will deallocate the entire stack and the callee still expects its
2578  // arguments to begin at SP+0. Completely unused for non-tail calls.
2579  int32_t FPDiff = 0;
2580  MachineFrameInfo &MFI = MF.getFrameInfo();
2581  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2582 
2583  SDValue CallerSavedFP;
2584 
2585  // Adjust the stack pointer for the new arguments...
2586  // These operations are automatically eliminated by the prolog/epilog pass
2587  if (!IsSibCall) {
2588  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2589 
2590  SmallVector<SDValue, 4> CopyFromChains;
2591 
2592  unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2593 
2594  // In the HSA case, this should be an identity copy.
2595  SDValue ScratchRSrcReg
2596  = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2597  RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2598  CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
2599 
2600  // TODO: Don't hardcode these registers and get from the callee function.
2601  SDValue ScratchWaveOffsetReg
2602  = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2603  RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
2604  CopyFromChains.push_back(ScratchWaveOffsetReg.getValue(1));
2605 
2606  if (!Info->isEntryFunction()) {
2607  // Avoid clobbering this function's FP value. In the current convention the
2608  // callee will overwrite this, so do a save/restore around the call site.
2609  CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2610  Info->getFrameOffsetReg(), MVT::i32);
2611  CopyFromChains.push_back(CallerSavedFP.getValue(1));
2612  }
2613 
2614  Chain = DAG.getTokenFactor(DL, CopyFromChains);
2615  }
2616 
2617  SmallVector<SDValue, 8> MemOpChains;
2618  MVT PtrVT = MVT::i32;
2619 
2620  // Walk the register/memloc assignments, inserting copies/loads.
2621  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2622  ++i, ++realArgIdx) {
2623  CCValAssign &VA = ArgLocs[i];
2624  SDValue Arg = OutVals[realArgIdx];
2625 
2626  // Promote the value if needed.
2627  switch (VA.getLocInfo()) {
2628  case CCValAssign::Full:
2629  break;
2630  case CCValAssign::BCvt:
2631  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2632  break;
2633  case CCValAssign::ZExt:
2634  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2635  break;
2636  case CCValAssign::SExt:
2637  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2638  break;
2639  case CCValAssign::AExt:
2640  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2641  break;
2642  case CCValAssign::FPExt:
2643  Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2644  break;
2645  default:
2646  llvm_unreachable("Unknown loc info!");
2647  }
2648 
2649  if (VA.isRegLoc()) {
2650  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2651  } else {
2652  assert(VA.isMemLoc());
2653 
2654  SDValue DstAddr;
2655  MachinePointerInfo DstInfo;
2656 
2657  unsigned LocMemOffset = VA.getLocMemOffset();
2658  int32_t Offset = LocMemOffset;
2659 
2660  SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
2661  unsigned Align = 0;
2662 
2663  if (IsTailCall) {
2664  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2665  unsigned OpSize = Flags.isByVal() ?
2666  Flags.getByValSize() : VA.getValVT().getStoreSize();
2667 
2668  // FIXME: We can have better than the minimum byval required alignment.
2669  Align = Flags.isByVal() ? Flags.getByValAlign() :
2670  MinAlign(Subtarget->getStackAlignment(), Offset);
2671 
2672  Offset = Offset + FPDiff;
2673  int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2674 
2675  DstAddr = DAG.getFrameIndex(FI, PtrVT);
2676  DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2677 
2678  // Make sure any stack arguments overlapping with where we're storing
2679  // are loaded before this eventual operation. Otherwise they'll be
2680  // clobbered.
2681 
2682  // FIXME: Why is this really necessary? This seems to just result in a
2683  // lot of code to copy the stack arguments and write them back to the same
2684  // locations, which are supposed to be immutable?
2685  Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2686  } else {
2687  DstAddr = PtrOff;
2688  DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2689  Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset);
2690  }
2691 
2692  if (Outs[i].Flags.isByVal()) {
2693  SDValue SizeNode =
2694  DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2695  SDValue Cpy = DAG.getMemcpy(
2696  Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2697  /*isVol = */ false, /*AlwaysInline = */ true,
2698  /*isTailCall = */ false, DstInfo,
2699  MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2700  *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS))));
2701 
2702  MemOpChains.push_back(Cpy);
2703  } else {
2704  SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align);
2705  MemOpChains.push_back(Store);
2706  }
2707  }
2708  }
2709 
2710  // Copy special input registers after user input arguments.
2711  passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
2712 
2713  if (!MemOpChains.empty())
2714  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2715 
2716  // Build a sequence of copy-to-reg nodes chained together with token chain
2717  // and flag operands which copy the outgoing args into the appropriate regs.
2718  SDValue InFlag;
2719  for (auto &RegToPass : RegsToPass) {
2720  Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2721  RegToPass.second, InFlag);
2722  InFlag = Chain.getValue(1);
2723  }
2724 
2725 
2726  SDValue PhysReturnAddrReg;
2727  if (IsTailCall) {
2728  // Since the return is being combined with the call, we need to pass on the
2729  // return address.
2730 
2731  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2732  SDValue ReturnAddrReg = CreateLiveInRegister(
2733  DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2734 
2735  PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2736  MVT::i64);
2737  Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2738  InFlag = Chain.getValue(1);
2739  }
2740 
2741  // We don't usually want to end the call-sequence here because we would tidy
2742  // the frame up *after* the call; however, in the ABI-changing tail-call case
2743  // we've carefully laid out the parameters so that when sp is reset they'll be
2744  // in the correct location.
2745  if (IsTailCall && !IsSibCall) {
2746  Chain = DAG.getCALLSEQ_END(Chain,
2747  DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2748  DAG.getTargetConstant(0, DL, MVT::i32),
2749  InFlag, DL);
2750  InFlag = Chain.getValue(1);
2751  }
2752 
2753  std::vector<SDValue> Ops;
2754  Ops.push_back(Chain);
2755  Ops.push_back(Callee);
2756  // Add a redundant copy of the callee global which will not be legalized, as
2757  // we need direct access to the callee later.
2758  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2759  const GlobalValue *GV = GSD->getGlobal();
2760  Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
2761 
2762  if (IsTailCall) {
2763  // Each tail call may have to adjust the stack by a different amount, so
2764  // this information must travel along with the operation for eventual
2765  // consumption by emitEpilogue.
2766  Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2767 
2768  Ops.push_back(PhysReturnAddrReg);
2769  }
2770 
2771  // Add argument registers to the end of the list so that they are known live
2772  // into the call.
2773  for (auto &RegToPass : RegsToPass) {
2774  Ops.push_back(DAG.getRegister(RegToPass.first,
2775  RegToPass.second.getValueType()));
2776  }
2777 
2778  // Add a register mask operand representing the call-preserved registers.
2779 
2780  auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
2781  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2782  assert(Mask && "Missing call preserved mask for calling convention");
2783  Ops.push_back(DAG.getRegisterMask(Mask));
2784 
2785  if (InFlag.getNode())
2786  Ops.push_back(InFlag);
2787 
2788  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2789 
2790  // If we're doing a tail call, use a TC_RETURN here rather than an
2791  // actual call instruction.
2792  if (IsTailCall) {
2793  MFI.setHasTailCall();
2794  return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2795  }
2796 
2797  // Returns a chain and a flag for retval copy to use.
2798  SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2799  Chain = Call.getValue(0);
2800  InFlag = Call.getValue(1);
2801 
2802  if (CallerSavedFP) {
2803  SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2804  Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2805  InFlag = Chain.getValue(1);
2806  }
2807 
2808  uint64_t CalleePopBytes = NumBytes;
2809  Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2810  DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2811  InFlag, DL);
2812  if (!Ins.empty())
2813  InFlag = Chain.getValue(1);
2814 
2815  // Handle result values, copying them out of physregs into vregs that we
2816  // return.
2817  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2818  InVals, IsThisReturn,
2819  IsThisReturn ? OutVals[0] : SDValue());
2820 }
2821 
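// getRegisterByName backs the llvm.read_register / llvm.write_register
// intrinsics; only a small, fixed set of SI physical registers may be
// referenced by name, and the requested type must match the register width.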
2822 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2823  SelectionDAG &DAG) const {
2824  unsigned Reg = StringSwitch<unsigned>(RegName)
2825  .Case("m0", AMDGPU::M0)
2826  .Case("exec", AMDGPU::EXEC)
2827  .Case("exec_lo", AMDGPU::EXEC_LO)
2828  .Case("exec_hi", AMDGPU::EXEC_HI)
2829  .Case("flat_scratch", AMDGPU::FLAT_SCR)
2830  .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2831  .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2832  .Default(AMDGPU::NoRegister);
2833 
2834  if (Reg == AMDGPU::NoRegister) {
2835  report_fatal_error(Twine("invalid register name \""
2836  + StringRef(RegName) + "\"."));
2837 
2838  }
2839 
2840  if ((Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS ||
2841  Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) &&
2842  Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2843  report_fatal_error(Twine("invalid register \""
2844  + StringRef(RegName) + "\" for subtarget."));
2845  }
2846 
2847  switch (Reg) {
2848  case AMDGPU::M0:
2849  case AMDGPU::EXEC_LO:
2850  case AMDGPU::EXEC_HI:
2851  case AMDGPU::FLAT_SCR_LO:
2852  case AMDGPU::FLAT_SCR_HI:
2853  if (VT.getSizeInBits() == 32)
2854  return Reg;
2855  break;
2856  case AMDGPU::EXEC:
2857  case AMDGPU::FLAT_SCR:
2858  if (VT.getSizeInBits() == 64)
2859  return Reg;
2860  break;
2861  default:
2862  llvm_unreachable("missing register type checking");
2863  }
2864 
2865  report_fatal_error(Twine("invalid type for register \""
2866  + StringRef(RegName) + "\"."));
2867 }
2868 
2869 // If kill is not the last instruction, split the block so kill is always a
2870 // proper terminator.
2871 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2872  MachineBasicBlock *BB) const {
2873  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2874 
2875  MachineBasicBlock::iterator SplitPoint(&MI);
2876  ++SplitPoint;
2877 
2878  if (SplitPoint == BB->end()) {
2879  // Don't bother with a new block.
2880  MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2881  return BB;
2882  }
2883 
2884  MachineFunction *MF = BB->getParent();
2885  MachineBasicBlock *SplitBB
2886  = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2887 
2888  MF->insert(++MachineFunction::iterator(BB), SplitBB);
2889  SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2890 
2891  SplitBB->transferSuccessorsAndUpdatePHIs(BB);
2892  BB->addSuccessor(SplitBB);
2893 
2894  MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2895  return SplitBB;
2896 }
2897 
2898 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2899 // wavefront. If the value is uniform and just happens to be in a VGPR, this
2900 // will only do one iteration. In the worst case, this will loop 64 times.
2901 //
2902 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
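// The generated loop looks roughly like this (register names are illustrative,
// not the exact virtual registers created below):
//
// loop:
//   v_readfirstlane_b32 s_idx, v_idx
//   v_cmp_eq_u32_e64    s[c:c+1], s_idx, v_idx
//   s_and_saveexec_b64  s[live:live+1], s[c:c+1]
//   ; set M0 (or the GPR index mode) from s_idx + Offset
//   s_xor_b64           exec, exec, s[live:live+1]
//   s_cbranch_execnz    loop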
2903 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2904  const SIInstrInfo *TII,
2905  MachineRegisterInfo &MRI,
2906  MachineBasicBlock &OrigBB,
2907  MachineBasicBlock &LoopBB,
2908  const DebugLoc &DL,
2909  const MachineOperand &IdxReg,
2910  unsigned InitReg,
2911  unsigned ResultReg,
2912  unsigned PhiReg,
2913  unsigned InitSaveExecReg,
2914  int Offset,
2915  bool UseGPRIdxMode,
2916  bool IsIndirectSrc) {
2917  MachineBasicBlock::iterator I = LoopBB.begin();
2918 
2919  unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2920  unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2921  unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2922  unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2923 
2924  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2925  .addReg(InitReg)
2926  .addMBB(&OrigBB)
2927  .addReg(ResultReg)
2928  .addMBB(&LoopBB);
2929 
2930  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2931  .addReg(InitSaveExecReg)
2932  .addMBB(&OrigBB)
2933  .addReg(NewExec)
2934  .addMBB(&LoopBB);
2935 
2936  // Read the next variant <- also loop target.
2937  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2938  .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2939 
2940  // Compare the just read M0 value to all possible Idx values.
2941  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2942  .addReg(CurrentIdxReg)
2943  .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
2944 
2945  // Update EXEC, save the original EXEC value to VCC.
2946  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2947  .addReg(CondReg, RegState::Kill);
2948 
2949  MRI.setSimpleHint(NewExec, CondReg);
2950 
2951  if (UseGPRIdxMode) {
2952  unsigned IdxReg;
2953  if (Offset == 0) {
2954  IdxReg = CurrentIdxReg;
2955  } else {
2956  IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2957  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2958  .addReg(CurrentIdxReg, RegState::Kill)
2959  .addImm(Offset);
2960  }
2961  unsigned IdxMode = IsIndirectSrc ?
2962  AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
2963  MachineInstr *SetOn =
2964  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2965  .addReg(IdxReg, RegState::Kill)
2966  .addImm(IdxMode);
2967  SetOn->getOperand(3).setIsUndef();
2968  } else {
2969  // Move index from VCC into M0
2970  if (Offset == 0) {
2971  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2972  .addReg(CurrentIdxReg, RegState::Kill);
2973  } else {
2974  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2975  .addReg(CurrentIdxReg, RegState::Kill)
2976  .addImm(Offset);
2977  }
2978  }
2979 
2980  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
2981  MachineInstr *InsertPt =
2982  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
2983  .addReg(AMDGPU::EXEC)
2984  .addReg(NewExec);
2985 
2986  // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2987  // s_cbranch_scc0?
2988 
2989  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2990  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2991  .addMBB(&LoopBB);
2992 
2993  return InsertPt->getIterator();
2994 }
2995 
2996 // This has slightly sub-optimal regalloc when the source vector is killed by
2997 // the read. The register allocator does not understand that the kill is
2998 // per-workitem, so the source is kept alive for the whole loop and we end up
2999 // not re-using a subregister from it, using 1 more VGPR than necessary. This
3000 // extra VGPR was saved when this was expanded after register allocation.
3001 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
3002  MachineBasicBlock &MBB,
3003  MachineInstr &MI,
3004  unsigned InitResultReg,
3005  unsigned PhiReg,
3006  int Offset,
3007  bool UseGPRIdxMode,
3008  bool IsIndirectSrc) {
3009  MachineFunction *MF = MBB.getParent();
3010  MachineRegisterInfo &MRI = MF->getRegInfo();
3011  const DebugLoc &DL = MI.getDebugLoc();
3012  MachineBasicBlock::iterator I(&MI);
3013 
3014  unsigned DstReg = MI.getOperand(0).getReg();
3015  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3016  unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3017 
3018  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3019 
3020  // Save the EXEC mask
3021  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
3022  .addReg(AMDGPU::EXEC);
3023 
3024  // To insert the loop we need to split the block. Move everything after this
3025  // point to a new block, and insert a new empty block between the two.
3026  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3027  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3028  MachineFunction::iterator MBBI(MBB);
3029  ++MBBI;
3030 
3031  MF->insert(MBBI, LoopBB);
3032  MF->insert(MBBI, RemainderBB);
3033 
3034  LoopBB->addSuccessor(LoopBB);
3035  LoopBB->addSuccessor(RemainderBB);
3036 
3037  // Move the rest of the block into a new block.
3038  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
3039  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3040 
3041  MBB.addSuccessor(LoopBB);
3042 
3043  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3044 
3045  auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3046  InitResultReg, DstReg, PhiReg, TmpExec,
3047  Offset, UseGPRIdxMode, IsIndirectSrc);
3048 
3049  MachineBasicBlock::iterator First = RemainderBB->begin();
3050  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
3051  .addReg(SaveExec);
3052 
3053  return InsPt;
3054 }
3055 
3056 // Returns subreg index, offset
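// For example, indexing a 128-bit (4 x 32-bit) vector with a constant offset
// of 2 yields (sub2, 0); an out-of-bounds offset such as 5 is left as a
// dynamic offset from sub0, i.e. (sub0, 5).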
3057 static std::pair<unsigned, int>
3058 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3059  const TargetRegisterClass *SuperRC,
3060  unsigned VecReg,
3061  int Offset) {
3062  int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
3063 
3064  // Skip out of bounds offsets, or else we would end up using an undefined
3065  // register.
3066  if (Offset >= NumElts || Offset < 0)
3067  return std::make_pair(AMDGPU::sub0, Offset);
3068 
3069  return std::make_pair(AMDGPU::sub0 + Offset, 0);
3070 }
3071 
3072 // Return true if the index is an SGPR and was set.
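// If the index is already in an SGPR it can be written (plus any constant
// offset) straight into M0 or the GPR index mode, so no waterfall loop is
// required.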
3073 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3074  MachineRegisterInfo &MRI,
3075  MachineInstr &MI,
3076  int Offset,
3077  bool UseGPRIdxMode,
3078  bool IsIndirectSrc) {
3079  MachineBasicBlock *MBB = MI.getParent();
3080  const DebugLoc &DL = MI.getDebugLoc();
3081  MachineBasicBlock::iterator I(&MI);
3082 
3083  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3084  const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3085 
3086  assert(Idx->getReg() != AMDGPU::NoRegister);
3087 
3088  if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3089  return false;
3090 
3091  if (UseGPRIdxMode) {
3092  unsigned IdxMode = IsIndirectSrc ?
3093  AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3094  if (Offset == 0) {
3095  MachineInstr *SetOn =
3096  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3097  .add(*Idx)
3098  .addImm(IdxMode);
3099 
3100  SetOn->getOperand(3).setIsUndef();
3101  } else {
3102  unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3103  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
3104  .add(*Idx)
3105  .addImm(Offset);
3106  MachineInstr *SetOn =
3107  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3108  .addReg(Tmp, RegState::Kill)
3109  .addImm(IdxMode);
3110 
3111  SetOn->getOperand(3).setIsUndef();
3112  }
3113 
3114  return true;
3115  }
3116 
3117  if (Offset == 0) {
3118  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3119  .add(*Idx);
3120  } else {
3121  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3122  .add(*Idx)
3123  .addImm(Offset);
3124  }
3125 
3126  return true;
3127 }
3128 
3129 // Control flow needs to be inserted if indexing with a VGPR.
3130 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3131  MachineBasicBlock &MBB,
3132  const GCNSubtarget &ST) {
3133  const SIInstrInfo *TII = ST.getInstrInfo();
3134  const SIRegisterInfo &TRI = TII->getRegisterInfo();
3135  MachineFunction *MF = MBB.getParent();
3136  MachineRegisterInfo &MRI = MF->getRegInfo();
3137 
3138  unsigned Dst = MI.getOperand(0).getReg();
3139  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
3140  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3141 
3142  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
3143 
3144  unsigned SubReg;
3145  std::tie(SubReg, Offset)
3146  = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
3147 
3148  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3149 
3150  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
3151  MachineBasicBlock::iterator I(&MI);
3152  const DebugLoc &DL = MI.getDebugLoc();
3153 
3154  if (UseGPRIdxMode) {
3155  // TODO: Look at the uses to avoid the copy. This may require rescheduling
3156  // to avoid interfering with other uses, so probably requires a new
3157  // optimization pass.
3158  BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3159  .addReg(SrcReg, RegState::Undef, SubReg)
3160  .addReg(SrcReg, RegState::Implicit)
3161  .addReg(AMDGPU::M0, RegState::Implicit);
3162  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3163  } else {
3164  BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3165  .addReg(SrcReg, RegState::Undef, SubReg)
3166  .addReg(SrcReg, RegState::Implicit);
3167  }
3168 
3169  MI.eraseFromParent();
3170 
3171  return &MBB;
3172  }
3173 
3174  const DebugLoc &DL = MI.getDebugLoc();
3175  MachineBasicBlock::iterator I(&MI);
3176 
3177  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3178  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3179 
3180  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3181 
3182  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3183  Offset, UseGPRIdxMode, true);
3184  MachineBasicBlock *LoopBB = InsPt->getParent();
3185 
3186  if (UseGPRIdxMode) {
3187  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3188  .addReg(SrcReg, RegState::Undef, SubReg)
3189  .addReg(SrcReg, RegState::Implicit)
3190  .addReg(AMDGPU::M0, RegState::Implicit);
3191  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3192  } else {
3193  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3194  .addReg(SrcReg, RegState::Undef, SubReg)
3195  .addReg(SrcReg, RegState::Implicit);
3196  }
3197 
3198  MI.eraseFromParent();
3199 
3200  return LoopBB;
3201 }
3202 
3203 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3204  const TargetRegisterClass *VecRC) {
3205  switch (TRI.getRegSizeInBits(*VecRC)) {
3206  case 32: // 4 bytes
3207  return AMDGPU::V_MOVRELD_B32_V1;
3208  case 64: // 8 bytes
3209  return AMDGPU::V_MOVRELD_B32_V2;
3210  case 128: // 16 bytes
3211  return AMDGPU::V_MOVRELD_B32_V4;
3212  case 256: // 32 bytes
3213  return AMDGPU::V_MOVRELD_B32_V8;
3214  case 512: // 64 bytes
3215  return AMDGPU::V_MOVRELD_B32_V16;
3216  default:
3217  llvm_unreachable("unsupported size for MOVRELD pseudos");
3218  }
3219 }
3220 
3221 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3222  MachineBasicBlock &MBB,
3223  const GCNSubtarget &ST) {
3224  const SIInstrInfo *TII = ST.getInstrInfo();
3225  const SIRegisterInfo &TRI = TII->getRegisterInfo();
3226  MachineFunction *MF = MBB.getParent();
3227  MachineRegisterInfo &MRI = MF->getRegInfo();
3228 
3229  unsigned Dst = MI.getOperand(0).getReg();
3230  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3231  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3232  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3233  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3234  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3235 
3236  // This can be an immediate, but will be folded later.
3237  assert(Val->getReg());
3238 
3239  unsigned SubReg;
3240  std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3241  SrcVec->getReg(),
3242  Offset);
3243  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3244 
3245  if (Idx->getReg() == AMDGPU::NoRegister) {
3246  MachineBasicBlock::iterator I(&MI);
3247  const DebugLoc &DL = MI.getDebugLoc();
3248 
3249  assert(Offset == 0);
3250 
3251  BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3252  .add(*SrcVec)
3253  .add(*Val)
3254  .addImm(SubReg);
3255 
3256  MI.eraseFromParent();
3257  return &MBB;
3258  }
3259 
3260  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3261  MachineBasicBlock::iterator I(&MI);
3262  const DebugLoc &DL = MI.getDebugLoc();
3263 
3264  if (UseGPRIdxMode) {
3265  BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3266  .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3267  .add(*Val)
3268  .addReg(Dst, RegState::ImplicitDefine)
3269  .addReg(SrcVec->getReg(), RegState::Implicit)
3270  .addReg(AMDGPU::M0, RegState::Implicit);
3271 
3272  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3273  } else {
3274  const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3275 
3276  BuildMI(MBB, I, DL, MovRelDesc)
3277  .addReg(Dst, RegState::Define)
3278  .addReg(SrcVec->getReg())
3279  .add(*Val)
3280  .addImm(SubReg - AMDGPU::sub0);
3281  }
3282 
3283  MI.eraseFromParent();
3284  return &MBB;
3285  }
3286 
3287  if (Val->isReg())
3288  MRI.clearKillFlags(Val->getReg());
3289 
3290  const DebugLoc &DL = MI.getDebugLoc();
3291 
3292  unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3293 
3294  auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3295  Offset, UseGPRIdxMode, false);
3296  MachineBasicBlock *LoopBB = InsPt->getParent();
3297 
3298  if (UseGPRIdxMode) {
3299  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3300  .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3301  .add(*Val) // src0
3302  .addReg(Dst, RegState::ImplicitDefine)
3303  .addReg(PhiReg, RegState::Implicit)
3304  .addReg(AMDGPU::M0, RegState::Implicit);
3305  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3306  } else {
3307  const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3308 
3309  BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3310  .addReg(Dst, RegState::Define)
3311  .addReg(PhiReg)
3312  .add(*Val)
3313  .addImm(SubReg - AMDGPU::sub0);
3314  }
3315 
3316  MI.eraseFromParent();
3317 
3318  return LoopBB;
3319 }
3320 
3321 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3322  MachineInstr &MI, MachineBasicBlock *BB) const {
3323 
3324  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3325  MachineFunction *MF = BB->getParent();
3326  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3327 
3328  if (TII->isMIMG(MI)) {
3329  if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3330  report_fatal_error("missing mem operand from MIMG instruction");
3331  }
3332  // Add a memoperand for mimg instructions so that they aren't assumed to
3333  // be ordered memory instructions.
3334 
3335  return BB;
3336  }
3337 
3338  switch (MI.getOpcode()) {
3339  case AMDGPU::S_ADD_U64_PSEUDO:
3340  case AMDGPU::S_SUB_U64_PSEUDO: {
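  // Expand a 64-bit scalar add/sub into 32-bit halves: S_ADD_U32/S_SUB_U32 on
  // the low half, a carry-consuming S_ADDC_U32/S_SUBB_U32 on the high half,
  // then recombine the halves with REG_SEQUENCE.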
3341  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3342  const DebugLoc &DL = MI.getDebugLoc();
3343 
3344  MachineOperand &Dest = MI.getOperand(0);
3345  MachineOperand &Src0 = MI.getOperand(1);
3346  MachineOperand &Src1 = MI.getOperand(2);
3347 
3348  unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3349  unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3350 
3351  MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3352  Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3353  &AMDGPU::SReg_32_XM0RegClass);
3354  MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3355  Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3356  &AMDGPU::SReg_32_XM0RegClass);
3357 
3358  MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3359  Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3360  &AMDGPU::SReg_32_XM0RegClass);
3361  MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3362  Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3363  &AMDGPU::SReg_32_XM0RegClass);
3364 
3365  bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3366 
3367  unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3368  unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3369  BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3370  .add(Src0Sub0)
3371  .add(Src1Sub0);
3372  BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3373  .add(Src0Sub1)
3374  .add(Src1Sub1);
3375  BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3376  .addReg(DestSub0)
3377  .addImm(AMDGPU::sub0)
3378  .addReg(DestSub1)
3379  .addImm(AMDGPU::sub1);
3380  MI.eraseFromParent();
3381  return BB;
3382  }
3383  case AMDGPU::SI_INIT_M0: {
3384  BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3385  TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3386  .add(MI.getOperand(0));
3387  MI.eraseFromParent();
3388  return BB;
3389  }
3390  case AMDGPU::SI_INIT_EXEC:
3391  // This should be before all vector instructions.
3392  BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3393  AMDGPU::EXEC)
3394  .addImm(MI.getOperand(0).getImm());
3395  MI.eraseFromParent();
3396  return BB;
3397 
3398  case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3399  // Extract the thread count from an SGPR input and set EXEC accordingly.
3400  // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3401  //
3402  // S_BFE_U32 count, input, {shift, 7}
3403  // S_BFM_B64 exec, count, 0
3404  // S_CMP_EQ_U32 count, 64
3405  // S_CMOV_B64 exec, -1
3406  MachineInstr *FirstMI = &*BB->begin();
3407  MachineRegisterInfo &MRI = MF->getRegInfo();
3408  unsigned InputReg = MI.getOperand(0).getReg();
3409  unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3410  bool Found = false;
3411 
3412  // Move the COPY of the input reg to the beginning, so that we can use it.
3413  for (auto I = BB->begin(); I != &MI; I++) {
3414  if (I->getOpcode() != TargetOpcode::COPY ||
3415  I->getOperand(0).getReg() != InputReg)
3416  continue;
3417 
3418  if (I == FirstMI) {
3419  FirstMI = &*++BB->begin();
3420  } else {
3421  I->removeFromParent();
3422  BB->insert(FirstMI, &*I);
3423  }
3424  Found = true;
3425  break;
3426  }
3427  assert(Found);
3428  (void)Found;
3429 
3430  // This should be before all vector instructions.
3431  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3432  .addReg(InputReg)
3433  .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3434  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3435  AMDGPU::EXEC)
3436  .addReg(CountReg)
3437  .addImm(0);
3438  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3439  .addReg(CountReg, RegState::Kill)
3440  .addImm(64);
3441  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3442  AMDGPU::EXEC)
3443  .addImm(-1);
3444  MI.eraseFromParent();
3445  return BB;
3446  }
3447 
3448  case AMDGPU::GET_GROUPSTATICSIZE: {
3449  DebugLoc DL = MI.getDebugLoc();
3450  BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3451  .add(MI.getOperand(0))
3452  .addImm(MFI->getLDSSize());
3453  MI.eraseFromParent();
3454  return BB;
3455  }
3456  case AMDGPU::SI_INDIRECT_SRC_V1:
3457  case AMDGPU::SI_INDIRECT_SRC_V2:
3458  case AMDGPU::SI_INDIRECT_SRC_V4:
3459  case AMDGPU::SI_INDIRECT_SRC_V8:
3460  case AMDGPU::SI_INDIRECT_SRC_V16:
3461  return emitIndirectSrc(MI, *BB, *getSubtarget());
3462  case AMDGPU::SI_INDIRECT_DST_V1:
3463  case AMDGPU::SI_INDIRECT_DST_V2:
3464  case AMDGPU::SI_INDIRECT_DST_V4:
3465  case AMDGPU::SI_INDIRECT_DST_V8:
3466  case AMDGPU::SI_INDIRECT_DST_V16:
3467  return emitIndirectDst(MI, *BB, *getSubtarget());
3468  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3469  case AMDGPU::SI_KILL_I1_PSEUDO:
3470  return splitKillBlock(MI, BB);
3471  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3472  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3473 
3474  unsigned Dst = MI.getOperand(0).getReg();
3475  unsigned Src0 = MI.getOperand(1).getReg();
3476  unsigned Src1 = MI.getOperand(2).getReg();
3477  const DebugLoc &DL = MI.getDebugLoc();
3478  unsigned SrcCond = MI.getOperand(3).getReg();
3479 
3480  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3481  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3482  unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3483 
3484  BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3485  .addReg(SrcCond);
3486  BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3487  .addImm(0)
3488  .addReg(Src0, 0, AMDGPU::sub0)
3489  .addImm(0)
3490  .addReg(Src1, 0, AMDGPU::sub0)
3491  .addReg(SrcCondCopy);
3492  BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3493  .addImm(0)
3494  .addReg(Src0, 0, AMDGPU::sub1)
3495  .addImm(0)
3496  .addReg(Src1, 0, AMDGPU::sub1)
3497  .addReg(SrcCondCopy);
3498 
3499  BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3500  .addReg(DstLo)
3501  .addImm(AMDGPU::sub0)
3502  .addReg(DstHi)
3503  .addImm(AMDGPU::sub1);
3504  MI.eraseFromParent();
3505  return BB;
3506  }
3507  case AMDGPU::SI_BR_UNDEF: {
3508  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3509  const DebugLoc &DL = MI.getDebugLoc();
3510  MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3511  .add(MI.getOperand(0));
3512  Br->getOperand(1).setIsUndef(true); // read undef SCC
3513  MI.eraseFromParent();
3514  return BB;
3515  }
3516  case AMDGPU::ADJCALLSTACKUP:
3517  case AMDGPU::ADJCALLSTACKDOWN: {
3518  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3519  MachineInstrBuilder MIB(*MF, &MI);
3520 
3521  // Add an implicit use of the frame offset reg to prevent the restore copy
3522  // inserted after the call from being reordered after stack operations in
3523  // the caller's frame.
3524  MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3525  .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3526  .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
3527  return BB;
3528  }
3529  case AMDGPU::SI_CALL_ISEL: {
3530  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3531  const DebugLoc &DL = MI.getDebugLoc();
3532 
3533  unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3534 
3535  MachineInstrBuilder MIB;
3536  MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
3537 
3538  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
3539  MIB.add(MI.getOperand(I));
3540 
3541  MIB.cloneMemRefs(MI);
3542  MI.eraseFromParent();
3543  return BB;
3544  }
3545  case AMDGPU::V_ADD_I32_e32:
3546  case AMDGPU::V_SUB_I32_e32:
3547  case AMDGPU::V_SUBREV_I32_e32: {
3548  // TODO: Define distinct V_*_I32_Pseudo instructions instead.
3549  const DebugLoc &DL = MI.getDebugLoc();
3550  unsigned Opc = MI.getOpcode();
3551 
3552  bool NeedClampOperand = false;
3553  if (TII->pseudoToMCOpcode(Opc) == -1) {
3554  Opc = AMDGPU::getVOPe64(Opc);
3555  NeedClampOperand = true;
3556  }
3557 
3558  auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
3559  if (TII->isVOP3(*I)) {
3560  I.addReg(AMDGPU::VCC, RegState::Define);
3561  }
3562  I.add(MI.getOperand(1))
3563  .add(MI.getOperand(2));
3564  if (NeedClampOperand)
3565  I.addImm(0); // clamp bit for e64 encoding
3566 
3567  TII->legalizeOperands(*I);
3568 
3569  MI.eraseFromParent();
3570  return BB;
3571  }
3572  default:
3573  return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3574  }
3575 }
3576 
3577 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3578  return isTypeLegal(VT.getScalarType());
3579 }
3580 
3581 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3582  // This currently forces unfolding various combinations of fsub into fma with
3583  // free fneg'd operands. As long as we have fast FMA (controlled by
3584  // isFMAFasterThanFMulAndFAdd), we should perform these.
3585 
3586  // When fma is quarter rate, for f64 where add / sub are at best half rate,
3587  // most of these combines appear to be cycle neutral but save on instruction
3588  // count / code size.
3589  return true;
3590 }
3591 
3592 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3593  EVT VT) const {
3594  if (!VT.isVector()) {
3595  return MVT::i1;
3596  }
3597  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3598 }
3599 
3600 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3601  // TODO: Should i16 be used always if legal? For now it would force VALU
3602  // shifts.
3603  return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3604 }
3605 
3606 // Answering this is somewhat tricky and depends on the specific device, since
3607 // different devices have different rates for fma and for f64 operations.
3608 //
3609 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3610 // regardless of which device (although the number of cycles differs between
3611 // devices), so it is always profitable for f64.
3612 //
3613 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3614 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3615 // which we can always do even without fused FP ops since it returns the same
3616 // result as the separate operations and since it is always full
3617 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3618 // however does not support denormals, so we do report fma as faster if we have
3619 // a fast fma device and require denormals.
3620 //
3621 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3622  VT = VT.getScalarType();
3623 
3624  switch (VT.getSimpleVT().SimpleTy) {
3625  case MVT::f32: {
3626  // This is as fast on some subtargets. However, we always have full rate f32
3627  // mad available which returns the same result as the separate operations
3628  // which we should prefer over fma. We can't use this if we want to support
3629  // denormals, so only report this in these cases.
3630  if (Subtarget->hasFP32Denormals())
3631  return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3632 
3633  // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3634  return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3635  }
3636  case MVT::f64:
3637  return true;
3638  case MVT::f16:
3639  return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
3640  default:
3641  break;
3642  }
3643 
3644  return false;
3645 }
3646 
3647 //===----------------------------------------------------------------------===//
3648 // Custom DAG Lowering Operations
3649 //===----------------------------------------------------------------------===//
3650 
3651 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3652 // wider vector type is legal.
3653 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3654  SelectionDAG &DAG) const {
3655  unsigned Opc = Op.getOpcode();
3656  EVT VT = Op.getValueType();
3657  assert(VT == MVT::v4f16);
3658 
3659  SDValue Lo, Hi;
3660  std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3661 
3662  SDLoc SL(Op);
3663  SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3664  Op->getFlags());
3665  SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3666  Op->getFlags());
3667 
3668  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3669 }
3670 
3671 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3672 // wider vector type is legal.
3673 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3674  SelectionDAG &DAG) const {
3675  unsigned Opc = Op.getOpcode();
3676  EVT VT = Op.getValueType();
3677  assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3678 
3679  SDValue Lo0, Hi0;
3680  std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3681  SDValue Lo1, Hi1;
3682  std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3683 
3684  SDLoc SL(Op);
3685 
3686  SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3687  Op->getFlags());
3688  SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3689  Op->getFlags());
3690 
3691  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3692 }
3693 
3694 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3695  switch (Op.getOpcode()) {
3696  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
3697  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3698  case ISD::LOAD: {
3699  SDValue Result = LowerLOAD(Op, DAG);
3700  assert((!Result.getNode() ||
3701  Result.getNode()->getNumValues() == 2) &&
3702  "Load should return a value and a chain");
3703  return Result;
3704  }
3705 
3706  case ISD::FSIN:
3707  case ISD::FCOS:
3708  return LowerTrig(Op, DAG);
3709  case ISD::SELECT: return LowerSELECT(Op, DAG);
3710  case ISD::FDIV: return LowerFDIV(Op, DAG);
3711  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
3712  case ISD::STORE: return LowerSTORE(Op, DAG);
3713  case ISD::GlobalAddress: {
3714  MachineFunction &MF = DAG.getMachineFunction();
3715  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3716  return LowerGlobalAddress(MFI, Op, DAG);
3717  }
3718  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3719  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
3720  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3721  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
3722  case ISD::INSERT_VECTOR_ELT:
3723  return lowerINSERT_VECTOR_ELT(Op, DAG);
3724  case ISD::EXTRACT_VECTOR_ELT:
3725  return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3726  case ISD::BUILD_VECTOR:
3727  return lowerBUILD_VECTOR(Op, DAG);
3728  case ISD::FP_ROUND:
3729  return lowerFP_ROUND(Op, DAG);
3730  case ISD::TRAP:
3731  return lowerTRAP(Op, DAG);
3732  case ISD::DEBUGTRAP:
3733  return lowerDEBUGTRAP(Op, DAG);
3734  case ISD::FABS:
3735  case ISD::FNEG:
3736  case ISD::FCANONICALIZE:
3737  return splitUnaryVectorOp(Op, DAG);
3738  case ISD::FMINNUM:
3739  case ISD::FMAXNUM:
3740  return lowerFMINNUM_FMAXNUM(Op, DAG);
3741  case ISD::SHL:
3742  case ISD::SRA:
3743  case ISD::SRL:
3744  case ISD::ADD:
3745  case ISD::SUB:
3746  case ISD::MUL:
3747  case ISD::SMIN:
3748  case ISD::SMAX:
3749  case ISD::UMIN:
3750  case ISD::UMAX:
3751  case ISD::FADD:
3752  case ISD::FMUL:
3753  case ISD::FMINNUM_IEEE:
3754  case ISD::FMAXNUM_IEEE:
3755  return splitBinaryVectorOp(Op, DAG);
3756  }
3757  return SDValue();
3758 }
3759 
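// Repack the result of a d16 load: on subtargets with unpacked D16 memory
// instructions each f16 element comes back in its own 32-bit lane, so truncate
// the elements to i16 and bitcast back to the packed v2f16/v4f16 type the
// caller expects.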
3760 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
3761  const SDLoc &DL,
3762  SelectionDAG &DAG, bool Unpacked) {
3763  if (!LoadVT.isVector())
3764  return Result;
3765 
3766  if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3767  // Truncate to v2i16/v4i16.
3768  EVT IntLoadVT = LoadVT.changeTypeToInteger();
3769 
3770  // Work around the legalizer not scalarizing truncate after vector op
3771  // legalization by not creating an intermediate vector trunc.
3772  SmallVector<SDValue, 4> Elts;
3773  DAG.ExtractVectorElements(Result, Elts);
3774  for (SDValue &Elt : Elts)
3775  Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
3776 
3777  Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
3778 
3779  // Bitcast to original type (v2f16/v4f16).
3780  return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3781  }
3782 
3783  // Cast back to the original packed type.
3784  return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3785 }
3786 
3787 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
3788  MemSDNode *M,
3789  SelectionDAG &DAG,
3790  ArrayRef<SDValue> Ops,
3791  bool IsIntrinsic) const {
3792  SDLoc DL(M);
3793 
3794  bool Unpacked = Subtarget->hasUnpackedD16VMem();
3795  EVT LoadVT = M->getValueType(0);
3796 
3797  EVT EquivLoadVT = LoadVT;
3798  if (Unpacked && LoadVT.isVector()) {
3799  EquivLoadVT = LoadVT.isVector() ?
3800  EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3801  LoadVT.getVectorNumElements()) : LoadVT;
3802  }
3803 
3804  // Change from v4f16/v2f16 to EquivLoadVT.
3805  SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3806 
3807  SDValue Load
3808  = DAG.getMemIntrinsicNode(
3809  IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
3810  VTList, Ops, M->getMemoryVT(),
3811  M->getMemOperand());
3812  if (!Unpacked) // Just adjusted the opcode.
3813  return Load;
3814 
3815  SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
3816 
3817  return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
3818 }
3819 
3820 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
3821  SDNode *N, SelectionDAG &DAG) {
3822  EVT VT = N->getValueType(0);
3823  const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
3824  int CondCode = CD->getSExtValue();
3825  if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
3826  CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
3827  return DAG.getUNDEF(VT);
3828 
3829  ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
3830 
3831 
3832  SDValue LHS = N->getOperand(1);
3833  SDValue RHS = N->getOperand(2);
3834 
3835  SDLoc DL(N);
3836 
3837  EVT CmpVT = LHS.getValueType();
3838  if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
3839  unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
3840  ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3841  LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
3842  RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
3843  }
3844 
3845  ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
3846 
3847  return DAG.getNode(AMDGPUISD::SETCC, DL, VT, LHS, RHS,
3848  DAG.getCondCode(CCOpcode));
3849 }
3850 
3851 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
3852  SDNode *N, SelectionDAG &DAG) {
3853  EVT VT = N->getValueType(0);
3854  const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
3855 
3856  int CondCode = CD->getSExtValue();
3857  if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
3858  CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
3859  return DAG.getUNDEF(VT);
3860  }
3861 
3862  SDValue Src0 = N->getOperand(1);
3863  SDValue Src1 = N->getOperand(2);
3864  EVT CmpVT = Src0.getValueType();
3865  SDLoc SL(N);
3866 
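  // If f16 is not legal on this subtarget, promote the compare operands to
  // f32; the extension is exact, so the predicate is unchanged.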
3867  if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
3868  Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
3869  Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
3870  }
3871 
3872  FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
3873  ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
3874  return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src0,
3875  Src1, DAG.getCondCode(CCOpcode));
3876 }
3877 
3878 void SITargetLowering::ReplaceNodeResults(SDNode *N,
3879  SmallVectorImpl<SDValue> &Results,
3880  SelectionDAG &DAG) const {
3881  switch (N->getOpcode()) {
3882  case ISD::INSERT_VECTOR_ELT: {
3883  if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3884  Results.push_back(Res);
3885  return;
3886  }
3887  case ISD::EXTRACT_VECTOR_ELT: {
3888  if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3889  Results.push_back(Res);
3890  return;
3891  }
3892  case ISD::INTRINSIC_WO_CHAIN: {
3893  unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3894  switch (IID) {
3895  case Intrinsic::amdgcn_cvt_pkrtz: {
3896  SDValue Src0 = N->getOperand(1);
3897  SDValue Src1 = N->getOperand(2);
3898  SDLoc SL(N);
3899  SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3900  Src0, Src1);
3901  Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3902  return;
3903  }
3904  case Intrinsic::amdgcn_cvt_pknorm_i16:
3905  case Intrinsic::amdgcn_cvt_pknorm_u16:
3906  case Intrinsic::amdgcn_cvt_pk_i16:
3907  case Intrinsic::amdgcn_cvt_pk_u16: {
3908  SDValue Src0 = N->getOperand(1);
3909  SDValue Src1 = N->getOperand(2);
3910  SDLoc SL(N);
3911  unsigned Opcode;
3912 
3913  if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
3914  Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
3915  else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
3916  Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
3917  else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
3918  Opcode = AMDGPUISD::CVT_PK_I16_I32;
3919  else
3920  Opcode = AMDGPUISD::CVT_PK_U16_U32;
3921 
3922  EVT VT = N->getValueType(0);
3923  if (isTypeLegal(VT))
3924  Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
3925  else {
3926  SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
3927  Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
3928  }
3929  return;
3930  }
3931  }
3932  break;
3933  }
3934  case ISD::INTRINSIC_W_CHAIN: {
3935  if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
3936  Results.push_back(Res);
3937  Results.push_back(Res.getValue(1));
3938  return;
3939  }
3940 
3941  break;
3942  }
3943  case ISD::SELECT: {
3944  SDLoc SL(N);
3945  EVT VT = N->getValueType(0);
3946  EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3947  SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3948  SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3949 
3950  EVT SelectVT = NewVT;
3951  if (NewVT.bitsLT(MVT::i32)) {
3952  LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3953  RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3954  SelectVT = MVT::i32;
3955  }
3956 
3957  SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3958  N->getOperand(0), LHS, RHS);
3959 
3960  if (NewVT != SelectVT)
3961  NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3962  Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3963  return;
3964  }
3965  case ISD::FNEG: {
3966  if (N->getValueType(0) != MVT::v2f16)
3967  break;
3968 
3969  SDLoc SL(N);
3970  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3971 
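  // Negate both f16 lanes at once by flipping each lane's sign bit
  // (bit 15 of each half) with a single 32-bit XOR.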
3972  SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
3973  BC,
3974  DAG.getConstant(0x80008000, SL, MVT::i32));
3975  Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3976  return;
3977  }
3978  case ISD::FABS: {
3979  if (N->getValueType(0) != MVT::v2f16)
3980  break;
3981 
3982  SDLoc SL(N);
3983  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3984 
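  // Clear the sign bit of both f16 lanes with a single 32-bit AND.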
3985  SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
3986  BC,
3987  DAG.getConstant(0x7fff7fff, SL, MVT::i32));
3988  Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3989  return;
3990  }
3991  default:
3992  break;
3993  }
3994 }
3995 
3996 /// Helper function for LowerBRCOND
3997 static SDNode *findUser(SDValue Value, unsigned Opcode) {
3998 
3999  SDNode *Parent = Value.getNode();
4000  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
4001  I != E; ++I) {
4002 
4003  if (I.getUse().get() != Value)
4004  continue;
4005 
4006  if (I->getOpcode() == Opcode)
4007  return *I;
4008  }
4009  return nullptr;
4010 }
4011 
4012 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
4013  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4014  switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
4015  case Intrinsic::amdgcn_if:
4016  return AMDGPUISD::IF;
4017  case Intrinsic::amdgcn_else:
4018  return AMDGPUISD::ELSE;
4019  case Intrinsic::amdgcn_loop:
4020  return AMDGPUISD::LOOP;
4021  case Intrinsic::amdgcn_end_cf:
4022  llvm_unreachable("should not occur");
4023  default:
4024  return 0;
4025  }
4026  }
4027 
4028  // break, if_break, else_break are all only used as inputs to loop, not
4029  // directly as branch conditions.
4030  return 0;
4031 }
4032 
4033 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
4034  const Triple &TT = getTargetMachine().getTargetTriple();
4035  return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4036  GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4037  AMDGPU::shouldEmitConstantsToTextSection(TT);
4038 }
4039 
4040 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
4041  // FIXME: Either avoid relying on address space here or change the default
4042  // address space for functions to avoid the explicit check.
4043  return (GV->getValueType()->isFunctionTy() ||
4044  GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4045  GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4046  GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4047  !shouldEmitFixup(GV) &&
4048  !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4049 }
4050 
4051 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4052  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4053 }
4054 
4055 /// This transforms the control flow intrinsics to get the branch destination as
4056 /// last parameter, and also switches the branch target with BR if the need arises.
4057 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4058  SelectionDAG &DAG) const {
4059  SDLoc DL(BRCOND);
4060 
4061  SDNode *Intr = BRCOND.getOperand(1).getNode();
4062  SDValue Target = BRCOND.getOperand(2);
4063  SDNode *BR = nullptr;
4064  SDNode *SetCC = nullptr;
4065 
4066  if (Intr->getOpcode() == ISD::SETCC) {
4067  // As long as we negate the condition everything is fine
4068  SetCC = Intr;
4069  Intr = SetCC->getOperand(0).getNode();
4070 
4071  } else {
4072  // Get the target from BR if we don't negate the condition
4073  BR = findUser(BRCOND, ISD::BR);
4074  Target = BR->getOperand(1);
4075  }
4076 
4077  // FIXME: This changes the types of the intrinsics instead of introducing new
4078  // nodes with the correct types.
4079  // e.g. llvm.amdgcn.loop
4080 
4081  // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
4082  // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
4083 
4084  unsigned CFNode = isCFIntrinsic(Intr);
4085  if (CFNode == 0) {
4086  // This is a uniform branch so we don't need to legalize.
4087  return BRCOND;
4088  }
4089 
4090  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4091  Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4092 
4093  assert(!SetCC ||
4094  (SetCC->getConstantOperandVal(1) == 1 &&
4095  cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4096  ISD::SETNE));
4097 
4098  // operands of the new intrinsic call
4099  SmallVector<SDValue, 8> Ops;
4100  if (HaveChain)
4101  Ops.push_back(BRCOND.getOperand(0));
4102 
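  // Skip the old chain (if any) and the intrinsic ID operand; everything else
  // is forwarded, and the branch target block is appended as the new final
  // operand.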
4103  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
4104  Ops.push_back(Target);
4105 
4106  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4107 
4108  // build the new intrinsic call
4109  SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
4110 
4111  if (!HaveChain) {
4112  SDValue Ops[] = {
4113  SDValue(Result, 0),
4114  BRCOND.getOperand(0)
4115  };
4116 
4117  Result = DAG.getMergeValues(Ops, DL).getNode();
4118  }
4119 
4120  if (BR) {
4121  // Give the branch instruction our target
4122  SDValue Ops[] = {
4123  BR->getOperand(0),
4124  BRCOND.getOperand(2)
4125  };
4126  SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4127  DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4128  BR = NewBR.getNode();
4129  }
4130 
4131  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4132 
4133  // Copy the intrinsic results to registers
4134  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4135  SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4136  if (!CopyToReg)
4137  continue;
4138 
4139  Chain = DAG.getCopyToReg(
4140  Chain, DL,
4141  CopyToReg->getOperand(1),
4142  SDValue(Result, i - 1),
4143  SDValue());
4144 
4145  DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4146  }
4147 
4148  // Remove the old intrinsic from the chain
4149  DAG.ReplaceAllUsesOfValueWith(
4150  SDValue(Intr, Intr->getNumValues() - 1),
4151  Intr->getOperand(0));
4152 
4153  return Chain;
4154 }
4155 
4156 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4157  SDValue Op,
4158  const SDLoc &DL,
4159  EVT VT) const {
4160  return Op.getValueType().bitsLE(VT) ?
4161  DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
4162  DAG.getNode(ISD::FTRUNC, DL, VT, Op);
4163 }
4164 
4165 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
4166  assert(Op.getValueType() == MVT::f16 &&
4167  "Do not know how to custom lower FP_ROUND for non-f16 type");
4168 
4169  SDValue Src = Op.getOperand(0);
4170  EVT SrcVT = Src.getValueType();
4171  if (SrcVT != MVT::f64)
4172  return Op;
4173 
4174  SDLoc DL(Op);
4175 
4176  SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4177  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
4178  return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
4179 }
4180 
4181 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4182  SelectionDAG &DAG) const {
4183  EVT VT = Op.getValueType();
4184  const MachineFunction &MF = DAG.getMachineFunction();
4185  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4186  bool IsIEEEMode = Info->getMode().IEEE;
4187 
4188  // FIXME: Assert during selection that this is only selected for
4189  // ieee_mode. Currently a combine can produce the ieee version for non-ieee
4190  // mode functions, but this happens to be OK since it's only done in cases
4191  // where there is known no sNaN.
4192  if (IsIEEEMode)
4193  return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4194 
4195  if (VT == MVT::v4f16)
4196  return splitBinaryVectorOp(Op, DAG);
4197  return Op;
4198 }
4199 
4200 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4201  SDLoc SL(Op);
4202  SDValue Chain = Op.getOperand(0);
4203 
4204  if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4205  !Subtarget->isTrapHandlerEnabled())
4206  return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
4207 
4208  MachineFunction &MF = DAG.getMachineFunction();
4209  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4210  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4211  assert(UserSGPR != AMDGPU::NoRegister);
4212  SDValue QueuePtr = CreateLiveInRegister(
4213  DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
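  // The queue pointer is handed to the trap handler in SGPR0/SGPR1, so copy
  // it there from the queue-ptr user SGPR pair before building the TRAP node.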
4214  SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4215  SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4216  QueuePtr, SDValue());
4217  SDValue Ops[] = {
4218  ToReg,
4219  DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
4220  SGPR01,
4221  ToReg.getValue(1)
4222  };
4223  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4224 }
4225 
4226 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4227  SDLoc SL(Op);
4228  SDValue Chain = Op.getOperand(0);
4229  MachineFunction &MF = DAG.getMachineFunction();
4230 
4231  if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4232  !Subtarget->isTrapHandlerEnabled()) {
4233  DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
4234  "debugtrap handler not supported",
4235  Op.getDebugLoc(),
4236  DS_Warning);
4237  LLVMContext &Ctx = MF.getFunction().getContext();
4238  Ctx.diagnose(NoTrap);
4239  return Chain;
4240  }
4241 
4242  SDValue Ops[] = {
4243  Chain,
4244  DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
4245  };
4246  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4247 }
4248 
4249 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
4250  SelectionDAG &DAG) const {
4251  // FIXME: Use inline constants (src_{shared, private}_base) instead.
4252  if (Subtarget->hasApertureRegs()) {
4253  unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
4254  AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4255  AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4256  unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
4257  AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4258  AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4259  unsigned Encoding =
4260  AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4261  Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4262  WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
4263 
4264  SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4265  SDValue ApertureReg = SDValue(
4266  DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4267  SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
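  // s_getreg_b32 returns the aperture field right-justified; shifting it left
  // by the field width (WidthM1 + 1) rebuilds the 32-bit aperture base used by
  // the addrspacecast lowering below.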
4268  return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
4269  }
4270 
4271  MachineFunction &MF = DAG.getMachineFunction();
4272  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4273  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4274  assert(UserSGPR != AMDGPU::NoRegister);
4275 
4276  SDValue QueuePtr = CreateLiveInRegister(
4277  DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4278 
4279  // Offset into amd_queue_t for group_segment_aperture_base_hi /
4280  // private_segment_aperture_base_hi.
4281  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
4282 
4283  SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
4284 
4285  // TODO: Use custom target PseudoSourceValue.
4286  // TODO: We should use the value from the IR intrinsic call, but it might not
4287  // be available and how do we get it?
4288  Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
4289  AMDGPUAS::CONSTANT_ADDRESS));
4290 
4291  MachinePointerInfo PtrInfo(V, StructOffset);
4292  return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
4293  MinAlign(64, StructOffset),
4294  MachineMemOperand::MODereferenceable |
4295  MachineMemOperand::MOInvariant);
4296 }
4297 
4298 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4299  SelectionDAG &DAG) const {
4300  SDLoc SL(Op);
4301  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4302 
4303  SDValue Src = ASC->getOperand(0);
4304  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4305 
4306  const AMDGPUTargetMachine &TM =
4307  static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4308 
4309  // flat -> local/private
4310  if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4311  unsigned DestAS = ASC->getDestAddressSpace();
4312 
4313  if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4314  DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
4315  unsigned NullVal = TM.getNullPointerValue(DestAS);
4316  SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4317  SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4318  SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4319 
4320  return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4321  NonNull, Ptr, SegmentNullPtr);
4322  }
4323  }
4324 
4325  // local/private -> flat
4326  if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4327  unsigned SrcAS = ASC->getSrcAddressSpace();
4328 
4329  if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4330  SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
4331  unsigned NullVal = TM.getNullPointerValue(SrcAS);
4332  SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4333 
4334  SDValue NonNull
4335  = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4336 
4337  SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
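  // Build the 64-bit flat pointer from the 32-bit segment offset (low half)
  // and the aperture base (high half); a null segment pointer still maps to
  // the flat null value via the select below.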
4338  SDValue CvtPtr
4339  = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4340 
4341  return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4342  DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4343  FlatNullPtr);
4344  }
4345  }
4346 
4347  // global <-> flat are no-ops and never emitted.
4348 
4349  const MachineFunction &MF = DAG.getMachineFunction();
4350  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
4351  MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
4352  DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4353 
4354  return DAG.getUNDEF(ASC->getValueType(0));
4355 }
4356 
4357 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4358  SelectionDAG &DAG) const {
4359  SDValue Vec = Op.getOperand(0);
4360  SDValue InsVal = Op.getOperand(1);
4361  SDValue Idx = Op.getOperand(2);
4362  EVT VecVT = Vec.getValueType();
4363  EVT EltVT = VecVT.getVectorElementType();
4364  unsigned VecSize = VecVT.getSizeInBits();
4365  unsigned EltSize = EltVT.getSizeInBits();
4366 
4367 
4368  assert(VecSize <= 64);
4369 
4370  unsigned NumElts = VecVT.getVectorNumElements();
4371  SDLoc SL(Op);
4372  auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4373 
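  // Fast path: with a constant index into a 4 x 16-bit vector, only the
  // 32-bit half that contains the element is rewritten; the other half is
  // reused unchanged.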
4374  if (NumElts == 4 && EltSize == 16 && KIdx) {
4375  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4376 
4377  SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4378  DAG.getConstant(0, SL, MVT::i32));
4379  SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4380  DAG.getConstant(1, SL, MVT::i32));
4381 
4382  SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4383  SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4384 
4385  unsigned Idx = KIdx->getZExtValue();
4386  bool InsertLo = Idx < 2;
4387  SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4388  InsertLo ? LoVec : HiVec,
4389  DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4390  DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4391 
4392  InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4393 
4394  SDValue Concat = InsertLo ?
4395  DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4396  DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4397 
4398  return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4399  }
4400 
4401  if (isa<ConstantSDNode>(Idx))
4402  return SDValue();
4403 
4404  MVT IntVT = MVT::getIntegerVT(VecSize);
4405 
4406  // Avoid stack access for dynamic indexing.
4407  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
4408 
4409  // Create a congruent vector with the target value in each element so that
4410  // the required element can be masked and ORed into the target vector.
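  // i.e. the insert is rewritten as (Vec & ~Mask) | (Splat(InsVal) & Mask)
  // with Mask = 0xffff << ScaledIdx, matching the v_bfm_b32/v_bfi_b32 pattern
  // noted above instead of going through a stack temporary.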
4411  SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4412  DAG.getSplatBuildVector(VecVT, SL, InsVal));
4413 
4414  assert(isPowerOf2_32(EltSize));
4415  SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4416 
4417  // Convert vector index to bit-index.
4418  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4419 
4420  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4421  SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4422  DAG.getConstant(0xffff, SL, IntVT),