1 //===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the SystemZTargetLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "SystemZISelLowering.h"
14 #include "SystemZCallingConv.h"
17 #include "SystemZTargetMachine.h"
22 #include "llvm/IR/Intrinsics.h"
23 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/Support/KnownBits.h"
26 #include <cctype>
27 
28 using namespace llvm;
29 
30 #define DEBUG_TYPE "systemz-lower"
31 
32 namespace {
33 // Represents information about a comparison.
34 struct Comparison {
35  Comparison(SDValue Op0In, SDValue Op1In)
36  : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
37 
38  // The operands to the comparison.
39  SDValue Op0, Op1;
40 
41  // The opcode that should be used to compare Op0 and Op1.
42  unsigned Opcode;
43 
44  // A SystemZICMP value. Only used for integer comparisons.
45  unsigned ICmpType;
46 
47  // The mask of CC values that Opcode can produce.
48  unsigned CCValid;
49 
50  // The mask of CC values for which the original condition is true.
51  unsigned CCMask;
52 };
53 } // end anonymous namespace
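// Illustrative sketch (not part of the original source): for a signed i32
// SETLT between two GPR values, the fields would typically end up as
//   Comparison C(LHS, RHS);
//   C.Opcode   = SystemZISD::ICMP;
//   C.ICmpType = SystemZICMP::SignedOnly;
//   C.CCValid  = SystemZ::CCMASK_ICMP;
//   C.CCMask   = SystemZ::CCMASK_CMP_LT;
// i.e. the condition is true for exactly those CC values in CCMask that
// the compare (CCValid) can produce.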
54 
55 // Classify VT as either 32 or 64 bit.
56 static bool is32Bit(EVT VT) {
57  switch (VT.getSimpleVT().SimpleTy) {
58  case MVT::i32:
59  return true;
60  case MVT::i64:
61  return false;
62  default:
63  llvm_unreachable("Unsupported type");
64  }
65 }
66 
67 // Return a version of MachineOperand that can be safely used before the
68 // final use.
70  if (Op.isReg())
71  Op.setIsKill(false);
72  return Op;
73 }
74 
76  const SystemZSubtarget &STI)
77  : TargetLowering(TM), Subtarget(STI) {
78  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));
79 
80  // Set up the register classes.
81  if (Subtarget.hasHighWord())
82  addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
83  else
84  addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
85  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
86  if (Subtarget.hasVector()) {
87  addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
88  addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
89  } else {
90  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
91  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
92  }
93  if (Subtarget.hasVectorEnhancements1())
94  addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
95  else
96  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);
97 
98  if (Subtarget.hasVector()) {
99  addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
100  addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
101  addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
102  addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
103  addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
104  addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
105  }
106 
107  // Compute derived properties from the register classes
109 
110  // Set up special registers.
112 
113  // TODO: It may be better to default to latency-oriented scheduling, however
114  // LLVM's current latency-oriented scheduler can't handle physreg definitions
115  // such as SystemZ has with CC, so set this to the register-pressure
116  // scheduler, because it can.
118 
121 
122  // Instructions are strings of 2-byte aligned 2-byte values.
124  // For performance reasons we prefer 16-byte alignment.
126 
127  // Handle operations that are handled in a similar way for all types.
128  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
130  ++I) {
131  MVT VT = MVT::SimpleValueType(I);
132  if (isTypeLegal(VT)) {
133  // Lower SET_CC into an IPM-based sequence.
135 
136  // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
138 
139  // Lower SELECT_CC and BR_CC into separate comparisons and branches.
142  }
143  }
144 
145  // Expand jump table branches as address arithmetic followed by an
146  // indirect jump.
148 
149  // Expand BRCOND into a BR_CC (see above).
151 
152  // Handle integer types.
153  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
155  ++I) {
156  MVT VT = MVT::SimpleValueType(I);
157  if (isTypeLegal(VT)) {
158  // Expand individual DIV and REMs into DIVREMs.
165 
166  // Support addition/subtraction with overflow.
169 
170  // Support addition/subtraction with carry.
173 
174  // Support carry in as value rather than glue.
177 
178  // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
179  // stores, putting a serialization instruction after the stores.
182 
183  // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
184  // available, or if the operand is constant.
186 
187  // Use POPCNT on z196 and above.
188  if (Subtarget.hasPopulationCount())
190  else
192 
193  // No special instructions for these.
196 
197  // Use *MUL_LOHI where possible instead of MULH*.
202 
203  // Only z196 and above have native support for conversions to unsigned.
204  // On z10, promoting to i64 doesn't generate an inexact condition for
205  // values that are outside the i32 range but in the i64 range, so use
206  // the default expansion.
207  if (!Subtarget.hasFPExtension())
209  }
210  }
211 
212  // Type legalization will convert 8- and 16-bit atomic operations into
213  // forms that operate on i32s (but still keeping the original memory VT).
214  // Lower them into full i32 operations.
226 
227  // Even though i128 is not a legal type, we still need to custom lower
228  // the atomic operations in order to exploit SystemZ instructions.
231 
232  // We can use the CC result of compare-and-swap to implement
233  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
237 
239 
240  // Traps are legal, as we will convert them to "j .+2".
242 
243  // z10 has instructions for signed but not unsigned FP conversion.
244  // Handle unsigned 32-bit types as signed 64-bit types.
245  if (!Subtarget.hasFPExtension()) {
248  }
249 
250  // We have native support for a 64-bit CTLZ, via FLOGR.
254 
255  // On arch13 we have native support for a 64-bit CTPOP.
256  if (Subtarget.hasMiscellaneousExtensions3()) {
259  }
260 
261  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
263 
264  // FIXME: Can we support these natively?
268 
269  // We have native instructions for i8, i16 and i32 extensions, but not i1.
271  for (MVT VT : MVT::integer_valuetypes()) {
275  }
276 
277  // Handle the various types of symbolic address.
283 
284  // We need to handle dynamic allocations specially because of the
285  // 160-byte area at the bottom of the stack.
288 
289  // Use custom expanders so that we can force the function to use
290  // a frame pointer.
293 
294  // Handle prefetches with PFD or PFDRL.
296 
297  for (MVT VT : MVT::vector_valuetypes()) {
298  // Assume by default that all vector operations need to be expanded.
299  for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
300  if (getOperationAction(Opcode, VT) == Legal)
301  setOperationAction(Opcode, VT, Expand);
302 
303  // Likewise all truncating stores and extending loads.
304  for (MVT InnerVT : MVT::vector_valuetypes()) {
305  setTruncStoreAction(VT, InnerVT, Expand);
306  setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
307  setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
308  setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
309  }
310 
311  if (isTypeLegal(VT)) {
312  // These operations are legal for anything that can be stored in a
313  // vector register, even if there is no native support for the format
314  // as such. In particular, we can do these for v4f32 even though there
315  // are no specific instructions for that format.
321 
322  // Likewise, except that we need to replace the nodes with something
323  // more specific.
326  }
327  }
328 
329  // Handle integer vector types.
330  for (MVT VT : MVT::integer_vector_valuetypes()) {
331  if (isTypeLegal(VT)) {
332  // These operations have direct equivalents.
337  if (VT != MVT::v2i64)
342  if (Subtarget.hasVectorEnhancements1())
344  else
348 
349  // Convert a GPR scalar to a vector by inserting it into element 0.
351 
352  // Use a series of unpacks for extensions.
355 
356  // Detect shifts by a scalar amount and convert them into
357  // V*_BY_SCALAR.
361 
362  // At present ROTL isn't matched by DAGCombiner. ROTR should be
363  // converted into ROTL.
366 
367  // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
368  // and inverting the result as necessary.
370  }
371  }
372 
373  if (Subtarget.hasVector()) {
374  // There should be no need to check for float types other than v2f64
375  // since <2 x f32> isn't a legal type.
384  }
385 
386  if (Subtarget.hasVectorEnhancements2()) {
395  }
396 
397  // Handle floating-point types.
398  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
400  ++I) {
401  MVT VT = MVT::SimpleValueType(I);
402  if (isTypeLegal(VT)) {
403  // We can use FI for FRINT.
405 
406  // We can use the extended form of FI for other rounding operations.
407  if (Subtarget.hasFPExtension()) {
413  }
414 
415  // No special instructions for these.
421 
422  // Handle constrained floating-point operations.
432  if (Subtarget.hasFPExtension()) {
438  }
439  }
440  }
441 
442  // Handle floating-point vector types.
443  if (Subtarget.hasVector()) {
444  // Scalar-to-vector conversion is just a subreg.
447 
448  // Some insertions and extractions can be done directly but others
449  // need to go via integers.
454 
455  // These operations have direct equivalents.
470 
471  // Handle constrained floating-point operations.
484  }
485 
486  // The vector enhancements facility 1 has instructions for these.
487  if (Subtarget.hasVectorEnhancements1()) {
502 
507 
512 
517 
522 
527 
528  // Handle constrained floating-point operations.
541  for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
542  MVT::v4f32, MVT::v2f64 }) {
545  }
546  }
547 
548  // We have fused multiply-addition for f32 and f64 but not f128.
551  if (Subtarget.hasVectorEnhancements1())
553  else
555 
556  // We don't have a copysign instruction on vector registers.
557  if (Subtarget.hasVectorEnhancements1())
559 
560  // Needed so that we don't try to implement f128 constant loads using
561  // a load-and-extend of a f80 constant (in cases where the constant
562  // would fit in an f80).
563  for (MVT VT : MVT::fp_valuetypes())
565 
566  // We don't have extending load instruction on vector registers.
567  if (Subtarget.hasVectorEnhancements1()) {
570  }
571 
572  // Floating-point truncation and stores need to be done separately.
576 
577  // We have 64-bit FPR<->GPR moves, but need special handling for
578  // 32-bit forms.
579  if (!Subtarget.hasVector()) {
582  }
583 
584  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
585  // structure, but VAEND is a no-op.
589 
590  // Codes for which we want to perform some z-specific combinations.
605 
606  // Handle intrinsics.
609 
610  // We want to use MVC in preference to even a single load/store pair.
611  MaxStoresPerMemcpy = 0;
613 
614  // The main memset sequence is a byte store followed by an MVC.
615  // Two STC or MV..I stores win over that, but the kind of fused stores
616  // generated by target-independent code don't when the byte value is
617  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
618  // than "STC;MVC". Handle the choice in target-specific code instead.
619  MaxStoresPerMemset = 0;
621 }
622 
624  LLVMContext &, EVT VT) const {
625  if (!VT.isVector())
626  return MVT::i32;
628 }
629 
631  VT = VT.getScalarType();
632 
633  if (!VT.isSimple())
634  return false;
635 
636  switch (VT.getSimpleVT().SimpleTy) {
637  case MVT::f32:
638  case MVT::f64:
639  return true;
640  case MVT::f128:
641  return Subtarget.hasVectorEnhancements1();
642  default:
643  break;
644  }
645 
646  return false;
647 }
648 
649 // Return true if the constant can be generated with a vector instruction,
650 // such as VGM, VGMB or VREPI.
652  const SystemZSubtarget &Subtarget) {
653  const SystemZInstrInfo *TII =
654  static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
655  if (!Subtarget.hasVector() ||
656  (isFP128 && !Subtarget.hasVectorEnhancements1()))
657  return false;
658 
659  // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
660  // preferred way of creating all-zero and all-one vectors so give it
661  // priority over other methods below.
662  unsigned Mask = 0;
663  unsigned I = 0;
664  for (; I < SystemZ::VectorBytes; ++I) {
665  uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
666  if (Byte == 0xff)
667  Mask |= 1ULL << I;
668  else if (Byte != 0)
669  break;
670  }
671  if (I == SystemZ::VectorBytes) {
672  Opcode = SystemZISD::BYTE_MASK;
673  OpVals.push_back(Mask);
674  VecVT = MVT::getVectorVT(MVT::getIntegerVT(8), 16);
675  return true;
676  }
677 
678  if (SplatBitSize > 64)
679  return false;
680 
681  auto tryValue = [&](uint64_t Value) -> bool {
682  // Try VECTOR REPLICATE IMMEDIATE
683  int64_t SignedValue = SignExtend64(Value, SplatBitSize);
684  if (isInt<16>(SignedValue)) {
685  OpVals.push_back(((unsigned) SignedValue));
686  Opcode = SystemZISD::REPLICATE;
687  VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
688  SystemZ::VectorBits / SplatBitSize);
689  return true;
690  }
691  // Try VECTOR GENERATE MASK
692  unsigned Start, End;
693  if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
694  // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
695  // denoting 1 << 63 and 63 denoting 1. Convert them to bit numbers for
696  // a SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
697  OpVals.push_back(Start - (64 - SplatBitSize));
698  OpVals.push_back(End - (64 - SplatBitSize));
699  Opcode = SystemZISD::ROTATE_MASK;
700  VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
701  SystemZ::VectorBits / SplatBitSize);
702  return true;
703  }
704  return false;
705  };
706 
707  // First try assuming that any undefined bits above the highest set bit
708  // and below the lowest set bit are 1s. This increases the likelihood of
709  // being able to use a sign-extended element value in VECTOR REPLICATE
710  // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
711  uint64_t SplatBitsZ = SplatBits.getZExtValue();
712  uint64_t SplatUndefZ = SplatUndef.getZExtValue();
713  uint64_t Lower =
714  (SplatUndefZ & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
715  uint64_t Upper =
716  (SplatUndefZ & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
717  if (tryValue(SplatBitsZ | Upper | Lower))
718  return true;
719 
720  // Now try assuming that any undefined bits between the first and
721  // last defined set bits are set. This increases the chances of
722  // using a non-wraparound mask.
723  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
724  return tryValue(SplatBitsZ | Middle);
725 }
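// Worked example (illustrative, not part of the original source): an
// all-ones v16i8 splat takes the BYTE_MASK path first, with Mask == 0xffff
// covering all 16 bytes. A 16-bit splat of 0xfffe instead sign-extends to
// -2, passes isInt<16>, and selects SystemZISD::REPLICATE with
// VecVT == v8i16 (SystemZ::VectorBits / 16 elements). A splat that is not
// a small signed immediate but forms a contiguous (possibly wrapping) run
// of ones can still be matched as SystemZISD::ROTATE_MASK via isRxSBGMask.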
726 
728  IntBits = FPImm.bitcastToAPInt().zextOrSelf(128);
729  isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
730 
731  // Find the smallest splat.
732  SplatBits = FPImm.bitcastToAPInt();
733  unsigned Width = SplatBits.getBitWidth();
734  while (Width > 8) {
735  unsigned HalfSize = Width / 2;
736  APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
737  APInt LowValue = SplatBits.trunc(HalfSize);
738 
739  // If the two halves do not match, stop here.
740  if (HighValue != LowValue || 8 > HalfSize)
741  break;
742 
743  SplatBits = HighValue;
744  Width = HalfSize;
745  }
746  SplatUndef = 0;
747  SplatBitSize = Width;
748 }
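// Worked example (illustrative): for the 64-bit pattern 0x00ff00ff00ff00ff
// the loop halves 64 -> 32 -> 16 because both halves keep matching, then
// stops since the 8-bit halves (0x00 vs 0xff) differ, leaving
// SplatBits == 0x00ff and SplatBitSize == 16.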
749 
751  assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
752  bool HasAnyUndefs;
753 
754  // Get IntBits by finding the 128 bit splat.
755  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
756  true);
757 
758  // Get SplatBits by finding the 8 bit or greater splat.
759  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
760  true);
761 }
762 
764  bool ForCodeSize) const {
765  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
766  if (Imm.isZero() || Imm.isNegZero())
767  return true;
768 
769  return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
770 }
771 
773  // We can use CGFI or CLGFI.
774  return isInt<32>(Imm) || isUInt<32>(Imm);
775 }
776 
778  // We can use ALGFI or SLGFI.
779  return isUInt<32>(Imm) || isUInt<32>(-Imm);
780 }
781 
783  EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
784  // Unaligned accesses should never be slower than the expanded version.
785  // We check specifically for aligned accesses in the few cases where
786  // they are required.
787  if (Fast)
788  *Fast = true;
789  return true;
790 }
791 
792 // Information about the addressing mode for a memory access.
794  // True if a long displacement is supported.
796 
797  // True if use of index register is supported.
798  bool IndexReg;
799 
800  AddressingMode(bool LongDispl, bool IdxReg) :
801  LongDisplacement(LongDispl), IndexReg(IdxReg) {}
802 };
803 
804 // Return the desired addressing mode for a Load whose only use (in the
805 // same block) is a Store.
806 static AddressingMode getLoadStoreAddrMode(bool HasVector,
807  Type *Ty) {
808  // With vector support a Load->Store combination may be combined to either
809  // an MVC or vector operations and it seems to work best to allow the
810  // vector addressing mode.
811  if (HasVector)
812  return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
813 
814  // Otherwise only the MVC case is special.
815  bool MVC = Ty->isIntegerTy(8);
816  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
817 }
818 
819 // Return the addressing mode which seems most desirable given an LLVM
820 // Instruction pointer.
821 static AddressingMode
823  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
824  switch (II->getIntrinsicID()) {
825  default: break;
826  case Intrinsic::memset:
827  case Intrinsic::memmove:
828  case Intrinsic::memcpy:
829  return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
830  }
831  }
832 
833  if (isa<LoadInst>(I) && I->hasOneUse()) {
834  auto *SingleUser = dyn_cast<Instruction>(*I->user_begin());
835  if (SingleUser->getParent() == I->getParent()) {
836  if (isa<ICmpInst>(SingleUser)) {
837  if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
838  if (C->getBitWidth() <= 64 &&
839  (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
840  // Comparison of memory with 16 bit signed / unsigned immediate
841  return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
842  } else if (isa<StoreInst>(SingleUser))
843  // Load->Store
844  return getLoadStoreAddrMode(HasVector, I->getType());
845  }
846  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
847  if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
848  if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
849  // Load->Store
850  return getLoadStoreAddrMode(HasVector, LoadI->getType());
851  }
852 
853  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
854 
855  // * Use LDE instead of LE/LEY for z13 to avoid partial register
856  // dependencies (LDE only supports small offsets).
857  // * Utilize the vector registers to hold floating point
858  // values (vector load / store instructions only support small
859  // offsets).
860 
861  Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
862  I->getOperand(0)->getType());
863  bool IsFPAccess = MemAccessTy->isFloatingPointTy();
864  bool IsVectorAccess = MemAccessTy->isVectorTy();
865 
866  // A store of an extracted vector element will be combined into a VSTE type
867  // instruction.
868  if (!IsVectorAccess && isa<StoreInst>(I)) {
869  Value *DataOp = I->getOperand(0);
870  if (isa<ExtractElementInst>(DataOp))
871  IsVectorAccess = true;
872  }
873 
874  // A load which gets inserted into a vector element will be combined into a
875  // VLE type instruction.
876  if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
877  User *LoadUser = *I->user_begin();
878  if (isa<InsertElementInst>(LoadUser))
879  IsVectorAccess = true;
880  }
881 
882  if (IsFPAccess || IsVectorAccess)
883  return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
884  }
885 
886  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
887 }
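// Illustrative use (hypothetical IR, not from the original source): for
//   %f = load float, float* %p
//   store float %f, float* %q
// in one block on a vector-capable subtarget, the Load->Store path above
// returns AddressingMode(false/*LongDispl*/, true/*IdxReg*/); a load whose
// only same-block user is an integer compare against a 16-bit immediate
// gets (false, false); anything unrecognized falls back to (true, true).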
888 
890  const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
891  // Punt on globals for now, although they can be used in limited
892  // RELATIVE LONG cases.
893  if (AM.BaseGV)
894  return false;
895 
896  // Require a 20-bit signed offset.
897  if (!isInt<20>(AM.BaseOffs))
898  return false;
899 
900  AddressingMode SupportedAM(true, true);
901  if (I != nullptr)
902  SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());
903 
904  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
905  return false;
906 
907  if (!SupportedAM.IndexReg)
908  // No indexing allowed.
909  return AM.Scale == 0;
910  else
911  // Indexing is OK but no scale factor can be applied.
912  return AM.Scale == 0 || AM.Scale == 1;
913 }
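// Illustrative examples (hypothetical AddrMode values): a mode with a
// global base (AM.BaseGV) is rejected, as is a displacement such as 600000
// that does not fit in a signed 20-bit field; "base + index" (Scale == 1)
// is accepted when the chosen AddressingMode allows an index register,
// while any scaled index (Scale >= 2) is always rejected.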
914 
916  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
917  return false;
918  unsigned FromBits = FromType->getPrimitiveSizeInBits();
919  unsigned ToBits = ToType->getPrimitiveSizeInBits();
920  return FromBits > ToBits;
921 }
922 
924  if (!FromVT.isInteger() || !ToVT.isInteger())
925  return false;
926  unsigned FromBits = FromVT.getSizeInBits();
927  unsigned ToBits = ToVT.getSizeInBits();
928  return FromBits > ToBits;
929 }
930 
931 //===----------------------------------------------------------------------===//
932 // Inline asm support
933 //===----------------------------------------------------------------------===//
934 
937  if (Constraint.size() == 1) {
938  switch (Constraint[0]) {
939  case 'a': // Address register
940  case 'd': // Data register (equivalent to 'r')
941  case 'f': // Floating-point register
942  case 'h': // High-part register
943  case 'r': // General-purpose register
944  case 'v': // Vector register
945  return C_RegisterClass;
946 
947  case 'Q': // Memory with base and unsigned 12-bit displacement
948  case 'R': // Likewise, plus an index
949  case 'S': // Memory with base and signed 20-bit displacement
950  case 'T': // Likewise, plus an index
951  case 'm': // Equivalent to 'T'.
952  return C_Memory;
953 
954  case 'I': // Unsigned 8-bit constant
955  case 'J': // Unsigned 12-bit constant
956  case 'K': // Signed 16-bit constant
957  case 'L': // Signed 20-bit displacement (on all targets we support)
958  case 'M': // 0x7fffffff
959  return C_Other;
960 
961  default:
962  break;
963  }
964  }
965  return TargetLowering::getConstraintType(Constraint);
966 }
967 
970  const char *constraint) const {
971  ConstraintWeight weight = CW_Invalid;
972  Value *CallOperandVal = info.CallOperandVal;
973  // If we don't have a value, we can't do a match,
974  // but allow it at the lowest weight.
975  if (!CallOperandVal)
976  return CW_Default;
977  Type *type = CallOperandVal->getType();
978  // Look at the constraint type.
979  switch (*constraint) {
980  default:
981  weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
982  break;
983 
984  case 'a': // Address register
985  case 'd': // Data register (equivalent to 'r')
986  case 'h': // High-part register
987  case 'r': // General-purpose register
988  if (CallOperandVal->getType()->isIntegerTy())
989  weight = CW_Register;
990  break;
991 
992  case 'f': // Floating-point register
993  if (type->isFloatingPointTy())
994  weight = CW_Register;
995  break;
996 
997  case 'v': // Vector register
998  if ((type->isVectorTy() || type->isFloatingPointTy()) &&
999  Subtarget.hasVector())
1000  weight = CW_Register;
1001  break;
1002 
1003  case 'I': // Unsigned 8-bit constant
1004  if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1005  if (isUInt<8>(C->getZExtValue()))
1006  weight = CW_Constant;
1007  break;
1008 
1009  case 'J': // Unsigned 12-bit constant
1010  if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1011  if (isUInt<12>(C->getZExtValue()))
1012  weight = CW_Constant;
1013  break;
1014 
1015  case 'K': // Signed 16-bit constant
1016  if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1017  if (isInt<16>(C->getSExtValue()))
1018  weight = CW_Constant;
1019  break;
1020 
1021  case 'L': // Signed 20-bit displacement (on all targets we support)
1022  if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1023  if (isInt<20>(C->getSExtValue()))
1024  weight = CW_Constant;
1025  break;
1026 
1027  case 'M': // 0x7fffffff
1028  if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1029  if (C->getZExtValue() == 0x7fffffff)
1030  weight = CW_Constant;
1031  break;
1032  }
1033  return weight;
1034 }
1035 
1036 // Parse a "{tNNN}" register constraint for which the register type "t"
1037 // has already been verified. MC is the class associated with "t" and
1038 // Map maps 0-based register numbers to LLVM register numbers.
1039 static std::pair<unsigned, const TargetRegisterClass *>
1041  const unsigned *Map, unsigned Size) {
1042  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
1043  if (isdigit(Constraint[2])) {
1044  unsigned Index;
1045  bool Failed =
1046  Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
1047  if (!Failed && Index < Size && Map[Index])
1048  return std::make_pair(Map[Index], RC);
1049  }
1050  return std::make_pair(0U, nullptr);
1051 }
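// For example (illustrative): the constraint "{r5}" with VT == MVT::i64 is
// parsed as Index == 5 and mapped through SystemZMC::GR64Regs, pairing the
// 64-bit GPR with GR64BitRegClass; "{r99}" fails the Index < Size check
// and yields std::make_pair(0U, nullptr), i.e. an error.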
1052 
1053 std::pair<unsigned, const TargetRegisterClass *>
1055  const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
1056  if (Constraint.size() == 1) {
1057  // GCC Constraint Letters
1058  switch (Constraint[0]) {
1059  default: break;
1060  case 'd': // Data register (equivalent to 'r')
1061  case 'r': // General-purpose register
1062  if (VT == MVT::i64)
1063  return std::make_pair(0U, &SystemZ::GR64BitRegClass);
1064  else if (VT == MVT::i128)
1065  return std::make_pair(0U, &SystemZ::GR128BitRegClass);
1066  return std::make_pair(0U, &SystemZ::GR32BitRegClass);
1067 
1068  case 'a': // Address register
1069  if (VT == MVT::i64)
1070  return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
1071  else if (VT == MVT::i128)
1072  return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
1073  return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
1074 
1075  case 'h': // High-part register (an LLVM extension)
1076  return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
1077 
1078  case 'f': // Floating-point register
1079  if (VT == MVT::f64)
1080  return std::make_pair(0U, &SystemZ::FP64BitRegClass);
1081  else if (VT == MVT::f128)
1082  return std::make_pair(0U, &SystemZ::FP128BitRegClass);
1083  return std::make_pair(0U, &SystemZ::FP32BitRegClass);
1084 
1085  case 'v': // Vector register
1086  if (Subtarget.hasVector()) {
1087  if (VT == MVT::f32)
1088  return std::make_pair(0U, &SystemZ::VR32BitRegClass);
1089  if (VT == MVT::f64)
1090  return std::make_pair(0U, &SystemZ::VR64BitRegClass);
1091  return std::make_pair(0U, &SystemZ::VR128BitRegClass);
1092  }
1093  break;
1094  }
1095  }
1096  if (Constraint.size() > 0 && Constraint[0] == '{') {
1097  // We need to override the default register parsing for GPRs and FPRs
1098  // because the interpretation depends on VT. The internal names of
1099  // the registers are also different from the external names
1100  // (F0D and F0S instead of F0, etc.).
1101  if (Constraint[1] == 'r') {
1102  if (VT == MVT::i32)
1103  return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
1104  SystemZMC::GR32Regs, 16);
1105  if (VT == MVT::i128)
1106  return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
1107  SystemZMC::GR128Regs, 16);
1108  return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
1109  SystemZMC::GR64Regs, 16);
1110  }
1111  if (Constraint[1] == 'f') {
1112  if (VT == MVT::f32)
1113  return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
1114  SystemZMC::FP32Regs, 16);
1115  if (VT == MVT::f128)
1116  return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
1117  SystemZMC::FP128Regs, 16);
1118  return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
1119  SystemZMC::FP64Regs, 16);
1120  }
1121  if (Constraint[1] == 'v') {
1122  if (VT == MVT::f32)
1123  return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
1124  SystemZMC::VR32Regs, 32);
1125  if (VT == MVT::f64)
1126  return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
1127  SystemZMC::VR64Regs, 32);
1128  return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
1129  SystemZMC::VR128Regs, 32);
1130  }
1131  }
1132  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1133 }
1134 
1136 LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
1137  std::vector<SDValue> &Ops,
1138  SelectionDAG &DAG) const {
1139  // Only support length 1 constraints for now.
1140  if (Constraint.length() == 1) {
1141  switch (Constraint[0]) {
1142  case 'I': // Unsigned 8-bit constant
1143  if (auto *C = dyn_cast<ConstantSDNode>(Op))
1144  if (isUInt<8>(C->getZExtValue()))
1145  Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1146  Op.getValueType()));
1147  return;
1148 
1149  case 'J': // Unsigned 12-bit constant
1150  if (auto *C = dyn_cast<ConstantSDNode>(Op))
1151  if (isUInt<12>(C->getZExtValue()))
1152  Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1153  Op.getValueType()));
1154  return;
1155 
1156  case 'K': // Signed 16-bit constant
1157  if (auto *C = dyn_cast<ConstantSDNode>(Op))
1158  if (isInt<16>(C->getSExtValue()))
1159  Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
1160  Op.getValueType()));
1161  return;
1162 
1163  case 'L': // Signed 20-bit displacement (on all targets we support)
1164  if (auto *C = dyn_cast<ConstantSDNode>(Op))
1165  if (isInt<20>(C->getSExtValue()))
1166  Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
1167  Op.getValueType()));
1168  return;
1169 
1170  case 'M': // 0x7fffffff
1171  if (auto *C = dyn_cast<ConstantSDNode>(Op))
1172  if (C->getZExtValue() == 0x7fffffff)
1173  Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1174  Op.getValueType()));
1175  return;
1176  }
1177  }
1178  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
1179 }
1180 
1181 //===----------------------------------------------------------------------===//
1182 // Calling conventions
1183 //===----------------------------------------------------------------------===//
1184 
1185 #include "SystemZGenCallingConv.inc"
1186 
1188  CallingConv::ID) const {
1189  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
1190  SystemZ::R14D, 0 };
1191  return ScratchRegs;
1192 }
1193 
1195  Type *ToType) const {
1196  return isTruncateFree(FromType, ToType);
1197 }
1198 
1200  return CI->isTailCall();
1201 }
1202 
1203 // We do not yet support 128-bit single-element vector types. If the user
1204 // attempts to use such types as function argument or return type, prefer
1205 // to error out instead of emitting code violating the ABI.
1206 static void VerifyVectorType(MVT VT, EVT ArgVT) {
1207  if (ArgVT.isVector() && !VT.isVector())
1208  report_fatal_error("Unsupported vector argument or return type");
1209 }
1210 
1212  for (unsigned i = 0; i < Ins.size(); ++i)
1213  VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
1214 }
1215 
1217  for (unsigned i = 0; i < Outs.size(); ++i)
1218  VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
1219 }
1220 
1221 // Value is a value that has been passed to us in the location described by VA
1222 // (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
1223 // any loads onto Chain.
1225  CCValAssign &VA, SDValue Chain,
1226  SDValue Value) {
1227  // If the argument has been promoted from a smaller type, insert an
1228  // assertion to capture this.
1229  if (VA.getLocInfo() == CCValAssign::SExt)
1230  Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
1231  DAG.getValueType(VA.getValVT()));
1232  else if (VA.getLocInfo() == CCValAssign::ZExt)
1233  Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
1234  DAG.getValueType(VA.getValVT()));
1235 
1236  if (VA.isExtInLoc())
1237  Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
1238  else if (VA.getLocInfo() == CCValAssign::BCvt) {
1239  // If this is a short vector argument loaded from the stack,
1240  // extend from i64 to full vector size and then bitcast.
1241  assert(VA.getLocVT() == MVT::i64);
1242  assert(VA.getValVT().isVector());
1243  Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
1244  Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
1245  } else
1246  assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
1247  return Value;
1248 }
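// Worked example (illustrative): an i32 value that was sign-extended into
// a wider register location (CCValAssign::SExt) first gets an
// ISD::AssertSext node recording that the upper bits are sign copies, and
// is then truncated back to i32; short vector arguments spilled as i64
// instead take the BCvt path above, which widens to v2i64 before the
// bitcast to the original value type.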
1249 
1250 // Value is a value of type VA.getValVT() that we need to copy into
1251 // the location described by VA. Return a copy of Value converted to
1252 // VA.getLocVT(). The caller is responsible for handling indirect values.
1254  CCValAssign &VA, SDValue Value) {
1255  switch (VA.getLocInfo()) {
1256  case CCValAssign::SExt:
1257  return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
1258  case CCValAssign::ZExt:
1259  return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
1260  case CCValAssign::AExt:
1261  return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
1262  case CCValAssign::BCvt:
1263  // If this is a short vector argument to be stored to the stack,
1264  // bitcast to v2i64 and then extract first element.
1265  assert(VA.getLocVT() == MVT::i64);
1266  assert(VA.getValVT().isVector());
1267  Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
1268  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
1269  DAG.getConstant(0, DL, MVT::i32));
1270  case CCValAssign::Full:
1271  return Value;
1272  default:
1273  llvm_unreachable("Unhandled getLocInfo()");
1274  }
1275 }
1276 
1278  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1279  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1280  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1281  MachineFunction &MF = DAG.getMachineFunction();
1282  MachineFrameInfo &MFI = MF.getFrameInfo();
1284  SystemZMachineFunctionInfo *FuncInfo =
1286  auto *TFL =
1287  static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
1288  EVT PtrVT = getPointerTy(DAG.getDataLayout());
1289 
1290  // Detect unsupported vector argument types.
1291  if (Subtarget.hasVector())
1292  VerifyVectorTypes(Ins);
1293 
1294  // Assign locations to all of the incoming arguments.
1296  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1297  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
1298 
1299  unsigned NumFixedGPRs = 0;
1300  unsigned NumFixedFPRs = 0;
1301  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1302  SDValue ArgValue;
1303  CCValAssign &VA = ArgLocs[I];
1304  EVT LocVT = VA.getLocVT();
1305  if (VA.isRegLoc()) {
1306  // Arguments passed in registers
1307  const TargetRegisterClass *RC;
1308  switch (LocVT.getSimpleVT().SimpleTy) {
1309  default:
1310  // Integers smaller than i64 should be promoted to i64.
1311  llvm_unreachable("Unexpected argument type");
1312  case MVT::i32:
1313  NumFixedGPRs += 1;
1314  RC = &SystemZ::GR32BitRegClass;
1315  break;
1316  case MVT::i64:
1317  NumFixedGPRs += 1;
1318  RC = &SystemZ::GR64BitRegClass;
1319  break;
1320  case MVT::f32:
1321  NumFixedFPRs += 1;
1322  RC = &SystemZ::FP32BitRegClass;
1323  break;
1324  case MVT::f64:
1325  NumFixedFPRs += 1;
1326  RC = &SystemZ::FP64BitRegClass;
1327  break;
1328  case MVT::v16i8:
1329  case MVT::v8i16:
1330  case MVT::v4i32:
1331  case MVT::v2i64:
1332  case MVT::v4f32:
1333  case MVT::v2f64:
1334  RC = &SystemZ::VR128BitRegClass;
1335  break;
1336  }
1337 
1338  unsigned VReg = MRI.createVirtualRegister(RC);
1339  MRI.addLiveIn(VA.getLocReg(), VReg);
1340  ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
1341  } else {
1342  assert(VA.isMemLoc() && "Argument not register or memory");
1343 
1344  // Create the frame index object for this incoming parameter.
1345  int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
1346  VA.getLocMemOffset(), true);
1347 
1348  // Create the SelectionDAG nodes corresponding to a load
1349  // from this parameter. Unpromoted ints and floats are
1350  // passed as right-justified 8-byte values.
1351  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1352  if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
1353  FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
1354  DAG.getIntPtrConstant(4, DL));
1355  ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
1357  }
1358 
1359  // Convert the value of the argument register into the value that's
1360  // being passed.
1361  if (VA.getLocInfo() == CCValAssign::Indirect) {
1362  InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
1363  MachinePointerInfo()));
1364  // If the original argument was split (e.g. i128), we need
1365  // to load all parts of it here (using the same address).
1366  unsigned ArgIndex = Ins[I].OrigArgIndex;
1367  assert (Ins[I].PartOffset == 0);
1368  while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
1369  CCValAssign &PartVA = ArgLocs[I + 1];
1370  unsigned PartOffset = Ins[I + 1].PartOffset;
1371  SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1372  DAG.getIntPtrConstant(PartOffset, DL));
1373  InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1374  MachinePointerInfo()));
1375  ++I;
1376  }
1377  } else
1378  InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
1379  }
1380 
1381  if (IsVarArg) {
1382  // Save the number of non-varargs registers for later use by va_start, etc.
1383  FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
1384  FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);
1385 
1386  // Likewise the address (in the form of a frame index) of where the
1387  // first stack vararg would be. The 1-byte size here is arbitrary.
1388  int64_t StackSize = CCInfo.getNextStackOffset();
1389  FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
1390 
1391  // ...and a similar frame index for the caller-allocated save area
1392  // that will be used to store the incoming registers.
1393  int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
1394  unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
1395  FuncInfo->setRegSaveFrameIndex(RegSaveIndex);
1396 
1397  // Store the FPR varargs in the reserved frame slots. (We store the
1398  // GPRs as part of the prologue.)
1399  if (NumFixedFPRs < SystemZ::NumArgFPRs) {
1400  SDValue MemOps[SystemZ::NumArgFPRs];
1401  for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
1402  unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
1403  int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
1404  SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1405  unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
1406  &SystemZ::FP64BitRegClass);
1407  SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
1408  MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
1410  }
1411  // Join the stores, which are independent of one another.
1412  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1413  makeArrayRef(&MemOps[NumFixedFPRs],
1414  SystemZ::NumArgFPRs-NumFixedFPRs));
1415  }
1416  }
1417 
1418  return Chain;
1419 }
1420 
1421 static bool canUseSiblingCall(const CCState &ArgCCInfo,
1424  // Punt if there are any indirect or stack arguments, or if the call
1425  // needs the callee-saved argument register R6, or if the call uses
1426  // the callee-saved register arguments SwiftSelf and SwiftError.
1427  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1428  CCValAssign &VA = ArgLocs[I];
1429  if (VA.getLocInfo() == CCValAssign::Indirect)
1430  return false;
1431  if (!VA.isRegLoc())
1432  return false;
1433  unsigned Reg = VA.getLocReg();
1434  if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
1435  return false;
1436  if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
1437  return false;
1438  }
1439  return true;
1440 }
1441 
1442 SDValue
1444  SmallVectorImpl<SDValue> &InVals) const {
1445  SelectionDAG &DAG = CLI.DAG;
1446  SDLoc &DL = CLI.DL;
1448  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1450  SDValue Chain = CLI.Chain;
1451  SDValue Callee = CLI.Callee;
1452  bool &IsTailCall = CLI.IsTailCall;
1453  CallingConv::ID CallConv = CLI.CallConv;
1454  bool IsVarArg = CLI.IsVarArg;
1455  MachineFunction &MF = DAG.getMachineFunction();
1456  EVT PtrVT = getPointerTy(MF.getDataLayout());
1457 
1458  // Detect unsupported vector argument and return types.
1459  if (Subtarget.hasVector()) {
1460  VerifyVectorTypes(Outs);
1461  VerifyVectorTypes(Ins);
1462  }
1463 
1464  // Analyze the operands of the call, assigning locations to each operand.
1466  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1467  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
1468 
1469  // We don't support GuaranteedTailCallOpt, only automatically-detected
1470  // sibling calls.
1471  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
1472  IsTailCall = false;
1473 
1474  // Get a count of how many bytes are to be pushed on the stack.
1475  unsigned NumBytes = ArgCCInfo.getNextStackOffset();
1476 
1477  // Mark the start of the call.
1478  if (!IsTailCall)
1479  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
1480 
1481  // Copy argument values to their designated locations.
1483  SmallVector<SDValue, 8> MemOpChains;
1484  SDValue StackPtr;
1485  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1486  CCValAssign &VA = ArgLocs[I];
1487  SDValue ArgValue = OutVals[I];
1488 
1489  if (VA.getLocInfo() == CCValAssign::Indirect) {
1490  // Store the argument in a stack slot and pass its address.
1491  SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
1492  int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1493  MemOpChains.push_back(
1494  DAG.getStore(Chain, DL, ArgValue, SpillSlot,
1496  // If the original argument was split (e.g. i128), we need
1497  // to store all parts of it here (and pass just one address).
1498  unsigned ArgIndex = Outs[I].OrigArgIndex;
1499  assert (Outs[I].PartOffset == 0);
1500  while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
1501  SDValue PartValue = OutVals[I + 1];
1502  unsigned PartOffset = Outs[I + 1].PartOffset;
1503  SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
1504  DAG.getIntPtrConstant(PartOffset, DL));
1505  MemOpChains.push_back(
1506  DAG.getStore(Chain, DL, PartValue, Address,
1508  ++I;
1509  }
1510  ArgValue = SpillSlot;
1511  } else
1512  ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);
1513 
1514  if (VA.isRegLoc())
1515  // Queue up the argument copies and emit them at the end.
1516  RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
1517  else {
1518  assert(VA.isMemLoc() && "Argument not register or memory");
1519 
1520  // Work out the address of the stack slot. Unpromoted ints and
1521  // floats are passed as right-justified 8-byte values.
1522  if (!StackPtr.getNode())
1523  StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
1525  if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
1526  Offset += 4;
1527  SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
1528  DAG.getIntPtrConstant(Offset, DL));
1529 
1530  // Emit the store.
1531  MemOpChains.push_back(
1532  DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
1533  }
1534  }
1535 
1536  // Join the stores, which are independent of one another.
1537  if (!MemOpChains.empty())
1538  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1539 
1540  // Accept direct calls by converting symbolic call addresses to the
1541  // associated Target* opcodes. Force %r1 to be used for indirect
1542  // tail calls.
1543  SDValue Glue;
1544  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1545  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
1546  Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
1547  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1548  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
1549  Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
1550  } else if (IsTailCall) {
1551  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
1552  Glue = Chain.getValue(1);
1553  Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
1554  }
1555 
1556  // Build a sequence of copy-to-reg nodes, chained and glued together.
1557  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
1558  Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
1559  RegsToPass[I].second, Glue);
1560  Glue = Chain.getValue(1);
1561  }
1562 
1563  // The first call operand is the chain and the second is the target address.
1565  Ops.push_back(Chain);
1566  Ops.push_back(Callee);
1567 
1568  // Add argument registers to the end of the list so that they are
1569  // known live into the call.
1570  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
1571  Ops.push_back(DAG.getRegister(RegsToPass[I].first,
1572  RegsToPass[I].second.getValueType()));
1573 
1574  // Add a register mask operand representing the call-preserved registers.
1575  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1576  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
1577  assert(Mask && "Missing call preserved mask for calling convention");
1578  Ops.push_back(DAG.getRegisterMask(Mask));
1579 
1580  // Glue the call to the argument copies, if any.
1581  if (Glue.getNode())
1582  Ops.push_back(Glue);
1583 
1584  // Emit the call.
1585  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1586  if (IsTailCall)
1587  return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
1588  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
1589  Glue = Chain.getValue(1);
1590 
1591  // Mark the end of the call, which is glued to the call itself.
1592  Chain = DAG.getCALLSEQ_END(Chain,
1593  DAG.getConstant(NumBytes, DL, PtrVT, true),
1594  DAG.getConstant(0, DL, PtrVT, true),
1595  Glue, DL);
1596  Glue = Chain.getValue(1);
1597 
1598  // Assign locations to each value returned by this call.
1600  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
1601  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
1602 
1603  // Copy all of the result registers out of their specified physreg.
1604  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
1605  CCValAssign &VA = RetLocs[I];
1606 
1607  // Copy the value out, gluing the copy to the end of the call sequence.
1608  SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
1609  VA.getLocVT(), Glue);
1610  Chain = RetValue.getValue(1);
1611  Glue = RetValue.getValue(2);
1612 
1613  // Convert the value of the return register into the value that's
1614  // being returned.
1615  InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
1616  }
1617 
1618  return Chain;
1619 }
1620 
1623  MachineFunction &MF, bool isVarArg,
1624  const SmallVectorImpl<ISD::OutputArg> &Outs,
1625  LLVMContext &Context) const {
1626  // Detect unsupported vector return types.
1627  if (Subtarget.hasVector())
1628  VerifyVectorTypes(Outs);
1629 
1630  // Special case that we cannot easily detect in RetCC_SystemZ since
1631  // i128 is not a legal type.
1632  for (auto &Out : Outs)
1633  if (Out.ArgVT == MVT::i128)
1634  return false;
1635 
1637  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
1638  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
1639 }
1640 
1641 SDValue
1643  bool IsVarArg,
1644  const SmallVectorImpl<ISD::OutputArg> &Outs,
1645  const SmallVectorImpl<SDValue> &OutVals,
1646  const SDLoc &DL, SelectionDAG &DAG) const {
1647  MachineFunction &MF = DAG.getMachineFunction();
1648 
1649  // Detect unsupported vector return types.
1650  if (Subtarget.hasVector())
1651  VerifyVectorTypes(Outs);
1652 
1653  // Assign locations to each returned value.
1655  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
1656  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
1657 
1658  // Quick exit for void returns
1659  if (RetLocs.empty())
1660  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);
1661 
1662  // Copy the result values into the output registers.
1663  SDValue Glue;
1664  SmallVector<SDValue, 4> RetOps;
1665  RetOps.push_back(Chain);
1666  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
1667  CCValAssign &VA = RetLocs[I];
1668  SDValue RetValue = OutVals[I];
1669 
1670  // Make the return register live on exit.
1671  assert(VA.isRegLoc() && "Can only return in registers!");
1672 
1673  // Promote the value as required.
1674  RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);
1675 
1676  // Chain and glue the copies together.
1677  unsigned Reg = VA.getLocReg();
1678  Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
1679  Glue = Chain.getValue(1);
1680  RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
1681  }
1682 
1683  // Update chain and glue.
1684  RetOps[0] = Chain;
1685  if (Glue.getNode())
1686  RetOps.push_back(Glue);
1687 
1688  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
1689 }
1690 
1691 // Return true if Op is an intrinsic node with chain that returns the CC value
1692 // as its only (other) argument. Provide the associated SystemZISD opcode and
1693 // the mask of valid CC values if so.
1694 static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
1695  unsigned &CCValid) {
1696  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1697  switch (Id) {
1698  case Intrinsic::s390_tbegin:
1699  Opcode = SystemZISD::TBEGIN;
1700  CCValid = SystemZ::CCMASK_TBEGIN;
1701  return true;
1702 
1703  case Intrinsic::s390_tbegin_nofloat:
1704  Opcode = SystemZISD::TBEGIN_NOFLOAT;
1705  CCValid = SystemZ::CCMASK_TBEGIN;
1706  return true;
1707 
1708  case Intrinsic::s390_tend:
1709  Opcode = SystemZISD::TEND;
1710  CCValid = SystemZ::CCMASK_TEND;
1711  return true;
1712 
1713  default:
1714  return false;
1715  }
1716 }
1717 
1718 // Return true if Op is an intrinsic node without chain that returns the
1719 // CC value as its final argument. Provide the associated SystemZISD
1720 // opcode and the mask of valid CC values if so.
1721 static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
1722  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1723  switch (Id) {
1724  case Intrinsic::s390_vpkshs:
1725  case Intrinsic::s390_vpksfs:
1726  case Intrinsic::s390_vpksgs:
1727  Opcode = SystemZISD::PACKS_CC;
1728  CCValid = SystemZ::CCMASK_VCMP;
1729  return true;
1730 
1731  case Intrinsic::s390_vpklshs:
1732  case Intrinsic::s390_vpklsfs:
1733  case Intrinsic::s390_vpklsgs:
1734  Opcode = SystemZISD::PACKLS_CC;
1735  CCValid = SystemZ::CCMASK_VCMP;
1736  return true;
1737 
1738  case Intrinsic::s390_vceqbs:
1739  case Intrinsic::s390_vceqhs:
1740  case Intrinsic::s390_vceqfs:
1741  case Intrinsic::s390_vceqgs:
1742  Opcode = SystemZISD::VICMPES;
1743  CCValid = SystemZ::CCMASK_VCMP;
1744  return true;
1745 
1746  case Intrinsic::s390_vchbs:
1747  case Intrinsic::s390_vchhs:
1748  case Intrinsic::s390_vchfs:
1749  case Intrinsic::s390_vchgs:
1750  Opcode = SystemZISD::VICMPHS;
1751  CCValid = SystemZ::CCMASK_VCMP;
1752  return true;
1753 
1754  case Intrinsic::s390_vchlbs:
1755  case Intrinsic::s390_vchlhs:
1756  case Intrinsic::s390_vchlfs:
1757  case Intrinsic::s390_vchlgs:
1758  Opcode = SystemZISD::VICMPHLS;
1759  CCValid = SystemZ::CCMASK_VCMP;
1760  return true;
1761 
1762  case Intrinsic::s390_vtm:
1763  Opcode = SystemZISD::VTM;
1764  CCValid = SystemZ::CCMASK_VCMP;
1765  return true;
1766 
1767  case Intrinsic::s390_vfaebs:
1768  case Intrinsic::s390_vfaehs:
1769  case Intrinsic::s390_vfaefs:
1770  Opcode = SystemZISD::VFAE_CC;
1771  CCValid = SystemZ::CCMASK_ANY;
1772  return true;
1773 
1774  case Intrinsic::s390_vfaezbs:
1775  case Intrinsic::s390_vfaezhs:
1776  case Intrinsic::s390_vfaezfs:
1777  Opcode = SystemZISD::VFAEZ_CC;
1778  CCValid = SystemZ::CCMASK_ANY;
1779  return true;
1780 
1781  case Intrinsic::s390_vfeebs:
1782  case Intrinsic::s390_vfeehs:
1783  case Intrinsic::s390_vfeefs:
1784  Opcode = SystemZISD::VFEE_CC;
1785  CCValid = SystemZ::CCMASK_ANY;
1786  return true;
1787 
1788  case Intrinsic::s390_vfeezbs:
1789  case Intrinsic::s390_vfeezhs:
1790  case Intrinsic::s390_vfeezfs:
1791  Opcode = SystemZISD::VFEEZ_CC;
1792  CCValid = SystemZ::CCMASK_ANY;
1793  return true;
1794 
1795  case Intrinsic::s390_vfenebs:
1796  case Intrinsic::s390_vfenehs:
1797  case Intrinsic::s390_vfenefs:
1798  Opcode = SystemZISD::VFENE_CC;
1799  CCValid = SystemZ::CCMASK_ANY;
1800  return true;
1801 
1802  case Intrinsic::s390_vfenezbs:
1803  case Intrinsic::s390_vfenezhs:
1804  case Intrinsic::s390_vfenezfs:
1805  Opcode = SystemZISD::VFENEZ_CC;
1806  CCValid = SystemZ::CCMASK_ANY;
1807  return true;
1808 
1809  case Intrinsic::s390_vistrbs:
1810  case Intrinsic::s390_vistrhs:
1811  case Intrinsic::s390_vistrfs:
1812  Opcode = SystemZISD::VISTR_CC;
1814  return true;
1815 
1816  case Intrinsic::s390_vstrcbs:
1817  case Intrinsic::s390_vstrchs:
1818  case Intrinsic::s390_vstrcfs:
1819  Opcode = SystemZISD::VSTRC_CC;
1820  CCValid = SystemZ::CCMASK_ANY;
1821  return true;
1822 
1823  case Intrinsic::s390_vstrczbs:
1824  case Intrinsic::s390_vstrczhs:
1825  case Intrinsic::s390_vstrczfs:
1826  Opcode = SystemZISD::VSTRCZ_CC;
1827  CCValid = SystemZ::CCMASK_ANY;
1828  return true;
1829 
1830  case Intrinsic::s390_vstrsb:
1831  case Intrinsic::s390_vstrsh:
1832  case Intrinsic::s390_vstrsf:
1833  Opcode = SystemZISD::VSTRS_CC;
1834  CCValid = SystemZ::CCMASK_ANY;
1835  return true;
1836 
1837  case Intrinsic::s390_vstrszb:
1838  case Intrinsic::s390_vstrszh:
1839  case Intrinsic::s390_vstrszf:
1840  Opcode = SystemZISD::VSTRSZ_CC;
1841  CCValid = SystemZ::CCMASK_ANY;
1842  return true;
1843 
1844  case Intrinsic::s390_vfcedbs:
1845  case Intrinsic::s390_vfcesbs:
1846  Opcode = SystemZISD::VFCMPES;
1847  CCValid = SystemZ::CCMASK_VCMP;
1848  return true;
1849 
1850  case Intrinsic::s390_vfchdbs:
1851  case Intrinsic::s390_vfchsbs:
1852  Opcode = SystemZISD::VFCMPHS;
1853  CCValid = SystemZ::CCMASK_VCMP;
1854  return true;
1855 
1856  case Intrinsic::s390_vfchedbs:
1857  case Intrinsic::s390_vfchesbs:
1858  Opcode = SystemZISD::VFCMPHES;
1859  CCValid = SystemZ::CCMASK_VCMP;
1860  return true;
1861 
1862  case Intrinsic::s390_vftcidb:
1863  case Intrinsic::s390_vftcisb:
1864  Opcode = SystemZISD::VFTCI;
1865  CCValid = SystemZ::CCMASK_VCMP;
1866  return true;
1867 
1868  case Intrinsic::s390_tdc:
1869  Opcode = SystemZISD::TDC;
1870  CCValid = SystemZ::CCMASK_TDC;
1871  return true;
1872 
1873  default:
1874  return false;
1875  }
1876 }
1877 
1878 // Emit an intrinsic with chain and an explicit CC register result.
1880  unsigned Opcode) {
1881  // Copy all operands except the intrinsic ID.
1882  unsigned NumOps = Op.getNumOperands();
1884  Ops.reserve(NumOps - 1);
1885  Ops.push_back(Op.getOperand(0));
1886  for (unsigned I = 2; I < NumOps; ++I)
1887  Ops.push_back(Op.getOperand(I));
1888 
1889  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
1890  SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other);
1891  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
1892  SDValue OldChain = SDValue(Op.getNode(), 1);
1893  SDValue NewChain = SDValue(Intr.getNode(), 1);
1894  DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
1895  return Intr.getNode();
1896 }
1897 
1898 // Emit an intrinsic with an explicit CC register result.
1900  unsigned Opcode) {
1901  // Copy all operands except the intrinsic ID.
1902  unsigned NumOps = Op.getNumOperands();
1904  Ops.reserve(NumOps - 1);
1905  for (unsigned I = 1; I < NumOps; ++I)
1906  Ops.push_back(Op.getOperand(I));
1907 
1908  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops);
1909  return Intr.getNode();
1910 }
1911 
1912 // CC is a comparison that will be implemented using an integer or
1913 // floating-point comparison. Return the condition code mask for
1914 // a branch on true. In the integer case, CCMASK_CMP_UO is set for
1915 // unsigned comparisons and clear for signed ones. In the floating-point
1916 // case, CCMASK_CMP_UO has its normal mask meaning (unordered).
1917 static unsigned CCMaskForCondCode(ISD::CondCode CC) {
1918 #define CONV(X) \
1919  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
1920  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
1921  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
1922 
1923  switch (CC) {
1924  default:
1925  llvm_unreachable("Invalid integer condition!");
1926 
1927  CONV(EQ);
1928  CONV(NE);
1929  CONV(GT);
1930  CONV(GE);
1931  CONV(LT);
1932  CONV(LE);
1933 
1934  case ISD::SETO: return SystemZ::CCMASK_CMP_O;
1935  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
1936  }
1937 #undef CONV
1938 }
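
For reference, the CONV macro above makes each unsigned form accept the unordered bit as well; in terms of the masks named in this file (a sketch of the mapping, not additional source):

   CCMaskForCondCode(ISD::SETGT)  -> SystemZ::CCMASK_CMP_GT
   CCMaskForCondCode(ISD::SETOGT) -> SystemZ::CCMASK_CMP_GT
   CCMaskForCondCode(ISD::SETUGT) -> SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_GT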
1939 
1940 // If C can be converted to a comparison against zero, adjust the operands
1941 // as necessary.
1942 static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
1943  if (C.ICmpType == SystemZICMP::UnsignedOnly)
1944  return;
1945 
1946  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
1947  if (!ConstOp1)
1948  return;
1949 
1950  int64_t Value = ConstOp1->getSExtValue();
1951  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
1952  (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
1953  (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
1954  (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
1955  C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
1956  C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
1957  }
1958 }
1959 
1960 // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
1961 // adjust the operands as necessary.
1962 static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
1963  Comparison &C) {
1964  // For us to make any changes, it must be a comparison between a single-use
1965  // load and a constant.
1966  if (!C.Op0.hasOneUse() ||
1967  C.Op0.getOpcode() != ISD::LOAD ||
1968  C.Op1.getOpcode() != ISD::Constant)
1969  return;
1970 
1971  // We must have an 8- or 16-bit load.
1972  auto *Load = cast<LoadSDNode>(C.Op0);
1973  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
1974  if (NumBits != 8 && NumBits != 16)
1975  return;
1976 
1977  // The load must be an extending one and the constant must be within the
1978  // range of the unextended value.
1979  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
1980  uint64_t Value = ConstOp1->getZExtValue();
1981  uint64_t Mask = (1 << NumBits) - 1;
1982  if (Load->getExtensionType() == ISD::SEXTLOAD) {
1983  // Make sure that ConstOp1 is in range of C.Op0.
1984  int64_t SignedValue = ConstOp1->getSExtValue();
1985  if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
1986  return;
1987  if (C.ICmpType != SystemZICMP::SignedOnly) {
1988  // Unsigned comparison between two sign-extended values is equivalent
1989  // to unsigned comparison between two zero-extended values.
1990  Value &= Mask;
1991  } else if (NumBits == 8) {
1992  // Try to treat the comparison as unsigned, so that we can use CLI.
1993  // Adjust CCMask and Value as necessary.
1994  if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
1995  // Test whether the high bit of the byte is set.
1996  Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
1997  else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
1998  // Test whether the high bit of the byte is clear.
1999  Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
2000  else
2001  // No instruction exists for this combination.
2002  return;
2003  C.ICmpType = SystemZICMP::UnsignedOnly;
2004  }
2005  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
2006  if (Value > Mask)
2007  return;
2008  // If the constant is in range, we can use any comparison.
2009  C.ICmpType = SystemZICMP::Any;
2010  } else
2011  return;
2012 
2013  // Make sure that the first operand is an i32 of the right extension type.
2014  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
2015  ISD::SEXTLOAD :
2016  ISD::ZEXTLOAD);
2017  if (C.Op0.getValueType() != MVT::i32 ||
2018  Load->getExtensionType() != ExtType) {
2019  C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
2020  Load->getBasePtr(), Load->getPointerInfo(),
2021  Load->getMemoryVT(), Load->getAlignment(),
2022  Load->getMemOperand()->getFlags());
2023  // Update the chain uses.
2024  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1));
2025  }
2026 
2027  // Make sure that the second operand is an i32 with the right value.
2028  if (C.Op1.getValueType() != MVT::i32 ||
2029  Value != ConstOp1->getZExtValue())
2030  C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
2031 }
2032 
2033 // Return true if Op is either an unextended load, or a load suitable
2034 // for integer register-memory comparisons of type ICmpType.
2035 static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
2036  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
2037  if (Load) {
2038  // There are no instructions to compare a register with a memory byte.
2039  if (Load->getMemoryVT() == MVT::i8)
2040  return false;
2041  // Otherwise decide on extension type.
2042  switch (Load->getExtensionType()) {
2043  case ISD::NON_EXTLOAD:
2044  return true;
2045  case ISD::SEXTLOAD:
2046  return ICmpType != SystemZICMP::UnsignedOnly;
2047  case ISD::ZEXTLOAD:
2048  return ICmpType != SystemZICMP::SignedOnly;
2049  default:
2050  break;
2051  }
2052  }
2053  return false;
2054 }
2055 
2056 // Return true if it is better to swap the operands of C.
2057 static bool shouldSwapCmpOperands(const Comparison &C) {
2058  // Leave f128 comparisons alone, since they have no memory forms.
2059  if (C.Op0.getValueType() == MVT::f128)
2060  return false;
2061 
2062  // Always keep a floating-point constant second, since comparisons with
2063  // zero can use LOAD TEST and comparisons with other constants make a
2064  // natural memory operand.
2065  if (isa<ConstantFPSDNode>(C.Op1))
2066  return false;
2067 
2068  // Never swap comparisons with zero since there are many ways to optimize
2069  // those later.
2070  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
2071  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
2072  return false;
2073 
2074  // Also keep natural memory operands second if the loaded value is
2075  // only used here. Several comparisons have memory forms.
2076  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
2077  return false;
2078 
2079  // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
2080  // In that case we generally prefer the memory to be second.
2081  if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
2082  // The only exceptions are when the second operand is a constant and
2083  // we can use things like CHHSI.
2084  if (!ConstOp1)
2085  return true;
2086  // The unsigned memory-immediate instructions can handle 16-bit
2087  // unsigned integers.
2088  if (C.ICmpType != SystemZICMP::SignedOnly &&
2089  isUInt<16>(ConstOp1->getZExtValue()))
2090  return false;
2091  // The signed memory-immediate instructions can handle 16-bit
2092  // signed integers.
2093  if (C.ICmpType != SystemZICMP::UnsignedOnly &&
2094  isInt<16>(ConstOp1->getSExtValue()))
2095  return false;
2096  return true;
2097  }
2098 
2099  // Try to promote the use of CGFR and CLGFR.
2100  unsigned Opcode0 = C.Op0.getOpcode();
2101  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
2102  return true;
2103  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
2104  return true;
2105  if (C.ICmpType != SystemZICMP::SignedOnly &&
2106  Opcode0 == ISD::AND &&
2107  C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
2108  cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
2109  return true;
2110 
2111  return false;
2112 }
2113 
2114 // Return a version of comparison CC mask CCMask in which the LT and GT
2115 // actions are swapped.
2116 static unsigned reverseCCMask(unsigned CCMask) {
2117  return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
2118  (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
2119  (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
2120  (CCMask & SystemZ::CCMASK_CMP_UO));
2121 }
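
In terms of the same masks, reverseCCMask leaves the EQ and UO bits alone and swaps the ordered bits, so for example (a sketch):

   reverseCCMask(SystemZ::CCMASK_CMP_LT)                          -> SystemZ::CCMASK_CMP_GT
   reverseCCMask(SystemZ::CCMASK_CMP_EQ | SystemZ::CCMASK_CMP_LT) -> SystemZ::CCMASK_CMP_EQ | SystemZ::CCMASK_CMP_GT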
2122 
2123 // Check whether C tests for equality between X and Y and whether X - Y
2124 // or Y - X is also computed. In that case it's better to compare the
2125 // result of the subtraction against zero.
2126 static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
2127  Comparison &C) {
2128  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
2129  C.CCMask == SystemZ::CCMASK_CMP_NE) {
2130  for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
2131  SDNode *N = *I;
2132  if (N->getOpcode() == ISD::SUB &&
2133  ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
2134  (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
2135  C.Op0 = SDValue(N, 0);
2136  C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
2137  return;
2138  }
2139  }
2140  }
2141 }
2142 
2143 // Check whether C compares a floating-point value with zero and if that
2144 // floating-point value is also negated. In this case we can use the
2145 // negation to set CC, so avoiding separate LOAD AND TEST and
2146 // LOAD (NEGATIVE/COMPLEMENT) instructions.
2147 static void adjustForFNeg(Comparison &C) {
2148  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
2149  if (C1 && C1->isZero()) {
2150  for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
2151  SDNode *N = *I;
2152  if (N->getOpcode() == ISD::FNEG) {
2153  C.Op0 = SDValue(N, 0);
2154  C.CCMask = reverseCCMask(C.CCMask);
2155  return;
2156  }
2157  }
2158  }
2159 }
2160 
2161 // Check whether C compares (shl X, 32) with 0 and whether X is
2162 // also sign-extended. In that case it is better to test the result
2163 // of the sign extension using LTGFR.
2164 //
2165 // This case is important because InstCombine transforms a comparison
2166 // with (sext (trunc X)) into a comparison with (shl X, 32).
2167 static void adjustForLTGFR(Comparison &C) {
2168  // Check for a comparison between (shl X, 32) and 0.
2169  if (C.Op0.getOpcode() == ISD::SHL &&
2170  C.Op0.getValueType() == MVT::i64 &&
2171  C.Op1.getOpcode() == ISD::Constant &&
2172  cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2173  auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
2174  if (C1 && C1->getZExtValue() == 32) {
2175  SDValue ShlOp0 = C.Op0.getOperand(0);
2176  // See whether X has any SIGN_EXTEND_INREG uses.
2177  for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
2178  SDNode *N = *I;
2179  if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
2180  cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
2181  C.Op0 = SDValue(N, 0);
2182  return;
2183  }
2184  }
2185  }
2186  }
2187 }
2188 
2189 // If C compares the truncation of an extending load, try to compare
2190 // the untruncated value instead. This exposes more opportunities to
2191 // reuse CC.
2192 static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
2193  Comparison &C) {
2194  if (C.Op0.getOpcode() == ISD::TRUNCATE &&
2195  C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
2196  C.Op1.getOpcode() == ISD::Constant &&
2197  cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2198  auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
2199  if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
2200  unsigned Type = L->getExtensionType();
2201  if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
2202  (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
2203  C.Op0 = C.Op0.getOperand(0);
2204  C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
2205  }
2206  }
2207  }
2208 }
2209 
2210 // Return true if shift operation N has an in-range constant shift value.
2211 // Store it in ShiftVal if so.
2212 static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
2213  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
2214  if (!Shift)
2215  return false;
2216 
2217  uint64_t Amount = Shift->getZExtValue();
2218  if (Amount >= N.getValueSizeInBits())
2219  return false;
2220 
2221  ShiftVal = Amount;
2222  return true;
2223 }
2224 
2225 // Check whether an AND with Mask is suitable for a TEST UNDER MASK
2226 // instruction and whether the CC value is descriptive enough to handle
2227 // a comparison of type Opcode between the AND result and CmpVal.
2228 // CCMask says which comparison result is being tested and BitSize is
2229 // the number of bits in the operands. If TEST UNDER MASK can be used,
2230 // return the corresponding CC mask, otherwise return 0.
2231 static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
2232  uint64_t Mask, uint64_t CmpVal,
2233  unsigned ICmpType) {
2234  assert(Mask != 0 && "ANDs with zero should have been removed by now");
2235 
2236  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
2237  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
2238  !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
2239  return 0;
2240 
2241  // Work out the masks for the lowest and highest bits.
2242  unsigned HighShift = 63 - countLeadingZeros(Mask);
2243  uint64_t High = uint64_t(1) << HighShift;
2244  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
2245 
2246  // Signed ordered comparisons are effectively unsigned if the sign
2247  // bit is dropped.
2248  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
2249 
2250  // Check for equality comparisons with 0, or the equivalent.
2251  if (CmpVal == 0) {
2252  if (CCMask == SystemZ::CCMASK_CMP_EQ)
2253  return SystemZ::CCMASK_TM_ALL_0;
2254  if (CCMask == SystemZ::CCMASK_CMP_NE)
2255  return SystemZ::CCMASK_TM_SOME_1;
2256  }
2257  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
2258  if (CCMask == SystemZ::CCMASK_CMP_LT)
2259  return SystemZ::CCMASK_TM_ALL_0;
2260  if (CCMask == SystemZ::CCMASK_CMP_GE)
2261  return SystemZ::CCMASK_TM_SOME_1;
2262  }
2263  if (EffectivelyUnsigned && CmpVal < Low) {
2264  if (CCMask == SystemZ::CCMASK_CMP_LE)
2265  return SystemZ::CCMASK_TM_ALL_0;
2266  if (CCMask == SystemZ::CCMASK_CMP_GT)
2267  return SystemZ::CCMASK_TM_SOME_1;
2268  }
2269 
2270  // Check for equality comparisons with the mask, or the equivalent.
2271  if (CmpVal == Mask) {
2272  if (CCMask == SystemZ::CCMASK_CMP_EQ)
2273  return SystemZ::CCMASK_TM_ALL_1;
2274  if (CCMask == SystemZ::CCMASK_CMP_NE)
2275  return SystemZ::CCMASK_TM_SOME_0;
2276  }
2277  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
2278  if (CCMask == SystemZ::CCMASK_CMP_GT)
2279  return SystemZ::CCMASK_TM_ALL_1;
2280  if (CCMask == SystemZ::CCMASK_CMP_LE)
2281  return SystemZ::CCMASK_TM_SOME_0;
2282  }
2283  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
2284  if (CCMask == SystemZ::CCMASK_CMP_GE)
2285  return SystemZ::CCMASK_TM_ALL_1;
2286  if (CCMask == SystemZ::CCMASK_CMP_LT)
2287  return SystemZ::CCMASK_TM_SOME_0;
2288  }
2289 
2290  // Check for ordered comparisons with the top bit.
2291  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
2292  if (CCMask == SystemZ::CCMASK_CMP_LE)
2293  return SystemZ::CCMASK_TM_MSB_0;
2294  if (CCMask == SystemZ::CCMASK_CMP_GT)
2295  return SystemZ::CCMASK_TM_MSB_1;
2296  }
2297  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
2298  if (CCMask == SystemZ::CCMASK_CMP_LT)
2299  return SystemZ::CCMASK_TM_MSB_0;
2300  if (CCMask == SystemZ::CCMASK_CMP_GE)
2301  return SystemZ::CCMASK_TM_MSB_1;
2302  }
2303 
2304  // If there are just two bits, we can do equality checks for Low and High
2305  // as well.
2306  if (Mask == Low + High) {
2307  if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
2308  return SystemZ::CCMASK_TM_MIXED_MSB_0;
2309  if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
2310  return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
2311  if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
2312  return SystemZ::CCMASK_TM_MIXED_MSB_1;
2313  if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
2314  return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
2315  }
2316 
2317  // Looks like we've exhausted our options.
2318  return 0;
2319 }
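
As a worked example of the equality cases above (a sketch using the constants referenced in the code, with a hypothetical 8-bit mask), testing (X & 0x80) against zero maps onto TEST UNDER MASK as:

   getTestUnderMaskCond(64, SystemZ::CCMASK_CMP_EQ, /*Mask=*/0x80, /*CmpVal=*/0, SystemZICMP::Any)
     -> SystemZ::CCMASK_TM_ALL_0   // all selected bits are zero
   getTestUnderMaskCond(64, SystemZ::CCMASK_CMP_NE, /*Mask=*/0x80, /*CmpVal=*/0, SystemZICMP::Any)
     -> SystemZ::CCMASK_TM_SOME_1  // at least one selected bit is set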
2320 
2321 // See whether C can be implemented as a TEST UNDER MASK instruction.
2322 // Update the arguments with the TM version if so.
2323 static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
2324  Comparison &C) {
2325  // Check that we have a comparison with a constant.
2326  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
2327  if (!ConstOp1)
2328  return;
2329  uint64_t CmpVal = ConstOp1->getZExtValue();
2330 
2331  // Check whether the nonconstant input is an AND with a constant mask.
2332  Comparison NewC(C);
2333  uint64_t MaskVal;
2334  ConstantSDNode *Mask = nullptr;
2335  if (C.Op0.getOpcode() == ISD::AND) {
2336  NewC.Op0 = C.Op0.getOperand(0);
2337  NewC.Op1 = C.Op0.getOperand(1);
2338  Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
2339  if (!Mask)
2340  return;
2341  MaskVal = Mask->getZExtValue();
2342  } else {
2343  // There is no instruction to compare with a 64-bit immediate
2344  // so use TMHH instead if possible. We need an unsigned ordered
2345  // comparison with an i64 immediate.
2346  if (NewC.Op0.getValueType() != MVT::i64 ||
2347  NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
2348  NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
2349  NewC.ICmpType == SystemZICMP::SignedOnly)
2350  return;
2351  // Convert LE and GT comparisons into LT and GE.
2352  if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
2353  NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
2354  if (CmpVal == uint64_t(-1))
2355  return;
2356  CmpVal += 1;
2357  NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
2358  }
2359  // If the low N bits of Op1 are zero then the low N bits of Op0 can
2360  // be masked off without changing the result.
2361  MaskVal = -(CmpVal & -CmpVal);
2362  NewC.ICmpType = SystemZICMP::UnsignedOnly;
2363  }
2364  if (!MaskVal)
2365  return;
2366 
2367  // Check whether the combination of mask, comparison value and comparison
2368  // type are suitable.
2369  unsigned BitSize = NewC.Op0.getValueSizeInBits();
2370  unsigned NewCCMask, ShiftVal;
2371  if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2372  NewC.Op0.getOpcode() == ISD::SHL &&
2373  isSimpleShift(NewC.Op0, ShiftVal) &&
2374  (MaskVal >> ShiftVal != 0) &&
2375  ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
2376  (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2377  MaskVal >> ShiftVal,
2378  CmpVal >> ShiftVal,
2379  SystemZICMP::Any))) {
2380  NewC.Op0 = NewC.Op0.getOperand(0);
2381  MaskVal >>= ShiftVal;
2382  } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2383  NewC.Op0.getOpcode() == ISD::SRL &&
2384  isSimpleShift(NewC.Op0, ShiftVal) &&
2385  (MaskVal << ShiftVal != 0) &&
2386  ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
2387  (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2388  MaskVal << ShiftVal,
2389  CmpVal << ShiftVal,
2390  SystemZICMP::UnsignedOnly))) {
2391  NewC.Op0 = NewC.Op0.getOperand(0);
2392  MaskVal <<= ShiftVal;
2393  } else {
2394  NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
2395  NewC.ICmpType);
2396  if (!NewCCMask)
2397  return;
2398  }
2399 
2400  // Go ahead and make the change.
2401  C.Opcode = SystemZISD::TM;
2402  C.Op0 = NewC.Op0;
2403  if (Mask && Mask->getZExtValue() == MaskVal)
2404  C.Op1 = SDValue(Mask, 0);
2405  else
2406  C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
2407  C.CCValid = SystemZ::CCMASK_TM;
2408  C.CCMask = NewCCMask;
2409 }
2410 
2411 // See whether the comparison argument contains a redundant AND
2412 // and remove it if so. This sometimes happens due to the generic
2413 // BRCOND expansion.
2414 static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
2415  Comparison &C) {
2416  if (C.Op0.getOpcode() != ISD::AND)
2417  return;
2418  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
2419  if (!Mask)
2420  return;
2421  KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0));
2422  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
2423  return;
2424 
2425  C.Op0 = C.Op0.getOperand(0);
2426 }
2427 
2428 // Return a Comparison that tests the condition-code result of intrinsic
2429 // node Call against constant integer CC using comparison code Cond.
2430 // Opcode is the opcode of the SystemZISD operation for the intrinsic
2431 // and CCValid is the set of possible condition-code results.
2432 static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
2433  SDValue Call, unsigned CCValid, uint64_t CC,
2434  ISD::CondCode Cond) {
2435  Comparison C(Call, SDValue());
2436  C.Opcode = Opcode;
2437  C.CCValid = CCValid;
2438  if (Cond == ISD::SETEQ)
2439  // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
2440  C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
2441  else if (Cond == ISD::SETNE)
2442  // ...and the inverse of that.
2443  C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
2444  else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
2445  // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
2446  // always true for CC>3.
2447  C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
2448  else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
2449  // ...and the inverse of that.
2450  C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
2451  else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
2452  // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
2453  // always true for CC>3.
2454  C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2455  else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
2456  // ...and the inverse of that.
2457  C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
2458  else
2459  llvm_unreachable("Unexpected integer comparison type");
2460  C.CCMask &= CCValid;
2461  return C;
2462 }
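
The mapping above places CC value n in bit (3 - n) of the 4-bit condition mask. A small self-contained sketch (hypothetical helper name) of the ISD::SETEQ branch:

#include <cassert>
int main() {
  // Mirrors "C.CCMask = CC < 4 ? 1 << (3 - CC) : 0" from getIntrinsicCmp.
  auto eqMask = [](unsigned CC) { return CC < 4 ? 1u << (3 - CC) : 0u; };
  assert(eqMask(0) == 8);  // CC == 0 -> bit 3
  assert(eqMask(3) == 1);  // CC == 3 -> bit 0
  assert(eqMask(5) == 0);  // CC > 3  -> the comparison can never be true
  return 0;
}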
2463 
2464 // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
2465 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
2466  ISD::CondCode Cond, const SDLoc &DL) {
2467  if (CmpOp1.getOpcode() == ISD::Constant) {
2468  uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
2469  unsigned Opcode, CCValid;
2470  if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
2471  CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
2472  isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
2473  return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2474  if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2475  CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
2476  isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
2477  return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2478  }
2479  Comparison C(CmpOp0, CmpOp1);
2480  C.CCMask = CCMaskForCondCode(Cond);
2481  if (C.Op0.getValueType().isFloatingPoint()) {
2482  C.CCValid = SystemZ::CCMASK_FCMP;
2483  C.Opcode = SystemZISD::FCMP;
2484  adjustForFNeg(C);
2485  } else {
2486  C.CCValid = SystemZ::CCMASK_ICMP;
2487  C.Opcode = SystemZISD::ICMP;
2488  // Choose the type of comparison. Equality and inequality tests can
2489  // use either signed or unsigned comparisons. The choice also doesn't
2490  // matter if both sign bits are known to be clear. In those cases we
2491  // want to give the main isel code the freedom to choose whichever
2492  // form fits best.
2493  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
2494  C.CCMask == SystemZ::CCMASK_CMP_NE ||
2495  (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
2496  C.ICmpType = SystemZICMP::Any;
2497  else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
2498  C.ICmpType = SystemZICMP::UnsignedOnly;
2499  else
2500  C.ICmpType = SystemZICMP::SignedOnly;
2501  C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
2502  adjustForRedundantAnd(DAG, DL, C);
2503  adjustZeroCmp(DAG, DL, C);
2504  adjustSubwordCmp(DAG, DL, C);
2505  adjustForSubtraction(DAG, DL, C);
2506  adjustForLTGFR(C);
2507  adjustICmpTruncate(DAG, DL, C);
2508  }
2509 
2510  if (shouldSwapCmpOperands(C)) {
2511  std::swap(C.Op0, C.Op1);
2512  C.CCMask = reverseCCMask(C.CCMask);
2513  }
2514 
2515  adjustForTestUnderMask(DAG, DL, C);
2516  return C;
2517 }
2518 
2519 // Emit the comparison instruction described by C.
2520 static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
2521  if (!C.Op1.getNode()) {
2522  SDNode *Node;
2523  switch (C.Op0.getOpcode()) {
2524  case ISD::INTRINSIC_W_CHAIN:
2525  Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode);
2526  return SDValue(Node, 0);
2527  case ISD::INTRINSIC_WO_CHAIN:
2528  Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode);
2529  return SDValue(Node, Node->getNumValues() - 1);
2530  default:
2531  llvm_unreachable("Invalid comparison operands");
2532  }
2533  }
2534  if (C.Opcode == SystemZISD::ICMP)
2535  return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1,
2536  DAG.getConstant(C.ICmpType, DL, MVT::i32));
2537  if (C.Opcode == SystemZISD::TM) {
2538  bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
2539  bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
2540  return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
2541  DAG.getConstant(RegisterOnly, DL, MVT::i32));
2542  }
2543  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
2544 }
2545 
2546 // Implement a 32-bit *MUL_LOHI operation by extending both operands to
2547 // 64 bits. Extend is the extension type to use. Store the high part
2548 // in Hi and the low part in Lo.
2549 static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
2550  SDValue Op0, SDValue Op1, SDValue &Hi,
2551  SDValue &Lo) {
2552  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
2553  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
2554  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
2555  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
2556  DAG.getConstant(32, DL, MVT::i64));
2557  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
2558  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
2559 }
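
A minimal numeric sketch of the split performed by lowerMUL_LOHI32 for the zero-extending case (made-up operand values):

#include <cassert>
#include <cstdint>
int main() {
  uint64_t Op0 = 0x80000000u, Op1 = 4;  // operands after the ZERO_EXTEND to i64
  uint64_t Mul = Op0 * Op1;             // single 64-bit multiply, 0x200000000
  uint32_t Hi = uint32_t(Mul >> 32);    // high half of the 32-bit *MUL_LOHI
  uint32_t Lo = uint32_t(Mul);          // low half
  assert(Hi == 2 && Lo == 0);
  return 0;
}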
2560 
2561 // Lower a binary operation that produces two VT results, one in each
2562 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
2563 // and Opcode performs the GR128 operation. Store the even register result
2564 // in Even and the odd register result in Odd.
2565 static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
2566  unsigned Opcode, SDValue Op0, SDValue Op1,
2567  SDValue &Even, SDValue &Odd) {
2568  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
2569  bool Is32Bit = is32Bit(VT);
2570  Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
2571  Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
2572 }
2573 
2574 // Return an i32 value that is 1 if the CC value produced by CCReg is
2575 // in the mask CCMask and 0 otherwise. CC is known to have a value
2576 // in CCValid, so other values can be ignored.
2577 static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
2578  unsigned CCValid, unsigned CCMask) {
2579  SDValue Ops[] = { DAG.getConstant(1, DL, MVT::i32),
2580  DAG.getConstant(0, DL, MVT::i32),
2581  DAG.getConstant(CCValid, DL, MVT::i32),
2582  DAG.getConstant(CCMask, DL, MVT::i32), CCReg };
2583  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
2584 }
2585 
2586 // Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
2587 // be done directly. IsFP is true if CC is for a floating-point rather than
2588 // integer comparison.
2589 static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
2590  switch (CC) {
2591  case ISD::SETOEQ:
2592  case ISD::SETEQ:
2593  return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;
2594 
2595  case ISD::SETOGE:
2596  case ISD::SETGE:
2597  return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);
2598 
2599  case ISD::SETOGT:
2600  case ISD::SETGT:
2601  return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;
2602 
2603  case ISD::SETUGT:
2604  return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;
2605 
2606  default:
2607  return 0;
2608  }
2609 }
2610 
2611 // Return the SystemZISD vector comparison operation for CC or its inverse,
2612 // or 0 if neither can be done directly. Indicate in Invert whether the
2613 // result is for the inverse of CC. IsFP is true if CC is for a
2614 // floating-point rather than integer comparison.
2615 static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
2616  bool &Invert) {
2617  if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2618  Invert = false;
2619  return Opcode;
2620  }
2621 
2622  CC = ISD::getSetCCInverse(CC, !IsFP);
2623  if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2624  Invert = true;
2625  return Opcode;
2626  }
2627 
2628  return 0;
2629 }
2630 
2631 // Return a v2f64 that contains the extended form of elements Start and Start+1
2632 // of v4f32 value Op.
2633 static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
2634  SDValue Op) {
2635  int Mask[] = { Start, -1, Start + 1, -1 };
2636  Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
2637  return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
2638 }
2639 
2640 // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
2641 // producing a result of type VT.
2642 SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
2643  const SDLoc &DL, EVT VT,
2644  SDValue CmpOp0,
2645  SDValue CmpOp1) const {
2646  // There is no hardware support for v4f32 (unless we have the vector
2647  // enhancements facility 1), so extend the vector into two v2f64s
2648  // and compare those.
2649  if (CmpOp0.getValueType() == MVT::v4f32 &&
2650  !Subtarget.hasVectorEnhancements1()) {
2651  SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0);
2652  SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0);
2653  SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1);
2654  SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1);
2655  SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
2656  SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
2657  return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
2658  }
2659  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
2660 }
2661 
2662 // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
2663 // an integer mask of type VT.
2664 SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
2665  const SDLoc &DL, EVT VT,
2666  ISD::CondCode CC,
2667  SDValue CmpOp0,
2668  SDValue CmpOp1) const {
2669  bool IsFP = CmpOp0.getValueType().isFloatingPoint();
2670  bool Invert = false;
2671  SDValue Cmp;
2672  switch (CC) {
2673  // Handle tests for order using (or (ogt y x) (oge x y)).
2674  case ISD::SETUO:
2675  Invert = true;
2676  LLVM_FALLTHROUGH;
2677  case ISD::SETO: {
2678  assert(IsFP && "Unexpected integer comparison");
2679  SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
2680  SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1);
2681  Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
2682  break;
2683  }
2684 
2685  // Handle <> tests using (or (ogt y x) (ogt x y)).
2686  case ISD::SETUEQ:
2687  Invert = true;
2688  LLVM_FALLTHROUGH;
2689  case ISD::SETONE: {
2690  assert(IsFP && "Unexpected integer comparison");
2691  SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
2692  SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1);
2693  Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
2694  break;
2695  }
2696 
2697  // Otherwise a single comparison is enough. It doesn't really
2698  // matter whether we try the inversion or the swap first, since
2699  // there are no cases where both work.
2700  default:
2701  if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
2702  Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1);
2703  else {
2704  CC = ISD::getSetCCSwappedOperands(CC);
2705  if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
2706  Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0);
2707  else
2708  llvm_unreachable("Unhandled comparison");
2709  }
2710  break;
2711  }
2712  if (Invert) {
2713  SDValue Mask =
2714  DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64));
2715  Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
2716  }
2717  return Cmp;
2718 }
2719 
2720 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
2721  SelectionDAG &DAG) const {
2722  SDValue CmpOp0 = Op.getOperand(0);
2723  SDValue CmpOp1 = Op.getOperand(1);
2724  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2725  SDLoc DL(Op);
2726  EVT VT = Op.getValueType();
2727  if (VT.isVector())
2728  return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
2729 
2730  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2731  SDValue CCReg = emitCmp(DAG, DL, C);
2732  return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
2733 }
2734 
2735 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2736  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2737  SDValue CmpOp0 = Op.getOperand(2);
2738  SDValue CmpOp1 = Op.getOperand(3);
2739  SDValue Dest = Op.getOperand(4);
2740  SDLoc DL(Op);
2741 
2742  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2743  SDValue CCReg = emitCmp(DAG, DL, C);
2744  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
2745  Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32),
2746  DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, CCReg);
2747 }
2748 
2749 // Return true if Pos is CmpOp and Neg is the negative of CmpOp,
2750 // allowing Pos and Neg to be wider than CmpOp.
2751 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
2752  return (Neg.getOpcode() == ISD::SUB &&
2753  Neg.getOperand(0).getOpcode() == ISD::Constant &&
2754  cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
2755  Neg.getOperand(1) == Pos &&
2756  (Pos == CmpOp ||
2757  (Pos.getOpcode() == ISD::SIGN_EXTEND &&
2758  Pos.getOperand(0) == CmpOp)));
2759 }
2760 
2761 // Return the absolute or negative absolute of Op; IsNegative decides which.
2762 static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
2763  bool IsNegative) {
2764  Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
2765  if (IsNegative)
2766  Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
2767  DAG.getConstant(0, DL, Op.getValueType()), Op);
2768  return Op;
2769 }
2770 
2771 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
2772  SelectionDAG &DAG) const {
2773  SDValue CmpOp0 = Op.getOperand(0);
2774  SDValue CmpOp1 = Op.getOperand(1);
2775  SDValue TrueOp = Op.getOperand(2);
2776  SDValue FalseOp = Op.getOperand(3);
2777  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2778  SDLoc DL(Op);
2779 
2780  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2781 
2782  // Check for absolute and negative-absolute selections, including those
2783  // where the comparison value is sign-extended (for LPGFR and LNGFR).
2784  // This check supplements the one in DAGCombiner.
2785  if (C.Opcode == SystemZISD::ICMP &&
2786  C.CCMask != SystemZ::CCMASK_CMP_EQ &&
2787  C.CCMask != SystemZ::CCMASK_CMP_NE &&
2788  C.Op1.getOpcode() == ISD::Constant &&
2789  cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2790  if (isAbsolute(C.Op0, TrueOp, FalseOp))
2791  return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
2792  if (isAbsolute(C.Op0, FalseOp, TrueOp))
2793  return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
2794  }
2795 
2796  SDValue CCReg = emitCmp(DAG, DL, C);
2797  SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32),
2798  DAG.getConstant(C.CCMask, DL, MVT::i32), CCReg};
2799 
2800  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops);
2801 }
2802 
2803 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
2804  SelectionDAG &DAG) const {
2805  SDLoc DL(Node);
2806  const GlobalValue *GV = Node->getGlobal();
2807  int64_t Offset = Node->getOffset();
2808  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2809  CodeModel::Model CM = DAG.getTarget().getCodeModel();
2810 
2811  SDValue Result;
2812  if (Subtarget.isPC32DBLSymbol(GV, CM)) {
2813  // Assign anchors at 1<<12 byte boundaries.
2814  uint64_t Anchor = Offset & ~uint64_t(0xfff);
2815  Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
2816  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2817 
2818  // The offset can be folded into the address if it is aligned to a halfword.
2819  Offset -= Anchor;
2820  if (Offset != 0 && (Offset & 1) == 0) {
2821  SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
2822  Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
2823  Offset = 0;
2824  }
2825  } else {
2826  Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
2827  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2828  Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
2829  MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2830  }
2831 
2832  // If there was a non-zero offset that we didn't fold, create an explicit
2833  // addition for it.
2834  if (Offset != 0)
2835  Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
2836  DAG.getConstant(Offset, DL, PtrVT));
2837 
2838  return Result;
2839 }
2840 
2841 SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
2842  SelectionDAG &DAG,
2843  unsigned Opcode,
2844  SDValue GOTOffset) const {
2845  SDLoc DL(Node);
2846  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2847  SDValue Chain = DAG.getEntryNode();
2848  SDValue Glue;
2849 
2850  // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
2851  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
2852  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
2853  Glue = Chain.getValue(1);
2854  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
2855  Glue = Chain.getValue(1);
2856 
2857  // The first call operand is the chain and the second is the TLS symbol.
2858  SmallVector<SDValue, 8> Ops;
2859  Ops.push_back(Chain);
2860  Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
2861  Node->getValueType(0),
2862  0, 0));
2863 
2864  // Add argument registers to the end of the list so that they are
2865  // known live into the call.
2866  Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
2867  Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));
2868 
2869  // Add a register mask operand representing the call-preserved registers.
2870  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2871  const uint32_t *Mask =
2872  TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
2873  assert(Mask && "Missing call preserved mask for calling convention");
2874  Ops.push_back(DAG.getRegisterMask(Mask));
2875 
2876  // Glue the call to the argument copies.
2877  Ops.push_back(Glue);
2878 
2879  // Emit the call.
2880  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2881  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
2882  Glue = Chain.getValue(1);
2883 
2884  // Copy the return value from %r2.
2885  return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
2886 }
2887 
2888 SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
2889  SelectionDAG &DAG) const {
2890  SDValue Chain = DAG.getEntryNode();
2891  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2892 
2893  // The high part of the thread pointer is in access register 0.
2894  SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
2895  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
2896 
2897  // The low part of the thread pointer is in access register 1.
2898  SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
2899  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
2900 
2901  // Merge them into a single 64-bit address.
2902  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
2903  DAG.getConstant(32, DL, PtrVT));
2904  return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
2905 }
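
A sketch of the merge done by lowerThreadPointer, with made-up access-register contents:

#include <cassert>
#include <cstdint>
int main() {
  uint32_t TPHi = 0x00000001, TPLo = 0x00002000;  // hypothetical A0 and A1 values
  uint64_t TP = (uint64_t(TPHi) << 32) | TPLo;    // SHL by 32, then OR
  assert(TP == 0x0000000100002000ull);
  return 0;
}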
2906 
2907 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
2908  SelectionDAG &DAG) const {
2909  if (DAG.getTarget().useEmulatedTLS())
2910  return LowerToTLSEmulatedModel(Node, DAG);
2911  SDLoc DL(Node);
2912  const GlobalValue *GV = Node->getGlobal();
2913  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2914  TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
2915 
2916  SDValue TP = lowerThreadPointer(DL, DAG);
2917 
2918  // Get the offset of GA from the thread pointer, based on the TLS model.
2919  SDValue Offset;
2920  switch (model) {
2921  case TLSModel::GeneralDynamic: {
2922  // Load the GOT offset of the tls_index (module ID / per-symbol offset).
2923  SystemZConstantPoolValue *CPV =
2924  SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);
2925 
2926  Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2927  Offset = DAG.getLoad(
2928  PtrVT, DL, DAG.getEntryNode(), Offset,
2929  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2930 
2931  // Call __tls_get_offset to retrieve the offset.
2932  Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
2933  break;
2934  }
2935 
2936  case TLSModel::LocalDynamic: {
2937  // Load the GOT offset of the module ID.
2938  SystemZConstantPoolValue *CPV =
2939  SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);
2940 
2941  Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2942  Offset = DAG.getLoad(
2943  PtrVT, DL, DAG.getEntryNode(), Offset,
2944  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2945 
2946  // Call __tls_get_offset to retrieve the module base offset.
2947  Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);
2948 
2949  // Note: The SystemZLDCleanupPass will remove redundant computations
2950  // of the module base offset. Count total number of local-dynamic
2951  // accesses to trigger execution of that pass.
2952  SystemZMachineFunctionInfo* MFI =
2953  DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
2954  MFI->incNumLocalDynamicTLSAccesses();
2955 
2956  // Add the per-symbol offset.
2957  CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);
2958 
2959  SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
2960  DTPOffset = DAG.getLoad(
2961  PtrVT, DL, DAG.getEntryNode(), DTPOffset,
2962  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2963 
2964  Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
2965  break;
2966  }
2967 
2968  case TLSModel::InitialExec: {
2969  // Load the offset from the GOT.
2970  Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2971  SystemZII::MO_INDNTPOFF);
2972  Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
2973  Offset =
2974  DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
2975  MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2976  break;
2977  }
2978 
2979  case TLSModel::LocalExec: {
2980  // Force the offset into the constant pool and load it from there.
2981  SystemZConstantPoolValue *CPV =
2982  SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
2983 
2984  Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2985  Offset = DAG.getLoad(
2986  PtrVT, DL, DAG.getEntryNode(), Offset,
2988  break;
2989  }
2990  }
2991 
2992  // Add the base and offset together.
2993  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
2994 }
2995 
2996 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
2997  SelectionDAG &DAG) const {
2998  SDLoc DL(Node);
2999  const BlockAddress *BA = Node->getBlockAddress();
3000  int64_t Offset = Node->getOffset();
3001  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3002 
3003  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
3004  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
3005  return Result;
3006 }
3007 
3008 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
3009  SelectionDAG &DAG) const {
3010  SDLoc DL(JT);
3011  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3012  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3013 
3014  // Use LARL to load the address of the table.
3015  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
3016 }
3017 
3018 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
3019  SelectionDAG &DAG) const {
3020  SDLoc DL(CP);
3021  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3022 
3023  SDValue Result;
3024  if (CP->isMachineConstantPoolEntry())
3025  Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
3026  CP->getAlignment());
3027  else
3028  Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
3029  CP->getAlignment(), CP->getOffset());
3030 
3031  // Use LARL to load the address of the constant pool entry.
3032  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
3033 }
3034 
3035 SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
3036  SelectionDAG &DAG) const {
3037  MachineFunction &MF = DAG.getMachineFunction();
3038  MachineFrameInfo &MFI = MF.getFrameInfo();
3039  MFI.setFrameAddressIsTaken(true);
3040 
3041  SDLoc DL(Op);
3042  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3043  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3044 
3045  // If the back chain frame index has not been allocated yet, do so.
3046  SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>();
3047  int BackChainIdx = FI->getFramePointerSaveIndex();
3048  if (!BackChainIdx) {
3049  // By definition, the frame address is the address of the back chain.
3050  BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false);
3051  FI->setFramePointerSaveIndex(BackChainIdx);
3052  }
3053  SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);
3054 
3055  // FIXME The frontend should detect this case.
3056  if (Depth > 0) {
3057  report_fatal_error("Unsupported stack frame traversal count");
3058  }
3059 
3060  return BackChain;
3061 }
3062 
3063 SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
3064  SelectionDAG &DAG) const {
3065  MachineFunction &MF = DAG.getMachineFunction();
3066  MachineFrameInfo &MFI = MF.getFrameInfo();
3067  MFI.setReturnAddressIsTaken(true);
3068 
3069  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3070  return SDValue();
3071 
3072  SDLoc DL(Op);
3073  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3074  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3075 
3076  // FIXME The frontend should detect this case.
3077  if (Depth > 0) {
3078  report_fatal_error("Unsupported stack frame traversal count");
3079  }
3080 
3081  // Return R14D, which has the return address. Mark it an implicit live-in.
3082  unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
3083  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
3084 }
3085 
3086 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
3087  SelectionDAG &DAG) const {
3088  SDLoc DL(Op);
3089  SDValue In = Op.getOperand(0);
3090  EVT InVT = In.getValueType();
3091  EVT ResVT = Op.getValueType();
3092 
3093  // Convert loads directly. This is normally done by DAGCombiner,
3094  // but we need this case for bitcasts that are created during lowering
3095  // and which are then lowered themselves.
3096  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
3097  if (ISD::isNormalLoad(LoadN)) {
3098  SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(),
3099  LoadN->getBasePtr(), LoadN->getMemOperand());
3100  // Update the chain uses.
3101  DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1));
3102  return NewLoad;
3103  }
3104 
3105  if (InVT == MVT::i32 && ResVT == MVT::f32) {
3106  SDValue In64;
3107  if (Subtarget.hasHighWord()) {
3108  SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
3109  MVT::i64);
3110  In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
3111  MVT::i64, SDValue(U64, 0), In);
3112  } else {
3113  In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
3114  In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
3115  DAG.getConstant(32, DL, MVT::i64));
3116  }
3117  SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
3118  return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
3119  DL, MVT::f32, Out64);
3120  }
3121  if (InVT == MVT::f32 && ResVT == MVT::i32) {
3122  SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
3123  SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
3124  MVT::f64, SDValue(U64, 0), In);
3125  SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
3126  if (Subtarget.hasHighWord())
3127  return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
3128  MVT::i32, Out64);
3129  SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
3130  DAG.getConstant(32, DL, MVT::i64));
3131  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
3132  }
3133  llvm_unreachable("Unexpected bitcast combination");
3134 }
3135 
3136 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
3137  SelectionDAG &DAG) const {
3138  MachineFunction &MF = DAG.getMachineFunction();
3139  SystemZMachineFunctionInfo *FuncInfo =
3140  MF.getInfo<SystemZMachineFunctionInfo>();
3141  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3142 
3143  SDValue Chain = Op.getOperand(0);
3144  SDValue Addr = Op.getOperand(1);
3145  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3146  SDLoc DL(Op);
3147 
3148  // The initial values of each field.
3149  const unsigned NumFields = 4;
3150  SDValue Fields[NumFields] = {
3151  DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
3152  DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
3153  DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
3154  DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
3155  };
3156 
3157  // Store each field into its respective slot.
3158  SDValue MemOps[NumFields];
3159  unsigned Offset = 0;
3160  for (unsigned I = 0; I < NumFields; ++I) {
3161  SDValue FieldAddr = Addr;
3162  if (Offset != 0)
3163  FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
3164  DAG.getIntPtrConstant(Offset, DL));
3165  MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
3166  MachinePointerInfo(SV, Offset));
3167  Offset += 8;
3168  }
3169  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
3170 }
3171 
3172 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
3173  SelectionDAG &DAG) const {
3174  SDValue Chain = Op.getOperand(0);
3175  SDValue DstPtr = Op.getOperand(1);
3176  SDValue SrcPtr = Op.getOperand(2);
3177  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
3178  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
3179  SDLoc DL(Op);
3180 
3181  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
3182  /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
3183  /*isTailCall*/false,
3184  MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
3185 }
3186 
3187 SDValue SystemZTargetLowering::
3188 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
3189  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
3190  MachineFunction &MF = DAG.getMachineFunction();
3191  bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
3192  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
3193 
3194  SDValue Chain = Op.getOperand(0);
3195  SDValue Size = Op.getOperand(1);
3196  SDValue Align = Op.getOperand(2);
3197  SDLoc DL(Op);
3198 
3199  // If user has set the no alignment function attribute, ignore
3200  // alloca alignments.
3201  uint64_t AlignVal = (RealignOpt ?
3202  dyn_cast<ConstantSDNode>(Align)->getZExtValue() : 0);
3203 
3204  uint64_t StackAlign = TFI->getStackAlignment();
3205  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
3206  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
3207 
3208  unsigned SPReg = getStackPointerRegisterToSaveRestore();
3209  SDValue NeededSpace = Size;
3210 
3211  // Get a reference to the stack pointer.
3212  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
3213 
3214  // If we need a backchain, save it now.
3215  SDValue Backchain;
3216  if (StoreBackchain)
3217  Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3218 
3219  // Add extra space for alignment if needed.
3220  if (ExtraAlignSpace)
3221  NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
3222  DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
3223 
3224  // Get the new stack pointer value.
3225  SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
3226 
3227  // Copy the new stack pointer back.
3228  Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
3229 
3230  // The allocated data lives above the 160 bytes allocated for the standard
3231  // frame, plus any outgoing stack arguments. We don't know how much that
3232  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
3233  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
3234  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
3235 
3236  // Dynamically realign if needed.
3237  if (RequiredAlign > StackAlign) {
3238  Result =
3239  DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
3240  DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
3241  Result =
3242  DAG.getNode(ISD::AND, DL, MVT::i64, Result,
3243  DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
3244  }
3245 
3246  if (StoreBackchain)
3247  Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
3248 
3249  SDValue Ops[2] = { Result, Chain };
3250  return DAG.getMergeValues(Ops, DL);
3251 }
3252 
3253 SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
3254  SDValue Op, SelectionDAG &DAG) const {
3255  SDLoc DL(Op);
3256 
3257  return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
3258 }
3259 
3260 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
3261  SelectionDAG &DAG) const {
3262  EVT VT = Op.getValueType();
3263  SDLoc DL(Op);
3264  SDValue Ops[2];
3265  if (is32Bit(VT))
3266  // Just do a normal 64-bit multiplication and extract the results.
3267  // We define this so that it can be used for constant division.
3268  lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
3269  Op.getOperand(1), Ops[1], Ops[0]);
3270  else if (Subtarget.hasMiscellaneousExtensions2())
3271  // SystemZISD::SMUL_LOHI returns the low result in the odd register and
3272  // the high result in the even register. ISD::SMUL_LOHI is defined to
3273  // return the low half first, so the results are in reverse order.
3274  lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
3275  Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3276  else {
3277  // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
3278  //
3279  // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
3280  //
3281  // but using the fact that the upper halves are either all zeros
3282  // or all ones:
3283  //
3284  // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
3285  //
3286  // and grouping the right terms together since they are quicker than the
3287  // multiplication:
3288  //
3289  // (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
3290  SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
3291  SDValue LL = Op.getOperand(0);
3292  SDValue RL = Op.getOperand(1);
3293  SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
3294  SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
3295  // SystemZISD::UMUL_LOHI returns the low result in the odd register and
3296  // the high result in the even register. ISD::SMUL_LOHI is defined to
3297  // return the low half first, so the results are in reverse order.
3298  lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
3299  LL, RL, Ops[1], Ops[0]);
3300  SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
3301  SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
3302  SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
3303  Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
3304  }
3305  return DAG.getMergeValues(Ops, DL);
3306 }
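
The 128-bit path above relies on LH and RH each being 0 or -1 after the arithmetic shift by 63, so each signed cross term (lh * rl) << 64 equals -((lh & rl) << 64). A small sketch that checks the identity with __int128 (a GCC/Clang extension) for one made-up input pair:

#include <cassert>
#include <cstdint>
int main() {
  int64_t LL = -3, RL = 5;
  int64_t LH = LL >> 63, RH = RL >> 63;  // each is 0 or -1
  unsigned __int128 Unsigned =
      (unsigned __int128)(uint64_t)LL * (uint64_t)RL;  // what UMUL_LOHI produces
  unsigned __int128 Fix =
      (unsigned __int128)(uint64_t)((LH & RL) + (LL & RH)) << 64;
  assert((__int128)(Unsigned - Fix) == (__int128)LL * RL);  // full signed product
  return 0;
}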
3307 
3308 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
3309  SelectionDAG &DAG) const {
3310  EVT VT = Op.getValueType();
3311  SDLoc DL(Op);
3312  SDValue Ops[2];
3313  if (is32Bit(VT))
3314  // Just do a normal 64-bit multiplication and extract the results.
3315  // We define this so that it can be used for constant division.
3316  lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
3317  Op.getOperand(1), Ops[1], Ops[0]);
3318  else
3319  // SystemZISD::UMUL_LOHI returns the low result in the odd register and
3320  // the high result in the even register. ISD::UMUL_LOHI is defined to
3321  // return the low half first, so the results are in reverse order.
3322  lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
3323  Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3324  return DAG.getMergeValues(Ops, DL);
3325 }
3326 
3327 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
3328  SelectionDAG &DAG) const {
3329  SDValue Op0 = Op.getOperand(0);
3330  SDValue Op1 = Op.getOperand(1);
3331  EVT VT = Op.getValueType();
3332  SDLoc DL(Op);
3333 
3334  // We use DSGF for 32-bit division. This means the first operand must
3335  // always be 64-bit, and the second operand should be 32-bit whenever
3336  // that is possible, to improve performance.
3337  if (is32Bit(VT))
3338  Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
3339  else if (DAG.ComputeNumSignBits(Op1) > 32)
3340  Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
3341 
3342  // DSG(F) returns the remainder in the even register and the
3343  // quotient in the odd register.
3344  SDValue Ops[2];
3345  lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
3346  return DAG.getMergeValues(Ops, DL);
3347 }
3348 
3349 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
3350  SelectionDAG &DAG) const {
3351  EVT VT = Op.getValueType();
3352  SDLoc DL(Op);
3353 
3354  // DL(G) returns the remainder in the even register and the
3355  // quotient in the odd register.
3356  SDValue Ops[2];
3357  lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
3358  Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3359  return DAG.getMergeValues(Ops, DL);
3360 }
3361 
3362 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
3363  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
3364 
3365  // Get the known-zero masks for each operand.
3366  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
3367  KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]),
3368  DAG.computeKnownBits(Ops[1])};
3369 
3370  // See if the upper 32 bits of one operand and the lower 32 bits of the
3371  // other are known zero. They are the low and high operands respectively.
3372  uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
3373  Known[1].Zero.getZExtValue() };
3374  unsigned High, Low;
3375  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
3376  High = 1, Low = 0;
3377  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
3378  High = 0, Low = 1;
3379  else
3380  return Op;
3381 
3382  SDValue LowOp = Ops[Low];
3383  SDValue HighOp = Ops[High];
3384 
3385  // If the high part is a constant, we're better off using IILH.
3386  if (HighOp.getOpcode() == ISD::Constant)
3387  return Op;
3388 
3389  // If the low part is a constant that is outside the range of LHI,
3390  // then we're better off using IILF.
3391  if (LowOp.getOpcode() == ISD::Constant) {
3392  int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
3393  if (!isInt<16>(Value))
3394  return Op;
3395  }
3396 
3397  // Check whether the high part is an AND that doesn't change the
3398  // high 32 bits and just masks out low bits. We can skip it if so.
3399  if (HighOp.getOpcode() == ISD::AND &&
3400  HighOp.getOperand(1).getOpcode() == ISD::Constant) {
3401  SDValue HighOp0 = HighOp.getOperand(0);
3402  uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
3403  if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
3404  HighOp = HighOp0;
3405  }
3406 
3407  // Take advantage of the fact that all GR32 operations only change the
3408  // low 32 bits by truncating Low to an i32 and inserting it directly
3409  // using a subreg. The interesting cases are those where the truncation
3410  // can be folded.
3411  SDLoc DL(Op);
3412  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
3413  return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
3414  MVT::i64, HighOp, Low32);
3415 }
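// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this file): the transformation above rests
// on a simple integer identity.  When the high 32 bits of one OR operand and
// the low 32 bits of the other are known zero, the OR is just "insert the low
// word of one operand into the other", which is what the INSERT_SUBREG of
// subreg_l32 expresses.  The helper name is made up for the example.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

static uint64_t insertLow32(uint64_t HighOp, uint32_t Low32) {
  return (HighOp & 0xffffffff00000000ULL) | Low32;  // overwrite the low word
}

int main() {
  uint64_t HighOp = 0x1234567800000000ULL;  // low 32 bits known zero
  uint64_t LowOp  = 0x000000009abcdef0ULL;  // high 32 bits known zero
  assert((HighOp | LowOp) == insertLow32(HighOp, static_cast<uint32_t>(LowOp)));
  return 0;
}
// --------------------------- end of sketch ---------------------------------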
3416 
3417 // Lower SADDO/SSUBO/UADDO/USUBO nodes.
3418 SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
3419  SelectionDAG &DAG) const {
3420  SDNode *N = Op.getNode();
3421  SDValue LHS = N->getOperand(0);
3422  SDValue RHS = N->getOperand(1);
3423  SDLoc DL(N);
3424  unsigned BaseOp = 0;
3425  unsigned CCValid = 0;
3426  unsigned CCMask = 0;
3427 
3428  switch (Op.getOpcode()) {
3429  default: llvm_unreachable("Unknown instruction!");
3430  case ISD::SADDO:
3431  BaseOp = SystemZISD::SADDO;
3432  CCValid = SystemZ::CCMASK_ARITH;
3433  CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
3434  break;
3435  case ISD::SSUBO:
3436  BaseOp = SystemZISD::SSUBO;
3437  CCValid = SystemZ::CCMASK_ARITH;
3438  CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
3439  break;
3440  case ISD::UADDO:
3441  BaseOp = SystemZISD::UADDO;
3442  CCValid = SystemZ::CCMASK_LOGICAL;
3443  CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
3444  break;
3445  case ISD::USUBO:
3446  BaseOp = SystemZISD::USUBO;
3447  CCValid = SystemZ::CCMASK_LOGICAL;
3448  CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
3449  break;
3450  }
3451 
3452  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
3453  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
3454 
3455  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
3456  if (N->getValueType(1) == MVT::i1)
3457  SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
3458 
3459  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
3460 }
3461 
3462 static bool isAddCarryChain(SDValue Carry) {
3463  while (Carry.getOpcode() == ISD::ADDCARRY)
3464  Carry = Carry.getOperand(2);
3465  return Carry.getOpcode() == ISD::UADDO;
3466 }
3467 
3468 static bool isSubBorrowChain(SDValue Carry) {
3469  while (Carry.getOpcode() == ISD::SUBCARRY)
3470  Carry = Carry.getOperand(2);
3471  return Carry.getOpcode() == ISD::USUBO;
3472 }
3473 
3474 // Lower ADDCARRY/SUBCARRY nodes.
3475 SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
3476  SelectionDAG &DAG) const {
3477 
3478  SDNode *N = Op.getNode();
3479  MVT VT = N->getSimpleValueType(0);
3480 
3481  // Let legalize expand this if it isn't a legal type yet.
3482  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
3483  return SDValue();
3484 
3485  SDValue LHS = N->getOperand(0);
3486  SDValue RHS = N->getOperand(1);
3487  SDValue Carry = Op.getOperand(2);
3488  SDLoc DL(N);
3489  unsigned BaseOp = 0;
3490  unsigned CCValid = 0;
3491  unsigned CCMask = 0;
3492 
3493  switch (Op.getOpcode()) {
3494  default: llvm_unreachable("Unknown instruction!");
3495  case ISD::ADDCARRY:
3496  if (!isAddCarryChain(Carry))
3497  return SDValue();
3498 
3499  BaseOp = SystemZISD::ADDCARRY;
3500  CCValid = SystemZ::CCMASK_LOGICAL;
3501  CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
3502  break;
3503  case ISD::SUBCARRY:
3504  if (!isSubBorrowChain(Carry))
3505  return SDValue();
3506 
3507  BaseOp = SystemZISD::SUBCARRY;
3508  CCValid = SystemZ::CCMASK_LOGICAL;
3509  CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
3510  break;
3511  }
3512 
3513  // Set the condition code from the carry flag.
3514  Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
3515  DAG.getConstant(CCValid, DL, MVT::i32),
3516  DAG.getConstant(CCMask, DL, MVT::i32));
3517 
3518  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3519  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);
3520 
3521  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
3522  if (N->getValueType(1) == MVT::i1)
3523  SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
3524 
3525  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
3526 }
3527 
3528 SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
3529  SelectionDAG &DAG) const {
3530  EVT VT = Op.getValueType();
3531  SDLoc DL(Op);
3532  Op = Op.getOperand(0);
3533 
3534  // Handle vector types via VPOPCT.
3535  if (VT.isVector()) {
3536  Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
3537  Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
3538  switch (VT.getScalarSizeInBits()) {
3539  case 8:
3540  break;
3541  case 16: {
3542  Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
3543  SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
3544  SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
3545  Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3546  Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
3547  break;
3548  }
3549  case 32: {
3550  SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
3551  DAG.getConstant(0, DL, MVT::i32));
3552  Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3553  break;
3554  }
3555  case 64: {
3556  SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
3557  DAG.getConstant(0, DL, MVT::i32));
3558  Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
3559  Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3560  break;
3561  }
3562  default:
3563  llvm_unreachable("Unexpected type");
3564  }
3565  return Op;
3566  }
3567 
3568  // Get the known-zero mask for the operand.
3569  KnownBits Known = DAG.computeKnownBits(Op);
3570  unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
3571  if (NumSignificantBits == 0)
3572  return DAG.getConstant(0, DL, VT);
3573 
3574  // Skip known-zero high parts of the operand.
3575  int64_t OrigBitSize = VT.getSizeInBits();
3576  int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
3577  BitSize = std::min(BitSize, OrigBitSize);
3578 
3579  // The POPCNT instruction counts the number of bits in each byte.
3580  Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
3581  Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
3582  Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
3583 
3584  // Add up per-byte counts in a binary tree. All bits of Op at
3585  // position larger than BitSize remain zero throughout.
3586  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
3587  SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
3588  if (BitSize != OrigBitSize)
3589  Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
3590  DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
3591  Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3592  }
3593 
3594  // Extract overall result from high byte.
3595  if (BitSize > 8)
3596  Op = DAG.getNode(ISD::SRL, DL, VT, Op,
3597  DAG.getConstant(BitSize - 8, DL, VT));
3598 
3599  return Op;
3600 }
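// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this file): the scalar expansion above in
// plain C++.  popcntPerByte() stands in for what the POPCNT instruction
// produces (a population count in every byte); the shift-and-add loop then
// accumulates all byte counts into the top byte, from which the result is
// extracted, mirroring the 64-bit case of the code above.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

static uint64_t popcntPerByte(uint64_t V) {
  uint64_t Result = 0;
  for (unsigned Byte = 0; Byte < 8; ++Byte) {
    uint64_t B = (V >> (8 * Byte)) & 0xff;
    uint64_t Count = 0;
    for (; B; B &= B - 1)   // clear the lowest set bit
      ++Count;
    Result |= Count << (8 * Byte);
  }
  return Result;
}

static unsigned ctpop64(uint64_t V) {
  uint64_t Op = popcntPerByte(V);
  // Add up per-byte counts in a binary tree (BitSize == OrigBitSize == 64
  // here, so no masking is needed).
  for (unsigned I = 32; I >= 8; I /= 2)
    Op += Op << I;
  // Extract the overall result from the high byte.
  return static_cast<unsigned>(Op >> 56);
}

int main() {
  assert(ctpop64(0) == 0);
  assert(ctpop64(~0ULL) == 64);
  assert(ctpop64(0x8000000000000001ULL) == 2);
  return 0;
}
// --------------------------- end of sketch ---------------------------------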
3601 
3602 SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
3603  SelectionDAG &DAG) const {
3604  SDLoc DL(Op);
3605  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
3606  cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
3607  SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
3608  cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
3609 
3610  // The only fence that needs an instruction is a sequentially-consistent
3611  // cross-thread fence.
3612  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
3613  FenceSSID == SyncScope::System) {
3614  return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
3615  Op.getOperand(0)),
3616  0);
3617  }
3618 
3619  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
3620  return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
3621 }
3622 
3623 // Op is an atomic load. Lower it into a normal volatile load.
3624 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
3625  SelectionDAG &DAG) const {
3626  auto *Node = cast<AtomicSDNode>(Op.getNode());
3627  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
3628  Node->getChain(), Node->getBasePtr(),
3629  Node->getMemoryVT(), Node->getMemOperand());
3630 }
3631 
3632 // Op is an atomic store. Lower it into a normal volatile store.
3633 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
3634  SelectionDAG &DAG) const {
3635  auto *Node = cast<AtomicSDNode>(Op.getNode());
3636  SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
3637  Node->getBasePtr(), Node->getMemoryVT(),
3638  Node->getMemOperand());
3639  // We have to enforce sequential consistency by performing a
3640  // serialization operation after the store.
3641  if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent)
3642  Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
3643  MVT::Other, Chain), 0);
3644  return Chain;
3645 }
3646 
3647  // Op is an 8-bit, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the 8-bit
3648  // and 16-bit forms into the fullword ATOMIC_LOADW_* operation given by Opcode.
3649 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
3650  SelectionDAG &DAG,
3651  unsigned Opcode) const {
3652  auto *Node = cast<AtomicSDNode>(Op.getNode());
3653 
3654  // 32-bit operations need no code outside the main loop.
3655  EVT NarrowVT = Node->getMemoryVT();
3656  EVT WideVT = MVT::i32;
3657  if (NarrowVT == WideVT)
3658  return Op;
3659 
3660  int64_t BitSize = NarrowVT.getSizeInBits();
3661  SDValue ChainIn = Node->getChain();
3662  SDValue Addr = Node->getBasePtr();
3663  SDValue Src2 = Node->getVal();
3664  MachineMemOperand *MMO = Node->getMemOperand();
3665  SDLoc DL(Node);
3666  EVT PtrVT = Addr.getValueType();
3667 
3668  // Convert atomic subtracts of constants into additions.
3669  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
3670  if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
3671  Opcode = SystemZISD::ATOMIC_LOADW_ADD;
3672  Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
3673  }
3674 
3675  // Get the address of the containing word.
3676  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3677  DAG.getConstant(-4, DL, PtrVT));
3678 
3679  // Get the number of bits that the word must be rotated left in order
3680  // to bring the field to the top bits of a GR32.
3681  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3682  DAG.getConstant(3, DL, PtrVT));
3683  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3684 
3685  // Get the complementing shift amount, for rotating a field in the top
3686  // bits back to its proper position.
3687  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3688  DAG.getConstant(0, DL, WideVT), BitShift);
3689 
3690  // Extend the source operand to 32 bits and prepare it for the inner loop.
3691  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
3692  // operations require the source to be shifted in advance. (This shift
3693  // can be folded if the source is constant.) For AND and NAND, the lower
3694  // bits must be set, while for other opcodes they should be left clear.
3695  if (Opcode != SystemZISD::ATOMIC_SWAPW)
3696  Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
3697  DAG.getConstant(32 - BitSize, DL, WideVT));
3698  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
3699  Opcode == SystemZISD::ATOMIC_LOADW_NAND)
3700  Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
3701  DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
3702 
3703  // Construct the ATOMIC_LOADW_* node.
3704  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
3705  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
3706  DAG.getConstant(BitSize, DL, WideVT) };
3707  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
3708  NarrowVT, MMO);
3709 
3710  // Rotate the result of the final CS so that the field is in the lower
3711  // bits of a GR32, then truncate it.
3712  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
3713  DAG.getConstant(BitSize, DL, WideVT));
3714  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
3715 
3716  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
3717  return DAG.getMergeValues(RetOps, DL);
3718 }
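// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this file): the subword expansion above,
// restated in portable C++.  An 8-bit atomic add is performed on the aligned
// 32-bit word that contains the byte, using a compare-and-swap loop and
// shifting the field into and out of position -- the same idea the
// ATOMIC_LOADW_* nodes implement with rotates and CS on a GR32.  The function
// is a made-up example, not what the backend emits.
// ---------------------------------------------------------------------------
#include <atomic>
#include <cassert>
#include <cstdint>

// Adds Val to the byte at ByteIndex (0..3) inside Word; returns the old byte.
static uint8_t atomicAdd8(std::atomic<uint32_t> &Word, unsigned ByteIndex,
                          uint8_t Val) {
  const unsigned Shift = 8 * ByteIndex;       // position of the subword field
  const uint32_t Mask = 0xffu << Shift;
  uint32_t Old = Word.load();
  uint32_t New;
  do {
    uint8_t OldByte = static_cast<uint8_t>((Old & Mask) >> Shift);
    uint8_t NewByte = static_cast<uint8_t>(OldByte + Val);
    New = (Old & ~Mask) | (uint32_t(NewByte) << Shift);
  } while (!Word.compare_exchange_weak(Old, New));
  return static_cast<uint8_t>((Old & Mask) >> Shift);
}

int main() {
  std::atomic<uint32_t> Word{0x11223344};
  uint8_t Prev = atomicAdd8(Word, 1, 0x10);   // operate on the 0x33 byte
  assert(Prev == 0x33);
  assert(Word.load() == 0x11224344);
  return 0;
}
// --------------------------- end of sketch ---------------------------------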
3719 
3720 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
3721 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
3722 // operations into additions.
3723 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
3724  SelectionDAG &DAG) const {
3725  auto *Node = cast<AtomicSDNode>(Op.getNode());
3726  EVT MemVT = Node->getMemoryVT();
3727  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
3728  // A full-width operation.
3729  assert(Op.getValueType() == MemVT && "Mismatched VTs");
3730  SDValue Src2 = Node->getVal();
3731  SDValue NegSrc2;
3732  SDLoc DL(Src2);
3733 
3734  if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
3735  // Use an addition if the operand is constant and either LAA(G) is
3736  // available or the negative value is in the range of A(G)FHI.
3737  int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
3738  if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
3739  NegSrc2 = DAG.getConstant(Value, DL, MemVT);
3740  } else if (Subtarget.hasInterlockedAccess1())
3741  // Use LAA(G) if available.
3742  NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
3743  Src2);
3744 
3745  if (NegSrc2.getNode())
3746  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
3747  Node->getChain(), Node->getBasePtr(), NegSrc2,
3748  Node->getMemOperand());
3749 
3750  // Use the node as-is.
3751  return Op;
3752  }
3753 
3754  return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
3755 }
3756 
3757 // Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
3758 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
3759  SelectionDAG &DAG) const {
3760  auto *Node = cast<AtomicSDNode>(Op.getNode());
3761  SDValue ChainIn = Node->getOperand(0);
3762  SDValue Addr = Node->getOperand(1);
3763  SDValue CmpVal = Node->getOperand(2);
3764  SDValue SwapVal = Node->getOperand(3);
3765  MachineMemOperand *MMO = Node->getMemOperand();
3766  SDLoc DL(Node);
3767 
3768  // We have native support for 32-bit and 64-bit compare and swap, but we
3769  // still need to expand extracting the "success" result from the CC.
3770  EVT NarrowVT = Node->getMemoryVT();
3771  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
3772  if (NarrowVT == WideVT) {
3773  SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
3774  SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
3775  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP,
3776  DL, Tys, Ops, NarrowVT, MMO);
3777  SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
3778  SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
3779 
3780  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
3781  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
3782  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
3783  return SDValue();
3784  }
3785 
3786  // Convert 8-bit and 16-bit compare and swap to a loop, implemented
3787  // via a fullword ATOMIC_CMP_SWAPW operation.
3788  int64_t BitSize = NarrowVT.getSizeInBits();
3789  EVT PtrVT = Addr.getValueType();
3790 
3791  // Get the address of the containing word.
3792  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3793  DAG.getConstant(-4, DL, PtrVT));
3794 
3795  // Get the number of bits that the word must be rotated left in order
3796  // to bring the field to the top bits of a GR32.
3797  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3798  DAG.getConstant(3, DL, PtrVT));
3799  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3800 
3801  // Get the complementing shift amount, for rotating a field in the top
3802  // bits back to its proper position.
3803  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3804  DAG.getConstant(0, DL, WideVT), BitShift);
3805 
3806  // Construct the ATOMIC_CMP_SWAPW node.
3807  SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
3808  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
3809  NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
3810  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
3811  VTList, Ops, NarrowVT, MMO);
3812  SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
3813  SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ);
3814 
3815  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
3816  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
3817  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
3818  return SDValue();
3819 }
3820 
3821 MachineMemOperand::Flags
3822 SystemZTargetLowering::getMMOFlags(const Instruction &I) const {
3823  // Because of how we convert atomic_load and atomic_store to normal loads and
3824  // stores in the DAG, we need to ensure that the MMOs are marked volatile
3825  // since DAGCombine hasn't been updated to account for atomic, but non
3826  // volatile loads. (See D57601)
3827  if (auto *SI = dyn_cast<StoreInst>(&I))
3828  if (SI->isAtomic())
3829  return MachineMemOperand::MOVolatile;
3830  if (auto *LI = dyn_cast<LoadInst>(&I))
3831  if (LI->isAtomic())
3832  return MachineMemOperand::MOVolatile;
3833  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
3834  if (AI->isAtomic())
3835  return MachineMemOperand::MOVolatile;
3836  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
3837  if (AI->isAtomic())
3838  return MachineMemOperand::MOVolatile;
3839  return MachineMemOperand::MONone;
3840 }
3841 
3842 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
3843  SelectionDAG &DAG) const {
3844  MachineFunction &MF = DAG.getMachineFunction();
3845  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
3846  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
3847  SystemZ::R15D, Op.getValueType());
3848 }
3849 
3850 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
3851  SelectionDAG &DAG) const {
3852  MachineFunction &MF = DAG.getMachineFunction();
3853  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
3854  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
3855 
3856  SDValue Chain = Op.getOperand(0);
3857  SDValue NewSP = Op.getOperand(1);
3858  SDValue Backchain;
3859  SDLoc DL(Op);
3860 
3861  if (StoreBackchain) {
3862  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
3863  Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3864  }
3865 
3866  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
3867 
3868  if (StoreBackchain)
3869  Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
3870 
3871  return Chain;
3872 }
3873 
3874 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
3875  SelectionDAG &DAG) const {
3876  bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3877  if (!IsData)
3878  // Just preserve the chain.
3879  return Op.getOperand(0);
3880 
3881  SDLoc DL(Op);
3882  bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3883  unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
3884  auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
3885  SDValue Ops[] = {
3886  Op.getOperand(0),
3887  DAG.getConstant(Code, DL, MVT::i32),
3888  Op.getOperand(1)
3889  };
3890  return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
3891  Node->getVTList(), Ops,
3892  Node->getMemoryVT(), Node->getMemOperand());
3893 }
3894 
3895 // Convert condition code in CCReg to an i32 value.
3896  static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
3897  SDLoc DL(CCReg);
3898  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
3899  return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
3900  DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
3901 }
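// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this file): IPM deposits the 2-bit
// condition code at bit position 28 of the result register (assumed here to
// be the value named by SystemZ::IPM_CC), so the SRL above leaves the raw CC
// value 0..3 in the low bits of the i32.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

int main() {
  const unsigned IPM_CC = 28;          // assumed value of SystemZ::IPM_CC
  uint32_t IPMResult = 0x30000000;     // IPM output when CC == 3
  assert((IPMResult >> IPM_CC) == 3);  // what getCCResult() computes
  return 0;
}
// --------------------------- end of sketch ---------------------------------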
3902 
3903 SDValue
3904 SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
3905  SelectionDAG &DAG) const {
3906  unsigned Opcode, CCValid;
3907  if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
3908  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
3909  SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode);
3910  SDValue CC = getCCResult(DAG, SDValue(Node, 0));
3911  DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
3912  return SDValue();
3913  }
3914 
3915  return SDValue();
3916 }
3917 
3918 SDValue
3919 SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
3920  SelectionDAG &DAG) const {
3921  unsigned Opcode, CCValid;
3922  if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
3923  SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode);
3924  if (Op->getNumValues() == 1)
3925  return getCCResult(DAG, SDValue(Node, 0));
3926  assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
3927  return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(),
3928  SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1)));
3929  }
3930 
3931  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3932  switch (Id) {
3933  case Intrinsic::thread_pointer:
3934  return lowerThreadPointer(SDLoc(Op), DAG);
3935 
3936  case Intrinsic::s390_vpdi:
3937  return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
3938  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3939 
3940  case Intrinsic::s390_vperm:
3941  return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
3942  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3943 
3944  case Intrinsic::s390_vuphb:
3945  case Intrinsic::s390_vuphh:
3946  case Intrinsic::s390_vuphf:
3947  return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
3948  Op.getOperand(1));
3949 
3950  case Intrinsic::s390_vuplhb:
3951  case Intrinsic::s390_vuplhh:
3952  case Intrinsic::s390_vuplhf:
3953  return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
3954  Op.getOperand(1));
3955 
3956  case Intrinsic::s390_vuplb:
3957  case Intrinsic::s390_vuplhw:
3958  case Intrinsic::s390_vuplf:
3959  return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
3960  Op.getOperand(1));
3961 
3962  case Intrinsic::s390_vupllb:
3963  case Intrinsic::s390_vupllh:
3964  case Intrinsic::s390_vupllf:
3965  return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
3966  Op.getOperand(1));
3967 
3968  case Intrinsic::s390_vsumb:
3969  case Intrinsic::s390_vsumh:
3970  case Intrinsic::s390_vsumgh:
3971  case Intrinsic::s390_vsumgf:
3972  case Intrinsic::s390_vsumqf:
3973  case Intrinsic::s390_vsumqg:
3974  return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
3975  Op.getOperand(1), Op.getOperand(2));
3976  }
3977 
3978  return SDValue();
3979 }
3980 
3981 namespace {
3982 // Says that SystemZISD operation Opcode can be used to perform the equivalent
3983 // of a VPERM with permute vector Bytes. If Opcode takes three operands,
3984 // Operand is the constant third operand, otherwise it is the number of
3985 // bytes in each element of the result.
3986 struct Permute {
3987  unsigned Opcode;
3988  unsigned Operand;
3989  unsigned char Bytes[SystemZ::VectorBytes];
3990 };
3991 }
3992 
3993 static const Permute PermuteForms[] = {
3994  // VMRHG
3995  { SystemZISD::MERGE_HIGH, 8,
3996  { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
3997  // VMRHF
3998  { SystemZISD::MERGE_HIGH, 4,
3999  { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
4000  // VMRHH
4001  { SystemZISD::MERGE_HIGH, 2,
4002  { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
4003  // VMRHB
4004  { SystemZISD::MERGE_HIGH, 1,
4005  { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
4006  // VMRLG
4007  { SystemZISD::MERGE_LOW, 8,
4008  { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
4009  // VMRLF
4010  { SystemZISD::MERGE_LOW, 4,
4011  { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
4012  // VMRLH
4013  { SystemZISD::MERGE_LOW, 2,
4014  { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
4015  // VMRLB
4016  { SystemZISD::MERGE_LOW, 1,
4017  { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
4018  // VPKG
4019  { SystemZISD::PACK, 4,
4020  { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
4021  // VPKF
4022  { SystemZISD::PACK, 2,
4023  { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
4024  // VPKH
4025  { SystemZISD::PACK, 1,
4026  { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
4027  // VPDI V1, V2, 4 (low half of V1, high half of V2)
4028  { SystemZISD::PERMUTE_DWORDS, 4,
4029  { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
4030  // VPDI V1, V2, 1 (high half of V1, low half of V2)
4031  { SystemZISD::PERMUTE_DWORDS, 1,
4032  { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
4033 };
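// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this file): each PermuteForms entry lists,
// per result byte, which byte of the concatenated operands the instruction
// selects -- values 0-15 come from the first operand and 16-31 from the
// second, the same convention VPERM uses and the one matchPermute() checks a
// shuffle against.  The helper below applies such a pattern to two 16-byte
// values; names are made up for the example.
// ---------------------------------------------------------------------------
#include <array>
#include <cassert>
#include <cstdint>

using Bytes16 = std::array<uint8_t, 16>;

static Bytes16 applyPermute(const unsigned char (&Selectors)[16],
                            const Bytes16 &Op0, const Bytes16 &Op1) {
  Bytes16 Result{};
  for (unsigned I = 0; I < 16; ++I) {
    unsigned S = Selectors[I];
    Result[I] = S < 16 ? Op0[S] : Op1[S - 16];
  }
  return Result;
}

int main() {
  // The VMRLG pattern from the table: low half of Op0, then low half of Op1.
  const unsigned char VMRLG[16] = {8,  9,  10, 11, 12, 13, 14, 15,
                                   24, 25, 26, 27, 28, 29, 30, 31};
  Bytes16 A, B;
  for (unsigned I = 0; I < 16; ++I) {
    A[I] = static_cast<uint8_t>(I);          // 0x00 .. 0x0f
    B[I] = static_cast<uint8_t>(0x10 + I);   // 0x10 .. 0x1f
  }
  Bytes16 R = applyPermute(VMRLG, A, B);
  assert(R[0] == 0x08 && R[7] == 0x0f);      // low half of A
  assert(R[8] == 0x18 && R[15] == 0x1f);     // low half of B
  return 0;
}
// --------------------------- end of sketch ---------------------------------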
4034 
4035 // Called after matching a vector shuffle against a particular pattern.
4036 // Both the original shuffle and the pattern have two vector operands.
4037 // OpNos[0] is the operand of the original shuffle that should be used for
4038 // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
4039 // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
4040 // set OpNo0 and OpNo1 to the shuffle operands that should actually be used
4041 // for operands 0 and 1 of the pattern.
4042 static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
4043  if (OpNos[0] < 0) {
4044  if (OpNos[1] < 0)
4045  return false;
4046  OpNo0 = OpNo1 = OpNos[1];
4047  } else if (OpNos[1] < 0) {
4048  OpNo0 = OpNo1 = OpNos[0];
4049  } else {
4050  OpNo0 = OpNos[0];
4051  OpNo1 = OpNos[1];
4052  }
4053  return true;
4054 }
4055 
4056 // Bytes is a VPERM-like permute vector, except that -1 is used for
4057 // undefined bytes. Return true if the VPERM can be implemented using P.
4058 // When returning true set OpNo0 to the VPERM operand that should be
4059 // used for operand 0 of P and likewise OpNo1 for operand 1 of P.
4060 //
4061 // For example, if swapping the VPERM operands allows P to match, OpNo0
4062 // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
4063 // operand, but rewriting it to use two duplicated operands allows it to
4064 // match P, then OpNo0 and OpNo1 will be the same.
4065 static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
4066  unsigned &OpNo0, unsigned &OpNo1) {
4067  int OpNos[] = { -1, -1 };
4068  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
4069  int Elt = Bytes[I];
4070  if (Elt >= 0) {
4071  // Make sure that the two permute vectors use the same suboperand
4072  // byte number. Only the operand numbers (the high bits) are
4073  // allowed to differ.
4074  if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
4075  return false;
4076  int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
4077  int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
4078  // Make sure that the operand mappings are consistent with previous
4079  // elements.
4080  if (OpNos[ModelOpNo] == 1 - RealOpNo)
4081  return false;
4082  OpNos[ModelOpNo] = RealOpNo;
4083  }
4084  }
4085  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
4086 }
4087 
4088 // As above, but search for a matching permute.
4089 static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
4090  unsigned &OpNo0, unsigned &OpNo1) {
4091  for (auto &P : PermuteForms)
4092  if (matchPermute(Bytes, P, OpNo0, OpNo1))
4093  return &P;
4094  return nullptr;
4095 }
4096 
4097 // Bytes is a VPERM-like permute vector, except that -1 is used for
4098 // undefined bytes. This permute is an operand of an outer permute.
4099 // See whether redistributing the -1 bytes gives a shuffle that can be
4100 // implemented using P. If so, set Transform to a VPERM-like permute vector
4101 // that, when applied to the result of P, gives the original permute in Bytes.
4102 static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
4103  const Permute &P,
4104  SmallVectorImpl<int> &Transform) {
4105  unsigned To = 0;
4106  for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
4107  int Elt = Bytes[From];
4108  if (Elt < 0)
4109  // Byte number From of the result is undefined.
4110  Transform[From] = -1;
4111  else {
4112  while (P.Bytes[To] != Elt) {
4113  To += 1;
4114  if (To == SystemZ::VectorBytes)
4115  return false;
4116  }
4117  Transform[From] = To;
4118  }
4119  }
4120  return true;
4121 }
4122 
4123 // As above, but search for a matching permute.
4124 static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
4125  SmallVectorImpl<int> &Transform) {
4126  for (auto &P : PermuteForms)
4127  if (matchDoublePermute(Bytes, P, Transform))
4128  return &P;
4129  return nullptr;
4130 }
4131 
4132 // Convert the mask of the given shuffle op into a byte-level mask,
4133 // as if it had type vNi8.
4134 static bool getVPermMask(SDValue ShuffleOp,
4135  SmallVectorImpl<int> &Bytes) {
4136  EVT VT = ShuffleOp.getValueType();
4137  unsigned NumElements = VT.getVectorNumElements();
4138  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
4139 
4140  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
4141  Bytes.resize(NumElements * BytesPerElement, -1);
4142  for (unsigned I = 0; I < NumElements; ++I) {
4143  int Index = VSN->getMaskElt(I);
4144  if (Index >= 0)
4145  for (unsigned J = 0; J < BytesPerElement; ++J)
4146  Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
4147  }
4148  return true;
4149  }
4150  if (SystemZISD::SPLAT == ShuffleOp.getOpcode() &&
4151  isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
4152  unsigned Index = ShuffleOp.getConstantOperandVal(1);
4153  Bytes.resize(NumElements * BytesPerElement, -1);
4154  for (unsigned I = 0; I < NumElements; ++I)
4155  for (unsigned J = 0; J < BytesPerElement; ++J)
4156  Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
4157  return true;
4158  }
4159  return false;
4160 }
4161 
4162 // Bytes is a VPERM-like permute vector, except that -1 is used for
4163 // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
4164 // the result come from a contiguous sequence of bytes from one input.
4165 // Set Base to the selector for the first byte if so.
4166 static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
4167  unsigned BytesPerElement, int &Base) {
4168  Base = -1;
4169  for (unsigned I = 0; I < BytesPerElement; ++I) {
4170  if (Bytes[Start + I] >= 0) {
4171  unsigned Elem = Bytes[Start + I];
4172  if (Base < 0) {
4173  Base = Elem - I;
4174  // Make sure the bytes would come from one input operand.
4175  if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
4176  return false;
4177  } else if (unsigned(Base) != Elem - I)
4178  return false;
4179  }
4180  }
4181  return true;
4182 }
4183 
4184 // Bytes is a VPERM-like permute vector, except that -1 is used for
4185 // undefined bytes. Return true if it can be performed using VSLDI.
4186 // When returning true, set StartIndex to the shift amount and OpNo0
4187 // and OpNo1 to the VPERM operands that should be used as the first
4188 // and second shift operand respectively.
4189 static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
4190  unsigned &StartIndex, unsigned &OpNo0,
4191  unsigned &OpNo1) {
4192  int OpNos[] = { -1, -1 };
4193  int Shift = -1;
4194  for (unsigned I = 0; I < 16; ++I) {
4195  int Index = Bytes[I];
4196  if (Index >= 0) {
4197  int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
4198  int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
4199  int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
4200  if (Shift < 0)
4201  Shift = ExpectedShift;
4202  else if (Shift != ExpectedShift)
4203  return false;
4204  // Make sure that the operand mappings are consistent with previous
4205  // elements.
4206  if (OpNos[ModelOpNo] == 1 - RealOpNo)
4207  return false;
4208  OpNos[ModelOpNo] = RealOpNo;
4209  }
4210  }
4211  StartIndex = Shift;
4212  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
4213 }
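// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this file): the pattern the function above
// looks for is the reference behaviour of SHL_DOUBLE (VSLDI) -- the result is
// 16 consecutive bytes of the concatenation of the two operands, starting at
// the shift amount.  Names below are made up for the example.
// ---------------------------------------------------------------------------
#include <array>
#include <cassert>
#include <cstdint>

using Bytes16 = std::array<uint8_t, 16>;

static Bytes16 shlDouble(const Bytes16 &Op0, const Bytes16 &Op1,
                         unsigned Shift) {
  Bytes16 Result{};
  for (unsigned I = 0; I < 16; ++I) {
    unsigned Src = Shift + I;                // index into concat(Op0, Op1)
    Result[I] = Src < 16 ? Op0[Src] : Op1[Src - 16];
  }
  return Result;
}

int main() {
  Bytes16 A, B;
  for (unsigned I = 0; I < 16; ++I) {
    A[I] = static_cast<uint8_t>(I);
    B[I] = static_cast<uint8_t>(0x10 + I);
  }
  Bytes16 R = shlDouble(A, B, 5);            // StartIndex == 5
  assert(R[0] == 0x05);                      // byte 5 of the first operand
  assert(R[11] == 0x10);                     // byte 0 of the second operand
  return 0;
}
// --------------------------- end of sketch ---------------------------------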
4214 
4215 // Create a node that performs P on operands Op0 and Op1, casting the
4216 // operands to the appropriate type. The type of the result is determined by P.
4217 static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
4218  const Permute &P, SDValue Op0, SDValue Op1) {
4219  // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
4220  // elements of a PACK are twice as wide as the outputs.
4221  unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
4222  P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
4223  P.Operand);
4224  // Cast both operands to the appropriate type.
4225  MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
4226  SystemZ::VectorBytes / InBytes);
4227  Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
4228  Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
4229  SDValue Op;
4230  if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
4231  SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32);
4232  Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
4233  } else if (P.Opcode == SystemZISD::PACK) {
4234  MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
4235  SystemZ::VectorBytes / P.Operand);
4236  Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
4237  } else {
4238  Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
4239  }
4240  return Op;
4241 }
4242 
4243 // Bytes is a VPERM-like permute vector, except that -1 is used for
4244 // undefined bytes. Implement it on operands Ops[0] and Ops[1] using
4245 // VSLDI or VPERM.
4246  static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
4247  SDValue *Ops,
4248  const SmallVectorImpl<int> &Bytes) {
4249  for (unsigned I = 0; I < 2; ++I)
4250  Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);
4251 
4252  // First see whether VSLDI can be used.
4253  unsigned StartIndex, OpNo0, OpNo1;
4254  if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
4255  return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
4256  Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32));
4257 
4258  // Fall back on VPERM. Construct an SDNode for the permute vector.
4259  SDValue IndexNodes[SystemZ::VectorBytes];
4260  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
4261  if (Bytes[I] >= 0)
4262  IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
4263  else
4264  IndexNodes[I] = DAG.getUNDEF(MVT::i32);
4265  SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
4266  return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
4267 }
4268 
4269 namespace {
4270 // Describes a general N-operand vector shuffle.
4271 struct GeneralShuffle {
4272  GeneralShuffle(EVT vt) : VT(vt) {}
4273  void addUndef();
4274  bool add(SDValue, unsigned);
4275  SDValue getNode(SelectionDAG &, const SDLoc &);
4276 
4277  // The operands of the shuffle.
4278  SmallVector<SDValue, SystemZ::VectorBytes> Ops;
4279 
4280  // Index I is -1 if byte I of the result is undefined. Otherwise the
4281  // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
4282  // Bytes[I] / SystemZ::VectorBytes.
4283  SmallVector<int, SystemZ::VectorBytes> Bytes;
4284 
4285  // The type of the shuffle result.
4286  EVT VT;
4287 };
4288 }
4289 
4290 // Add an extra undefined element to the shuffle.
4291 void GeneralShuffle::addUndef() {
4292  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
4293  for (unsigned I = 0; I < BytesPerElement; ++I)
4294  Bytes.push_back(-1);
4295 }
4296 
4297 // Add an extra element to the shuffle, taking it from element Elem of Op.
4298 // A null Op indicates a vector input whose value will be calculated later;
4299 // there is at most one such input per shuffle and it always has the same
4300 // type as the result. Aborts and returns false if the source vector elements
4301 // of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per
4302 // LLVM they become implicitly extended, but this is rare and not optimized.
4303 bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
4304  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
4305 
4306  // The source vector can have wider elements than the result,
4307  // either through an explicit TRUNCATE or because of type legalization.
4308  // We want the least significant part.
4309  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
4310  unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();
4311 
4312  // Return false if the source elements are smaller than their destination
4313  // elements.
4314  if (FromBytesPerElement < BytesPerElement)
4315  return false;
4316 
4317  unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
4318  (FromBytesPerElement - BytesPerElement));
4319 
4320  // Look through things like shuffles and bitcasts.
4321  while (Op.getNode()) {
4322  if (Op.getOpcode() == ISD::BITCAST)
4323  Op = Op.getOperand(0);
4324  else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
4325  // See whether the bytes we need come from a contiguous part of one
4326  // operand.
4327  SmallVector<int, SystemZ::VectorBytes> OpBytes;
4328  if (!getVPermMask(Op, OpBytes))
4329  break;
4330  int NewByte;
4331  if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
4332  break;
4333  if (NewByte < 0) {
4334  addUndef();
4335  return true;
4336  }
4337  Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
4338  Byte = unsigned(NewByte) % SystemZ::VectorBytes;
4339  } else if (Op.isUndef()) {
4340  addUndef();
4341  return true;
4342  } else
4343  break;
4344  }
4345 
4346  // Make sure that the source of the extraction is in Ops.
4347  unsigned OpNo = 0;
4348  for (; OpNo < Ops.size(); ++OpNo)
4349  if (Ops[OpNo] == Op)
4350  break;
4351  if (OpNo == Ops.size())
4352  Ops.push_back(Op);
4353 
4354  // Add the element to Bytes.
4355  unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
4356  for (unsigned I = 0; I < BytesPerElement; ++I)
4357  Bytes.push_back(Base + I);
4358 
4359  return true;
4360 }
4361 
4362 // Return SDNodes for the completed shuffle.
4363 SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
4364  assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");
4365 
4366  if (Ops.size() == 0)
4367  return DAG.getUNDEF(VT);
4368 
4369  // Make sure that there are at least two shuffle operands.
4370  if (Ops.size() == 1)
4371  Ops.push_back(DAG.getUNDEF(MVT::v16i8));
4372 
4373  // Create a tree of shuffles, deferring root node until after the loop.
4374  // Try to redistribute the undefined elements of non-root nodes so that
4375  // the non-root shuffles match something like a pack or merge, then adjust
4376  // the parent node's permute vector to compensate for the new order.
4377  // Among other things, this copes with vectors like <2 x i16> that were
4378  // padded with undefined elements during type legalization.
4379  //
4380  // In the best case this redistribution will lead to the whole tree
4381  // using packs and merges. It should rarely be a loss in other cases.
4382  unsigned Stride = 1;
4383  for (; Stride * 2 < Ops.size(); Stride *= 2) {
4384  for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
4385  SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
4386 
4387  // Create a mask for just these two operands.
4388  SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
4389  for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
4390  unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
4391  unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
4392  if (OpNo == I)
4393  NewBytes[J] = Byte;
4394  else if (OpNo == I + Stride)
4395  NewBytes[J] = SystemZ::VectorBytes + Byte;
4396  else
4397  NewBytes[J] = -1;
4398  }
4399  // See if it would be better to reorganize NewMask to avoid using VPERM.
4400  SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
4401  if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
4402  Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
4403  // Applying NewBytesMap to Ops[I] gets back to NewBytes.
4404  for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
4405  if (NewBytes[J] >= 0) {
4406  assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
4407  "Invalid double permute");
4408  Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
4409  } else
4410  assert(NewBytesMap[J] < 0 && "Invalid double permute");
4411  }
4412  } else {
4413  // Just use NewBytes on the operands.
4414  Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
4415  for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
4416  if (NewBytes[J] >= 0)
4417  Bytes[J] = I * SystemZ::VectorBytes + J;
4418  }
4419  }
4420  }
4421 
4422  // Now we just have 2 inputs. Put the second operand in Ops[1].
4423  if (Stride > 1) {
4424  Ops[1] = Ops[Stride];
4425  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
4426  if (Bytes[I] >= int(SystemZ::VectorBytes))
4427  Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
4428  }
4429 
4430  // Look for an instruction that can do the permute without resorting
4431  // to VPERM.
4432  unsigned OpNo0, OpNo1;
4433  SDValue Op;
4434  if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
4435  Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
4436  else
4437  Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);
4438  return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4439 }
4440 
4441 // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
4442 static bool isScalarToVector(SDValue Op) {
4443  for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
4444  if (!Op.getOperand(I).isUndef())
4445  return false;
4446  return true;
4447 }
4448 
4449 // Return a vector of type VT that contains Value in the first element.
4450 // The other elements don't matter.
4451 static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4452  SDValue Value) {
4453  // If we have a constant, replicate it to all elements and let the
4454  // BUILD_VECTOR lowering take care of it.
4455  if (Value.getOpcode() == ISD::Constant ||
4456  Value.getOpcode() == ISD::ConstantFP) {
4457  SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
4458  return DAG.getBuildVector(VT, DL, Ops);
4459  }
4460  if (Value.isUndef())
4461  return DAG.getUNDEF(VT);
4462  return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
4463 }
4464 
4465 // Return a vector of type VT in which Op0 is in element 0 and Op1 is in
4466 // element 1. Used for cases in which replication is cheap.
4467 static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4468  SDValue Op0, SDValue Op1) {
4469  if (Op0.isUndef()) {
4470  if (Op1.isUndef())
4471  return DAG.getUNDEF(VT);
4472  return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
4473  }
4474  if (Op1.isUndef())
4475  return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
4476  return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
4477  buildScalarToVector(DAG, DL, VT, Op0),
4478  buildScalarToVector(DAG, DL, VT, Op1));
4479 }
4480 
4481 // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
4482 // vector for them.
4483 static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
4484  SDValue Op1) {
4485  if (Op0.isUndef() && Op1.isUndef())
4486  return DAG.getUNDEF(MVT::v2i64);
4487  // If one of the two inputs is undefined then replicate the other one,
4488  // in order to avoid using another register unnecessarily.
4489  if (Op0.isUndef())
4490  Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4491  else if (Op1.isUndef())
4492  Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4493  else {
4494  Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4495  Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4496  }
4497  return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
4498 }
4499 
4500 // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
4501 // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
4502 // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR
4503 // would benefit from this representation and return it if so.
4504  static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
4505  BuildVectorSDNode *BVN) {
4506  EVT VT = BVN->getValueType(0);
4507  unsigned NumElements = VT.getVectorNumElements();
4508 
4509  // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
4510  // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still
4511  // need a BUILD_VECTOR, add an additional placeholder operand for that
4512  // BUILD_VECTOR and store its operands in ResidueOps.
4513  GeneralShuffle GS(VT);
4514  SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
4515  bool FoundOne = false;
4516  for (unsigned I = 0; I < NumElements; ++I) {
4517  SDValue Op = BVN->getOperand(I);
4518  if (Op.getOpcode() == ISD::TRUNCATE)
4519  Op = Op.getOperand(0);
4520  if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4521  Op.getOperand(1).getOpcode() == ISD::Constant) {
4522  unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4523  if (!GS.add(Op.getOperand(0), Elem))
4524  return SDValue();
4525  FoundOne = true;
4526  } else if (Op.isUndef()) {
4527  GS.addUndef();
4528  } else {
4529  if (!GS.add(SDValue(), ResidueOps.size()))
4530  return SDValue();
4531  ResidueOps.push_back(BVN->getOperand(I));
4532  }
4533  }
4534 
4535  // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
4536  if (!FoundOne)
4537  return SDValue();
4538 
4539  // Create the BUILD_VECTOR for the remaining elements, if any.
4540  if (!ResidueOps.empty()) {
4541  while (ResidueOps.size() < NumElements)
4542  ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
4543  for (auto &Op : GS.Ops) {
4544  if (!Op.getNode()) {
4545  Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
4546  break;
4547  }
4548  }
4549  }
4550  return GS.getNode(DAG, SDLoc(BVN));
4551 }
4552 
4553