Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1154, column 10
Called C++ object pointer is null
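
For context on this checker class: "Called C++ object pointer is null" is reported when a non-static member function is invoked through a pointer the analyzer considers null on at least one feasible path. The stand-alone C++ sketch below is illustrative only — the struct and function names are made up and it is not the code at SelectionDAGNodes.h:1154 — it just shows the shape of such a report:

  #include <cstdio>

  struct Node {
    unsigned Opcode = 0;
    unsigned getOpcode() const { return Opcode; }
  };

  // If P can be null on some path reaching this call, the analyzer reports
  // "Called C++ object pointer is null" at the member call below.
  unsigned opcodeOf(const Node *P) {
    return P->getOpcode();
  }

  int main() {
    Node N;
    std::printf("%u\n", opcodeOf(&N)); // fine: non-null
    // opcodeOf(nullptr);              // the kind of path the analyzer flags
    return 0;
  }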

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name RISCVISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/lib/Target/RISCV -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/RISCV -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/include -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/lib/Target/RISCV -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-01-13-084841-49055-1 -x c++ /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/RISCV/RISCVISelLowering.cpp

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/RISCV/RISCVISelLowering.cpp

1//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that RISCV uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "RISCVISelLowering.h"
15#include "RISCV.h"
16#include "RISCVMachineFunctionInfo.h"
17#include "RISCVRegisterInfo.h"
18#include "RISCVSubtarget.h"
19#include "RISCVTargetMachine.h"
20#include "Utils/RISCVMatInt.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/CodeGen/CallingConvLower.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineFunction.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineRegisterInfo.h"
28#include "llvm/CodeGen/SelectionDAGISel.h"
29#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30#include "llvm/CodeGen/ValueTypes.h"
31#include "llvm/IR/DiagnosticInfo.h"
32#include "llvm/IR/DiagnosticPrinter.h"
33#include "llvm/IR/IntrinsicsRISCV.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/ErrorHandling.h"
36#include "llvm/Support/raw_ostream.h"
37
38using namespace llvm;
39
40#define DEBUG_TYPE "riscv-lower"
41
42STATISTIC(NumTailCalls, "Number of tail calls");
43
44RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
45 const RISCVSubtarget &STI)
46 : TargetLowering(TM), Subtarget(STI) {
47
48 if (Subtarget.isRV32E())
49 report_fatal_error("Codegen not yet implemented for RV32E");
50
51 RISCVABI::ABI ABI = Subtarget.getTargetABI();
52 assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
53
54 switch (ABI) {
55 default:
56 report_fatal_error("Don't know how to lower this ABI");
57 case RISCVABI::ABI_ILP32:
58 case RISCVABI::ABI_ILP32F:
59 case RISCVABI::ABI_ILP32D:
60 case RISCVABI::ABI_LP64:
61 case RISCVABI::ABI_LP64F:
62 case RISCVABI::ABI_LP64D:
63 break;
64 }
65
66 MVT XLenVT = Subtarget.getXLenVT();
67
68 // Set up the register classes.
69 addRegisterClass(XLenVT, &RISCV::GPRRegClass);
70
71 if (Subtarget.hasStdExtF())
72 addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
73 if (Subtarget.hasStdExtD())
74 addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
75
76 // Compute derived properties from the register classes.
77 computeRegisterProperties(STI.getRegisterInfo());
78
79 setStackPointerRegisterToSaveRestore(RISCV::X2);
80
81 for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
82 setLoadExtAction(N, XLenVT, MVT::i1, Promote);
83
84 // TODO: add all necessary setOperationAction calls.
85 setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
86
87 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
88 setOperationAction(ISD::BR_CC, XLenVT, Expand);
89 setOperationAction(ISD::SELECT, XLenVT, Custom);
90 setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
91
92 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
93 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
94
95 setOperationAction(ISD::VASTART, MVT::Other, Custom);
96 setOperationAction(ISD::VAARG, MVT::Other, Expand);
97 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
98 setOperationAction(ISD::VAEND, MVT::Other, Expand);
99
100 for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
101 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
102
103 if (Subtarget.is64Bit()) {
104 setOperationAction(ISD::ADD, MVT::i32, Custom);
105 setOperationAction(ISD::SUB, MVT::i32, Custom);
106 setOperationAction(ISD::SHL, MVT::i32, Custom);
107 setOperationAction(ISD::SRA, MVT::i32, Custom);
108 setOperationAction(ISD::SRL, MVT::i32, Custom);
109 }
110
111 if (!Subtarget.hasStdExtM()) {
112 setOperationAction(ISD::MUL, XLenVT, Expand);
113 setOperationAction(ISD::MULHS, XLenVT, Expand);
114 setOperationAction(ISD::MULHU, XLenVT, Expand);
115 setOperationAction(ISD::SDIV, XLenVT, Expand);
116 setOperationAction(ISD::UDIV, XLenVT, Expand);
117 setOperationAction(ISD::SREM, XLenVT, Expand);
118 setOperationAction(ISD::UREM, XLenVT, Expand);
119 }
120
121 if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
122 setOperationAction(ISD::MUL, MVT::i32, Custom);
123 setOperationAction(ISD::SDIV, MVT::i32, Custom);
124 setOperationAction(ISD::UDIV, MVT::i32, Custom);
125 setOperationAction(ISD::UREM, MVT::i32, Custom);
126 }
127
128 setOperationAction(ISD::SDIVREM, XLenVT, Expand);
129 setOperationAction(ISD::UDIVREM, XLenVT, Expand);
130 setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
131 setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
132
133 setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
134 setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
135 setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
136
137 setOperationAction(ISD::ROTL, XLenVT, Expand);
138 setOperationAction(ISD::ROTR, XLenVT, Expand);
139 setOperationAction(ISD::BSWAP, XLenVT, Expand);
140 setOperationAction(ISD::CTTZ, XLenVT, Expand);
141 setOperationAction(ISD::CTLZ, XLenVT, Expand);
142 setOperationAction(ISD::CTPOP, XLenVT, Expand);
143
144 ISD::CondCode FPCCToExtend[] = {
145 ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
146 ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
147 ISD::SETGE, ISD::SETNE};
148
149 ISD::NodeType FPOpToExtend[] = {
150 ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
151 ISD::FP_TO_FP16};
152
153 if (Subtarget.hasStdExtF()) {
154 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
155 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
156 for (auto CC : FPCCToExtend)
157 setCondCodeAction(CC, MVT::f32, Expand);
158 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
159 setOperationAction(ISD::SELECT, MVT::f32, Custom);
160 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
161 for (auto Op : FPOpToExtend)
162 setOperationAction(Op, MVT::f32, Expand);
163 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
164 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
165 }
166
167 if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
168 setOperationAction(ISD::BITCAST, MVT::i32, Custom);
169
170 if (Subtarget.hasStdExtD()) {
171 setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
172 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
173 for (auto CC : FPCCToExtend)
174 setCondCodeAction(CC, MVT::f64, Expand);
175 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
176 setOperationAction(ISD::SELECT, MVT::f64, Custom);
177 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
178 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
179 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
180 for (auto Op : FPOpToExtend)
181 setOperationAction(Op, MVT::f64, Expand);
182 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
183 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
184 }
185
186 setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
187 setOperationAction(ISD::BlockAddress, XLenVT, Custom);
188 setOperationAction(ISD::ConstantPool, XLenVT, Custom);
189
190 setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
191
192 // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
193 // Unfortunately this can't be determined just from the ISA naming string.
194 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
195 Subtarget.is64Bit() ? Legal : Custom);
196
197 setOperationAction(ISD::TRAP, MVT::Other, Legal);
198 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
199
200 if (Subtarget.hasStdExtA()) {
201 setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
202 setMinCmpXchgSizeInBits(32);
203 } else {
204 setMaxAtomicSizeInBitsSupported(0);
205 }
206
207 setBooleanContents(ZeroOrOneBooleanContent);
208
209 // Function alignments.
210 const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
211 setMinFunctionAlignment(FunctionAlignment);
212 setPrefFunctionAlignment(FunctionAlignment);
213
214 // Effectively disable jump table generation.
215 setMinimumJumpTableEntries(INT_MAX);
216}
217
218EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
219 EVT VT) const {
220 if (!VT.isVector())
221 return getPointerTy(DL);
222 return VT.changeVectorElementTypeToInteger();
223}
224
225bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
226 const CallInst &I,
227 MachineFunction &MF,
228 unsigned Intrinsic) const {
229 switch (Intrinsic) {
230 default:
231 return false;
232 case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
233 case Intrinsic::riscv_masked_atomicrmw_add_i32:
234 case Intrinsic::riscv_masked_atomicrmw_sub_i32:
235 case Intrinsic::riscv_masked_atomicrmw_nand_i32:
236 case Intrinsic::riscv_masked_atomicrmw_max_i32:
237 case Intrinsic::riscv_masked_atomicrmw_min_i32:
238 case Intrinsic::riscv_masked_atomicrmw_umax_i32:
239 case Intrinsic::riscv_masked_atomicrmw_umin_i32:
240 case Intrinsic::riscv_masked_cmpxchg_i32:
241 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
242 Info.opc = ISD::INTRINSIC_W_CHAIN;
243 Info.memVT = MVT::getVT(PtrTy->getElementType());
244 Info.ptrVal = I.getArgOperand(0);
245 Info.offset = 0;
246 Info.align = Align(4);
247 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
248 MachineMemOperand::MOVolatile;
249 return true;
250 }
251}
252
253bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
254 const AddrMode &AM, Type *Ty,
255 unsigned AS,
256 Instruction *I) const {
257 // No global is ever allowed as a base.
258 if (AM.BaseGV)
259 return false;
260
261 // Require a 12-bit signed offset.
262 if (!isInt<12>(AM.BaseOffs))
263 return false;
264
265 switch (AM.Scale) {
266 case 0: // "r+i" or just "i", depending on HasBaseReg.
267 break;
268 case 1:
269 if (!AM.HasBaseReg) // allow "r+i".
270 break;
271 return false; // disallow "r+r" or "r+r+i".
272 default:
273 return false;
274 }
275
276 return true;
277}
278
279bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
280 return isInt<12>(Imm);
281}
282
283bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
284 return isInt<12>(Imm);
285}
286
287// On RV32, 64-bit integers are split into their high and low parts and held
288// in two different registers, so the trunc is free since the low register can
289// just be used.
290bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
291 if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
292 return false;
293 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
294 unsigned DestBits = DstTy->getPrimitiveSizeInBits();
295 return (SrcBits == 64 && DestBits == 32);
296}
297
298bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
299 if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
300 !SrcVT.isInteger() || !DstVT.isInteger())
301 return false;
302 unsigned SrcBits = SrcVT.getSizeInBits();
303 unsigned DestBits = DstVT.getSizeInBits();
304 return (SrcBits == 64 && DestBits == 32);
305}
306
307bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
308 // Zexts are free if they can be combined with a load.
309 if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1. Assuming 'LD' is null
2. Taking false branch
310 EVT MemVT = LD->getMemoryVT();
311 if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
312 (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
313 (LD->getExtensionType() == ISD::NON_EXTLOAD ||
314 LD->getExtensionType() == ISD::ZEXTLOAD))
315 return true;
316 }
317
318 return TargetLowering::isZExtFree(Val, VT2);
3. Value assigned to 'Val.Node'
4. Calling 'TargetLoweringBase::isZExtFree'
319}
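
To make the path events above concrete: the analyzer assumes the dyn_cast<LoadSDNode>(Val) result 'LD' is null (event 1), so the guarded block is skipped (event 2), and execution falls through to TargetLowering::isZExtFree(Val, VT2) (events 3 and 4); the dereference it then reports lies in SelectionDAGNodes.h, per the summary at the top. The stand-alone sketch below uses hypothetical BaseNode/LoadNode classes and a hand-rolled dynCastToLoad helper (not the real SelectionDAG types or llvm::dyn_cast) to show the pattern being reasoned about: a null dyn_cast-style result means either the object is of another type or the wrapped pointer itself was null, so the fall-through path must not dereference it unguarded.

  #include <cstdio>

  // Hypothetical stand-ins for SDNode/LoadSDNode; not the real LLVM classes.
  struct BaseNode {
    int Kind; // 0 = generic, 1 = load
    explicit BaseNode(int K) : Kind(K) {}
  };
  struct LoadNode : BaseNode {
    LoadNode() : BaseNode(1) {}
  };

  // Minimal dyn_cast-style helper: returns nullptr when the pointer is null
  // or the node is not a load.
  static LoadNode *dynCastToLoad(BaseNode *N) {
    return (N && N->Kind == 1) ? static_cast<LoadNode *>(N) : nullptr;
  }

  // Same control-flow shape as isZExtFree: a fast path guarded by the cast,
  // and a fall-through path that must still treat N as possibly null.
  static bool isFreeLikePattern(BaseNode *N) {
    if (LoadNode *LD = dynCastToLoad(N)) {
      (void)LD;
      return true;
    }
    // N was not a load -- or was null. Guard before any further use.
    return N != nullptr && N->Kind == 0;
  }

  int main() {
    LoadNode L;
    std::printf("%d %d\n", isFreeLikePattern(&L), isFreeLikePattern(nullptr));
    return 0;
  }
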
320
321bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
322 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
323}
324
325bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
326 return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
327 (VT == MVT::f64 && Subtarget.hasStdExtD());
328}
329
330// Changes the condition code and swaps operands if necessary, so the SetCC
331// operation matches one of the comparisons supported directly in the RISC-V
332// ISA.
333static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
334 switch (CC) {
335 default:
336 break;
337 case ISD::SETGT:
338 case ISD::SETLE:
339 case ISD::SETUGT:
340 case ISD::SETULE:
341 CC = ISD::getSetCCSwappedOperands(CC);
342 std::swap(LHS, RHS);
343 break;
344 }
345}
346
347// Return the RISC-V branch opcode that matches the given DAG integer
348// condition code. The CondCode must be one of those supported by the RISC-V
349// ISA (see normaliseSetCC).
350static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
351 switch (CC) {
352 default:
353 llvm_unreachable("Unsupported CondCode");
354 case ISD::SETEQ:
355 return RISCV::BEQ;
356 case ISD::SETNE:
357 return RISCV::BNE;
358 case ISD::SETLT:
359 return RISCV::BLT;
360 case ISD::SETGE:
361 return RISCV::BGE;
362 case ISD::SETULT:
363 return RISCV::BLTU;
364 case ISD::SETUGE:
365 return RISCV::BGEU;
366 }
367}
368
369SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
370 SelectionDAG &DAG) const {
371 switch (Op.getOpcode()) {
372 default:
373 report_fatal_error("unimplemented operand");
374 case ISD::GlobalAddress:
375 return lowerGlobalAddress(Op, DAG);
376 case ISD::BlockAddress:
377 return lowerBlockAddress(Op, DAG);
378 case ISD::ConstantPool:
379 return lowerConstantPool(Op, DAG);
380 case ISD::GlobalTLSAddress:
381 return lowerGlobalTLSAddress(Op, DAG);
382 case ISD::SELECT:
383 return lowerSELECT(Op, DAG);
384 case ISD::VASTART:
385 return lowerVASTART(Op, DAG);
386 case ISD::FRAMEADDR:
387 return lowerFRAMEADDR(Op, DAG);
388 case ISD::RETURNADDR:
389 return lowerRETURNADDR(Op, DAG);
390 case ISD::SHL_PARTS:
391 return lowerShiftLeftParts(Op, DAG);
392 case ISD::SRA_PARTS:
393 return lowerShiftRightParts(Op, DAG, true);
394 case ISD::SRL_PARTS:
395 return lowerShiftRightParts(Op, DAG, false);
396 case ISD::BITCAST: {
397 assert(Subtarget.is64Bit() && Subtarget.hasStdExtF() &&
398 "Unexpected custom legalisation");
399 SDLoc DL(Op);
400 SDValue Op0 = Op.getOperand(0);
401 if (Op.getValueType() != MVT::f32 || Op0.getValueType() != MVT::i32)
402 return SDValue();
403 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
404 SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
405 return FPConv;
406 }
407 }
408}
409
410static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
411 SelectionDAG &DAG, unsigned Flags) {
412 return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
413}
414
415static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
416 SelectionDAG &DAG, unsigned Flags) {
417 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
418 Flags);
419}
420
421static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
422 SelectionDAG &DAG, unsigned Flags) {
423 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
424 N->getOffset(), Flags);
425}
426
427template <class NodeTy>
428SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
429 bool IsLocal) const {
430 SDLoc DL(N);
431 EVT Ty = getPointerTy(DAG.getDataLayout());
432
433 if (isPositionIndependent()) {
434 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
435 if (IsLocal)
436 // Use PC-relative addressing to access the symbol. This generates the
437 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
438 // %pcrel_lo(auipc)).
439 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
440
441 // Use PC-relative addressing to access the GOT for this symbol, then load
442 // the address from the GOT. This generates the pattern (PseudoLA sym),
443 // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
444 return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
445 }
446
447 switch (getTargetMachine().getCodeModel()) {
448 default:
449 report_fatal_error("Unsupported code model for lowering");
450 case CodeModel::Small: {
451 // Generate a sequence for accessing addresses within the first 2 GiB of
452 // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
453 SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
454 SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
455 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
456 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
457 }
458 case CodeModel::Medium: {
459 // Generate a sequence for accessing addresses within any 2GiB range within
460 // the address space. This generates the pattern (PseudoLLA sym), which
461 // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
462 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
463 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
464 }
465 }
466}
467
468SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
469 SelectionDAG &DAG) const {
470 SDLoc DL(Op);
471 EVT Ty = Op.getValueType();
472 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
473 int64_t Offset = N->getOffset();
474 MVT XLenVT = Subtarget.getXLenVT();
475
476 const GlobalValue *GV = N->getGlobal();
477 bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
478 SDValue Addr = getAddr(N, DAG, IsLocal);
479
480 // In order to maximise the opportunity for common subexpression elimination,
481 // emit a separate ADD node for the global address offset instead of folding
482 // it in the global address node. Later peephole optimisations may choose to
483 // fold it back in when profitable.
484 if (Offset != 0)
485 return DAG.getNode(ISD::ADD, DL, Ty, Addr,
486 DAG.getConstant(Offset, DL, XLenVT));
487 return Addr;
488}
489
490SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
491 SelectionDAG &DAG) const {
492 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
493
494 return getAddr(N, DAG);
495}
496
497SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
498 SelectionDAG &DAG) const {
499 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
500
501 return getAddr(N, DAG);
502}
503
504SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
505 SelectionDAG &DAG,
506 bool UseGOT) const {
507 SDLoc DL(N);
508 EVT Ty = getPointerTy(DAG.getDataLayout());
509 const GlobalValue *GV = N->getGlobal();
510 MVT XLenVT = Subtarget.getXLenVT();
511
512 if (UseGOT) {
513 // Use PC-relative addressing to access the GOT for this TLS symbol, then
514 // load the address from the GOT and add the thread pointer. This generates
515 // the pattern (PseudoLA_TLS_IE sym), which expands to
516 // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
517 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
518 SDValue Load =
519 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
520
521 // Add the thread pointer.
522 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
523 return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
524 }
525
526 // Generate a sequence for accessing the address relative to the thread
527 // pointer, with the appropriate adjustment for the thread pointer offset.
528 // This generates the pattern
529 // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
530 SDValue AddrHi =
531 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
532 SDValue AddrAdd =
533 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
534 SDValue AddrLo =
535 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
536
537 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
538 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
539 SDValue MNAdd = SDValue(
540 DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
541 0);
542 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
543}
544
545SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
546 SelectionDAG &DAG) const {
547 SDLoc DL(N);
548 EVT Ty = getPointerTy(DAG.getDataLayout());
549 IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
550 const GlobalValue *GV = N->getGlobal();
551
552 // Use a PC-relative addressing mode to access the global dynamic GOT address.
553 // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
554 // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
555 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
556 SDValue Load =
557 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
558
559 // Prepare argument list to generate call.
560 ArgListTy Args;
561 ArgListEntry Entry;
562 Entry.Node = Load;
563 Entry.Ty = CallTy;
564 Args.push_back(Entry);
565
566 // Setup call to __tls_get_addr.
567 TargetLowering::CallLoweringInfo CLI(DAG);
568 CLI.setDebugLoc(DL)
569 .setChain(DAG.getEntryNode())
570 .setLibCallee(CallingConv::C, CallTy,
571 DAG.getExternalSymbol("__tls_get_addr", Ty),
572 std::move(Args));
573
574 return LowerCallTo(CLI).first;
575}
576
577SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
578 SelectionDAG &DAG) const {
579 SDLoc DL(Op);
580 EVT Ty = Op.getValueType();
581 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
582 int64_t Offset = N->getOffset();
583 MVT XLenVT = Subtarget.getXLenVT();
584
585 TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
586
587 SDValue Addr;
588 switch (Model) {
589 case TLSModel::LocalExec:
590 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
591 break;
592 case TLSModel::InitialExec:
593 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
594 break;
595 case TLSModel::LocalDynamic:
596 case TLSModel::GeneralDynamic:
597 Addr = getDynamicTLSAddr(N, DAG);
598 break;
599 }
600
601 // In order to maximise the opportunity for common subexpression elimination,
602 // emit a separate ADD node for the global address offset instead of folding
603 // it in the global address node. Later peephole optimisations may choose to
604 // fold it back in when profitable.
605 if (Offset != 0)
606 return DAG.getNode(ISD::ADD, DL, Ty, Addr,
607 DAG.getConstant(Offset, DL, XLenVT));
608 return Addr;
609}
610
611SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
612 SDValue CondV = Op.getOperand(0);
613 SDValue TrueV = Op.getOperand(1);
614 SDValue FalseV = Op.getOperand(2);
615 SDLoc DL(Op);
616 MVT XLenVT = Subtarget.getXLenVT();
617
618 // If the result type is XLenVT and CondV is the output of a SETCC node
619 // which also operated on XLenVT inputs, then merge the SETCC node into the
620 // lowered RISCVISD::SELECT_CC to take advantage of the integer
621 // compare+branch instructions. i.e.:
622 // (select (setcc lhs, rhs, cc), truev, falsev)
623 // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
624 if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
625 CondV.getOperand(0).getSimpleValueType() == XLenVT) {
626 SDValue LHS = CondV.getOperand(0);
627 SDValue RHS = CondV.getOperand(1);
628 auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
629 ISD::CondCode CCVal = CC->get();
630
631 normaliseSetCC(LHS, RHS, CCVal);
632
633 SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
634 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
635 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
636 return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
637 }
638
639 // Otherwise:
640 // (select condv, truev, falsev)
641 // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
642 SDValue Zero = DAG.getConstant(0, DL, XLenVT);
643 SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
644
645 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
646 SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
647
648 return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
649}
650
651SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
652 MachineFunction &MF = DAG.getMachineFunction();
653 RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
654
655 SDLoc DL(Op);
656 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
657 getPointerTy(MF.getDataLayout()));
658
659 // vastart just stores the address of the VarArgsFrameIndex slot into the
660 // memory location argument.
661 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
662 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
663 MachinePointerInfo(SV));
664}
665
666SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
667 SelectionDAG &DAG) const {
668 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
669 MachineFunction &MF = DAG.getMachineFunction();
670 MachineFrameInfo &MFI = MF.getFrameInfo();
671 MFI.setFrameAddressIsTaken(true);
672 Register FrameReg = RI.getFrameRegister(MF);
673 int XLenInBytes = Subtarget.getXLen() / 8;
674
675 EVT VT = Op.getValueType();
676 SDLoc DL(Op);
677 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
678 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
679 while (Depth--) {
680 int Offset = -(XLenInBytes * 2);
681 SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
682 DAG.getIntPtrConstant(Offset, DL));
683 FrameAddr =
684 DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
685 }
686 return FrameAddr;
687}
688
689SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
690 SelectionDAG &DAG) const {
691 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
692 MachineFunction &MF = DAG.getMachineFunction();
693 MachineFrameInfo &MFI = MF.getFrameInfo();
694 MFI.setReturnAddressIsTaken(true);
695 MVT XLenVT = Subtarget.getXLenVT();
696 int XLenInBytes = Subtarget.getXLen() / 8;
697
698 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
699 return SDValue();
700
701 EVT VT = Op.getValueType();
702 SDLoc DL(Op);
703 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
704 if (Depth) {
705 int Off = -XLenInBytes;
706 SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
707 SDValue Offset = DAG.getConstant(Off, DL, VT);
708 return DAG.getLoad(VT, DL, DAG.getEntryNode(),
709 DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
710 MachinePointerInfo());
711 }
712
713 // Return the value of the return address register, marking it an implicit
714 // live-in.
715 Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
716 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
717}
718
719SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
720 SelectionDAG &DAG) const {
721 SDLoc DL(Op);
722 SDValue Lo = Op.getOperand(0);
723 SDValue Hi = Op.getOperand(1);
724 SDValue Shamt = Op.getOperand(2);
725 EVT VT = Lo.getValueType();
726
727 // if Shamt-XLEN < 0: // Shamt < XLEN
728 // Lo = Lo << Shamt
729 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
730 // else:
731 // Lo = 0
732 // Hi = Lo << (Shamt-XLEN)
733
734 SDValue Zero = DAG.getConstant(0, DL, VT);
735 SDValue One = DAG.getConstant(1, DL, VT);
736 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
737 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
738 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
739 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
740
741 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
742 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
743 SDValue ShiftRightLo =
744 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
745 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
746 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
747 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
748
749 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
750
751 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
752 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
753
754 SDValue Parts[2] = {Lo, Hi};
755 return DAG.getMergeValues(Parts, DL);
756}
757
758SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
759 bool IsSRA) const {
760 SDLoc DL(Op);
761 SDValue Lo = Op.getOperand(0);
762 SDValue Hi = Op.getOperand(1);
763 SDValue Shamt = Op.getOperand(2);
764 EVT VT = Lo.getValueType();
765
766 // SRA expansion:
767 // if Shamt-XLEN < 0: // Shamt < XLEN
768 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
769 // Hi = Hi >>s Shamt
770 // else:
771 // Lo = Hi >>s (Shamt-XLEN);
772 // Hi = Hi >>s (XLEN-1)
773 //
774 // SRL expansion:
775 // if Shamt-XLEN < 0: // Shamt < XLEN
776 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
777 // Hi = Hi >>u Shamt
778 // else:
779 // Lo = Hi >>u (Shamt-XLEN);
780 // Hi = 0;
781
782 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
783
784 SDValue Zero = DAG.getConstant(0, DL, VT);
785 SDValue One = DAG.getConstant(1, DL, VT);
786 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
787 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
788 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
789 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
790
791 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
792 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
793 SDValue ShiftLeftHi =
794 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
795 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
796 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
797 SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
798 SDValue HiFalse =
799 IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
800
801 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
802
803 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
804 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
805
806 SDValue Parts[2] = {Lo, Hi};
807 return DAG.getMergeValues(Parts, DL);
808}
809
810// Returns the opcode of the target-specific SDNode that implements the 32-bit
811// form of the given Opcode.
812static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
813 switch (Opcode) {
814 default:
815 llvm_unreachable("Unexpected opcode");
816 case ISD::SHL:
817 return RISCVISD::SLLW;
818 case ISD::SRA:
819 return RISCVISD::SRAW;
820 case ISD::SRL:
821 return RISCVISD::SRLW;
822 case ISD::SDIV:
823 return RISCVISD::DIVW;
824 case ISD::UDIV:
825 return RISCVISD::DIVUW;
826 case ISD::UREM:
827 return RISCVISD::REMUW;
828 }
829}
830
831// Converts the given 32-bit operation to a target-specific SelectionDAG node.
832// Because i32 isn't a legal type for RV64, these operations would otherwise
833// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
834// later on because the fact that the operation was originally of type i32 is
835// lost.
836static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
837 SDLoc DL(N);
838 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
839 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
840 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
841 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
842 // ReplaceNodeResults requires we maintain the same type for the return value.
843 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
844}
845
846// Converts the given 32-bit operation to an i64 operation with sign-extension
847// semantics to reduce the number of sign-extension instructions.
848static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
849 SDLoc DL(N);
850 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
851 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
852 SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
853 SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
854 DAG.getValueType(MVT::i32));
855 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
856}
857
858void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
859 SmallVectorImpl<SDValue> &Results,
860 SelectionDAG &DAG) const {
861 SDLoc DL(N);
862 switch (N->getOpcode()) {
863 default:
864 llvm_unreachable("Don't know how to custom type legalize this operation!");
865 case ISD::READCYCLECOUNTER: {
866 assert(!Subtarget.is64Bit() &&
867 "READCYCLECOUNTER only has custom type legalization on riscv32");
868
869 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
870 SDValue RCW =
871 DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
872
873 Results.push_back(RCW);
874 Results.push_back(RCW.getValue(1));
875 Results.push_back(RCW.getValue(2));
876 break;
877 }
878 case ISD::ADD:
879 case ISD::SUB:
880 case ISD::MUL:
881 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
882 "Unexpected custom legalisation");
883 if (N->getOperand(1).getOpcode() == ISD::Constant)
884 return;
885 Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
886 break;
887 case ISD::SHL:
888 case ISD::SRA:
889 case ISD::SRL:
890 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
891 "Unexpected custom legalisation");
892 if (N->getOperand(1).getOpcode() == ISD::Constant)
893 return;
894 Results.push_back(customLegalizeToWOp(N, DAG));
895 break;
896 case ISD::SDIV:
897 case ISD::UDIV:
898 case ISD::UREM:
899 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
900 Subtarget.hasStdExtM() && "Unexpected custom legalisation");
901 if (N->getOperand(0).getOpcode() == ISD::Constant ||
902 N->getOperand(1).getOpcode() == ISD::Constant)
903 return;
904 Results.push_back(customLegalizeToWOp(N, DAG));
905 break;
906 case ISD::BITCAST: {
907 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
908 Subtarget.hasStdExtF() && "Unexpected custom legalisation");
909 SDLoc DL(N);
910 SDValue Op0 = N->getOperand(0);
911 if (Op0.getValueType() != MVT::f32)
912 return;
913 SDValue FPConv =
914 DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
915 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
916 break;
917 }
918 }
919}
920
921SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
922 DAGCombinerInfo &DCI) const {
923 SelectionDAG &DAG = DCI.DAG;
924
925 switch (N->getOpcode()) {
926 default:
927 break;
928 case RISCVISD::SplitF64: {
929 SDValue Op0 = N->getOperand(0);
930 // If the input to SplitF64 is just BuildPairF64 then the operation is
931 // redundant. Instead, use BuildPairF64's operands directly.
932 if (Op0->getOpcode() == RISCVISD::BuildPairF64)
933 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
934
935 SDLoc DL(N);
936
937 // It's cheaper to materialise two 32-bit integers than to load a double
938 // from the constant pool and transfer it to integer registers through the
939 // stack.
940 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
941 APInt V = C->getValueAPF().bitcastToAPInt();
942 SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
943 SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
944 return DCI.CombineTo(N, Lo, Hi);
945 }
946
947 // This is a target-specific version of a DAGCombine performed in
948 // DAGCombiner::visitBITCAST. It performs the equivalent of:
949 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
950 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
951 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
952 !Op0.getNode()->hasOneUse())
953 break;
954 SDValue NewSplitF64 =
955 DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
956 Op0.getOperand(0));
957 SDValue Lo = NewSplitF64.getValue(0);
958 SDValue Hi = NewSplitF64.getValue(1);
959 APInt SignBit = APInt::getSignMask(32);
960 if (Op0.getOpcode() == ISD::FNEG) {
961 SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
962 DAG.getConstant(SignBit, DL, MVT::i32));
963 return DCI.CombineTo(N, Lo, NewHi);
964 }
965 assert(Op0.getOpcode() == ISD::FABS);
966 SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
967 DAG.getConstant(~SignBit, DL, MVT::i32));
968 return DCI.CombineTo(N, Lo, NewHi);
969 }
970 case RISCVISD::SLLW:
971 case RISCVISD::SRAW:
972 case RISCVISD::SRLW: {
973 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
974 SDValue LHS = N->getOperand(0);
975 SDValue RHS = N->getOperand(1);
976 APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
977 APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
978 if ((SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI)) ||
979 (SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)))
980 return SDValue();
981 break;
982 }
983 case RISCVISD::FMV_X_ANYEXTW_RV64: {
984 SDLoc DL(N);
985 SDValue Op0 = N->getOperand(0);
986 // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
987 // conversion is unnecessary and can be replaced with an ANY_EXTEND
988 // of the FMV_W_X_RV64 operand.
989 if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
990 SDValue AExtOp =
991 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0.getOperand(0));
992 return DCI.CombineTo(N, AExtOp);
993 }
994
995 // This is a target-specific version of a DAGCombine performed in
996 // DAGCombiner::visitBITCAST. It performs the equivalent of:
997 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
998 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
999 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1000 !Op0.getNode()->hasOneUse())
1001 break;
1002 SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
1003 Op0.getOperand(0));
1004 APInt SignBit = APInt::getSignMask(32).sext(64);
1005 if (Op0.getOpcode() == ISD::FNEG) {
1006 return DCI.CombineTo(N,
1007 DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
1008 DAG.getConstant(SignBit, DL, MVT::i64)));
1009 }
1010 assert(Op0.getOpcode() == ISD::FABS);
1011 return DCI.CombineTo(N,
1012 DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
1013 DAG.getConstant(~SignBit, DL, MVT::i64)));
1014 }
1015 }
1016
1017 return SDValue();
1018}
1019
1020bool RISCVTargetLowering::isDesirableToCommuteWithShift(
1021 const SDNode *N, CombineLevel Level) const {
1022 // The following folds are only desirable if `(OP _, c1 << c2)` can be
1023 // materialised in fewer instructions than `(OP _, c1)`:
1024 //
1025 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
1026 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
1027 SDValue N0 = N->getOperand(0);
1028 EVT Ty = N0.getValueType();
1029 if (Ty.isScalarInteger() &&
1030 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
1031 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
1032 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
1033 if (C1 && C2) {
1034 APInt C1Int = C1->getAPIntValue();
1035 APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
1036
1037 // We can materialise `c1 << c2` into an add immediate, so it's "free",
1038 // and the combine should happen, to potentially allow further combines
1039 // later.
1040 if (ShiftedC1Int.getMinSignedBits() <= 64 &&
1041 isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
1042 return true;
1043
1044 // We can materialise `c1` in an add immediate, so it's "free", and the
1045 // combine should be prevented.
1046 if (C1Int.getMinSignedBits() <= 64 &&
1047 isLegalAddImmediate(C1Int.getSExtValue()))
1048 return false;
1049
1050 // Neither constant will fit into an immediate, so find materialisation
1051 // costs.
1052 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
1053 Subtarget.is64Bit());
1054 int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
1055 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
1056
1057 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
1058 // combine should be prevented.
1059 if (C1Cost < ShiftedC1Cost)
1060 return false;
1061 }
1062 }
1063 return true;
1064}
1065
1066unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
1067 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
1068 unsigned Depth) const {
1069 switch (Op.getOpcode()) {
1070 default:
1071 break;
1072 case RISCVISD::SLLW:
1073 case RISCVISD::SRAW:
1074 case RISCVISD::SRLW:
1075 case RISCVISD::DIVW:
1076 case RISCVISD::DIVUW:
1077 case RISCVISD::REMUW:
1078 // TODO: As the result is sign-extended, this is conservatively correct. A
1079 // more precise answer could be calculated for SRAW depending on known
1080 // bits in the shift amount.
1081 return 33;
1082 }
1083
1084 return 1;
1085}
1086
1087static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
1088 MachineBasicBlock *BB) {
1089 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
1090
1091 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
1092 // Should the count have wrapped while it was being read, we need to try
1093 // again.
1094 // ...
1095 // read:
1096 // rdcycleh x3 # load high word of cycle
1097 // rdcycle x2 # load low word of cycle
1098 // rdcycleh x4 # load high word of cycle
1099 // bne x3, x4, read # check if high word reads match, otherwise try again
1100 // ...
1101
1102 MachineFunction &MF = *BB->getParent();
1103 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1104 MachineFunction::iterator It = ++BB->getIterator();
1105
1106 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1107 MF.insert(It, LoopMBB);
1108
1109 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1110 MF.insert(It, DoneMBB);
1111
1112 // Transfer the remainder of BB and its successor edges to DoneMBB.
1113 DoneMBB->splice(DoneMBB->begin(), BB,
1114 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1115 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
1116
1117 BB->addSuccessor(LoopMBB);
1118
1119 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1120 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1121 Register LoReg = MI.getOperand(0).getReg();
1122 Register HiReg = MI.getOperand(1).getReg();
1123 DebugLoc DL = MI.getDebugLoc();
1124
1125 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
1126 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
1127 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1128 .addReg(RISCV::X0);
1129 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
1130 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
1131 .addReg(RISCV::X0);
1132 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
1133 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1134 .addReg(RISCV::X0);
1135
1136 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
1137 .addReg(HiReg)
1138 .addReg(ReadAgainReg)
1139 .addMBB(LoopMBB);
1140
1141 LoopMBB->addSuccessor(LoopMBB);
1142 LoopMBB->addSuccessor(DoneMBB);
1143
1144 MI.eraseFromParent();
1145
1146 return DoneMBB;
1147}
1148
1149static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
1150 MachineBasicBlock *BB) {
1151 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
1152
1153 MachineFunction &MF = *BB->getParent();
1154 DebugLoc DL = MI.getDebugLoc();
1155 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1156 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1157 Register LoReg = MI.getOperand(0).getReg();
1158 Register HiReg = MI.getOperand(1).getReg();
1159 Register SrcReg = MI.getOperand(2).getReg();
1160 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
1161 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
1162
1163 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
1164 RI);
1165 MachineMemOperand *MMO =
1166 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
1167 MachineMemOperand::MOLoad, 8, 8);
1168 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
1169 .addFrameIndex(FI)
1170 .addImm(0)
1171 .addMemOperand(MMO);
1172 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
1173 .addFrameIndex(FI)
1174 .addImm(4)
1175 .addMemOperand(MMO);
1176 MI.eraseFromParent(); // The pseudo instruction is gone now.
1177 return BB;
1178}
1179
1180static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
1181 MachineBasicBlock *BB) {
1182 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
1183 "Unexpected instruction");
1184
1185 MachineFunction &MF = *BB->getParent();
1186 DebugLoc DL = MI.getDebugLoc();
1187 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1188 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1189 Register DstReg = MI.getOperand(0).getReg();
1190 Register LoReg = MI.getOperand(1).getReg();
1191 Register HiReg = MI.getOperand(2).getReg();
1192 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
1193 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
1194
1195 MachineMemOperand *MMO =
1196 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
1197 MachineMemOperand::MOStore, 8, 8);
1198 BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
1199 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
1200 .addFrameIndex(FI)
1201 .addImm(0)
1202 .addMemOperand(MMO);
1203 BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
1204 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
1205 .addFrameIndex(FI)
1206 .addImm(4)
1207 .addMemOperand(MMO);
1208 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
1209 MI.eraseFromParent(); // The pseudo instruction is gone now.
1210 return BB;
1211}
1212
1213static bool isSelectPseudo(MachineInstr &MI) {
1214 switch (MI.getOpcode()) {
1215 default:
1216 return false;
1217 case RISCV::Select_GPR_Using_CC_GPR:
1218 case RISCV::Select_FPR32_Using_CC_GPR:
1219 case RISCV::Select_FPR64_Using_CC_GPR:
1220 return true;
1221 }
1222}
1223
1224static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
1225 MachineBasicBlock *BB) {
1226 // To "insert" Select_* instructions, we actually have to insert the triangle
1227 // control-flow pattern. The incoming instructions know the destination vreg
1228 // to set, the condition code register to branch on, the true/false values to
1229 // select between, and the condcode to use to select the appropriate branch.
1230 //
1231 // We produce the following control flow:
1232 // HeadMBB
1233 // | \
1234 // | IfFalseMBB
1235 // | /
1236 // TailMBB
1237 //
1238 // When we find a sequence of selects we attempt to optimize their emission
1239 // by sharing the control flow. Currently we only handle cases where we have
1240 // multiple selects with the exact same condition (same LHS, RHS and CC).
1241 // The selects may be interleaved with other instructions if the other
1242 // instructions meet some requirements we deem safe:
1243 // - They are debug instructions. Otherwise,
1244 // - They do not have side-effects, do not access memory and their inputs do
1245 // not depend on the results of the select pseudo-instructions.
1246 // The TrueV/FalseV operands of the selects cannot depend on the result of
1247 // previous selects in the sequence.
1248 // These conditions could be further relaxed. See the X86 target for a
1249 // related approach and more information.
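  //
  // Editor's illustration (not part of the original source): a single
  //   %dst = Select_GPR_Using_CC_GPR %lhs, %rhs, SETLT, %t, %f
  // is expanded into roughly the following machine CFG, assuming
  // getBranchOpcodeForIntCondCode maps SETLT to BLT:
  //   HeadMBB:    BLT %lhs, %rhs, TailMBB   ; branch taken -> "true" edge
  //   IfFalseMBB:                           ; falls through to TailMBB
  //   TailMBB:    %dst = PHI [ %t, HeadMBB ], [ %f, IfFalseMBB ]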
1250 Register LHS = MI.getOperand(1).getReg();
1251 Register RHS = MI.getOperand(2).getReg();
1252 auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
1253
1254 SmallVector<MachineInstr *, 4> SelectDebugValues;
1255 SmallSet<Register, 4> SelectDests;
1256 SelectDests.insert(MI.getOperand(0).getReg());
1257
1258 MachineInstr *LastSelectPseudo = &MI;
1259
1260 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
1261 SequenceMBBI != E; ++SequenceMBBI) {
1262 if (SequenceMBBI->isDebugInstr())
1263 continue;
1264 else if (isSelectPseudo(*SequenceMBBI)) {
1265 if (SequenceMBBI->getOperand(1).getReg() != LHS ||
1266 SequenceMBBI->getOperand(2).getReg() != RHS ||
1267 SequenceMBBI->getOperand(3).getImm() != CC ||
1268 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
1269 SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
1270 break;
1271 LastSelectPseudo = &*SequenceMBBI;
1272 SequenceMBBI->collectDebugValues(SelectDebugValues);
1273 SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
1274 } else {
1275 if (SequenceMBBI->hasUnmodeledSideEffects() ||
1276 SequenceMBBI->mayLoadOrStore())
1277 break;
1278 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
1279 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
1280 }))
1281 break;
1282 }
1283 }
1284
1285 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
1286 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1287 DebugLoc DL = MI.getDebugLoc();
1288 MachineFunction::iterator I = ++BB->getIterator();
1289
1290 MachineBasicBlock *HeadMBB = BB;
1291 MachineFunction *F = BB->getParent();
1292 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
1293 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
1294
1295 F->insert(I, IfFalseMBB);
1296 F->insert(I, TailMBB);
1297
1298 // Transfer debug instructions associated with the selects to TailMBB.
1299 for (MachineInstr *DebugInstr : SelectDebugValues) {
1300 TailMBB->push_back(DebugInstr->removeFromParent());
1301 }
1302
1303 // Move all instructions after the sequence to TailMBB.
1304 TailMBB->splice(TailMBB->end(), HeadMBB,
1305 std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
1306 // Update machine-CFG edges by transferring all successors of the current
1307 // block to the new block which will contain the Phi nodes for the selects.
1308 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
1309 // Set the successors for HeadMBB.
1310 HeadMBB->addSuccessor(IfFalseMBB);
1311 HeadMBB->addSuccessor(TailMBB);
1312
1313 // Insert appropriate branch.
1314 unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
1315
1316 BuildMI(HeadMBB, DL, TII.get(Opcode))
1317 .addReg(LHS)
1318 .addReg(RHS)
1319 .addMBB(TailMBB);
1320
1321 // IfFalseMBB just falls through to TailMBB.
1322 IfFalseMBB->addSuccessor(TailMBB);
1323
1324 // Create PHIs for all of the select pseudo-instructions.
1325 auto SelectMBBI = MI.getIterator();
1326 auto SelectEnd = std::next(LastSelectPseudo->getIterator());
1327 auto InsertionPoint = TailMBB->begin();
1328 while (SelectMBBI != SelectEnd) {
1329 auto Next = std::next(SelectMBBI);
1330 if (isSelectPseudo(*SelectMBBI)) {
1331 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
1332 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
1333 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
1334 .addReg(SelectMBBI->getOperand(4).getReg())
1335 .addMBB(HeadMBB)
1336 .addReg(SelectMBBI->getOperand(5).getReg())
1337 .addMBB(IfFalseMBB);
1338 SelectMBBI->eraseFromParent();
1339 }
1340 SelectMBBI = Next;
1341 }
1342
1343 F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
1344 return TailMBB;
1345}
1346
1347MachineBasicBlock *
1348RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1349 MachineBasicBlock *BB) const {
1350 switch (MI.getOpcode()) {
1351 default:
1352     llvm_unreachable("Unexpected instr type to insert");
1353 case RISCV::ReadCycleWide:
1354     assert(!Subtarget.is64Bit() &&
1355            "ReadCycleWrite is only to be used on riscv32");
1356 return emitReadCycleWidePseudo(MI, BB);
1357 case RISCV::Select_GPR_Using_CC_GPR:
1358 case RISCV::Select_FPR32_Using_CC_GPR:
1359 case RISCV::Select_FPR64_Using_CC_GPR:
1360 return emitSelectPseudo(MI, BB);
1361 case RISCV::BuildPairF64Pseudo:
1362 return emitBuildPairF64Pseudo(MI, BB);
1363 case RISCV::SplitF64Pseudo:
1364 return emitSplitF64Pseudo(MI, BB);
1365 }
1366}
1367
1368// Calling Convention Implementation.
1369// The expectations for frontend ABI lowering vary from target to target.
1370// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
1371// details, but this is a longer term goal. For now, we simply try to keep the
1372// role of the frontend as simple and well-defined as possible. The rules can
1373// be summarised as:
1374// * Never split up large scalar arguments. We handle them here.
1375// * If a hardfloat calling convention is being used, and the struct may be
1376// passed in a pair of registers (fp+fp, int+fp), and both registers are
1377// available, then pass as two separate arguments. If either the GPRs or FPRs
1378// are exhausted, then pass according to the rule below.
1379// * If a struct could never be passed in registers or directly in a stack
1380// slot (as it is larger than 2*XLEN and the floating point rules don't
1381// apply), then pass it using a pointer with the byval attribute.
1382// * If a struct is less than 2*XLEN, then coerce to either a two-element
1383// word-sized array or a 2*XLEN scalar (depending on alignment).
1384// * The frontend can determine whether a struct is returned by reference or
1385// not based on its size and fields. If it will be returned by reference, the
1386// frontend must modify the prototype so a pointer with the sret annotation is
1387// passed as the first argument. This is not necessary for large scalar
1388// returns.
1389// * Struct return values and varargs should be coerced to structs containing
1390// register-size fields in the same situations they would be for fixed
1391// arguments.
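//
// Editor's illustration (not part of the original source), assuming the
// ilp32d hard-float ABI with all argument registers still free:
//   struct S { double d; int32_t i; };   // fits the fp+int pair rule
//   void f(struct S s);                  // d -> fa0, i -> a0
// whereas a struct larger than 2*XLEN with no applicable floating point rule
// would instead be passed byval, i.e. as a pointer to a caller-made copy.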
1392
1393static const MCPhysReg ArgGPRs[] = {
1394 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
1395 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
1396};
1397static const MCPhysReg ArgFPR32s[] = {
1398 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
1399 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
1400};
1401static const MCPhysReg ArgFPR64s[] = {
1402 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
1403 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
1404};
1405
1406// Pass a 2*XLEN argument that has been split into two XLEN values through
1407// registers or the stack as necessary.
1408static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
1409 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
1410 MVT ValVT2, MVT LocVT2,
1411 ISD::ArgFlagsTy ArgFlags2) {
1412 unsigned XLenInBytes = XLen / 8;
1413 if (Register Reg = State.AllocateReg(ArgGPRs)) {
1414 // At least one half can be passed via register.
1415 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
1416 VA1.getLocVT(), CCValAssign::Full));
1417 } else {
1418 // Both halves must be passed on the stack, with proper alignment.
1419 unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
1420 State.addLoc(
1421 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
1422 State.AllocateStack(XLenInBytes, StackAlign),
1423 VA1.getLocVT(), CCValAssign::Full));
1424 State.addLoc(CCValAssign::getMem(
1425 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
1426 CCValAssign::Full));
1427 return false;
1428 }
1429
1430 if (Register Reg = State.AllocateReg(ArgGPRs)) {
1431 // The second half can also be passed via register.
1432 State.addLoc(
1433 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
1434 } else {
1435 // The second half is passed via the stack, without additional alignment.
1436 State.addLoc(CCValAssign::getMem(
1437 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
1438 CCValAssign::Full));
1439 }
1440
1441 return false;
1442}
1443
1444// Implements the RISC-V calling convention. Returns true upon failure.
1445static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
1446 MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
1447 ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
1448 bool IsRet, Type *OrigTy) {
1449 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
1450   assert(XLen == 32 || XLen == 64);
1451 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
1452
1453   // Any return value split into more than two values can't be returned
1454 // directly.
1455 if (IsRet && ValNo > 1)
1456 return true;
1457
1458 // UseGPRForF32 if targeting one of the soft-float ABIs, if passing a
1459 // variadic argument, or if no F32 argument registers are available.
1460 bool UseGPRForF32 = true;
1461 // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
1462 // variadic argument, or if no F64 argument registers are available.
1463 bool UseGPRForF64 = true;
1464
1465 switch (ABI) {
1466 default:
1467     llvm_unreachable("Unexpected ABI");
1468 case RISCVABI::ABI_ILP32:
1469 case RISCVABI::ABI_LP64:
1470 break;
1471 case RISCVABI::ABI_ILP32F:
1472 case RISCVABI::ABI_LP64F:
1473 UseGPRForF32 = !IsFixed;
1474 break;
1475 case RISCVABI::ABI_ILP32D:
1476 case RISCVABI::ABI_LP64D:
1477 UseGPRForF32 = !IsFixed;
1478 UseGPRForF64 = !IsFixed;
1479 break;
1480 }
1481
1482 if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s))
1483 UseGPRForF32 = true;
1484 if (State.getFirstUnallocated(ArgFPR64s) == array_lengthof(ArgFPR64s))
1485 UseGPRForF64 = true;
1486
1487 // From this point on, rely on UseGPRForF32, UseGPRForF64 and similar local
1488 // variables rather than directly checking against the target ABI.
1489
1490 if (UseGPRForF32 && ValVT == MVT::f32) {
1491 LocVT = XLenVT;
1492 LocInfo = CCValAssign::BCvt;
1493 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
1494 LocVT = MVT::i64;
1495 LocInfo = CCValAssign::BCvt;
1496 }
1497
1498 // If this is a variadic argument, the RISC-V calling convention requires
1499 // that it is assigned an 'even' or 'aligned' register if it has 8-byte
1500 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
1501 // be used regardless of whether the original argument was split during
1502 // legalisation or not. The argument will not be passed by registers if the
1503 // original type is larger than 2*XLEN, so the register alignment rule does
1504 // not apply.
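  // Editor's illustration (not part of the original source): on RV32, if a
  // variadic double (8 bytes, 8-byte aligned) arrives when a1 would be the
  // next free GPR, a1 is skipped so the value occupies the aligned pair
  // a2/a3.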
1505 unsigned TwoXLenInBytes = (2 * XLen) / 8;
1506 if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
1507 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
1508 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
1509 // Skip 'odd' register if necessary.
1510 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
1511 State.AllocateReg(ArgGPRs);
1512 }
1513
1514 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
1515 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
1516 State.getPendingArgFlags();
1517
1518   assert(PendingLocs.size() == PendingArgFlags.size() &&
1519          "PendingLocs and PendingArgFlags out of sync");
1520
1521 // Handle passing f64 on RV32D with a soft float ABI or when floating point
1522 // registers are exhausted.
1523 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
1524     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
1525            "Can't lower f64 if it is split");
1526     // Depending on available argument GPRs, f64 may be passed in a pair of
1527 // GPRs, split between a GPR and the stack, or passed completely on the
1528 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
1529 // cases.
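    // Editor's illustration (not part of the original source): with a0-a5
    // already taken, the f64 goes in the a6/a7 pair; with only a7 free, its
    // low half goes in a7 and the high half in a 4-byte stack slot; with no
    // GPRs free, the whole value takes an 8-byte stack slot.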
1530 Register Reg = State.AllocateReg(ArgGPRs);
1531 LocVT = MVT::i32;
1532 if (!Reg) {
1533 unsigned StackOffset = State.AllocateStack(8, 8);
1534 State.addLoc(
1535 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1536 return false;
1537 }
1538 if (!State.AllocateReg(ArgGPRs))
1539 State.AllocateStack(4, 4);
1540 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1541 return false;
1542 }
1543
1544 // Split arguments might be passed indirectly, so keep track of the pending
1545 // values.
1546 if (ArgFlags.isSplit() || !PendingLocs.empty()) {
1547 LocVT = XLenVT;
1548 LocInfo = CCValAssign::Indirect;
1549 PendingLocs.push_back(
1550 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
1551 PendingArgFlags.push_back(ArgFlags);
1552 if (!ArgFlags.isSplitEnd()) {
1553 return false;
1554 }
1555 }
1556
1557 // If the split argument only had two elements, it should be passed directly
1558 // in registers or on the stack.
1559 if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
1560     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
1561 // Apply the normal calling convention rules to the first half of the
1562 // split argument.
1563 CCValAssign VA = PendingLocs[0];
1564 ISD::ArgFlagsTy AF = PendingArgFlags[0];
1565 PendingLocs.clear();
1566 PendingArgFlags.clear();
1567 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
1568 ArgFlags);
1569 }
1570
1571 // Allocate to a register if possible, or else a stack slot.
1572 Register Reg;
1573 if (ValVT == MVT::f32 && !UseGPRForF32)
1574 Reg = State.AllocateReg(ArgFPR32s, ArgFPR64s);
1575 else if (ValVT == MVT::f64 && !UseGPRForF64)
1576 Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s);
1577 else
1578 Reg = State.AllocateReg(ArgGPRs);
1579 unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);
1580
1581 // If we reach this point and PendingLocs is non-empty, we must be at the
1582 // end of a split argument that must be passed indirectly.
1583 if (!PendingLocs.empty()) {
1584     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
1585     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
1586
1587 for (auto &It : PendingLocs) {
1588 if (Reg)
1589 It.convertToReg(Reg);
1590 else
1591 It.convertToMem(StackOffset);
1592 State.addLoc(It);
1593 }
1594 PendingLocs.clear();
1595 PendingArgFlags.clear();
1596 return false;
1597 }
1598
1599   assert((!UseGPRForF32 || !UseGPRForF64 || LocVT == XLenVT) &&
1600          "Expected an XLenVT at this stage");
1601
1602 if (Reg) {
1603 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1604 return false;
1605 }
1606
1607 // When an f32 or f64 is passed on the stack, no bit-conversion is needed.
1608 if (ValVT == MVT::f32 || ValVT == MVT::f64) {
1609 LocVT = ValVT;
1610 LocInfo = CCValAssign::Full;
1611 }
1612 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1613 return false;
1614}
1615
1616void RISCVTargetLowering::analyzeInputArgs(
1617 MachineFunction &MF, CCState &CCInfo,
1618 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
1619 unsigned NumArgs = Ins.size();
1620 FunctionType *FType = MF.getFunction().getFunctionType();
1621
1622 for (unsigned i = 0; i != NumArgs; ++i) {
1623 MVT ArgVT = Ins[i].VT;
1624 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
1625
1626 Type *ArgTy = nullptr;
1627 if (IsRet)
1628 ArgTy = FType->getReturnType();
1629 else if (Ins[i].isOrigArg())
1630 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
1631
1632 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1633 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1634 ArgFlags, CCInfo, /*IsRet=*/true, IsRet, ArgTy)) {
1635       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
1636                         << EVT(ArgVT).getEVTString() << '\n');
1637       llvm_unreachable(nullptr);
1638 }
1639 }
1640}
1641
1642void RISCVTargetLowering::analyzeOutputArgs(
1643 MachineFunction &MF, CCState &CCInfo,
1644 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
1645 CallLoweringInfo *CLI) const {
1646 unsigned NumArgs = Outs.size();
1647
1648 for (unsigned i = 0; i != NumArgs; i++) {
1649 MVT ArgVT = Outs[i].VT;
1650 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
1651 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
1652
1653 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1654 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1655 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
1656       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
1657                         << EVT(ArgVT).getEVTString() << "\n");
1658       llvm_unreachable(nullptr);
1659 }
1660 }
1661}
1662
1663// Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
1664// values.
1665static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
1666 const CCValAssign &VA, const SDLoc &DL) {
1667 switch (VA.getLocInfo()) {
1668 default:
1669     llvm_unreachable("Unexpected CCValAssign::LocInfo");
1670 case CCValAssign::Full:
1671 break;
1672 case CCValAssign::BCvt:
1673 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
1674 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
1675 break;
1676 }
1677 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
1678 break;
1679 }
1680 return Val;
1681}
1682
1683// The caller is responsible for loading the full value if the argument is
1684// passed with CCValAssign::Indirect.
1685static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
1686 const CCValAssign &VA, const SDLoc &DL) {
1687 MachineFunction &MF = DAG.getMachineFunction();
1688 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1689 EVT LocVT = VA.getLocVT();
1690 SDValue Val;
1691 const TargetRegisterClass *RC;
1692
1693 switch (LocVT.getSimpleVT().SimpleTy) {
1694 default:
1695     llvm_unreachable("Unexpected register type");
1696 case MVT::i32:
1697 case MVT::i64:
1698 RC = &RISCV::GPRRegClass;
1699 break;
1700 case MVT::f32:
1701 RC = &RISCV::FPR32RegClass;
1702 break;
1703 case MVT::f64:
1704 RC = &RISCV::FPR64RegClass;
1705 break;
1706 }
1707
1708 Register VReg = RegInfo.createVirtualRegister(RC);
1709 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1710 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
1711
1712 if (VA.getLocInfo() == CCValAssign::Indirect)
1713 return Val;
1714
1715 return convertLocVTToValVT(DAG, Val, VA, DL);
1716}
1717
1718static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
1719 const CCValAssign &VA, const SDLoc &DL) {
1720 EVT LocVT = VA.getLocVT();
1721
1722 switch (VA.getLocInfo()) {
1723 default:
1724     llvm_unreachable("Unexpected CCValAssign::LocInfo");
1725 case CCValAssign::Full:
1726 break;
1727 case CCValAssign::BCvt:
1728 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
1729 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
1730 break;
1731 }
1732 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
1733 break;
1734 }
1735 return Val;
1736}
1737
1738// The caller is responsible for loading the full value if the argument is
1739// passed with CCValAssign::Indirect.
1740static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
1741 const CCValAssign &VA, const SDLoc &DL) {
1742 MachineFunction &MF = DAG.getMachineFunction();
1743 MachineFrameInfo &MFI = MF.getFrameInfo();
1744 EVT LocVT = VA.getLocVT();
1745 EVT ValVT = VA.getValVT();
1746 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
1747 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
1748 VA.getLocMemOffset(), /*Immutable=*/true);
1749 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1750 SDValue Val;
1751
1752 ISD::LoadExtType ExtType;
1753 switch (VA.getLocInfo()) {
1754 default:
1755     llvm_unreachable("Unexpected CCValAssign::LocInfo");
1756 case CCValAssign::Full:
1757 case CCValAssign::Indirect:
1758 case CCValAssign::BCvt:
1759 ExtType = ISD::NON_EXTLOAD;
1760 break;
1761 }
1762 Val = DAG.getExtLoad(
1763 ExtType, DL, LocVT, Chain, FIN,
1764 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
1765 return Val;
1766}
1767
1768static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
1769 const CCValAssign &VA, const SDLoc &DL) {
1770   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
1771          "Unexpected VA");
1772 MachineFunction &MF = DAG.getMachineFunction();
1773 MachineFrameInfo &MFI = MF.getFrameInfo();
1774 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1775
1776 if (VA.isMemLoc()) {
1777 // f64 is passed on the stack.
1778 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
1779 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1780 return DAG.getLoad(MVT::f64, DL, Chain, FIN,
1781 MachinePointerInfo::getFixedStack(MF, FI));
1782 }
1783
1784   assert(VA.isRegLoc() && "Expected register VA assignment");
1785
1786 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1787 RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
1788 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
1789 SDValue Hi;
1790 if (VA.getLocReg() == RISCV::X17) {
1791 // Second half of f64 is passed on the stack.
1792 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
1793 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1794 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
1795 MachinePointerInfo::getFixedStack(MF, FI));
1796 } else {
1797 // Second half of f64 is passed in another GPR.
1798 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1799 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
1800 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
1801 }
1802 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
1803}
1804
1805 // FastCC gives less than a 1% performance improvement on some particular
1806 // benchmarks, but in theory it may still benefit other cases.
1807static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
1808 CCValAssign::LocInfo LocInfo,
1809 ISD::ArgFlagsTy ArgFlags, CCState &State) {
1810
1811 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
1812 // X5 and X6 might be used for save-restore libcall.
1813 static const MCPhysReg GPRList[] = {
1814 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
1815 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28,
1816 RISCV::X29, RISCV::X30, RISCV::X31};
1817 if (unsigned Reg = State.AllocateReg(GPRList)) {
1818 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1819 return false;
1820 }
1821 }
1822
1823 if (LocVT == MVT::f32) {
1824 static const MCPhysReg FPR32List[] = {
1825 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
1826 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
1827 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
1828 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
1829 if (unsigned Reg = State.AllocateReg(FPR32List)) {
1830 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1831 return false;
1832 }
1833 }
1834
1835 if (LocVT == MVT::f64) {
1836 static const MCPhysReg FPR64List[] = {
1837 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
1838 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
1839 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
1840 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
1841 if (unsigned Reg = State.AllocateReg(FPR64List)) {
1842 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1843 return false;
1844 }
1845 }
1846
1847 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
1848 unsigned Offset4 = State.AllocateStack(4, 4);
1849 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
1850 return false;
1851 }
1852
1853 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
1854 unsigned Offset5 = State.AllocateStack(8, 8);
1855 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
1856 return false;
1857 }
1858
1859 return true; // CC didn't match.
1860}
1861
1862// Transform physical registers into virtual registers.
1863SDValue RISCVTargetLowering::LowerFormalArguments(
1864 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1865 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1866 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1867
1868 switch (CallConv) {
1869 default:
1870 report_fatal_error("Unsupported calling convention");
1871 case CallingConv::C:
1872 case CallingConv::Fast:
1873 break;
1874 }
1875
1876 MachineFunction &MF = DAG.getMachineFunction();
1877
1878 const Function &Func = MF.getFunction();
1879 if (Func.hasFnAttribute("interrupt")) {
1880 if (!Func.arg_empty())
1881 report_fatal_error(
1882 "Functions with the interrupt attribute cannot have arguments!");
1883
1884 StringRef Kind =
1885 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1886
1887 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
1888 report_fatal_error(
1889 "Function interrupt attribute argument not supported!");
1890 }
1891
1892 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1893 MVT XLenVT = Subtarget.getXLenVT();
1894 unsigned XLenInBytes = Subtarget.getXLen() / 8;
1895   // Used with varargs to accumulate store chains.
1896 std::vector<SDValue> OutChains;
1897
1898 // Assign locations to all of the incoming arguments.
1899 SmallVector<CCValAssign, 16> ArgLocs;
1900 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1901
1902 if (CallConv == CallingConv::Fast)
1903 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
1904 else
1905 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
1906
1907 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1908 CCValAssign &VA = ArgLocs[i];
1909 SDValue ArgValue;
1910 // Passing f64 on RV32D with a soft float ABI must be handled as a special
1911 // case.
1912 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
1913 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
1914 else if (VA.isRegLoc())
1915 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
1916 else
1917 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
1918
1919 if (VA.getLocInfo() == CCValAssign::Indirect) {
1920 // If the original argument was split and passed by reference (e.g. i128
1921 // on RV32), we need to load all parts of it here (using the same
1922 // address).
1923 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
1924 MachinePointerInfo()));
1925 unsigned ArgIndex = Ins[i].OrigArgIndex;
1926       assert(Ins[i].PartOffset == 0);
1927 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
1928 CCValAssign &PartVA = ArgLocs[i + 1];
1929 unsigned PartOffset = Ins[i + 1].PartOffset;
1930 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1931 DAG.getIntPtrConstant(PartOffset, DL));
1932 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1933 MachinePointerInfo()));
1934 ++i;
1935 }
1936 continue;
1937 }
1938 InVals.push_back(ArgValue);
1939 }
1940
1941 if (IsVarArg) {
1942 ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
1943 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
1944 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1945 MachineFrameInfo &MFI = MF.getFrameInfo();
1946 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1947 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1948
1949 // Offset of the first variable argument from stack pointer, and size of
1950 // the vararg save area. For now, the varargs save area is either zero or
1951 // large enough to hold a0-a7.
1952 int VaArgOffset, VarArgsSaveSize;
1953
1954 // If all registers are allocated, then all varargs must be passed on the
1955 // stack and we don't need to save any argregs.
1956 if (ArgRegs.size() == Idx) {
1957 VaArgOffset = CCInfo.getNextStackOffset();
1958 VarArgsSaveSize = 0;
1959 } else {
1960 VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
1961 VaArgOffset = -VarArgsSaveSize;
1962 }
1963
1964 // Record the frame index of the first variable argument
1965 // which is a value necessary to VASTART.
1966 int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1967 RVFI->setVarArgsFrameIndex(FI);
1968
1969 // If saving an odd number of registers then create an extra stack slot to
1970 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
1971     // offsets to even-numbered registers remain 2*XLEN-aligned.
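    // Editor's illustration (not part of the original source): on RV32 with
    // five fixed arguments, only a5-a7 are saved (12 bytes); the extra
    // 4-byte slot below them pads the save area to 16 bytes so it stays
    // 2*XLEN (8-byte) aligned.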
1972 if (Idx % 2) {
1973 MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
1974 VarArgsSaveSize += XLenInBytes;
1975 }
1976
1977 // Copy the integer registers that may have been used for passing varargs
1978 // to the vararg save area.
1979 for (unsigned I = Idx; I < ArgRegs.size();
1980 ++I, VaArgOffset += XLenInBytes) {
1981 const Register Reg = RegInfo.createVirtualRegister(RC);
1982 RegInfo.addLiveIn(ArgRegs[I], Reg);
1983 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
1984 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1985 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1986 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
1987 MachinePointerInfo::getFixedStack(MF, FI));
1988 cast<StoreSDNode>(Store.getNode())
1989 ->getMemOperand()
1990 ->setValue((Value *)nullptr);
1991 OutChains.push_back(Store);
1992 }
1993 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
1994 }
1995
1996 // All stores are grouped in one node to allow the matching between
1997 // the size of Ins and InVals. This only happens for vararg functions.
1998 if (!OutChains.empty()) {
1999 OutChains.push_back(Chain);
2000 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
2001 }
2002
2003 return Chain;
2004}
2005
2006/// isEligibleForTailCallOptimization - Check whether the call is eligible
2007/// for tail call optimization.
2008/// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
2009bool RISCVTargetLowering::isEligibleForTailCallOptimization(
2010 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
2011 const SmallVector<CCValAssign, 16> &ArgLocs) const {
2012
2013 auto &Callee = CLI.Callee;
2014 auto CalleeCC = CLI.CallConv;
2015 auto &Outs = CLI.Outs;
2016 auto &Caller = MF.getFunction();
2017 auto CallerCC = Caller.getCallingConv();
2018
2019 // Exception-handling functions need a special set of instructions to
2020 // indicate a return to the hardware. Tail-calling another function would
2021 // probably break this.
2022 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
2023 // should be expanded as new function attributes are introduced.
2024 if (Caller.hasFnAttribute("interrupt"))
2025 return false;
2026
2027 // Do not tail call opt if the stack is used to pass parameters.
2028 if (CCInfo.getNextStackOffset() != 0)
2029 return false;
2030
2031 // Do not tail call opt if any parameters need to be passed indirectly.
2032 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
2033 // passed indirectly. So the address of the value will be passed in a
2034 // register, or if not available, then the address is put on the stack. In
2035 // order to pass indirectly, space on the stack often needs to be allocated
2036 // in order to store the value. In this case the CCInfo.getNextStackOffset()
2037 // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
2038 // are passed CCValAssign::Indirect.
2039 for (auto &VA : ArgLocs)
2040 if (VA.getLocInfo() == CCValAssign::Indirect)
2041 return false;
2042
2043 // Do not tail call opt if either caller or callee uses struct return
2044 // semantics.
2045 auto IsCallerStructRet = Caller.hasStructRetAttr();
2046 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
2047 if (IsCallerStructRet || IsCalleeStructRet)
2048 return false;
2049
2050 // Externally-defined functions with weak linkage should not be
2051 // tail-called. The behaviour of branch instructions in this situation (as
2052 // used for tail calls) is implementation-defined, so we cannot rely on the
2053 // linker replacing the tail call with a return.
2054 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2055 const GlobalValue *GV = G->getGlobal();
2056 if (GV->hasExternalWeakLinkage())
2057 return false;
2058 }
2059
2060 // The callee has to preserve all registers the caller needs to preserve.
2061 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2062 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2063 if (CalleeCC != CallerCC) {
2064 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2065 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2066 return false;
2067 }
2068
2069 // Byval parameters hand the function a pointer directly into the stack area
2070 // we want to reuse during a tail call. Working around this *is* possible
2071 // but less efficient and uglier in LowerCall.
2072 for (auto &Arg : Outs)
2073 if (Arg.Flags.isByVal())
2074 return false;
2075
2076 return true;
2077}
2078
2079// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
2080// and output parameter nodes.
2081SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
2082 SmallVectorImpl<SDValue> &InVals) const {
2083 SelectionDAG &DAG = CLI.DAG;
2084 SDLoc &DL = CLI.DL;
2085 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2086 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2087 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2088 SDValue Chain = CLI.Chain;
2089 SDValue Callee = CLI.Callee;
2090 bool &IsTailCall = CLI.IsTailCall;
2091 CallingConv::ID CallConv = CLI.CallConv;
2092 bool IsVarArg = CLI.IsVarArg;
2093 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2094 MVT XLenVT = Subtarget.getXLenVT();
2095
2096 MachineFunction &MF = DAG.getMachineFunction();
2097
2098 // Analyze the operands of the call, assigning locations to each operand.
2099 SmallVector<CCValAssign, 16> ArgLocs;
2100 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2101
2102 if (CallConv == CallingConv::Fast)
2103 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
2104 else
2105 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
2106
2107 // Check if it's really possible to do a tail call.
2108 if (IsTailCall)
2109 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
2110
2111 if (IsTailCall)
2112 ++NumTailCalls;
2113 else if (CLI.CS && CLI.CS.isMustTailCall())
2114 report_fatal_error("failed to perform tail call elimination on a call "
2115 "site marked musttail");
2116
2117 // Get a count of how many bytes are to be pushed on the stack.
2118 unsigned NumBytes = ArgCCInfo.getNextStackOffset();
2119
2120 // Create local copies for byval args
2121 SmallVector<SDValue, 8> ByValArgs;
2122 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2123 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2124 if (!Flags.isByVal())
2125 continue;
2126
2127 SDValue Arg = OutVals[i];
2128 unsigned Size = Flags.getByValSize();
2129 unsigned Align = Flags.getByValAlign();
2130
2131 int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
2132 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2133 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
2134
2135 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
2136 /*IsVolatile=*/false,
2137 /*AlwaysInline=*/false,
2138 IsTailCall, MachinePointerInfo(),
2139 MachinePointerInfo());
2140 ByValArgs.push_back(FIPtr);
2141 }
2142
2143 if (!IsTailCall)
2144 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
2145
2146 // Copy argument values to their designated locations.
2147 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
2148 SmallVector<SDValue, 8> MemOpChains;
2149 SDValue StackPtr;
2150 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
2151 CCValAssign &VA = ArgLocs[i];
2152 SDValue ArgValue = OutVals[i];
2153 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2154
2155 // Handle passing f64 on RV32D with a soft float ABI as a special case.
2156 bool IsF64OnRV32DSoftABI =
2157 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
2158 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
2159 SDValue SplitF64 = DAG.getNode(
2160 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
2161 SDValue Lo = SplitF64.getValue(0);
2162 SDValue Hi = SplitF64.getValue(1);
2163
2164 Register RegLo = VA.getLocReg();
2165 RegsToPass.push_back(std::make_pair(RegLo, Lo));
2166
2167 if (RegLo == RISCV::X17) {
2168 // Second half of f64 is passed on the stack.
2169 // Work out the address of the stack slot.
2170 if (!StackPtr.getNode())
2171 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
2172 // Emit the store.
2173 MemOpChains.push_back(
2174 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
2175 } else {
2176 // Second half of f64 is passed in another GPR.
2177         assert(RegLo < RISCV::X31 && "Invalid register pair");
2178 Register RegHigh = RegLo + 1;
2179 RegsToPass.push_back(std::make_pair(RegHigh, Hi));
2180 }
2181 continue;
2182 }
2183
2184 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
2185 // as any other MemLoc.
2186
2187 // Promote the value if needed.
2188 // For now, only handle fully promoted and indirect arguments.
2189 if (VA.getLocInfo() == CCValAssign::Indirect) {
2190 // Store the argument in a stack slot and pass its address.
2191 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
2192 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2193 MemOpChains.push_back(
2194 DAG.getStore(Chain, DL, ArgValue, SpillSlot,
2195 MachinePointerInfo::getFixedStack(MF, FI)));
2196 // If the original argument was split (e.g. i128), we need
2197 // to store all parts of it here (and pass just one address).
2198 unsigned ArgIndex = Outs[i].OrigArgIndex;
2199       assert(Outs[i].PartOffset == 0);
2200 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
2201 SDValue PartValue = OutVals[i + 1];
2202 unsigned PartOffset = Outs[i + 1].PartOffset;
2203 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
2204 DAG.getIntPtrConstant(PartOffset, DL));
2205 MemOpChains.push_back(
2206 DAG.getStore(Chain, DL, PartValue, Address,
2207 MachinePointerInfo::getFixedStack(MF, FI)));
2208 ++i;
2209 }
2210 ArgValue = SpillSlot;
2211 } else {
2212 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
2213 }
2214
2215 // Use local copy if it is a byval arg.
2216 if (Flags.isByVal())
2217 ArgValue = ByValArgs[j++];
2218
2219 if (VA.isRegLoc()) {
2220 // Queue up the argument copies and emit them at the end.
2221 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
2222 } else {
2223       assert(VA.isMemLoc() && "Argument not register or memory");
2224       assert(!IsTailCall && "Tail call not allowed if stack is used "
2225                             "for passing parameters");
2226
2227 // Work out the address of the stack slot.
2228 if (!StackPtr.getNode())
2229 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
2230 SDValue Address =
2231 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
2232 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
2233
2234 // Emit the store.
2235 MemOpChains.push_back(
2236 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
2237 }
2238 }
2239
2240 // Join the stores, which are independent of one another.
2241 if (!MemOpChains.empty())
2242 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2243
2244 SDValue Glue;
2245
2246 // Build a sequence of copy-to-reg nodes, chained and glued together.
2247 for (auto &Reg : RegsToPass) {
2248 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
2249 Glue = Chain.getValue(1);
2250 }
2251
2252 // Validate that none of the argument registers have been marked as
2253 // reserved, if so report an error. Do the same for the return address if this
2254 // is not a tailcall.
2255 validateCCReservedRegs(RegsToPass, MF);
2256 if (!IsTailCall &&
2257 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
2258 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2259 MF.getFunction(),
2260 "Return address register required, but has been reserved."});
2261
2262 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
2263 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
2264 // split it and then direct call can be matched by PseudoCALL.
2265 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
2266 const GlobalValue *GV = S->getGlobal();
2267
2268 unsigned OpFlags = RISCVII::MO_CALL;
2269 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
2270 OpFlags = RISCVII::MO_PLT;
2271
2272 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
2273 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2274 unsigned OpFlags = RISCVII::MO_CALL;
2275
2276 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
2277 nullptr))
2278 OpFlags = RISCVII::MO_PLT;
2279
2280 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
2281 }
2282
2283 // The first call operand is the chain and the second is the target address.
2284 SmallVector<SDValue, 8> Ops;
2285 Ops.push_back(Chain);
2286 Ops.push_back(Callee);
2287
2288 // Add argument registers to the end of the list so that they are
2289 // known live into the call.
2290 for (auto &Reg : RegsToPass)
2291 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2292
2293 if (!IsTailCall) {
2294 // Add a register mask operand representing the call-preserved registers.
2295 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2296 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2297     assert(Mask && "Missing call preserved mask for calling convention");
2298 Ops.push_back(DAG.getRegisterMask(Mask));
2299 }
2300
2301 // Glue the call to the argument copies, if any.
2302 if (Glue.getNode())
2303 Ops.push_back(Glue);
2304
2305 // Emit the call.
2306 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2307
2308 if (IsTailCall) {
2309 MF.getFrameInfo().setHasTailCall();
2310 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
2311 }
2312
2313 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
2314 Glue = Chain.getValue(1);
2315
2316 // Mark the end of the call, which is glued to the call itself.
2317 Chain = DAG.getCALLSEQ_END(Chain,
2318 DAG.getConstant(NumBytes, DL, PtrVT, true),
2319 DAG.getConstant(0, DL, PtrVT, true),
2320 Glue, DL);
2321 Glue = Chain.getValue(1);
2322
2323 // Assign locations to each value returned by this call.
2324 SmallVector<CCValAssign, 16> RVLocs;
2325 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
2326 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
2327
2328 // Copy all of the result registers out of their specified physreg.
2329 for (auto &VA : RVLocs) {
2330 // Copy the value out
2331 SDValue RetValue =
2332 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
2333 // Glue the RetValue to the end of the call sequence
2334 Chain = RetValue.getValue(1);
2335 Glue = RetValue.getValue(2);
2336
2337 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
2338       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
2339 SDValue RetValue2 =
2340 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
2341 Chain = RetValue2.getValue(1);
2342 Glue = RetValue2.getValue(2);
2343 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
2344 RetValue2);
2345 }
2346
2347 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
2348
2349 InVals.push_back(RetValue);
2350 }
2351
2352 return Chain;
2353}
2354
2355bool RISCVTargetLowering::CanLowerReturn(
2356 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
2357 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2358 SmallVector<CCValAssign, 16> RVLocs;
2359 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2360 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2361 MVT VT = Outs[i].VT;
2362 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2363 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
2364 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
2365 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
2366 return false;
2367 }
2368 return true;
2369}
2370
2371SDValue
2372RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2373 bool IsVarArg,
2374 const SmallVectorImpl<ISD::OutputArg> &Outs,
2375 const SmallVectorImpl<SDValue> &OutVals,
2376 const SDLoc &DL, SelectionDAG &DAG) const {
2377 const MachineFunction &MF = DAG.getMachineFunction();
2378 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
2379
2380 // Stores the assignment of the return value to a location.
2381 SmallVector<CCValAssign, 16> RVLocs;
2382
2383 // Info about the registers and stack slot.
2384 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2385 *DAG.getContext());
2386
2387 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
2388 nullptr);
2389
2390 SDValue Glue;
2391 SmallVector<SDValue, 4> RetOps(1, Chain);
2392
2393 // Copy the result values into the output registers.
2394 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
2395 SDValue Val = OutVals[i];
2396 CCValAssign &VA = RVLocs[i];
2397     assert(VA.isRegLoc() && "Can only return in registers!");
2398
2399 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
2400 // Handle returning f64 on RV32D with a soft float ABI.
2401       assert(VA.isRegLoc() && "Expected return via registers");
2402 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
2403 DAG.getVTList(MVT::i32, MVT::i32), Val);
2404 SDValue Lo = SplitF64.getValue(0);
2405 SDValue Hi = SplitF64.getValue(1);
2406 Register RegLo = VA.getLocReg();
2407 assert(RegLo < RISCV::X31 && "Invalid register pair");
2408 Register RegHi = RegLo + 1;
2409
2410 if (STI.isRegisterReservedByUser(RegLo) ||
2411 STI.isRegisterReservedByUser(RegHi))
2412 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2413 MF.getFunction(),
2414 "Return value register required, but has been reserved."});
2415
2416 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
2417 Glue = Chain.getValue(1);
2418 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
2419 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
2420 Glue = Chain.getValue(1);
2421 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
2422 } else {
2423 // Handle a 'normal' return.
2424 Val = convertValVTToLocVT(DAG, Val, VA, DL);
2425 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
2426
2427 if (STI.isRegisterReservedByUser(VA.getLocReg()))
2428 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2429 MF.getFunction(),
2430 "Return value register required, but has been reserved."});
2431
2432 // Guarantee that all emitted copies are stuck together.
2433 Glue = Chain.getValue(1);
2434 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2435 }
2436 }
2437
2438 RetOps[0] = Chain; // Update chain.
2439
2440 // Add the glue node if we have it.
2441 if (Glue.getNode()) {
2442 RetOps.push_back(Glue);
2443 }
2444
2445 // Interrupt service routines use different return instructions.
2446 const Function &Func = DAG.getMachineFunction().getFunction();
2447 if (Func.hasFnAttribute("interrupt")) {
2448 if (!Func.getReturnType()->isVoidTy())
2449 report_fatal_error(
2450 "Functions with the interrupt attribute must have void return type!");
2451
2452 MachineFunction &MF = DAG.getMachineFunction();
2453 StringRef Kind =
2454 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
2455
2456 unsigned RetOpc;
2457 if (Kind == "user")
2458 RetOpc = RISCVISD::URET_FLAG;
2459 else if (Kind == "supervisor")
2460 RetOpc = RISCVISD::SRET_FLAG;
2461 else
2462 RetOpc = RISCVISD::MRET_FLAG;
2463
2464 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
2465 }
2466
2467 return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
2468}
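// Hedged usage sketch (frontend side, not part of this file): with Clang's
// RISC-V interrupt attribute, a handler such as
//
//   __attribute__((interrupt("machine")))
//   void machine_trap_handler(void) { /* must return void, see check above */ }
//
// reaches this lowering with the "interrupt" function attribute set to
// "machine", so the return is emitted as RISCVISD::MRET_FLAG; "user" and
// "supervisor" select URET_FLAG and SRET_FLAG, and ordinary functions get
// RET_FLAG.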
2469
2470void RISCVTargetLowering::validateCCReservedRegs(
2471 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
2472 MachineFunction &MF) const {
2473 const Function &F = MF.getFunction();
2474 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
2475
2476 if (std::any_of(std::begin(Regs), std::end(Regs), [&STI](auto Reg) {
2477 return STI.isRegisterReservedByUser(Reg.first);
2478 }))
2479 F.getContext().diagnose(DiagnosticInfoUnsupported{
2480 F, "Argument register required, but has been reserved."});
2481}
2482
2483const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
2484 switch ((RISCVISD::NodeType)Opcode) {
2485 case RISCVISD::FIRST_NUMBER:
2486 break;
2487 case RISCVISD::RET_FLAG:
2488 return "RISCVISD::RET_FLAG";
2489 case RISCVISD::URET_FLAG:
2490 return "RISCVISD::URET_FLAG";
2491 case RISCVISD::SRET_FLAG:
2492 return "RISCVISD::SRET_FLAG";
2493 case RISCVISD::MRET_FLAG:
2494 return "RISCVISD::MRET_FLAG";
2495 case RISCVISD::CALL:
2496 return "RISCVISD::CALL";
2497 case RISCVISD::SELECT_CC:
2498 return "RISCVISD::SELECT_CC";
2499 case RISCVISD::BuildPairF64:
2500 return "RISCVISD::BuildPairF64";
2501 case RISCVISD::SplitF64:
2502 return "RISCVISD::SplitF64";
2503 case RISCVISD::TAIL:
2504 return "RISCVISD::TAIL";
2505 case RISCVISD::SLLW:
2506 return "RISCVISD::SLLW";
2507 case RISCVISD::SRAW:
2508 return "RISCVISD::SRAW";
2509 case RISCVISD::SRLW:
2510 return "RISCVISD::SRLW";
2511 case RISCVISD::DIVW:
2512 return "RISCVISD::DIVW";
2513 case RISCVISD::DIVUW:
2514 return "RISCVISD::DIVUW";
2515 case RISCVISD::REMUW:
2516 return "RISCVISD::REMUW";
2517 case RISCVISD::FMV_W_X_RV64:
2518 return "RISCVISD::FMV_W_X_RV64";
2519 case RISCVISD::FMV_X_ANYEXTW_RV64:
2520 return "RISCVISD::FMV_X_ANYEXTW_RV64";
2521 case RISCVISD::READ_CYCLE_WIDE:
2522 return "RISCVISD::READ_CYCLE_WIDE";
2523 }
2524 return nullptr;
2525}
2526
2527/// getConstraintType - Given a constraint letter, return the type of
2528/// constraint it is for this target.
2529RISCVTargetLowering::ConstraintType
2530RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
2531 if (Constraint.size() == 1) {
2532 switch (Constraint[0]) {
2533 default:
2534 break;
2535 case 'f':
2536 return C_RegisterClass;
2537 case 'I':
2538 case 'J':
2539 case 'K':
2540 return C_Immediate;
2541 case 'A':
2542 return C_Memory;
2543 }
2544 }
2545 return TargetLowering::getConstraintType(Constraint);
2546}
2547
2548std::pair<unsigned, const TargetRegisterClass *>
2549RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2550 StringRef Constraint,
2551 MVT VT) const {
2552 // First, see if this is a constraint that directly corresponds to a
2553 // RISCV register class.
2554 if (Constraint.size() == 1) {
2555 switch (Constraint[0]) {
2556 case 'r':
2557 return std::make_pair(0U, &RISCV::GPRRegClass);
2558 case 'f':
2559 if (Subtarget.hasStdExtF() && VT == MVT::f32)
2560 return std::make_pair(0U, &RISCV::FPR32RegClass);
2561 if (Subtarget.hasStdExtD() && VT == MVT::f64)
2562 return std::make_pair(0U, &RISCV::FPR64RegClass);
2563 break;
2564 default:
2565 break;
2566 }
2567 }
2568
2569 // Clang will correctly decode the usage of register name aliases into their
2570 // official names. However, other frontends like `rustc` do not. This allows
2571 // users of these frontends to use the ABI names for registers in LLVM-style
2572 // register constraints.
2573 Register XRegFromAlias = StringSwitch<Register>(Constraint.lower())
2574 .Case("{zero}", RISCV::X0)
2575 .Case("{ra}", RISCV::X1)
2576 .Case("{sp}", RISCV::X2)
2577 .Case("{gp}", RISCV::X3)
2578 .Case("{tp}", RISCV::X4)
2579 .Case("{t0}", RISCV::X5)
2580 .Case("{t1}", RISCV::X6)
2581 .Case("{t2}", RISCV::X7)
2582 .Cases("{s0}", "{fp}", RISCV::X8)
2583 .Case("{s1}", RISCV::X9)
2584 .Case("{a0}", RISCV::X10)
2585 .Case("{a1}", RISCV::X11)
2586 .Case("{a2}", RISCV::X12)
2587 .Case("{a3}", RISCV::X13)
2588 .Case("{a4}", RISCV::X14)
2589 .Case("{a5}", RISCV::X15)
2590 .Case("{a6}", RISCV::X16)
2591 .Case("{a7}", RISCV::X17)
2592 .Case("{s2}", RISCV::X18)
2593 .Case("{s3}", RISCV::X19)
2594 .Case("{s4}", RISCV::X20)
2595 .Case("{s5}", RISCV::X21)
2596 .Case("{s6}", RISCV::X22)
2597 .Case("{s7}", RISCV::X23)
2598 .Case("{s8}", RISCV::X24)
2599 .Case("{s9}", RISCV::X25)
2600 .Case("{s10}", RISCV::X26)
2601 .Case("{s11}", RISCV::X27)
2602 .Case("{t3}", RISCV::X28)
2603 .Case("{t4}", RISCV::X29)
2604 .Case("{t5}", RISCV::X30)
2605 .Case("{t6}", RISCV::X31)
2606 .Default(RISCV::NoRegister);
2607 if (XRegFromAlias != RISCV::NoRegister)
2608 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
2609
2610 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
2611 // TableGen record rather than the AsmName to choose registers for InlineAsm
2612 // constraints, plus we want to match those names to the widest floating point
2613 // register type available, manually select floating point registers here.
2614 //
2615 // The second case is the ABI name of the register, so that frontends can also
2616 // use the ABI names in register constraint lists.
2617 if (Subtarget.hasStdExtF() || Subtarget.hasStdExtD()) {
2618 std::pair<Register, Register> FReg =
2619 StringSwitch<std::pair<Register, Register>>(Constraint.lower())
2620 .Cases("{f0}", "{ft0}", {RISCV::F0_F, RISCV::F0_D})
2621 .Cases("{f1}", "{ft1}", {RISCV::F1_F, RISCV::F1_D})
2622 .Cases("{f2}", "{ft2}", {RISCV::F2_F, RISCV::F2_D})
2623 .Cases("{f3}", "{ft3}", {RISCV::F3_F, RISCV::F3_D})
2624 .Cases("{f4}", "{ft4}", {RISCV::F4_F, RISCV::F4_D})
2625 .Cases("{f5}", "{ft5}", {RISCV::F5_F, RISCV::F5_D})
2626 .Cases("{f6}", "{ft6}", {RISCV::F6_F, RISCV::F6_D})
2627 .Cases("{f7}", "{ft7}", {RISCV::F7_F, RISCV::F7_D})
2628 .Cases("{f8}", "{fs0}", {RISCV::F8_F, RISCV::F8_D})
2629 .Cases("{f9}", "{fs1}", {RISCV::F9_F, RISCV::F9_D})
2630 .Cases("{f10}", "{fa0}", {RISCV::F10_F, RISCV::F10_D})
2631 .Cases("{f11}", "{fa1}", {RISCV::F11_F, RISCV::F11_D})
2632 .Cases("{f12}", "{fa2}", {RISCV::F12_F, RISCV::F12_D})
2633 .Cases("{f13}", "{fa3}", {RISCV::F13_F, RISCV::F13_D})
2634 .Cases("{f14}", "{fa4}", {RISCV::F14_F, RISCV::F14_D})
2635 .Cases("{f15}", "{fa5}", {RISCV::F15_F, RISCV::F15_D})
2636 .Cases("{f16}", "{fa6}", {RISCV::F16_F, RISCV::F16_D})
2637 .Cases("{f17}", "{fa7}", {RISCV::F17_F, RISCV::F17_D})
2638 .Cases("{f18}", "{fs2}", {RISCV::F18_F, RISCV::F18_D})
2639 .Cases("{f19}", "{fs3}", {RISCV::F19_F, RISCV::F19_D})
2640 .Cases("{f20}", "{fs4}", {RISCV::F20_F, RISCV::F20_D})
2641 .Cases("{f21}", "{fs5}", {RISCV::F21_F, RISCV::F21_D})
2642 .Cases("{f22}", "{fs6}", {RISCV::F22_F, RISCV::F22_D})
2643 .Cases("{f23}", "{fs7}", {RISCV::F23_F, RISCV::F23_D})
2644 .Cases("{f24}", "{fs8}", {RISCV::F24_F, RISCV::F24_D})
2645 .Cases("{f25}", "{fs9}", {RISCV::F25_F, RISCV::F25_D})
2646 .Cases("{f26}", "{fs10}", {RISCV::F26_F, RISCV::F26_D})
2647 .Cases("{f27}", "{fs11}", {RISCV::F27_F, RISCV::F27_D})
2648 .Cases("{f28}", "{ft8}", {RISCV::F28_F, RISCV::F28_D})
2649 .Cases("{f29}", "{ft9}", {RISCV::F29_F, RISCV::F29_D})
2650 .Cases("{f30}", "{ft10}", {RISCV::F30_F, RISCV::F30_D})
2651 .Cases("{f31}", "{ft11}", {RISCV::F31_F, RISCV::F31_D})
2652 .Default({RISCV::NoRegister, RISCV::NoRegister});
2653 if (FReg.first != RISCV::NoRegister)
2654 return Subtarget.hasStdExtD()
2655 ? std::make_pair(FReg.second, &RISCV::FPR64RegClass)
2656 : std::make_pair(FReg.first, &RISCV::FPR32RegClass);
2657 }
2658
2659 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
2660}
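// Hedged IR-level sketch (not part of this file): frontends such as rustc may
// emit ABI register names directly in explicit-register constraints, e.g.
//
//   call void asm sideeffect "mv t1, $0", "{a0}"(i32 %v)
//   call void asm sideeffect "fmv.s ft0, $0", "{fa0}"(float %f)
//
// The StringSwitch above maps "{a0}" to X10, and "{fa0}"/"{f10}" to F10_F or
// F10_D depending on whether the D extension is available.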
2661
2662unsigned
2663RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
2664 // Currently only support length 1 constraints.
2665 if (ConstraintCode.size() == 1) {
2666 switch (ConstraintCode[0]) {
2667 case 'A':
2668 return InlineAsm::Constraint_A;
2669 default:
2670 break;
2671 }
2672 }
2673
2674 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
2675}
2676
2677void RISCVTargetLowering::LowerAsmOperandForConstraint(
2678 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
2679 SelectionDAG &DAG) const {
2680 // Currently only support length 1 constraints.
2681 if (Constraint.length() == 1) {
2682 switch (Constraint[0]) {
2683 case 'I':
2684 // Validate & create a 12-bit signed immediate operand.
2685 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2686 uint64_t CVal = C->getSExtValue();
2687 if (isInt<12>(CVal))
2688 Ops.push_back(
2689 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
2690 }
2691 return;
2692 case 'J':
2693 // Validate & create an integer zero operand.
2694 if (auto *C = dyn_cast<ConstantSDNode>(Op))
2695 if (C->getZExtValue() == 0)
2696 Ops.push_back(
2697 DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
2698 return;
2699 case 'K':
2700 // Validate & create a 5-bit unsigned immediate operand.
2701 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2702 uint64_t CVal = C->getZExtValue();
2703 if (isUInt<5>(CVal))
2704 Ops.push_back(
2705 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
2706 }
2707 return;
2708 default:
2709 break;
2710 }
2711 }
2712 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2713}
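// Hedged usage sketch (GNU inline asm, not part of this file): the immediate
// constraints validated above can be used from C/C++ like
//
//   int out, in = 0;
//   asm volatile("addi %0, %1, %2" : "=r"(out) : "r"(in), "I"(42)); // 12-bit signed
//   asm volatile("slli %0, %1, %2" : "=r"(out) : "r"(in), "K"(5));  // 5-bit unsigned
//
// An out-of-range constant (e.g. "I"(4096)) is rejected here because no
// operand is pushed onto Ops, which typically surfaces as an invalid-operand
// inline asm diagnostic.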
2714
2715Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
2716 Instruction *Inst,
2717 AtomicOrdering Ord) const {
2718 if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
2719 return Builder.CreateFence(Ord);
2720 if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
2721 return Builder.CreateFence(AtomicOrdering::Release);
2722 return nullptr;
2723}
2724
2725Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
2726 Instruction *Inst,
2727 AtomicOrdering Ord) const {
2728 if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
2729 return Builder.CreateFence(AtomicOrdering::Acquire);
2730 return nullptr;
2731}
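// Hedged IR-level sketch of the fence placement chosen by the two hooks above
// for a sequentially consistent atomic load (AtomicExpandPass calls them and
// typically relaxes the ordering on the access itself):
//
//   fence seq_cst                       ; emitLeadingFence
//   %v = load atomic i32, i32* %p monotonic, align 4
//   fence acquire                       ; emitTrailingFence
//
// A release (or stronger) atomic store instead gets a single leading
// "fence release" and no trailing fence.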
2732
2733TargetLowering::AtomicExpansionKind
2734RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
2735 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
2736 // point operations can't be used in an lr/sc sequence without breaking the
2737 // forward-progress guarantee.
2738 if (AI->isFloatingPointOperation())
2739 return AtomicExpansionKind::CmpXChg;
2740
2741 unsigned Size = AI->getType()->getPrimitiveSizeInBits();
2742 if (Size == 8 || Size == 16)
2743 return AtomicExpansionKind::MaskedIntrinsic;
2744 return AtomicExpansionKind::None;
2745}
2746
2747static Intrinsic::ID
2748getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
2749 if (XLen == 32) {
2750 switch (BinOp) {
2751 default:
2752 llvm_unreachable("Unexpected AtomicRMW BinOp");
2753 case AtomicRMWInst::Xchg:
2754 return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
2755 case AtomicRMWInst::Add:
2756 return Intrinsic::riscv_masked_atomicrmw_add_i32;
2757 case AtomicRMWInst::Sub:
2758 return Intrinsic::riscv_masked_atomicrmw_sub_i32;
2759 case AtomicRMWInst::Nand:
2760 return Intrinsic::riscv_masked_atomicrmw_nand_i32;
2761 case AtomicRMWInst::Max:
2762 return Intrinsic::riscv_masked_atomicrmw_max_i32;
2763 case AtomicRMWInst::Min:
2764 return Intrinsic::riscv_masked_atomicrmw_min_i32;
2765 case AtomicRMWInst::UMax:
2766 return Intrinsic::riscv_masked_atomicrmw_umax_i32;
2767 case AtomicRMWInst::UMin:
2768 return Intrinsic::riscv_masked_atomicrmw_umin_i32;
2769 }
2770 }
2771
2772 if (XLen == 64) {
2773 switch (BinOp) {
2774 default:
2775 llvm_unreachable("Unexpected AtomicRMW BinOp");
2776 case AtomicRMWInst::Xchg:
2777 return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
2778 case AtomicRMWInst::Add:
2779 return Intrinsic::riscv_masked_atomicrmw_add_i64;
2780 case AtomicRMWInst::Sub:
2781 return Intrinsic::riscv_masked_atomicrmw_sub_i64;
2782 case AtomicRMWInst::Nand:
2783 return Intrinsic::riscv_masked_atomicrmw_nand_i64;
2784 case AtomicRMWInst::Max:
2785 return Intrinsic::riscv_masked_atomicrmw_max_i64;
2786 case AtomicRMWInst::Min:
2787 return Intrinsic::riscv_masked_atomicrmw_min_i64;
2788 case AtomicRMWInst::UMax:
2789 return Intrinsic::riscv_masked_atomicrmw_umax_i64;
2790 case AtomicRMWInst::UMin:
2791 return Intrinsic::riscv_masked_atomicrmw_umin_i64;
2792 }
2793 }
2794
2795 llvm_unreachable("Unexpected XLen\n");
2796}
2797
2798Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
2799 IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
2800 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
2801 unsigned XLen = Subtarget.getXLen();
2802 Value *Ordering =
2803 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
2804 Type *Tys[] = {AlignedAddr->getType()};
2805 Function *LrwOpScwLoop = Intrinsic::getDeclaration(
2806 AI->getModule(),
2807 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
2808
2809 if (XLen == 64) {
2810 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
2811 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
2812 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
2813 }
2814
2815 Value *Result;
2816
2817 // Must pass the shift amount needed to sign extend the loaded value prior
2818 // to performing a signed comparison for min/max. ShiftAmt is the number of
2819 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
2820 // is the number of bits to left+right shift the value in order to
2821 // sign-extend.
2822 if (AI->getOperation() == AtomicRMWInst::Min ||
2823 AI->getOperation() == AtomicRMWInst::Max) {
2824 const DataLayout &DL = AI->getModule()->getDataLayout();
2825 unsigned ValWidth =
2826 DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
2827 Value *SextShamt =
2828 Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
2829 Result = Builder.CreateCall(LrwOpScwLoop,
2830 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
2831 } else {
2832 Result =
2833 Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
2834 }
2835
2836 if (XLen == 64)
2837 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
2838 return Result;
2839}
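// Hedged sketch of the call built above for a sub-word atomicrmw add on RV32
// (AlignedAddr/Incr/Mask are computed by AtomicExpandPass; the intrinsic's
// pointer-type mangling suffix is omitted here):
//
//   %res = call i32 @llvm.riscv.masked.atomicrmw.add.i32(
//              i32* %AlignedAddr, i32 %Incr, i32 %Mask, i32 %Ordering)
//
// For Min/Max the extra SextShamt operand is passed between Mask and Ordering
// so the LL/SC loop can sign-extend the loaded field before comparing.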
2840
2841TargetLowering::AtomicExpansionKind
2842RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
2843 AtomicCmpXchgInst *CI) const {
2844 unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
2845 if (Size == 8 || Size == 16)
2846 return AtomicExpansionKind::MaskedIntrinsic;
2847 return AtomicExpansionKind::None;
2848}
2849
2850Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
2851 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2852 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2853 unsigned XLen = Subtarget.getXLen();
2854 Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
2855 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
2856 if (XLen == 64) {
2857 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
2858 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
2859 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
2860 CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
2861 }
2862 Type *Tys[] = {AlignedAddr->getType()};
2863 Function *MaskedCmpXchg =
2864 Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
2865 Value *Result = Builder.CreateCall(
2866 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
2867 if (XLen == 64)
2868 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
2869 return Result;
2870}
2871
2872unsigned RISCVTargetLowering::getExceptionPointerRegister(
2873 const Constant *PersonalityFn) const {
2874 return RISCV::X10;
2875}
2876
2877unsigned RISCVTargetLowering::getExceptionSelectorRegister(
2878 const Constant *PersonalityFn) const {
2879 return RISCV::X11;
2880}
2881
2882bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
2883 // Return false to suppress the unnecessary extensions if the LibCall
2884 // arguments or the return value is of type f32 under the LP64 ABI.
2885 RISCVABI::ABI ABI = Subtarget.getTargetABI();
2886 if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
2887 return false;
2888
2889 return true;
2890}
2891
2892#define GET_REGISTER_MATCHER
2893#include "RISCVGenAsmMatcher.inc"
2894
2895Register
2896RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
2897 const MachineFunction &MF) const {
2898 Register Reg = MatchRegisterAltName(RegName);
2899 if (Reg == RISCV::NoRegister)
2900 Reg = MatchRegisterName(RegName);
2901 if (Reg == RISCV::NoRegister)
2902 report_fatal_error(
2903 Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
2904 BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
2905 if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
2906 report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
2907 StringRef(RegName) + "\"."));
2908 return Reg;
2909}
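// Hedged IR-level sketch (not part of this file): this hook resolves the
// register name used by llvm.read_register / llvm.write_register, e.g.
//
//   %sp = call i64 @llvm.read_register.i64(metadata !0)
//   !0 = !{!"sp"}
//
// "sp" succeeds because it is always reserved; a scratch register such as "t0"
// hits the "non-reserved register" fatal error above unless the user reserves
// it (the isRegisterReservedByUser check).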

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/CodeGen/TargetLowering.h

1//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10 /// This file describes how to lower LLVM code to machine code. This has three
11/// main components:
12///
13/// 1. Which ValueTypes are natively supported by the target.
14/// 2. Which operations are supported for supported ValueTypes.
15/// 3. Cost thresholds for alternative implementations of certain operations.
16///
17/// In addition it has a few other components, like information about FP
18/// immediates.
19///
20//===----------------------------------------------------------------------===//
21
22#ifndef LLVM_CODEGEN_TARGETLOWERING_H
23#define LLVM_CODEGEN_TARGETLOWERING_H
24
25#include "llvm/ADT/APInt.h"
26#include "llvm/ADT/ArrayRef.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/STLExtras.h"
29#include "llvm/ADT/SmallVector.h"
30#include "llvm/ADT/StringRef.h"
31#include "llvm/Analysis/ProfileSummaryInfo.h"
32#include "llvm/CodeGen/DAGCombine.h"
33#include "llvm/CodeGen/ISDOpcodes.h"
34#include "llvm/CodeGen/RuntimeLibcalls.h"
35#include "llvm/CodeGen/SelectionDAG.h"
36#include "llvm/CodeGen/SelectionDAGNodes.h"
37#include "llvm/CodeGen/TargetCallingConv.h"
38#include "llvm/CodeGen/ValueTypes.h"
39#include "llvm/IR/Attributes.h"
40#include "llvm/IR/CallSite.h"
41#include "llvm/IR/CallingConv.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DerivedTypes.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/IRBuilder.h"
46#include "llvm/IR/InlineAsm.h"
47#include "llvm/IR/Instruction.h"
48#include "llvm/IR/Instructions.h"
49#include "llvm/IR/Type.h"
50#include "llvm/MC/MCRegisterInfo.h"
51#include "llvm/Support/Alignment.h"
52#include "llvm/Support/AtomicOrdering.h"
53#include "llvm/Support/Casting.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/MachineValueType.h"
56#include "llvm/Target/TargetMachine.h"
57#include "llvm/Transforms/Utils/SizeOpts.h"
58#include <algorithm>
59#include <cassert>
60#include <climits>
61#include <cstdint>
62#include <iterator>
63#include <map>
64#include <string>
65#include <utility>
66#include <vector>
67
68namespace llvm {
69
70class BranchProbability;
71class CCState;
72class CCValAssign;
73class Constant;
74class FastISel;
75class FunctionLoweringInfo;
76class GlobalValue;
77class GISelKnownBits;
78class IntrinsicInst;
79struct KnownBits;
80class LegacyDivergenceAnalysis;
81class LLVMContext;
82class MachineBasicBlock;
83class MachineFunction;
84class MachineInstr;
85class MachineJumpTableInfo;
86class MachineLoop;
87class MachineRegisterInfo;
88class MCContext;
89class MCExpr;
90class Module;
91class TargetRegisterClass;
92class TargetLibraryInfo;
93class TargetRegisterInfo;
94class Value;
95
96namespace Sched {
97
98 enum Preference {
99 None, // No preference
100 Source, // Follow source order.
101 RegPressure, // Scheduling for lowest register pressure.
102 Hybrid, // Scheduling for both latency and register pressure.
103 ILP, // Scheduling for ILP in low register pressure mode.
104 VLIW // Scheduling for VLIW targets.
105 };
106
107} // end namespace Sched
108
109/// This base class for TargetLowering contains the SelectionDAG-independent
110/// parts that can be used from the rest of CodeGen.
111class TargetLoweringBase {
112public:
113 /// This enum indicates whether operations are valid for a target, and if not,
114 /// what action should be used to make them valid.
115 enum LegalizeAction : uint8_t {
116 Legal, // The target natively supports this operation.
117 Promote, // This operation should be executed in a larger type.
118 Expand, // Try to expand this to other ops, otherwise use a libcall.
119 LibCall, // Don't try to expand this to other ops, always use a libcall.
120 Custom // Use the LowerOperation hook to implement custom lowering.
121 };
122
123 /// This enum indicates whether types are legal for a target, and if not,
124 /// what action should be used to make them valid.
125 enum LegalizeTypeAction : uint8_t {
126 TypeLegal, // The target natively supports this type.
127 TypePromoteInteger, // Replace this integer with a larger one.
128 TypeExpandInteger, // Split this integer into two of half the size.
129 TypeSoftenFloat, // Convert this float to a same size integer type.
130 TypeExpandFloat, // Split this float into two of half the size.
131 TypeScalarizeVector, // Replace this one-element vector with its element.
132 TypeSplitVector, // Split this vector into two of half the size.
133 TypeWidenVector, // This vector should be widened into a larger vector.
134 TypePromoteFloat // Replace this float with a larger one.
135 };
136
137 /// LegalizeKind holds the legalization kind that needs to happen to EVT
138 /// in order to type-legalize it.
139 using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
140
141 /// Enum that describes how the target represents true/false values.
142 enum BooleanContent {
143 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
144 ZeroOrOneBooleanContent, // All bits zero except for bit 0.
145 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
146 };
147
148 /// Enum that describes what type of support for selects the target has.
149 enum SelectSupportKind {
150 ScalarValSelect, // The target supports scalar selects (ex: cmov).
151 ScalarCondVectorVal, // The target supports selects with a scalar condition
152 // and vector values (ex: cmov).
153 VectorMaskSelect // The target supports vector selects with a vector
154 // mask (ex: x86 blends).
155 };
156
157 /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
158 /// to, if at all. Exists because different targets have different levels of
159 /// support for these atomic instructions, and also have different options
160 /// w.r.t. what they should expand to.
161 enum class AtomicExpansionKind {
162 None, // Don't expand the instruction.
163 LLSC, // Expand the instruction into loadlinked/storeconditional; used
164 // by ARM/AArch64.
165 LLOnly, // Expand the (load) instruction into just a load-linked, which has
166 // greater atomic guarantees than a normal load.
167 CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
168 MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
169 };
170
171 /// Enum that specifies when a multiplication should be expanded.
172 enum class MulExpansionKind {
173 Always, // Always expand the instruction.
174 OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
175 // or custom.
176 };
177
178 class ArgListEntry {
179 public:
180 Value *Val = nullptr;
181 SDValue Node = SDValue();
182 Type *Ty = nullptr;
183 bool IsSExt : 1;
184 bool IsZExt : 1;
185 bool IsInReg : 1;
186 bool IsSRet : 1;
187 bool IsNest : 1;
188 bool IsByVal : 1;
189 bool IsInAlloca : 1;
190 bool IsReturned : 1;
191 bool IsSwiftSelf : 1;
192 bool IsSwiftError : 1;
193 bool IsCFGuardTarget : 1;
194 uint16_t Alignment = 0;
195 Type *ByValType = nullptr;
196
197 ArgListEntry()
198 : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
199 IsNest(false), IsByVal(false), IsInAlloca(false), IsReturned(false),
200 IsSwiftSelf(false), IsSwiftError(false), IsCFGuardTarget(false) {}
201
202 void setAttributes(const CallBase *Call, unsigned ArgIdx);
203
204 void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx) {
205 return setAttributes(cast<CallBase>(CS->getInstruction()), ArgIdx);
206 }
207 };
208 using ArgListTy = std::vector<ArgListEntry>;
209
210 virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
211 ArgListTy &Args) const {};
212
213 static ISD::NodeType getExtendForContent(BooleanContent Content) {
214 switch (Content) {
215 case UndefinedBooleanContent:
216 // Extend by adding rubbish bits.
217 return ISD::ANY_EXTEND;
218 case ZeroOrOneBooleanContent:
219 // Extend by adding zero bits.
220 return ISD::ZERO_EXTEND;
221 case ZeroOrNegativeOneBooleanContent:
222 // Extend by copying the sign bit.
223 return ISD::SIGN_EXTEND;
224 }
225 llvm_unreachable("Invalid content kind");
226 }
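// Worked example (sketch): with ZeroOrNegativeOneBooleanContent a setcc that
// produces i1 true is widened with ISD::SIGN_EXTEND, i.e. 0xFFFFFFFF in an
// i32 register; with ZeroOrOneBooleanContent it becomes 0x00000001 via
// ISD::ZERO_EXTEND.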
227
228 explicit TargetLoweringBase(const TargetMachine &TM);
229 TargetLoweringBase(const TargetLoweringBase &) = delete;
230 TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
231 virtual ~TargetLoweringBase() = default;
232
233 /// Return true if the target supports strict float operations.
234 bool isStrictFPEnabled() const {
235 return IsStrictFPEnabled;
236 }
237
238protected:
239 /// Initialize all of the actions to default values.
240 void initActions();
241
242public:
243 const TargetMachine &getTargetMachine() const { return TM; }
244
245 virtual bool useSoftFloat() const { return false; }
246
247 /// Return the pointer type for the given address space, defaults to
248 /// the pointer type from the data layout.
249 /// FIXME: The default needs to be removed once all the code is updated.
250 virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
251 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
252 }
253
254 /// Return the in-memory pointer type for the given address space, defaults to
255 /// the pointer type from the data layout. FIXME: The default needs to be
256 /// removed once all the code is updated.
257 MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
258 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
259 }
260
261 /// Return the type for frame index, which is determined by
262 /// the alloca address space specified through the data layout.
263 MVT getFrameIndexTy(const DataLayout &DL) const {
264 return getPointerTy(DL, DL.getAllocaAddrSpace());
265 }
266
267 /// Return the type for operands of fence.
268 /// TODO: Let fence operands be of i32 type and remove this.
269 virtual MVT getFenceOperandTy(const DataLayout &DL) const {
270 return getPointerTy(DL);
271 }
272
273 /// EVT is not used in-tree, but is used by out-of-tree targets.
274 /// A documentation for this function would be nice...
275 virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
276
277 EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
278 bool LegalTypes = true) const;
279
280 /// Returns the type to be used for the index operand of:
281 /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
282 /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
283 virtual MVT getVectorIdxTy(const DataLayout &DL) const {
284 return getPointerTy(DL);
285 }
286
287 virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
288 return true;
289 }
290
291 /// Return true if it is profitable to convert a select of FP constants into
292 /// a constant pool load whose address depends on the select condition. The
293 /// parameter may be used to differentiate a select with FP compare from
294 /// integer compare.
295 virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
296 return true;
297 }
298
299 /// Return true if multiple condition registers are available.
300 bool hasMultipleConditionRegisters() const {
301 return HasMultipleConditionRegisters;
302 }
303
304 /// Return true if the target has BitExtract instructions.
305 bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
306
307 /// Return the preferred vector type legalization action.
308 virtual TargetLoweringBase::LegalizeTypeAction
309 getPreferredVectorAction(MVT VT) const {
310 // The default action for one element vectors is to scalarize
311 if (VT.getVectorNumElements() == 1)
312 return TypeScalarizeVector;
313 // The default action for an odd-width vector is to widen.
314 if (!VT.isPow2VectorType())
315 return TypeWidenVector;
316 // The default action for other vectors is to promote
317 return TypePromoteInteger;
318 }
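// Worked example (sketch) of the default policy above: v1i64 is scalarized to
// i64, a non-power-of-two type like v3i32 is widened (typically to v4i32), and
// a power-of-two vector such as v4i8 falls through to TypePromoteInteger
// unless the target overrides this hook.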
319
320 // There are two general methods for expanding a BUILD_VECTOR node:
321 // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
322 // them together.
323 // 2. Build the vector on the stack and then load it.
324 // If this function returns true, then method (1) will be used, subject to
325 // the constraint that all of the necessary shuffles are legal (as determined
326 // by isShuffleMaskLegal). If this function returns false, then method (2) is
327 // always used. The vector type, and the number of defined values, are
328 // provided.
329 virtual bool
330 shouldExpandBuildVectorWithShuffles(EVT /* VT */,
331 unsigned DefinedValues) const {
332 return DefinedValues < 3;
333 }
334
335 /// Return true if integer divide is usually cheaper than a sequence of
336 /// several shifts, adds, and multiplies for this target.
337 /// The definition of "cheaper" may depend on whether we're optimizing
338 /// for speed or for size.
339 virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }
340
341 /// Return true if the target can handle a standalone remainder operation.
342 virtual bool hasStandaloneRem(EVT VT) const {
343 return true;
344 }
345
346 /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
347 virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
348 // Default behavior is to replace SQRT(X) with X*RSQRT(X).
349 return false;
350 }
351
352 /// Reciprocal estimate status values used by the functions below.
353 enum ReciprocalEstimate : int {
354 Unspecified = -1,
355 Disabled = 0,
356 Enabled = 1
357 };
358
359 /// Return a ReciprocalEstimate enum value for a square root of the given type
360 /// based on the function's attributes. If the operation is not overridden by
361 /// the function's attributes, "Unspecified" is returned and target defaults
362 /// are expected to be used for instruction selection.
363 int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
364
365 /// Return a ReciprocalEstimate enum value for a division of the given type
366 /// based on the function's attributes. If the operation is not overridden by
367 /// the function's attributes, "Unspecified" is returned and target defaults
368 /// are expected to be used for instruction selection.
369 int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
370
371 /// Return the refinement step count for a square root of the given type based
372 /// on the function's attributes. If the operation is not overridden by
373 /// the function's attributes, "Unspecified" is returned and target defaults
374 /// are expected to be used for instruction selection.
375 int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
376
377 /// Return the refinement step count for a division of the given type based
378 /// on the function's attributes. If the operation is not overridden by
379 /// the function's attributes, "Unspecified" is returned and target defaults
380 /// are expected to be used for instruction selection.
381 int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
382
383 /// Returns true if target has indicated at least one type should be bypassed.
384 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
385
386 /// Returns map of slow types for division or remainder with corresponding
387 /// fast types
388 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
389 return BypassSlowDivWidths;
390 }
391
392 /// Return true if Flow Control is an expensive operation that should be
393 /// avoided.
394 bool isJumpExpensive() const { return JumpIsExpensive; }
395
396 /// Return true if selects are only cheaper than branches if the branch is
397 /// unlikely to be predicted right.
398 bool isPredictableSelectExpensive() const {
399 return PredictableSelectIsExpensive;
400 }
401
402 /// If a branch or a select condition is skewed in one direction by more than
403 /// this factor, it is very likely to be predicted correctly.
404 virtual BranchProbability getPredictableBranchThreshold() const;
405
406 /// Return true if the following transform is beneficial:
407 /// fold (conv (load x)) -> (load (conv*)x)
408 /// On architectures that don't natively support some vector loads
409 /// efficiently, casting the load to a smaller vector of larger types and
410 /// loading is more efficient, however, this can be undone by optimizations in
411 /// dag combiner.
412 virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
413 const SelectionDAG &DAG,
414 const MachineMemOperand &MMO) const {
415 // Don't do if we could do an indexed load on the original type, but not on
416 // the new one.
417 if (!LoadVT.isSimple() || !BitcastVT.isSimple())
418 return true;
419
420 MVT LoadMVT = LoadVT.getSimpleVT();
421
422 // Don't bother doing this if it's just going to be promoted again later, as
423 // doing so might interfere with other combines.
424 if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
425 getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
426 return false;
427
428 bool Fast = false;
429 return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
430 MMO, &Fast) && Fast;
431 }
432
433 /// Return true if the following transform is beneficial:
434 /// (store (y (conv x)), y*)) -> (store x, (x*))
435 virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
436 const SelectionDAG &DAG,
437 const MachineMemOperand &MMO) const {
438 // Default to the same logic as loads.
439 return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
440 }
441
442 /// Return true if it is expected to be cheaper to do a store of a non-zero
443 /// vector constant with the given size and type for the address space than to
444 /// store the individual scalar element constants.
445 virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
446 unsigned NumElem,
447 unsigned AddrSpace) const {
448 return false;
449 }
450
451 /// Allow store merging for the specified type after legalization in addition
452 /// to before legalization. This may transform stores that do not exist
453 /// earlier (for example, stores created from intrinsics).
454 virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
455 return true;
456 }
457
458 /// Returns if it's reasonable to merge stores to MemVT size.
459 virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
460 const SelectionDAG &DAG) const {
461 return true;
462 }
463
464 /// Return true if it is cheap to speculate a call to intrinsic cttz.
465 virtual bool isCheapToSpeculateCttz() const {
466 return false;
467 }
468
469 /// Return true if it is cheap to speculate a call to intrinsic ctlz.
470 virtual bool isCheapToSpeculateCtlz() const {
471 return false;
472 }
473
474 /// Return true if ctlz instruction is fast.
475 virtual bool isCtlzFast() const {
476 return false;
477 }
478
479 /// Return true if instruction generated for equality comparison is folded
480 /// with instruction generated for signed comparison.
481 virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }
482
483 /// Return true if it is safe to transform an integer-domain bitwise operation
484 /// into the equivalent floating-point operation. This should be set to true
485 /// if the target has IEEE-754-compliant fabs/fneg operations for the input
486 /// type.
487 virtual bool hasBitPreservingFPLogic(EVT VT) const {
488 return false;
489 }
490
491 /// Return true if it is cheaper to split the store of a merged int val
492 /// from a pair of smaller values into multiple stores.
493 virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
494 return false;
495 }
496
497 /// Return if the target supports combining a
498 /// chain like:
499 /// \code
500 /// %andResult = and %val1, #mask
501 /// %icmpResult = icmp %andResult, 0
502 /// \endcode
503 /// into a single machine instruction of a form like:
504 /// \code
505 /// cc = test %register, #mask
506 /// \endcode
507 virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
508 return false;
509 }
510
511 /// Use bitwise logic to make pairs of compares more efficient. For example:
512 /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
513 /// This should be true when it takes more than one instruction to lower
514 /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
515 /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
516 virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
517 return false;
518 }
519
520 /// Return the preferred operand type if the target has a quick way to compare
521 /// integer values of the given size. Assume that any legal integer type can
522 /// be compared efficiently. Targets may override this to allow illegal wide
523 /// types to return a vector type if there is support to compare that type.
524 virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
525 MVT VT = MVT::getIntegerVT(NumBits);
526 return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
527 }
528
529 /// Return true if the target should transform:
530 /// (X & Y) == Y ---> (~X & Y) == 0
531 /// (X & Y) != Y ---> (~X & Y) != 0
532 ///
533 /// This may be profitable if the target has a bitwise and-not operation that
534 /// sets comparison flags. A target may want to limit the transformation based
535 /// on the type of Y or if Y is a constant.
536 ///
537 /// Note that the transform will not occur if Y is known to be a power-of-2
538 /// because a mask and compare of a single bit can be handled by inverting the
539 /// predicate, for example:
540 /// (X & 8) == 8 ---> (X & 8) != 0
541 virtual bool hasAndNotCompare(SDValue Y) const {
542 return false;
543 }
544
545 /// Return true if the target has a bitwise and-not operation:
546 /// X = ~A & B
547 /// This can be used to simplify select or other instructions.
548 virtual bool hasAndNot(SDValue X) const {
549 // If the target has the more complex version of this operation, assume that
550 // it has this operation too.
551 return hasAndNotCompare(X);
552 }
553
554 /// Return true if the target has a bit-test instruction:
555 /// (X & (1 << Y)) ==/!= 0
556 /// This knowledge can be used to prevent breaking the pattern,
557 /// or creating it if it could be recognized.
558 virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }
559
560 /// There are two ways to clear extreme bits (either low or high):
561 /// Mask: x & (-1 << y) (the instcombine canonical form)
562 /// Shifts: x >> y << y
563 /// Return true if the variant with 2 variable shifts is preferred.
564 /// Return false if there is no preference.
565 virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
566 // By default, let's assume that no one prefers shifts.
567 return false;
568 }
569
570 /// Return true if it is profitable to fold a pair of shifts into a mask.
571 /// This is usually true on most targets. But some targets, like Thumb1,
572 /// have immediate shift instructions, but no immediate "and" instruction;
573 /// this makes the fold unprofitable.
574 virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
575 CombineLevel Level) const {
576 return true;
577 }
578
579 /// Should we transform the IR-optimal check for whether the given truncation
580 /// down into KeptBits would be truncating or not:
581 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
582 /// Into its more traditional form:
583 /// ((%x << C) a>> C) dstcond %x
584 /// Return true if we should transform.
585 /// Return false if there is no preference.
586 virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
587 unsigned KeptBits) const {
588 // By default, let's assume that no one prefers shifts.
589 return false;
590 }
591
592 /// Given the pattern
593 /// (X & (C l>>/<< Y)) ==/!= 0
594 /// return true if it should be transformed into:
595 /// ((X <</l>> Y) & C) ==/!= 0
596 /// WARNING: if 'X' is a constant, the fold may deadlock!
597 /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
598 /// here because it can end up being not linked in.
599 virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
600 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
601 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
602 SelectionDAG &DAG) const {
603 if (hasBitTest(X, Y)) {
604 // One interesting pattern that we'd want to form is 'bit test':
605 // ((1 << Y) & C) ==/!= 0
606 // But we also need to be careful not to try to reverse that fold.
607
608 // Is this '1 << Y' ?
609 if (OldShiftOpcode == ISD::SHL && CC->isOne())
610 return false; // Keep the 'bit test' pattern.
611
612 // Will it be '1 << Y' after the transform ?
613 if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
614 return true; // Do form the 'bit test' pattern.
615 }
616
617 // If 'X' is a constant, and we transform, then we will immediately
618 // try to undo the fold, thus causing endless combine loop.
619 // So by default, let's assume everyone prefers the fold
620 // iff 'X' is not a constant.
621 return !XC;
622 }
623
624 /// These two forms are equivalent:
625 /// sub %y, (xor %x, -1)
626 /// add (add %x, 1), %y
627 /// The variant with two add's is IR-canonical.
628 /// Some targets may prefer one to the other.
629 virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
630 // By default, let's assume that everyone prefers the form with two add's.
631 return true;
632 }
633
634 /// Return true if the target wants to use the optimization that
635 /// turns ext(promotableInst1(...(promotableInstN(load)))) into
636 /// promotedInst1(...(promotedInstN(ext(load)))).
637 bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
638
639 /// Return true if the target can combine store(extractelement VectorTy,
640 /// Idx).
641 /// \p Cost[out] gives the cost of that transformation when this is true.
642 virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
643 unsigned &Cost) const {
644 return false;
645 }
646
647 /// Return true if inserting a scalar into a variable element of an undef
648 /// vector is more efficiently handled by splatting the scalar instead.
649 virtual bool shouldSplatInsEltVarIndex(EVT) const {
650 return false;
651 }
652
653 /// Return true if the target always benefits from combining into FMA for a
654 /// given value type. This must typically return false on targets where FMA
655 /// takes more cycles to execute than FADD.
656 virtual bool enableAggressiveFMAFusion(EVT VT) const {
657 return false;
658 }
659
660 /// Return the ValueType of the result of SETCC operations.
661 virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
662 EVT VT) const;
663
664 /// Return the ValueType for comparison libcalls. Comparison libcalls include
665 /// floating point comparison calls, and Ordered/Unordered check calls on
666 /// floating point numbers.
667 virtual
668 MVT::SimpleValueType getCmpLibcallReturnType() const;
669
670 /// For targets without i1 registers, this gives the nature of the high-bits
671 /// of boolean values held in types wider than i1.
672 ///
673 /// "Boolean values" are special true/false values produced by nodes like
674 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
675 /// Not to be confused with general values promoted from i1. Some cpus
676 /// distinguish between vectors of boolean and scalars; the isVec parameter
677 /// selects between the two kinds. For example on X86 a scalar boolean should
678 /// be zero extended from i1, while the elements of a vector of booleans
679 /// should be sign extended from i1.
680 ///
681 /// Some cpus also treat floating point types the same way as they treat
682 /// vectors instead of the way they treat scalars.
683 BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
684 if (isVec)
685 return BooleanVectorContents;
686 return isFloat ? BooleanFloatContents : BooleanContents;
687 }
688
689 BooleanContent getBooleanContents(EVT Type) const {
690 return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
691 }
692
693 /// Return target scheduling preference.
694 Sched::Preference getSchedulingPreference() const {
695 return SchedPreferenceInfo;
696 }
697
698 /// Some schedulers, e.g. hybrid, can switch to different scheduling heuristics
699 /// for different nodes. This function returns the preference (or none) for
700 /// the given node.
701 virtual Sched::Preference getSchedulingPreference(SDNode *) const {
702 return Sched::None;
703 }
704
705 /// Return the register class that should be used for the specified value
706 /// type.
707 virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const {
708 (void)isDivergent;
709 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
710 assert(RC && "This value type is not natively supported!");
711 return RC;
712 }
713
714 /// Allows target to decide about the register class of the
715 /// specific value that is live outside the defining block.
716 /// Returns true if the value needs uniform register class.
717 virtual bool requiresUniformRegister(MachineFunction &MF,
718 const Value *) const {
719 return false;
720 }
721
722 /// Return the 'representative' register class for the specified value
723 /// type.
724 ///
725 /// The 'representative' register class is the largest legal super-reg
726 /// register class for the register class of the value type. For example, on
727 /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
728 /// register class is GR64 on x86_64.
729 virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
730 const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
731 return RC;
732 }
733
734 /// Return the cost of the 'representative' register class for the specified
735 /// value type.
736 virtual uint8_t getRepRegClassCostFor(MVT VT) const {
737 return RepRegClassCostForVT[VT.SimpleTy];
738 }
739
740 /// Return true if SHIFT instructions should be expanded to SHIFT_PARTS
741 /// instructions, and false if a library call is preferred (e.g for code-size
742 /// reasons).
743 virtual bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
744 return true;
745 }
746
747 /// Return true if the target has native support for the specified value type.
748 /// This means that it has a register that directly holds it without
749 /// promotions or expansions.
750 bool isTypeLegal(EVT VT) const {
751 assert(!VT.isSimple() ||
752 (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
753 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
754 }
755
756 class ValueTypeActionImpl {
757 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
758 /// that indicates how instruction selection should deal with the type.
759 LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];
760
761 public:
762 ValueTypeActionImpl() {
763 std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
764 TypeLegal);
765 }
766
767 LegalizeTypeAction getTypeAction(MVT VT) const {
768 return ValueTypeActions[VT.SimpleTy];
769 }
770
771 void setTypeAction(MVT VT, LegalizeTypeAction Action) {
772 ValueTypeActions[VT.SimpleTy] = Action;
773 }
774 };
775
776 const ValueTypeActionImpl &getValueTypeActions() const {
777 return ValueTypeActions;
778 }
779
780 /// Return how we should legalize values of this type, either it is already
781 /// legal (return 'Legal') or we need to promote it to a larger type (return
782 /// 'Promote'), or we need to expand it into multiple registers of smaller
783 /// integer type (return 'Expand'). 'Custom' is not an option.
784 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
785 return getTypeConversion(Context, VT).first;
786 }
787 LegalizeTypeAction getTypeAction(MVT VT) const {
788 return ValueTypeActions.getTypeAction(VT);
789 }
790
791 /// For types supported by the target, this is an identity function. For
792 /// types that must be promoted to larger types, this returns the larger type
793 /// to promote to. For integer types that are larger than the largest integer
794 /// register, this contains one step in the expansion to get to the smaller
795 /// register. For illegal floating point types, this returns the integer type
796 /// to transform to.
797 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
798 return getTypeConversion(Context, VT).second;
799 }
800
801 /// For types supported by the target, this is an identity function. For
802 /// types that must be expanded (i.e. integer types that are larger than the
803 /// largest integer register or illegal floating point types), this returns
804 /// the largest legal type it will be expanded to.
805 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
806 assert(!VT.isVector());
807 while (true) {
808 switch (getTypeAction(Context, VT)) {
809 case TypeLegal:
810 return VT;
811 case TypeExpandInteger:
812 VT = getTypeToTransformTo(Context, VT);
813 break;
814 default:
815 llvm_unreachable("Type is not legal nor is it to be expanded!");
816 }
817 }
818 }
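// Worked example (sketch): on a target whose widest legal integer type is i64,
// getTypeToExpandTo(Ctx, i256) walks TypeExpandInteger steps i256 -> i128 ->
// i64 and returns i64, the largest legal type the value will be split into.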
819
820 /// Vector types are broken down into some number of legal first class types.
821 /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
822 /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
823 /// turns into 4 EVT::i32 values with both PPC and X86.
824 ///
825 /// This method returns the number of registers needed, and the VT for each
826 /// register. It also returns the VT and quantity of the intermediate values
827 /// before they are promoted/expanded.
828 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
829 EVT &IntermediateVT,
830 unsigned &NumIntermediates,
831 MVT &RegisterVT) const;
832
833 /// Certain targets such as MIPS require that some types such as vectors are
834 /// always broken down into scalars in some contexts. This occurs even if the
835 /// vector type is legal.
836 virtual unsigned getVectorTypeBreakdownForCallingConv(
837 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
838 unsigned &NumIntermediates, MVT &RegisterVT) const {
839 return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
840 RegisterVT);
841 }
842
843 struct IntrinsicInfo {
844 unsigned opc = 0; // target opcode
845 EVT memVT; // memory VT
846
847 // value representing memory location
848 PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;
849
850 int offset = 0; // offset off of ptrVal
851 uint64_t size = 0; // the size of the memory location
852 // (taken from memVT if zero)
853 MaybeAlign align = Align::None(); // alignment
854
855 MachineMemOperand::Flags flags = MachineMemOperand::MONone;
856 IntrinsicInfo() = default;
857 };
858
859 /// Given an intrinsic, checks whether, on the target, the intrinsic will need
860 /// to map to a MemIntrinsicNode (touches memory). If this is the case, it
861 /// returns true and stores the intrinsic information into the IntrinsicInfo
862 /// that was passed to the function.
863 virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
864 MachineFunction &,
865 unsigned /*Intrinsic*/) const {
866 return false;
867 }
868
869 /// Returns true if the target can instruction select the specified FP
870 /// immediate natively. If false, the legalizer will materialize the FP
871 /// immediate as a load from a constant pool.
872 virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
873 bool ForCodeSize = false) const {
874 return false;
875 }
876
877 /// Targets can use this to indicate that they only support *some*
878 /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
879 /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
880 /// legal.
881 virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
882 return true;
883 }
884
885 /// Returns true if the operation can trap for the value type.
886 ///
887 /// VT must be a legal type. By default, we optimistically assume most
888 /// operations don't trap except for integer divide and remainder.
889 virtual bool canOpTrap(unsigned Op, EVT VT) const;
890
891 /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
892 /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
893 /// constant pool entry.
894 virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
895 EVT /*VT*/) const {
896 return false;
897 }
898
899 /// Return how this operation should be treated: either it is legal, needs to
900 /// be promoted to a larger size, needs to be expanded to some other code
901 /// sequence, or the target has a custom expander for it.
902 LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
903 if (VT.isExtended()) return Expand;
904 // If a target-specific SDNode requires legalization, require the target
905 // to provide custom legalization for it.
906 if (Op >= array_lengthof(OpActions[0])) return Custom;
907 return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
908 }
909
910 /// Custom method defined by each target to indicate if an operation which
911 /// may require a scale is supported natively by the target.
912 /// If not, the operation is illegal.
913 virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
914 unsigned Scale) const {
915 return false;
916 }
917
918 /// Some fixed point operations may be natively supported by the target but
919 /// only for specific scales. This method allows for checking
920 /// if the width is supported by the target for a given operation that may
921 /// depend on scale.
922 LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
923 unsigned Scale) const {
924 auto Action = getOperationAction(Op, VT);
925 if (Action != Legal)
926 return Action;
927
928 // This operation is supported in this type but may only work on specific
929 // scales.
930 bool Supported;
931 switch (Op) {
932 default:
933 llvm_unreachable("Unexpected fixed point operation.")::llvm::llvm_unreachable_internal("Unexpected fixed point operation."
, "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/CodeGen/TargetLowering.h"
, 933)
;
934 case ISD::SMULFIX:
935 case ISD::SMULFIXSAT:
936 case ISD::UMULFIX:
937 case ISD::UMULFIXSAT:
938 case ISD::SDIVFIX:
939 case ISD::UDIVFIX:
940 Supported = isSupportedFixedPointOperation(Op, VT, Scale);
941 break;
942 }
943
944 return Supported ? Action : Expand;
945 }
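// Illustrative sketch, not part of the original header: the two-step query
// implied above. The operation, type and scale are hypothetical; whether the
// scale is accepted is decided by the target's isSupportedFixedPointOperation
// override.
static bool fixedPointLegalSketch(const TargetLoweringBase &TLI, EVT VT) {
  // Legal for the op/type pair *and* supported at this particular scale.
  return TLI.getFixedPointOperationAction(ISD::SMULFIX, VT, /*Scale=*/4) ==
         Legal;
}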
946
947 // If Op is a strict floating-point operation, return the result
948 // of getOperationAction for the equivalent non-strict operation.
949 LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
950 unsigned EqOpc;
951 switch (Op) {
952 default: llvm_unreachable("Unexpected FP pseudo-opcode");
953#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
954 case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
955#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
956 case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
957#include "llvm/IR/ConstrainedOps.def"
958 }
959
960 return getOperationAction(EqOpc, VT);
961 }
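// Illustrative note, not part of the original header: once ConstrainedOps.def
// is expanded, the switch above contains entries of the form
//   case ISD::STRICT_FADD: EqOpc = ISD::FADD; break;
// so the query below simply reports how the equivalent non-strict node is
// handled. The helper name is hypothetical.
static bool strictFPLegalSketch(const TargetLoweringBase &TLI, EVT VT) {
  return TLI.getStrictFPOperationAction(ISD::STRICT_FADD, VT) == Legal;
}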
962
963 /// Return true if the specified operation is legal on this target or can be
964 /// made legal with custom lowering. This is used to help guide high-level
965 /// lowering decisions.
966 bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
967 return (VT == MVT::Other || isTypeLegal(VT)) &&
968 (getOperationAction(Op, VT) == Legal ||
969 getOperationAction(Op, VT) == Custom);
970 }
971
972 /// Return true if the specified operation is legal on this target or can be
973 /// made legal using promotion. This is used to help guide high-level lowering
974 /// decisions.
975 bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
976 return (VT == MVT::Other || isTypeLegal(VT)) &&
977 (getOperationAction(Op, VT) == Legal ||
978 getOperationAction(Op, VT) == Promote);
979 }
980
981 /// Return true if the specified operation is legal on this target or can be
982 /// made legal with custom lowering or using promotion. This is used to help
983 /// guide high-level lowering decisions.
984 bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
985 return (VT == MVT::Other || isTypeLegal(VT)) &&
986 (getOperationAction(Op, VT) == Legal ||
987 getOperationAction(Op, VT) == Custom ||
988 getOperationAction(Op, VT) == Promote);
989 }
990
991 /// Return true if the operation uses custom lowering, regardless of whether
992 /// the type is legal or not.
993 bool isOperationCustom(unsigned Op, EVT VT) const {
994 return getOperationAction(Op, VT) == Custom;
995 }
996
997 /// Return true if lowering to a jump table is allowed.
998 virtual bool areJTsAllowed(const Function *Fn) const {
999 if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
1000 return false;
1001
1002 return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1003 isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
1004 }
1005
1006 /// Check whether the range [Low,High] fits in a machine word.
1007 bool rangeFitsInWord(const APInt &Low, const APInt &High,
1008 const DataLayout &DL) const {
1009 // FIXME: Using the pointer type doesn't seem ideal.
1010 uint64_t BW = DL.getIndexSizeInBits(0u);
1011 uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
1012 return Range <= BW;
1013 }
1014
1015 /// Return true if lowering to a jump table is suitable for a set of case
1016 /// clusters which may contain \p NumCases cases and span a range of \p Range values.
1017 virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
1018 uint64_t Range, ProfileSummaryInfo *PSI,
1019 BlockFrequencyInfo *BFI) const;
1020
1021 /// Return true if lowering to a bit test is suitable for a set of case
1022 /// clusters which contains \p NumDests unique destinations, \p Low and
1023 /// \p High as its lowest and highest case values, and expects \p NumCmps
1024 /// case value comparisons. Check if the number of destinations, comparison
1025 /// metric, and range are all suitable.
1026 bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
1027 const APInt &Low, const APInt &High,
1028 const DataLayout &DL) const {
1029 // FIXME: I don't think NumCmps is the correct metric: a single case and a
1030 // range of cases both require only one branch to lower. Just looking at the
1031 // number of clusters and destinations should be enough to decide whether to
1032 // build bit tests.
1033
1034 // To lower a range with bit tests, the range must fit the bitwidth of a
1035 // machine word.
1036 if (!rangeFitsInWord(Low, High, DL))
1037 return false;
1038
1039 // Decide whether it's profitable to lower this range with bit tests. Each
1040 // destination requires a bit test and branch, and there is an overall range
1041 // check branch. For a small number of clusters, separate comparisons might
1042 // be cheaper, and for many destinations, splitting the range might be
1043 // better.
1044 return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
1045 (NumDests == 3 && NumCmps >= 6);
1046 }
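// Illustrative worked example, not part of the original header: with 64-bit
// pointers, a switch whose 5 case comparisons reach 2 unique destinations and
// whose values span Low = 10 to High = 40 passes rangeFitsInWord (a range of
// 31 <= 64) and satisfies (NumDests == 2 && NumCmps >= 5), so bit tests would
// be considered profitable here.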
1047
1048 /// Return true if the specified operation is illegal on this target or
1049 /// unlikely to be made legal with custom lowering. This is used to help guide
1050 /// high-level lowering decisions.
1051 bool isOperationExpand(unsigned Op, EVT VT) const {
1052 return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
1053 }
1054
1055 /// Return true if the specified operation is legal on this target.
1056 bool isOperationLegal(unsigned Op, EVT VT) const {
1057 return (VT == MVT::Other || isTypeLegal(VT)) &&
1058 getOperationAction(Op, VT) == Legal;
1059 }
1060
1061 /// Return how this load with extension should be treated: either it is legal,
1062 /// needs to be promoted to a larger size, needs to be expanded to some other
1063 /// code sequence, or the target has a custom expander for it.
1064 LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
1065 EVT MemVT) const {
1066 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1067 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1068 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1069 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
1070 MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
1071 unsigned Shift = 4 * ExtType;
1072 return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
1073 }
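// Illustrative sketch, not part of the original header: the lookup above packs
// one 4-bit LegalizeAction per extension kind into each LoadExtActions entry.
// A stand-alone decoder for such a packed row (parameter types are this
// sketch's own) would be:
static LegalizeAction decodeLoadExtRowSketch(uint32_t PackedRow,
                                             unsigned ExtType) {
  unsigned Shift = 4 * ExtType;                 // 4 bits per extension kind
  return (LegalizeAction)((PackedRow >> Shift) & 0xf);
}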
1074
1075 /// Return true if the specified load with extension is legal on this target.
1076 bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
1077 return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
1078 }
1079
1080 /// Return true if the specified load with extension is legal or custom
1081 /// on this target.
1082 bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
1083 return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
1084 getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
1085 }
1086
1087 /// Return how this store with truncation should be treated: either it is
1088 /// legal, needs to be promoted to a larger size, needs to be expanded to some
1089 /// other code sequence, or the target has a custom expander for it.
1090 LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
1091 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1092 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1093 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1094 assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
1095 "Table isn't big enough!");
1096 return TruncStoreActions[ValI][MemI];
1097 }
1098
1099 /// Return true if the specified store with truncation is legal on this
1100 /// target.
1101 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
1102 return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
1103 }
1104
1105 /// Return true if the specified store with truncation is legal or custom on this
1106 /// target.
1107 bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
1108 return isTypeLegal(ValVT) &&
1109 (getTruncStoreAction(ValVT, MemVT) == Legal ||
1110 getTruncStoreAction(ValVT, MemVT) == Custom);
1111 }
1112
1113 /// Return how the indexed load should be treated: either it is legal, needs
1114 /// to be promoted to a larger size, needs to be expanded to some other code
1115 /// sequence, or the target has a custom expander for it.
1116 LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
1117 return getIndexedModeAction(IdxMode, VT, IMAB_Load);
1118 }
1119
1120 /// Return true if the specified indexed load is legal on this target.
1121 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
1122 return VT.isSimple() &&
1123 (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1124 getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1125 }
1126
1127 /// Return how the indexed store should be treated: either it is legal, needs
1128 /// to be promoted to a larger size, needs to be expanded to some other code
1129 /// sequence, or the target has a custom expander for it.
1130 LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
1131 return getIndexedModeAction(IdxMode, VT, IMAB_Store);
1132 }
1133
1134 /// Return true if the specified indexed store is legal on this target.
1135 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
1136 return VT.isSimple() &&
1137 (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1138 getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1139 }
1140
1141 /// Return how the indexed masked load should be treated: either it is legal, needs
1142 /// to be promoted to a larger size, needs to be expanded to some other code
1143 /// sequence, or the target has a custom expander for it.
1144 LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
1145 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
1146 }
1147
1148 /// Return true if the specified indexed masked load is legal on this target.
1149 bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
1150 return VT.isSimple() &&
1151 (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1152 getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1153 }
1154
1155 /// Return how the indexed masked store should be treated: either it is legal, needs
1156 /// to be promoted to a larger size, needs to be expanded to some other code
1157 /// sequence, or the target has a custom expander for it.
1158 LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
1159 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
1160 }
1161
1163 /// Return true if the specified indexed masked store is legal on this target.
1163 bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
1164 return VT.isSimple() &&
1165 (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1166 getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1167 }
1168
1169 /// Return how the condition code should be treated: either it is legal, needs
1170 /// to be expanded to some other code sequence, or the target has a custom
1171 /// expander for it.
1172 LegalizeAction
1173 getCondCodeAction(ISD::CondCode CC, MVT VT) const {
1174 assert((unsigned)CC < array_lengthof(CondCodeActions) &&
1175 ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
1176 "Table isn't big enough!");
1177 // See setCondCodeAction for how this is encoded.
1178 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1179 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
1180 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
1181 assert(Action != Promote && "Can't promote condition code!");
1182 return Action;
1183 }
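// Illustrative worked example, not part of the original header: per the read
// above, each 32-bit CondCodeActions word packs eight value types at 4 bits
// apiece. For a hypothetical VT.SimpleTy of 13, the entry lives in word
// 13 >> 3 == 1 at shift 4 * (13 & 0x7) == 20, i.e. the action is
// (CondCodeActions[CC][1] >> 20) & 0xF.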
1184
1185 /// Return true if the specified condition code is legal on this target.
1186 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
1187 return getCondCodeAction(CC, VT) == Legal;
1188 }
1189
1190 /// Return true if the specified condition code is legal or custom on this
1191 /// target.
1192 bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
1193 return getCondCodeAction(CC, VT) == Legal ||
1194 getCondCodeAction(CC, VT) == Custom;
1195 }
1196
1197 /// If the action for this operation is to promote, this method returns the
1198 /// ValueType to promote to.
1199 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
1200 assert(getOperationAction(Op, VT) == Promote &&
1201 "This operation isn't promoted!");
1202
1203 // See if this has an explicit type specified.
1204 std::map<std::pair<unsigned, MVT::SimpleValueType>,
1205 MVT::SimpleValueType>::const_iterator PTTI =
1206 PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
1207 if (PTTI != PromoteToType.end()) return PTTI->second;
1208
1209 assert((VT.isInteger() || VT.isFloatingPoint()) &&
1210 "Cannot autopromote this type, add it with AddPromotedToType.");
1211
1212 MVT NVT = VT;
1213 do {
1214 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
1215 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
1216 "Didn't find type to promote to!");
1217 } while (!isTypeLegal(NVT) ||
1218 getOperationAction(Op, NVT) == Promote);
1219 return NVT;
1220 }
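// Illustrative sketch, not part of the original header: a guarded use of the
// promotion query above. The assertion in getTypeToPromoteTo makes the Promote
// check mandatory before calling it; the helper name is hypothetical.
static MVT promotedTypeSketch(const TargetLoweringBase &TLI, unsigned Op,
                              MVT VT) {
  if (TLI.getOperationAction(Op, VT) == Promote)
    return TLI.getTypeToPromoteTo(Op, VT); // next larger legal candidate
  return VT;                               // no promotion required
}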
1221
1222 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
1223 /// operations except for the pointer size. If AllowUnknown is true, this
1224 /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
1225 /// otherwise it will assert.
1226 EVT getValueType(const DataLayout &DL, Type *Ty,
1227 bool AllowUnknown = false) const {
1228 // Lower scalar pointers to native pointer types.
1229 if (auto *PTy = dyn_cast<PointerType>(Ty))
1230 return getPointerTy(DL, PTy->getAddressSpace());
1231
1232 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1233 Type *EltTy = VTy->getElementType();
1234 // Lower vectors of pointers to native pointer types.
1235 if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1236 EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
1237 EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1238 }
1239 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1240 VTy->getElementCount());
1241 }
1242
1243 return EVT::getEVT(Ty, AllowUnknown);
1244 }
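// Illustrative sketch, not part of the original header: the lowering above
// maps IR pointer types onto the target's native pointer VT, so on a typical
// 64-bit target an i8* argument would come back as MVT::i64. DL and Ty are
// assumed to be supplied by the caller.
static EVT irTypeToEVTSketch(const TargetLoweringBase &TLI,
                             const DataLayout &DL, Type *Ty) {
  return TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
}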
1245
1246 EVT getMemValueType(const DataLayout &DL, Type *Ty,
1247 bool AllowUnknown = false) const {
1248 // Lower scalar pointers to native pointer types.
1249 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
1250 return getPointerMemTy(DL, PTy->getAddressSpace());
1251 else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1252 Type *Elm = VTy->getElementType();
1253 if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
1254 EVT PointerTy(getPointerMemTy(DL, PT->getAddressSpace()));
1255 Elm = PointerTy.getTypeForEVT(Ty->getContext());
1256 }
1257 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
1258 VTy->getElementCount());
1259 }
1260
1261 return getValueType(DL, Ty, AllowUnknown);
1262 }
1263
1264
1265 /// Return the MVT corresponding to this LLVM type. See getValueType.
1266 MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
1267 bool AllowUnknown = false) const {
1268 return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1269 }
1270
1271 /// Return the desired alignment for ByVal or InAlloca aggregate function
1272 /// arguments in the caller parameter area. This is the actual alignment, not
1273 /// its logarithm.
1274 virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1275
1276 /// Return the type of registers that this ValueType will eventually require.
1277 MVT getRegisterType(MVT VT) const {
1278 assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
1279 return RegisterTypeForVT[VT.SimpleTy];
1280 }
1281
1282 /// Return the type of registers that this ValueType will eventually require.
1283 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1284 if (VT.isSimple()) {
1285 assert((unsigned)VT.getSimpleVT().SimpleTy <
1286 array_lengthof(RegisterTypeForVT));
1287 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
1288 }
1289 if (VT.isVector()) {
1290 EVT VT1;
1291 MVT RegisterVT;
1292 unsigned NumIntermediates;
1293 (void)getVectorTypeBreakdown(Context, VT, VT1,
1294 NumIntermediates, RegisterVT);
1295 return RegisterVT;
1296 }
1297 if (VT.isInteger()) {
1298 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1299 }
1300 llvm_unreachable("Unsupported extended type!")::llvm::llvm_unreachable_internal("Unsupported extended type!"
, "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/CodeGen/TargetLowering.h"
, 1300)
;
1301 }
1302
1303 /// Return the number of registers that this ValueType will eventually
1304 /// require.
1305 ///
1306 /// This is one for any types promoted to live in larger registers, but may be
1307 /// more than one for types (like i64) that are split into pieces. For types
1308 /// like i140, which are first promoted then expanded, it is the number of
1309 /// registers needed to hold all the bits of the original type. For an i140
1310 /// on a 32 bit machine this means 5 registers.
1311 unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
1312 if (VT.isSimple()) {
1313 assert((unsigned)VT.getSimpleVT().SimpleTy <
1314 array_lengthof(NumRegistersForVT));
1315 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1316 }
1317 if (VT.isVector()) {
1318 EVT VT1;
1319 MVT VT2;
1320 unsigned NumIntermediates;
1321 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1322 }
1323 if (VT.isInteger()) {
1324 unsigned BitWidth = VT.getSizeInBits();
1325 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1326 return (BitWidth + RegWidth - 1) / RegWidth;
1327 }
1328 llvm_unreachable("Unsupported extended type!");
1329 }
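// Illustrative worked example, not part of the original header: for the i140
// case mentioned above on a 32-bit target, BitWidth = 140 and RegWidth = 32,
// so the integer path computes (140 + 32 - 1) / 32 == 5 registers.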
1330
1331 /// Certain combinations of ABIs, Targets and features require that types
1332 /// are legal for some operations and not for other operations.
1333 /// For MIPS all vector types must be passed through the integer register set.
1334 virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
1335 CallingConv::ID CC, EVT VT) const {
1336 return getRegisterType(Context, VT);
1337 }
1338
1339 /// Certain targets require unusual breakdowns of certain types. For MIPS,
1340 /// this occurs when a vector type is used, as vectors are passed through the
1341 /// integer register set.
1342 virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1343 CallingConv::ID CC,
1344 EVT VT) const {
1345 return getNumRegisters(Context, VT);
1346 }
1347
1348 /// Certain targets have context-sensitive alignment requirements, where one
1349 /// type has the alignment requirement of another type.
1350 virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
1351 DataLayout DL) const {
1352 return Align(DL.getABITypeAlignment(ArgTy));
1353 }
1354
1355 /// If true, then instruction selection should seek to shrink the FP constant
1356 /// of the specified type to a smaller type in order to save space and / or
1357 /// reduce runtime.
1358 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1359
1360 /// Return true if it is profitable to reduce a load to a smaller type.
1361 /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
1362 virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
1363 EVT NewVT) const {
1364 // By default, assume that it is cheaper to extract a subvector from a wide
1365 // vector load rather than creating multiple narrow vector loads.
1366 if (NewVT.isVector() && !Load->hasOneUse())
1367 return false;
1368
1369 return true;
1370 }
1371
1372 /// When splitting a value of the specified type into parts, does the Lo
1373 /// or Hi part come first? This usually follows the endianness, except
1374 /// for ppcf128, where the Hi part always comes first.
1375 bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1376 return DL.isBigEndian() || VT == MVT::ppcf128;
1377 }
1378
1379 /// If true, the target has custom DAG combine transformations that it can
1380 /// perform for the specified node.
1381 bool hasTargetDAGCombine(ISD::NodeType NT) const {
1382 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1383 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1384 }
1385
1386 unsigned getGatherAllAliasesMaxDepth() const {
1387 return GatherAllAliasesMaxDepth;
1388 }
1389
1390 /// Returns the size of the platform's va_list object.
1391 virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1392 return getPointerTy(DL).getSizeInBits();
1393 }
1394
1395 /// Get maximum # of store operations permitted for llvm.memset
1396 ///
1397 /// This function returns the maximum number of store operations permitted
1398 /// to replace a call to llvm.memset. The value is set by the target at the
1399 /// performance threshold for such a replacement. If OptSize is true,
1400 /// return the limit for functions that have OptSize attribute.
1401 unsigned getMaxStoresPerMemset(bool OptSize) const {
1402 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1403 }
1404
1405 /// Get maximum # of store operations permitted for llvm.memcpy
1406 ///
1407 /// This function returns the maximum number of store operations permitted
1408 /// to replace a call to llvm.memcpy. The value is set by the target at the
1409 /// performance threshold for such a replacement. If OptSize is true,
1410 /// return the limit for functions that have OptSize attribute.
1411 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1412 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1413 }
1414
1415 /// \brief Get maximum # of store operations to be glued together
1416 ///
1417 /// This function returns the maximum number of store operations permitted
1418 /// to glue together during lowering of llvm.memcpy. The value is set by
1419 /// the target at the performance threshold for such a replacement.
1420 virtual unsigned getMaxGluedStoresPerMemcpy() const {
1421 return MaxGluedStoresPerMemcpy;
1422 }
1423
1424 /// Get maximum # of load operations permitted for memcmp
1425 ///
1426 /// This function returns the maximum number of load operations permitted
1427 /// to replace a call to memcmp. The value is set by the target at the
1428 /// performance threshold for such a replacement. If OptSize is true,
1429 /// return the limit for functions that have OptSize attribute.
1430 unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1431 return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1432 }
1433
1434 /// Get maximum # of store operations permitted for llvm.memmove
1435 ///
1436 /// This function returns the maximum number of store operations permitted
1437 /// to replace a call to llvm.memmove. The value is set by the target at the
1438 /// performance threshold for such a replacement. If OptSize is true,
1439 /// return the limit for functions that have OptSize attribute.
1440 unsigned getMaxStoresPerMemmove(bool OptSize) const {
1441 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1442 }
1443
1444 /// Determine if the target supports unaligned memory accesses.
1445 ///
1446 /// This function returns true if the target allows unaligned memory accesses
1447 /// of the specified type in the given address space. If true, it also returns
1448 /// whether the unaligned memory access is "fast" in the last argument by
1449 /// reference. This is used, for example, in situations where an array
1450 /// copy/move/set is converted to a sequence of store operations. Its use
1451 /// helps to ensure that such replacements don't generate code that causes an
1452 /// alignment error (trap) on the target machine.
1453 virtual bool allowsMisalignedMemoryAccesses(
1454 EVT, unsigned AddrSpace = 0, unsigned Align = 1,
1455 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1456 bool * /*Fast*/ = nullptr) const {
1457 return false;
1458 }
1459
1460 /// LLT handling variant.
1461 virtual bool allowsMisalignedMemoryAccesses(
1462 LLT, unsigned AddrSpace = 0, unsigned Align = 1,
1463 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1464 bool * /*Fast*/ = nullptr) const {
1465 return false;
1466 }
1467
1468 /// This function returns true if the memory access is aligned or if the
1469 /// target allows this specific unaligned memory access. If the access is
1470 /// allowed, the optional final parameter returns if the access is also fast
1471 /// (as defined by the target).
1472 bool allowsMemoryAccessForAlignment(
1473 LLVMContext &Context, const DataLayout &DL, EVT VT,
1474 unsigned AddrSpace = 0, unsigned Alignment = 1,
1475 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1476 bool *Fast = nullptr) const;
1477
1478 /// Return true if the memory access of this type is aligned or if the target
1479 /// allows this specific unaligned access for the given MachineMemOperand.
1480 /// If the access is allowed, the optional final parameter returns if the
1481 /// access is also fast (as defined by the target).
1482 bool allowsMemoryAccessForAlignment(LLVMContext &Context,
1483 const DataLayout &DL, EVT VT,
1484 const MachineMemOperand &MMO,
1485 bool *Fast = nullptr) const;
1486
1487 /// Return true if the target supports a memory access of this type for the
1488 /// given address space and alignment. If the access is allowed, the optional
1489 /// final parameter returns if the access is also fast (as defined by the
1490 /// target).
1491 virtual bool
1492 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1493 unsigned AddrSpace = 0, unsigned Alignment = 1,
1494 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1495 bool *Fast = nullptr) const;
1496
1497 /// Return true if the target supports a memory access of this type for the
1498 /// given MachineMemOperand. If the access is allowed, the optional
1499 /// final parameter returns if the access is also fast (as defined by the
1500 /// target).
1501 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1502 const MachineMemOperand &MMO,
1503 bool *Fast = nullptr) const;
1504
1505 /// Returns the target specific optimal type for load and store operations as
1506 /// a result of memset, memcpy, and memmove lowering.
1507 ///
1508 /// If DstAlign is zero, that means the destination can be assumed to satisfy
1509 /// any alignment constraint. Similarly, if SrcAlign is zero, it means there isn't
1510 /// a need to check it against the alignment requirement, probably because the
1511 /// source does not need to be loaded. If 'IsMemset' is true, that means it's
1512 /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
1513 /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
1514 /// does not need to be loaded. It returns EVT::Other if the type should be
1515 /// determined using generic target-independent logic.
1516 virtual EVT
1517 getOptimalMemOpType(uint64_t /*Size*/, unsigned /*DstAlign*/,
1518 unsigned /*SrcAlign*/, bool /*IsMemset*/,
1519 bool /*ZeroMemset*/, bool /*MemcpyStrSrc*/,
1520 const AttributeList & /*FuncAttributes*/) const {
1521 return MVT::Other;
1522 }
1523
1524
1525 /// LLT returning variant.
1526 virtual LLT
1527 getOptimalMemOpLLT(uint64_t /*Size*/, unsigned /*DstAlign*/,
1528 unsigned /*SrcAlign*/, bool /*IsMemset*/,
1529 bool /*ZeroMemset*/, bool /*MemcpyStrSrc*/,
1530 const AttributeList & /*FuncAttributes*/) const {
1531 return LLT();
1532 }
1533
1534 /// Returns true if it's safe to use load / store of the specified type to
1535 /// expand memcpy / memset inline.
1536 ///
1537 /// This is mostly true for all types except for some special cases. For
1538 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1539 /// fstpl which also does type conversion. Note the specified type doesn't
1540 /// have to be legal as the hook is used before type legalization.
1541 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1542
1543 /// Return lower limit for number of blocks in a jump table.
1544 virtual unsigned getMinimumJumpTableEntries() const;
1545
1546 /// Return lower limit of the density in a jump table.
1547 unsigned getMinimumJumpTableDensity(bool OptForSize) const;
1548
1549 /// Return upper limit for number of entries in a jump table.
1550 /// Zero if no limit.
1551 unsigned getMaximumJumpTableSize() const;
1552
1553 virtual bool isJumpTableRelative() const {
1554 return TM.isPositionIndependent();
1555 }
1556
1557 /// If a physical register, this specifies the register that
1558 /// llvm.stacksave/llvm.stackrestore should save and restore.
1559 unsigned getStackPointerRegisterToSaveRestore() const {
1560 return StackPointerRegisterToSaveRestore;
1561 }
1562
1563 /// If a physical register, this returns the register that receives the
1564 /// exception address on entry to an EH pad.
1565 virtual unsigned
1566 getExceptionPointerRegister(const Constant *PersonalityFn) const {
1567 // 0 is guaranteed to be the NoRegister value on all targets
1568 return 0;
1569 }
1570
1571 /// If a physical register, this returns the register that receives the
1572 /// exception typeid on entry to a landing pad.
1573 virtual unsigned
1574 getExceptionSelectorRegister(const Constant *PersonalityFn) const {
1575 // 0 is guaranteed to be the NoRegister value on all targets
1576 return 0;
1577 }
1578
1579 virtual bool needsFixedCatchObjects() const {
1580 report_fatal_error("Funclet EH is not implemented for this target");
1581 }
1582
1583 /// Return the minimum stack alignment of an argument.
1584 Align getMinStackArgumentAlignment() const {
1585 return MinStackArgumentAlignment;
1586 }
1587
1588 /// Return the minimum function alignment.
1589 Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
1590
1591 /// Return the preferred function alignment.
1592 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
1593
1594 /// Return the preferred loop alignment.
1595 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
1596 return PrefLoopAlignment;
1597 }
1598
1599 /// Should loops be aligned even when the function is marked OptSize (but not
1600 /// MinSize).
1601 virtual bool alignLoopsWithOptSize() const {
1602 return false;
1603 }
1604
1605 /// If the target has a standard location for the stack protector guard,
1606 /// returns the address of that location. Otherwise, returns nullptr.
1607 /// DEPRECATED: please override useLoadStackGuardNode and customize
1608 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
1609 virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
1610
1611 /// Inserts necessary declarations for SSP (stack protection) purpose.
1612 /// Should be used only when getIRStackGuard returns nullptr.
1613 virtual void insertSSPDeclarations(Module &M) const;
1614
1615 /// Return the variable that's previously inserted by insertSSPDeclarations,
1616 /// if any, otherwise return nullptr. Should be used only when
1617 /// getIRStackGuard returns nullptr.
1618 virtual Value *getSDagStackGuard(const Module &M) const;
1619
1620 /// If this function returns true, stack protection checks should XOR the
1621 /// frame pointer (or whichever pointer is used to address locals) into the
1622 /// stack guard value before checking it. getIRStackGuard must return nullptr
1623 /// if this returns true.
1624 virtual bool useStackGuardXorFP() const { return false; }
1625
1626 /// If the target has a standard stack protection check function that
1627 /// performs validation and error handling, returns the function. Otherwise,
1628 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1629 /// Should be used only when getIRStackGuard returns nullptr.
1630 virtual Function *getSSPStackGuardCheck(const Module &M) const;
1631
1632protected:
1633 Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
1634 bool UseTLS) const;
1635
1636public:
1637 /// Returns the target-specific address of the unsafe stack pointer.
1638 virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
1639
1640 /// Returns the name of the symbol used to emit stack probes or the empty
1641 /// string if not applicable.
1642 virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
1643 return "";
1644 }
1645
1646 /// Returns true if a cast between SrcAS and DestAS is a noop.
1647 virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1648 return false;
1649 }
1650
1651 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1652 /// are happy to sink it into basic blocks. A cast may be free, but not
1653 /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
1654 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1655 return isNoopAddrSpaceCast(SrcAS, DestAS);
1656 }
1657
1658 /// Return true if the pointer arguments to CI should be aligned by aligning
1659 /// the object whose address is being passed. If so then MinSize is set to the
1660 /// minimum size the object must be to be aligned and PrefAlign is set to the
1661 /// preferred alignment.
1662 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
1663 unsigned & /*PrefAlign*/) const {
1664 return false;
1665 }
1666
1667 //===--------------------------------------------------------------------===//
1668 /// \name Helpers for TargetTransformInfo implementations
1669 /// @{
1670
1671 /// Get the ISD node that corresponds to the Instruction class opcode.
1672 int InstructionOpcodeToISD(unsigned Opcode) const;
1673
1674 /// Estimate the cost of type-legalization and the legalized type.
1675 std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
1676 Type *Ty) const;
1677
1678 /// @}
1679
1680 //===--------------------------------------------------------------------===//
1681 /// \name Helpers for atomic expansion.
1682 /// @{
1683
1684 /// Returns the maximum atomic operation size (in bits) supported by
1685 /// the backend. Atomic operations greater than this size (as well
1686 /// as ones that are not naturally aligned), will be expanded by
1687 /// AtomicExpandPass into an __atomic_* library call.
1688 unsigned getMaxAtomicSizeInBitsSupported() const {
1689 return MaxAtomicSizeInBitsSupported;
1690 }
1691
1692 /// Returns the size of the smallest cmpxchg or ll/sc instruction
1693 /// the backend supports. Any smaller operations are widened in
1694 /// AtomicExpandPass.
1695 ///
1696 /// Note that *unlike* operations above the maximum size, atomic ops
1697 /// are still natively supported below the minimum; they just
1698 /// require a more complex expansion.
1699 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
1700
1701 /// Whether the target supports unaligned atomic operations.
1702 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
1703
1704 /// Whether AtomicExpandPass should automatically insert fences and reduce
1705 /// ordering for this atomic. This should be true for most architectures with
1706 /// weak memory ordering. Defaults to false.
1707 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
1708 return false;
1709 }
1710
1711 /// Perform a load-linked operation on Addr, returning a "Value *" with the
1712 /// corresponding pointee type. This may entail some non-trivial operations to
1713 /// truncate or reconstruct types that will be illegal in the backend. See
1714 /// ARMISelLowering for an example implementation.
1715 virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
1716 AtomicOrdering Ord) const {
1717 llvm_unreachable("Load linked unimplemented on this target")::llvm::llvm_unreachable_internal("Load linked unimplemented on this target"
, "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/CodeGen/TargetLowering.h"
, 1717)
;
1718 }
1719
1720 /// Perform a store-conditional operation to Addr. Return the status of the
1721 /// store. This should be 0 if the store succeeded, non-zero otherwise.
1722 virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
1723 Value *Addr, AtomicOrdering Ord) const {
1724 llvm_unreachable("Store conditional unimplemented on this target")::llvm::llvm_unreachable_internal("Store conditional unimplemented on this target"
, "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/CodeGen/TargetLowering.h"
, 1724)
;
1725 }
1726
1727 /// Perform a masked atomicrmw using a target-specific intrinsic. This
1728 /// represents the core LL/SC loop which will be lowered at a late stage by
1729 /// the backend.
1730 virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
1731 AtomicRMWInst *AI,
1732 Value *AlignedAddr, Value *Incr,
1733 Value *Mask, Value *ShiftAmt,
1734 AtomicOrdering Ord) const {
1735 llvm_unreachable("Masked atomicrmw expansion unimplemented on this target")::llvm::llvm_unreachable_internal("Masked atomicrmw expansion unimplemented on this target"
, "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/CodeGen/TargetLowering.h"
, 1735)
;
1736 }
1737
1738 /// Perform a masked cmpxchg using a target-specific intrinsic. This
1739 /// represents the core LL/SC loop which will be lowered at a late stage by
1740 /// the backend.
1741 virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
1742 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
1743 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
1744 llvm_unreachable("Masked cmpxchg expansion unimplemented on this target")::llvm::llvm_unreachable_internal("Masked cmpxchg expansion unimplemented on this target"
, "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/CodeGen/TargetLowering.h"
, 1744)
;
1745 }
1746
1747 /// Inserts in the IR a target-specific intrinsic specifying a fence.
1748 /// It is called by AtomicExpandPass before expanding an
1749 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
1750 /// if shouldInsertFencesForAtomic returns true.
1751 ///
1752 /// Inst is the original atomic instruction, prior to other expansions that
1753 /// may be performed.
1754 ///
1755 /// This function should either return a nullptr, or a pointer to an IR-level
1756 /// Instruction*. Even complex fence sequences can be represented by a
1757 /// single Instruction* through an intrinsic to be lowered later.
1758 /// Backends should override this method to produce a target-specific intrinsic
1759 /// for their fences.
1760 /// FIXME: Please note that the default implementation here in terms of
1761 /// IR-level fences exists for historical/compatibility reasons and is
1762 /// *unsound* ! Fences cannot, in general, be used to restore sequential
1763 /// consistency. For example, consider the following:
1764 /// atomic<int> x = y = 0;
1765 /// int r1, r2, r3, r4;
1766 /// Thread 0:
1767 /// x.store(1);
1768 /// Thread 1:
1769 /// y.store(1);
1770 /// Thread 2:
1771 /// r1 = x.load();
1772 /// r2 = y.load();
1773 /// Thread 3:
1774 /// r3 = y.load();
1775 /// r4 = x.load();
1776 /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
1777 /// seq_cst. But if they are lowered to monotonic accesses, no amount of
1778 /// IR-level fences can prevent it.
1779 /// @{
1780 virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
1781 AtomicOrdering Ord) const {
1782 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
1783 return Builder.CreateFence(Ord);
1784 else
1785 return nullptr;
1786 }
1787
1788 virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
1789 Instruction *Inst,
1790 AtomicOrdering Ord) const {
1791 if (isAcquireOrStronger(Ord))
1792 return Builder.CreateFence(Ord);
1793 else
1794 return nullptr;
1795 }
1796 /// @}
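// Illustrative sketch, not part of the original header: how a target that
// returns true from shouldInsertFencesForAtomic would see the default hooks
// above behave around a seq_cst atomic store. The helper name is hypothetical.
static void fenceExpansionSketch(const TargetLoweringBase &TLI,
                                 IRBuilder<> &Builder, Instruction *AtomicSt) {
  // seq_cst is release-or-stronger, so a leading fence is created; it is also
  // acquire-or-stronger, so a trailing fence is created as well.
  TLI.emitLeadingFence(Builder, AtomicSt,
                       AtomicOrdering::SequentiallyConsistent);
  TLI.emitTrailingFence(Builder, AtomicSt,
                        AtomicOrdering::SequentiallyConsistent);
}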
1797
1798 // Emits code that executes when the comparison result in the ll/sc
1799 // expansion of a cmpxchg instruction is such that the store-conditional will
1800 // not execute. This makes it possible to balance out the load-linked with
1801 // a dedicated instruction, if desired.
1802 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
1803 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
1804 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
1805
1806 /// Returns true if the given (atomic) store should be expanded by the
1807 /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
1808 virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
1809 return false;
1810 }
1811
1812 /// Returns true if arguments should be sign-extended in lib calls.
1813 virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
1814 return IsSigned;
1815 }
1816
1817 /// Returns true if arguments should be extended in lib calls.
1818 virtual bool shouldExtendTypeInLibCall(EVT Type) const {
1819 return true;
1820 }
1821
1822 /// Returns how the given (atomic) load should be expanded by the
1823 /// IR-level AtomicExpand pass.
1824 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
1825 return AtomicExpansionKind::None;
1826 }
1827
1828 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
1829 /// AtomicExpand pass.
1830 virtual AtomicExpansionKind
1831 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
1832 return AtomicExpansionKind::None;
1833 }
1834
1835 /// Returns how the IR-level AtomicExpand pass should expand the given
1836 /// AtomicRMW, if at all. Default is to never expand.
1837 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
1838 return RMW->isFloatingPointOperation() ?
1839 AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
1840 }
1841
1842 /// On some platforms, an AtomicRMW that never actually modifies the value
1843 /// (such as fetch_add of 0) can be turned into a fence followed by an
1844 /// atomic load. This may sound useless, but it makes it possible for the
1845 /// processor to keep the cacheline shared, dramatically improving
1846 /// performance. And such idempotent RMWs are useful for implementing some
1847 /// kinds of locks, see for example (justification + benchmarks):
1848 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
1849 /// This method tries doing that transformation, returning the atomic load if
1850 /// it succeeds, and nullptr otherwise.
1851 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
1852 /// another round of expansion.
1853 virtual LoadInst *
1854 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
1855 return nullptr;
1856 }
1857
1858 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
1859 /// SIGN_EXTEND, or ANY_EXTEND).
1860 virtual ISD::NodeType getExtendForAtomicOps() const {
1861 return ISD::ZERO_EXTEND;
1862 }
1863
1864 /// @}
1865
1866 /// Returns true if we should normalize
1867 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
1868 /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
1869 /// that it saves us from materializing N0 and N1 in an integer register.
1870 /// Targets that are able to perform and/or on flags should return false here.
1871 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
1872 EVT VT) const {
1873 // If a target has multiple condition registers, then it likely has logical
1874 // operations on those registers.
1875 if (hasMultipleConditionRegisters())
1876 return false;
1877 // Only do the transform if the value won't be split into multiple
1878 // registers.
1879 LegalizeTypeAction Action = getTypeAction(Context, VT);
1880 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
1881 Action != TypeSplitVector;
1882 }
1883
1884 virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
1885
1886 /// Return true if a select of constants (select Cond, C1, C2) should be
1887 /// transformed into simple math ops with the condition value. For example:
1888 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
1889 virtual bool convertSelectOfConstantsToMath(EVT VT) const {
1890 return false;
1891 }
1892
1893 /// Return true if it is profitable to transform an integer
1894 /// multiplication-by-constant into simpler operations like shifts and adds.
1895 /// This may be true if the target does not directly support the
1896 /// multiplication operation for the specified type or the sequence of simpler
1897 /// ops is faster than the multiply.
1898 virtual bool decomposeMulByConstant(LLVMContext &Context,
1899 EVT VT, SDValue C) const {
1900 return false;
1901 }
1902
1903 /// Return true if it is more correct/profitable to use strict FP_TO_INT
1904 /// conversion operations - canonicalizing the FP source value instead of
1905 /// converting all cases and then selecting based on value.
1906 /// This may be true if the target throws exceptions for out of bounds
1907 /// conversions or has fast FP CMOV.
1908 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
1909 bool IsSigned) const {
1910 return false;
1911 }
1912
1913 //===--------------------------------------------------------------------===//
1914 // TargetLowering Configuration Methods - These methods should be invoked by
1915 // the derived class constructor to configure this object for the target.
1916 //
1917protected:
1918 /// Specify how the target extends the result of integer and floating point
1919 /// boolean values from i1 to a wider type. See getBooleanContents.
1920 void setBooleanContents(BooleanContent Ty) {
1921 BooleanContents = Ty;
1922 BooleanFloatContents = Ty;
1923 }
1924
1925 /// Specify how the target extends the result of integer and floating point
1926 /// boolean values from i1 to a wider type. See getBooleanContents.
1927 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
1928 BooleanContents = IntTy;
1929 BooleanFloatContents = FloatTy;
1930 }
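    // Usage sketch, not part of the header: a target constructor typically calls
    //   setBooleanContents(ZeroOrOneBooleanContent);
    // to state that boolean results widened from i1 have all high bits cleared
    // (see getBooleanContents for the other schemes).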
1931
1932 /// Specify how the target extends the result of a vector boolean value from a
1933 /// vector of i1 to a wider type. See getBooleanContents.
1934 void setBooleanVectorContents(BooleanContent Ty) {
1935 BooleanVectorContents = Ty;
1936 }
1937
1938 /// Specify the target scheduling preference.
1939 void setSchedulingPreference(Sched::Preference Pref) {
1940 SchedPreferenceInfo = Pref;
1941 }
1942
1943 /// Indicate the minimum number of blocks to generate jump tables.
1944 void setMinimumJumpTableEntries(unsigned Val);
1945
1946 /// Indicate the maximum number of entries in jump tables.
1947 /// Set to zero to generate unlimited jump tables.
1948 void setMaximumJumpTableSize(unsigned);
1949
1950 /// If set to a physical register, this specifies the register that
1951 /// llvm.stacksave/llvm.stackrestore should save and restore.
1952 void setStackPointerRegisterToSaveRestore(unsigned R) {
1953 StackPointerRegisterToSaveRestore = R;
1954 }
1955
1956 /// Tells the code generator that the target has multiple (allocatable)
1957 /// condition registers that can be used to store the results of comparisons
1958 /// for use by selects and conditional branches. With multiple condition
1959 /// registers, the code generator will not aggressively sink comparisons into
1960 /// the blocks of their users.
1961 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
1962 HasMultipleConditionRegisters = hasManyRegs;
1963 }
1964
1965 /// Tells the code generator that the target has BitExtract instructions.
1966 /// The code generator will aggressively sink "shift"s into the blocks of
1967 /// their users if the users will generate "and" instructions which can be
1968 /// combined with "shift" to BitExtract instructions.
1969 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
1970 HasExtractBitsInsn = hasExtractInsn;
1971 }
1972
1973 /// Tells the code generator not to expand logic operations on comparison
1974 /// predicates into separate sequences that increase the amount of flow
1975 /// control.
1976 void setJumpIsExpensive(bool isExpensive = true);
1977
1978 /// Tells the code generator which bitwidths to bypass.
1979 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
1980 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
1981 }
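    // Usage sketch, not part of the header: an x86-like target with a slow 64-bit
    // divider might call
    //   addBypassSlowDiv(64, 32);
    // so small, positive 64-bit div/rem operands are handled with a 32-bit divide.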
1982
1983 /// Add the specified register class as an available regclass for the
1984 /// specified value type. This indicates the selector can handle values of
1985 /// that class natively.
1986 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
1987    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
1988 RegClassForVT[VT.SimpleTy] = RC;
1989 }
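    // Usage sketch, not part of the header: RISC-V-style targets register their
    // general-purpose register class for the native integer type, e.g.
    //   addRegisterClass(MVT::i64, &RISCV::GPRRegClass);
    // after which computeRegisterProperties() derives the dependent tables.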
1990
1991 /// Return the largest legal super-reg register class of the register class
1992 /// for the specified type and its associated "cost".
1993 virtual std::pair<const TargetRegisterClass *, uint8_t>
1994 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
1995
1996 /// Once all of the register classes are added, this allows us to compute
1997 /// derived properties we expose.
1998 void computeRegisterProperties(const TargetRegisterInfo *TRI);
1999
2000 /// Indicate that the specified operation does not work with the specified
2001 /// type and indicate what to do about it. Note that VT may refer to either
2002 /// the type of a result or that of an operand of Op.
2003 void setOperationAction(unsigned Op, MVT VT,
2004 LegalizeAction Action) {
2005    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
2006 OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2007 }
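    // Usage sketch, not part of the header: a target without hardware division
    // would typically write
    //   setOperationAction(ISD::SDIV, MVT::i32, Expand);
    //   setOperationAction(ISD::UDIV, MVT::i32, Expand);
    // so the legalizer lowers those nodes to a libcall or an expanded sequence.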
2008
2009 /// Indicate that the specified load with extension does not work with the
2010 /// specified type and indicate what to do about it.
2011 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2012 LegalizeAction Action) {
2013    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2014           MemVT.isValid() && "Table isn't big enough!");
2015    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2016 unsigned Shift = 4 * ExtType;
2017 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2018 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2019 }
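    // Worked example, not part of the header: for ExtType == ISD::SEXTLOAD (value
    // 2), Shift is 8, so the action occupies bits [11:8] of the 16-bit entry; the
    // mask/or pair above rewrites only that nibble, leaving the actions for the
    // other three load-extension types untouched.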
2020
2021 /// Indicate that the specified truncating store does not work with the
2022 /// specified type and indicate what to do about it.
2023 void setTruncStoreAction(MVT ValVT, MVT MemVT,
2024 LegalizeAction Action) {
2025    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2026 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2027 }
2028
2029 /// Indicate that the specified indexed load does or does not work with the
2030 /// specified type and indicate what to do about it.
2031 ///
2032 /// NOTE: All indexed mode loads are initialized to Expand in
2033 /// TargetLowering.cpp
2034 void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action) {
2035 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2036 }
2037
2038 /// Indicate that the specified indexed store does or does not work with the
2039 /// specified type and indicate what to do about it.
2040 ///
2041 /// NOTE: All indexed mode stores are initialized to Expand in
2042 /// TargetLowering.cpp
2043 void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action) {
2044 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2045 }
2046
2047 /// Indicate that the specified indexed masked load does or does not work with
2048 /// the specified type and indicate what to do about it.
2049 ///
2050 /// NOTE: All indexed mode masked loads are initialized to Expand in
2051 /// TargetLowering.cpp
2052 void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2053 LegalizeAction Action) {
2054 setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2055 }
2056
2057 /// Indicate that the specified indexed masked store does or does not work
2058 /// with the specified type and indicate what to do about it.
2059 ///
2060 /// NOTE: All indexed mode masked stores are initialized to Expand in
2061 /// TargetLowering.cpp
2062 void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2063 LegalizeAction Action) {
2064 setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2065 }
2066
2067 /// Indicate that the specified condition code is or isn't supported on the
2068 /// target and indicate what to do about it.
2069 void setCondCodeAction(ISD::CondCode CC, MVT VT,
2070 LegalizeAction Action) {
2071    assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
2072           "Table isn't big enough!");
2073    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2074    /// The lower 3 bits of the SimpleTy select the Nth 4-bit field within the
2075    /// 32-bit value, and the remaining upper bits index into the second
2076    /// dimension of the array to select which 32-bit value to use.
2077 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2078 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2079 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2080 }
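    // Worked example, not part of the header: for a SimpleTy of 11 the entry is
    // CondCodeActions[CC][1] (11 >> 3 == 1) and Shift is 4 * (11 & 0x7) == 12,
    // so the action is stored in bits [15:12] of that 32-bit word.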
2081
2082 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2083 /// to trying a larger integer/fp until it can find one that works. If that
2084 /// default is insufficient, this method can be used by the target to override
2085 /// the default.
2086 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2087 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2088 }
2089
2090 /// Convenience method to set an operation to Promote and specify the type
2091 /// in a single call.
2092 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2093 setOperationAction(Opc, OrigVT, Promote);
2094 AddPromotedToType(Opc, OrigVT, DestVT);
2095 }
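    // Usage sketch, not part of the header: a hypothetical target that only has
    // 32-bit logic instructions could write
    //   setOperationPromotedToType(ISD::AND, MVT::i8, MVT::i32);
    // instead of calling setOperationAction and AddPromotedToType separately.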
2096
2097 /// Targets should invoke this method for each target independent node that
2098 /// they want to provide a custom DAG combiner for by implementing the
2099 /// PerformDAGCombine virtual method.
2100 void setTargetDAGCombine(ISD::NodeType NT) {
2101    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
2102 TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
2103 }
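    // Worked example, not part of the header: each opcode maps to bit (NT & 7) of
    // byte (NT >> 3) in TargetDAGCombineArray, so setTargetDAGCombine(ISD::ADD)
    // flips a single bit that the DAG combiner later checks before invoking the
    // target's PerformDAGCombine hook.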
2104
2105 /// Set the target's minimum function alignment.
2106 void setMinFunctionAlignment(Align Alignment) {
2107 MinFunctionAlignment = Alignment;
2108 }
2109
2110 /// Set the target's preferred function alignment. This should be set if
2111 /// there is a performance benefit to higher-than-minimum alignment.
2112 void setPrefFunctionAlignment(Align Alignment) {
2113 PrefFunctionAlignment = Alignment;
2114 }
2115
2116 /// Set the target's preferred loop alignment. The default alignment of one
2117 /// means the target does not care about loop alignment. The target may also
2118 /// override getPrefLoopAlignment to provide per-loop values.
2119 void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2120
2121 /// Set the minimum stack alignment of an argument.
2122 void setMinStackArgumentAlignment(Align Alignment) {
2123 MinStackArgumentAlignment = Alignment;
2124 }
2125
2126 /// Set the maximum atomic operation size supported by the
2127 /// backend. Atomic operations greater than this size (as well as
2128 /// ones that are not naturally aligned) will be expanded by
2129 /// AtomicExpandPass into an __atomic_* library call.
2130 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2131 MaxAtomicSizeInBitsSupported = SizeInBits;
2132 }
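    // Usage sketch, not part of the header: RISC-V without the A extension calls
    //   setMaxAtomicSizeInBitsSupported(0);
    // so that every atomic operation is expanded into an __atomic_* library call.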
2133
2134 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2135 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2136 MinCmpXchgSizeInBits = SizeInBits;
2137 }
2138
2139 /// Sets whether unaligned atomic operations are supported.
2140 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2141 SupportsUnalignedAtomics = UnalignedSupported;
2142 }
2143
2144public:
2145 //===--------------------------------------------------------------------===//
2146 // Addressing mode description hooks (used by LSR etc).
2147 //
2148
2149 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2150 /// instructions reading the address. This allows as much computation as
2151 /// possible to be done in the address mode for that operand. This hook lets
2152 /// targets also pass back when this should be done on intrinsics which
2153 /// load/store.
2154 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2155 SmallVectorImpl<Value*> &/*Ops*/,
2156 Type *&/*AccessTy*/) const {
2157 return false;
2158 }
2159
2160 /// This represents an addressing mode of:
2161 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2162 /// If BaseGV is null, there is no BaseGV.
2163 /// If BaseOffs is zero, there is no base offset.
2164 /// If HasBaseReg is false, there is no base register.
2165 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
2166 /// no scale.
2167 struct AddrMode {
2168 GlobalValue *BaseGV = nullptr;
2169 int64_t BaseOffs = 0;
2170 bool HasBaseReg = false;
2171 int64_t Scale = 0;
2172 AddrMode() = default;
2173 };
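    // Worked example, not part of the header: the x86 address [%rbx + 4*%rcx + 16]
    // corresponds to AddrMode{BaseGV = nullptr, BaseOffs = 16, HasBaseReg = true,
    // Scale = 4}, with %rbx as the base register and %rcx as the scaled register.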
2174
2175 /// Return true if the addressing mode represented by AM is legal for this
2176 /// target, for a load/store of the specified type.
2177 ///
2178 /// The type may be VoidTy, in which case only return true if the addressing
2179 /// mode is legal for a load/store of any legal type. TODO: Handle
2180 /// pre/postinc as well.
2181 ///
2182 /// If the address space cannot be determined, it will be -1.
2183 ///
2184 /// TODO: Remove default argument
2185 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2186 Type *Ty, unsigned AddrSpace,
2187 Instruction *I = nullptr) const;
2188
2189 /// Return the cost of the scaling factor used in the addressing mode
2190 /// represented by AM for this target, for a load/store of the specified type.
2191 ///
2192 /// If the AM is supported, the return value must be >= 0.
2193 /// If the AM is not supported, it returns a negative value.
2194 /// TODO: Handle pre/postinc as well.
2195 /// TODO: Remove default argument
2196 virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
2197 Type *Ty, unsigned AS = 0) const {
2198 // Default: assume that any scaling factor used in a legal AM is free.
2199 if (isLegalAddressingMode(DL, AM, Ty, AS))
2200 return 0;
2201 return -1;
2202 }
2203
2204 /// Return true if the specified immediate is a legal icmp immediate, that is,
2205 /// the target has icmp instructions which can compare a register against the
2206 /// immediate without having to materialize the immediate into a register.
2207 virtual bool isLegalICmpImmediate(int64_t) const {
2208 return true;
2209 }
2210
2211 /// Return true if the specified immediate is a legal add immediate, that is, the
2212 /// target has add instructions which can add a register with the immediate
2213 /// without having to materialize the immediate into a register.
2214 virtual bool isLegalAddImmediate(int64_t) const {
2215 return true;
2216 }
2217
2218 /// Return true if the specified immediate is legal for the value input of a
2219 /// store instruction.
2220 virtual bool isLegalStoreImmediate(int64_t Value) const {
2221 // Default implementation assumes that at least 0 works since it is likely
2222 // that a zero register exists or a zero immediate is allowed.
2223 return Value == 0;
2224 }
2225
2226 /// Return true if it's significantly cheaper to shift a vector by a uniform
2227 /// scalar than by an amount which will vary across each lane. On x86, for
2228 /// example, there is a "psllw" instruction for the former case, but no simple
2229 /// instruction for a general "a << b" operation on vectors.
2230 virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
2231 return false;
2232 }
2233
2234 /// Returns true if the opcode is a commutative binary operation.
2235 virtual bool isCommutativeBinOp(unsigned Opcode) const {
2236 // FIXME: This should get its info from the td file.
2237 switch (Opcode) {
2238 case ISD::ADD:
2239 case ISD::SMIN:
2240 case ISD::SMAX:
2241 case ISD::UMIN:
2242 case ISD::UMAX:
2243 case ISD::MUL:
2244 case ISD::MULHU:
2245 case ISD::MULHS:
2246 case ISD::SMUL_LOHI:
2247 case ISD::UMUL_LOHI:
2248 case ISD::FADD:
2249 case ISD::FMUL:
2250 case ISD::AND:
2251 case ISD::OR:
2252 case ISD::XOR:
2253 case ISD::SADDO:
2254 case ISD::UADDO:
2255 case ISD::ADDC:
2256 case ISD::ADDE:
2257 case ISD::SADDSAT:
2258 case ISD::UADDSAT:
2259 case ISD::FMINNUM:
2260 case ISD::FMAXNUM:
2261 case ISD::FMINNUM_IEEE:
2262 case ISD::FMAXNUM_IEEE:
2263 case ISD::FMINIMUM:
2264 case ISD::FMAXIMUM:
2265 return true;
2266 default: return false;
2267 }
2268 }
2269
2270 /// Return true if the node is a math/logic binary operator.
2271 virtual bool isBinOp(unsigned Opcode) const {
2272 // A commutative binop must be a binop.
2273 if (isCommutativeBinOp(Opcode))
2274 return true;
2275 // These are non-commutative binops.
2276 switch (Opcode) {
2277 case ISD::SUB:
2278 case ISD::SHL:
2279 case ISD::SRL:
2280 case ISD::SRA:
2281 case ISD::SDIV:
2282 case ISD::UDIV:
2283 case ISD::SREM:
2284 case ISD::UREM:
2285 case ISD::FSUB:
2286 case ISD::FDIV:
2287 case ISD::FREM:
2288 return true;
2289 default:
2290 return false;
2291 }
2292 }
2293
2294 /// Return true if it's free to truncate a value of type FromTy to type
2295 /// ToTy. e.g. on x86 it's free to truncate an i32 value in register EAX to i16
2296 /// by referencing its sub-register AX.
2297 /// Targets must return false when FromTy <= ToTy.
2298 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2299 return false;
2300 }
2301
2302 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2303 /// whether a call is in tail position. Typically this means that both results
2304 /// would be assigned to the same register or stack slot, but it could mean
2305 /// the target performs adequate checks of its own before proceeding with the
2306 /// tail call. Targets must return false when FromTy <= ToTy.
2307 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2308 return false;
2309 }
2310
2311 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
2312 return false;
2313 }
2314
2315 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2316
2317 /// Return true if the extension represented by \p I is free.
2318 /// Unlike the is[Z|FP]ExtFree family, which is based on types,
2319 /// this method can use the context provided by \p I to decide
2320 /// whether or not \p I is free.
2321 /// This method extends the behavior of the is[Z|FP]ExtFree family.
2322 /// In other words, if is[Z|FP]ExtFree returns true, then this method
2323 /// returns true as well. The converse is not true.
2324 /// The target can perform the adequate checks by overriding isExtFreeImpl.
2325 /// \pre \p I must be a sign, zero, or fp extension.
2326 bool isExtFree(const Instruction *I) const {
2327 switch (I->getOpcode()) {
2328 case Instruction::FPExt:
2329 if (isFPExtFree(EVT::getEVT(I->getType()),
2330 EVT::getEVT(I->getOperand(0)->getType())))
2331 return true;
2332 break;
2333 case Instruction::ZExt:
2334 if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
2335 return true;
2336 break;
2337 case Instruction::SExt:
2338 break;
2339 default:
2340      llvm_unreachable("Instruction is not an extension");
2341 }
2342 return isExtFreeImpl(I);
2343 }
2344
2345 /// Return true if \p Load and \p Ext can form an ExtLoad.
2346 /// For example, in AArch64
2347 /// %L = load i8, i8* %ptr
2348 /// %E = zext i8 %L to i32
2349 /// can be lowered into one load instruction
2350 /// ldrb w0, [x0]
2351 bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
2352 const DataLayout &DL) const {
2353 EVT VT = getValueType(DL, Ext->getType());
2354 EVT LoadVT = getValueType(DL, Load->getType());
2355
2356 // If the load has other users and the truncate is not free, the ext
2357 // probably isn't free.
2358 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
2359 !isTruncateFree(Ext->getType(), Load->getType()))
2360 return false;
2361
2362 // Check whether the target supports casts folded into loads.
2363 unsigned LType;
2364 if (isa<ZExtInst>(Ext))
2365 LType = ISD::ZEXTLOAD;
2366 else {
2367      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
2368 LType = ISD::SEXTLOAD;
2369 }
2370
2371 return isLoadExtLegal(LType, VT, LoadVT);
2372 }
2373
2374 /// Return true if any actual instruction that defines a value of type FromTy
2375 /// implicitly zero-extends the value to ToTy in the result register.
2376 ///
2377 /// The function should return true when it is likely that the truncate can
2378 /// be freely folded with an instruction defining a value of FromTy. If
2379 /// the defining instruction is unknown (because you're looking at a
2380 /// function argument, PHI, etc.) then the target may require an
2381 /// explicit truncate, which is not necessarily free, but this function
2382 /// does not deal with those cases.
2383 /// Targets must return false when FromTy >= ToTy.
2384 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
2385 return false;
2386 }
2387
2388 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
2389 return false;
2390 }
2391
2392 /// Return true if sign-extension from FromTy to ToTy is cheaper than
2393 /// zero-extension.
2394 virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
2395 return false;
2396 }
2397
2398 /// Return true if sinking I's operands to the same basic block as I is
2399 /// profitable, e.g. because the operands can be folded into a target
2400 /// instruction during instruction selection. After calling the function
2401 /// \p Ops contains the Uses to sink ordered by dominance (dominating users
2402 /// come first).
2403 virtual bool shouldSinkOperands(Instruction *I,
2404 SmallVectorImpl<Use *> &Ops) const {
2405 return false;
2406 }
2407
2408 /// Return true if the target supports combining, into a paired load, two
2409 /// loaded values of type LoadedType that lie next to each other in memory.
2410 /// RequiredAlignment gives the minimal alignment constraints that must be met
2411 /// to be able to select this paired load.
2412 ///
2413 /// This information is *not* used to generate actual paired loads, but it is
2414 /// used to generate a sequence of loads that is easier to combine into a
2415 /// paired load.
2416 /// For instance, something like this:
2417 /// a = load i64* addr
2418 /// b = trunc i64 a to i32
2419 /// c = lshr i64 a, 32
2420 /// d = trunc i64 c to i32
2421 /// will be optimized into:
2422 /// b = load i32* addr1
2423 /// d = load i32* addr2
2424 /// Where addr1 = addr2 +/- sizeof(i32).
2425 ///
2426 /// In other words, unless the target performs a post-isel load combining,
2427 /// this information should not be provided because it will generate more
2428 /// loads.
2429 virtual bool hasPairedLoad(EVT /*LoadedType*/,
2430 unsigned & /*RequiredAlignment*/) const {
2431 return false;
2432 }
2433
2434 /// Return true if the target has a vector blend instruction.
2435 virtual bool hasVectorBlend() const { return false; }
2436
2437 /// Get the maximum supported factor for interleaved memory accesses.
2438 /// Default to be the minimum interleave factor: 2.
2439 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
2440
2441 /// Lower an interleaved load to target specific intrinsics. Return
2442 /// true on success.
2443 ///
2444 /// \p LI is the vector load instruction.
2445 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
2446 /// \p Indices is the corresponding indices for each shufflevector.
2447 /// \p Factor is the interleave factor.
2448 virtual bool lowerInterleavedLoad(LoadInst *LI,
2449 ArrayRef<ShuffleVectorInst *> Shuffles,
2450 ArrayRef<unsigned> Indices,
2451 unsigned Factor) const {
2452 return false;
2453 }
2454
2455 /// Lower an interleaved store to target specific intrinsics. Return
2456 /// true on success.
2457 ///
2458 /// \p SI is the vector store instruction.
2459 /// \p SVI is the shufflevector to RE-interleave the stored vector.
2460 /// \p Factor is the interleave factor.
2461 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
2462 unsigned Factor) const {
2463 return false;
2464 }
2465
2466 /// Return true if zero-extending the specific node Val to type VT2 is free
2467 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2468 /// because it's folded such as X86 zero-extending loads).
2469 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2470 return isZExtFree(Val.getValueType(), VT2);
5. Calling 'SDValue::getValueType'
2471 }
2472
2473 /// Return true if an fpext operation is free (for instance, because
2474 /// single-precision floating-point numbers are implicitly extended to
2475 /// double-precision).
2476 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
2477    assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
2478           "invalid fpext types");
2479 return false;
2480 }
2481
2482 /// Return true if an fpext operation input to an \p Opcode operation is free
2483 /// (for instance, because half-precision floating-point numbers are
2484 /// implicitly extended to float-precision) for an FMA instruction.
2485 virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
2486 EVT DestVT, EVT SrcVT) const {
2487    assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
2488           "invalid fpext types");
2489 return isFPExtFree(DestVT, SrcVT);
2490 }
2491
2492 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
2493 /// extend node) is profitable.
2494 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
2495
2496 /// Return true if an fneg operation is free to the point where it is never
2497 /// worthwhile to replace it with a bitwise operation.
2498 virtual bool isFNegFree(EVT VT) const {
2499    assert(VT.isFloatingPoint());
2500 return false;
2501 }
2502
2503 /// Return true if an fabs operation is free to the point where it is never
2504 /// worthwhile to replace it with a bitwise operation.
2505 virtual bool isFAbsFree(EVT VT) const {
2506    assert(VT.isFloatingPoint());
2507 return false;
2508 }
2509
2510 /// Return true if an FMA operation is faster than a pair of fmul and fadd
2511 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2512 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
2513 ///
2514 /// NOTE: This may be called before legalization on types for which FMAs are
2515 /// not legal, but should return true if those types will eventually legalize
2516 /// to types that support FMAs. After legalization, it will only be called on
2517 /// types that support FMAs (via Legal or Custom actions)
2518 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
2519 EVT) const {
2520 return false;
2521 }
2522
2523 /// IR version
2524 virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
2525 return false;
2526 }
2527
2528 /// Returns true if the FADD or FSUB node passed could legally be combined with
2529 /// an fmul to form an ISD::FMAD.
2530 virtual bool isFMADLegalForFAddFSub(const SelectionDAG &DAG,
2531 const SDNode *N) const {
2532    assert(N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB);
2533 return isOperationLegal(ISD::FMAD, N->getValueType(0));
2534 }
2535
2536 /// Return true if it's profitable to narrow operations of type VT1 to
2537 /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
2538 /// i32 to i16.
2539 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
2540 return false;
2541 }
2542
2543 /// Return true if it is beneficial to convert a load of a constant to
2544 /// just the constant itself.
2545 /// On some targets it might be more efficient to use a combination of
2546 /// arithmetic instructions to materialize the constant instead of loading it
2547 /// from a constant pool.
2548 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
2549 Type *Ty) const {
2550 return false;
2551 }
2552
2553 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
2554 /// from this source type with this index. This is needed because
2555 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
2556 /// the first element, and only the target knows which lowering is cheap.
2557 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2558 unsigned Index) const {
2559 return false;
2560 }
2561
2562 /// Try to convert an extract element of a vector binary operation into an
2563 /// extract element followed by a scalar operation.
2564 virtual bool shouldScalarizeBinop(SDValue VecOp) const {
2565 return false;
2566 }
2567
2568 /// Return true if extraction of a scalar element from the given vector type
2569 /// at the given index is cheap. For example, if scalar operations occur on
2570 /// the same register file as vector operations, then an extract element may
2571 /// be a sub-register rename rather than an actual instruction.
2572 virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
2573 return false;
2574 }
2575
2576 /// Try to convert math with an overflow comparison into the corresponding DAG
2577 /// node operation. Targets may want to override this independently of whether
2578 /// the operation is legal/custom for the given type because it may obscure
2579 /// matching of other patterns.
2580 virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT) const {
2581 // TODO: The default logic is inherited from code in CodeGenPrepare.
2582 // The opcode should not make a difference by default?
2583 if (Opcode != ISD::UADDO)
2584 return false;
2585
2586 // Allow the transform as long as we have an integer type that is not
2587 // obviously illegal and unsupported.
2588 if (VT.isVector())
2589 return false;
2590 return VT.isSimple() || !isOperationExpand(Opcode, VT);
2591 }
2592
2593 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
2594 // even if the vector itself has multiple uses.
2595 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
2596 return false;
2597 }
2598
2599 // Return true if CodeGenPrepare should consider splitting the large offset of a
2600 // GEP so that the GEP fits into the addressing mode and can be sunk into the
2601 // same blocks as its users.
2602 virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
2603
2604 /// Return true if creating a shift of the type by the given
2605 /// amount is not profitable.
2606 virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
2607 return false;
2608 }
2609
2610 //===--------------------------------------------------------------------===//
2611 // Runtime Library hooks
2612 //
2613
2614 /// Rename the default libcall routine name for the specified libcall.
2615 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
2616 LibcallRoutineNames[Call] = Name;
2617 }
2618
2619 /// Get the libcall routine name for the specified libcall.
2620 const char *getLibcallName(RTLIB::Libcall Call) const {
2621 return LibcallRoutineNames[Call];
2622 }
2623
2624 /// Override the default CondCode to be used to test the result of the
2625 /// comparison libcall against zero.
2626 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
2627 CmpLibcallCCs[Call] = CC;
2628 }
2629
2630 /// Get the CondCode that's to be used to test the result of the comparison
2631 /// libcall against zero.
2632 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
2633 return CmpLibcallCCs[Call];
2634 }
2635
2636 /// Set the CallingConv that should be used for the specified libcall.
2637 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
2638 LibcallCallingConvs[Call] = CC;
2639 }
2640
2641 /// Get the CallingConv that should be used for the specified libcall.
2642 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
2643 return LibcallCallingConvs[Call];
2644 }
2645
2646 /// Execute target specific actions to finalize target lowering.
2647 /// This is used to set extra flags in MachineFrameInformation and to freeze
2648 /// the set of reserved registers.
2649 /// The default implementation just freezes the set of reserved registers.
2650 virtual void finalizeLowering(MachineFunction &MF) const;
2651
2652private:
2653 const TargetMachine &TM;
2654
2655 /// Tells the code generator that the target has multiple (allocatable)
2656 /// condition registers that can be used to store the results of comparisons
2657 /// for use by selects and conditional branches. With multiple condition
2658 /// registers, the code generator will not aggressively sink comparisons into
2659 /// the blocks of their users.
2660 bool HasMultipleConditionRegisters;
2661
2662 /// Tells the code generator that the target has BitExtract instructions.
2663 /// The code generator will aggressively sink "shift"s into the blocks of
2664 /// their users if the users will generate "and" instructions which can be
2665 /// combined with "shift" to BitExtract instructions.
2666 bool HasExtractBitsInsn;
2667
2668 /// Tells the code generator to bypass slow divide or remainder
2669 /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
2670 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
2671 /// div/rem when the operands are positive and less than 256.
2672 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
2673
2674 /// Tells the code generator that it shouldn't generate extra flow control
2675 /// instructions and should attempt to combine flow control instructions via
2676 /// predication.
2677 bool JumpIsExpensive;
2678
2679 /// Information about the contents of the high-bits in boolean values held in
2680 /// a type wider than i1. See getBooleanContents.
2681 BooleanContent BooleanContents;
2682
2683 /// Information about the contents of the high-bits in boolean values held in
2684 /// a type wider than i1. See getBooleanContents.
2685 BooleanContent BooleanFloatContents;
2686
2687 /// Information about the contents of the high-bits in boolean vector values
2688 /// when the element type is wider than i1. See getBooleanContents.
2689 BooleanContent BooleanVectorContents;
2690
2691 /// The target scheduling preference: shortest possible total cycles or lowest
2692 /// register usage.
2693 Sched::Preference SchedPreferenceInfo;
2694
2695 /// The minimum alignment that any argument on the stack needs to have.
2696 Align MinStackArgumentAlignment;
2697
2698 /// The minimum function alignment (used when optimizing for size, and to
2699 /// prevent explicitly provided alignment from leading to incorrect code).
2700 Align MinFunctionAlignment;
2701
2702 /// The preferred function alignment (used when alignment unspecified and
2703 /// optimizing for speed).
2704 Align PrefFunctionAlignment;
2705
2706 /// The preferred loop alignment (in log2, not in bytes).
2707 Align PrefLoopAlignment;
2708
2709 /// Size in bits of the maximum atomics size the backend supports.
2710 /// Accesses larger than this will be expanded by AtomicExpandPass.
2711 unsigned MaxAtomicSizeInBitsSupported;
2712
2713 /// Size in bits of the minimum cmpxchg or ll/sc operation the
2714 /// backend supports.
2715 unsigned MinCmpXchgSizeInBits;
2716
2717 /// This indicates if the target supports unaligned atomic operations.
2718 bool SupportsUnalignedAtomics;
2719
2720 /// If set to a physical register, this specifies the register that
2721 /// llvm.stacksave/llvm.stackrestore should save and restore.
2722 unsigned StackPointerRegisterToSaveRestore;
2723
2724 /// This indicates the default register class to use for each ValueType the
2725 /// target supports natively.
2726 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
2727 uint16_t NumRegistersForVT[MVT::LAST_VALUETYPE];
2728 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
2729
2730 /// This indicates the "representative" register class to use for each
2731 /// ValueType the target supports natively. This information is used by the
2732 /// scheduler to track register pressure. By default, the representative
2733 /// register class is the largest legal super-reg register class of the
2734 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
2735 /// representative class would be GR32.
2736 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
2737
2738 /// This indicates the "cost" of the "representative" register class for each
2739 /// ValueType. The cost is used by the scheduler to approximate register
2740 /// pressure.
2741 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
2742
2743 /// For any value types we are promoting or expanding, this contains the value
2744 /// type that we are changing to. For Expanded types, this contains one step
2745 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
2746 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
2747 /// the same type (e.g. i32 -> i32).
2748 MVT TransformToType[MVT::LAST_VALUETYPE];
2749
2750 /// For each operation and each value type, keep a LegalizeAction that
2751 /// indicates how instruction selection should deal with the operation. Most
2752 /// operations are Legal (aka, supported natively by the target), but
2753 /// operations that are not should be described. Note that operations on
2754 /// non-legal value types are not described here.
2755 LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
2756
2757 /// For each load extension type and each value type, keep a LegalizeAction
2758 /// that indicates how instruction selection should deal with a load of a
2759 /// specific value type and extension type. Uses 4-bits to store the action
2760 /// for each of the 4 load ext types.
2761 uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2762
2763 /// For each value type pair keep a LegalizeAction that indicates whether a
2764 /// truncating store of a specific value type and truncating type is legal.
2765 LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2766
2767 /// For each indexed mode and each value type, keep a quad of LegalizeAction
2768 /// that indicates how instruction selection should deal with the load /
2769 /// store / maskedload / maskedstore.
2770 ///
2771 /// The first dimension is the value_type for the reference. The second
2772 /// dimension represents the various modes for load/store.
2773 uint16_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
2774
2775 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
2776 /// indicates how instruction selection should deal with the condition code.
2777 ///
2778 /// Because each CC action takes up 4 bits, we need to have the array size be
2779 /// large enough to fit all of the value types. This can be done by rounding
2780 /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
2781 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
2782
2783 ValueTypeActionImpl ValueTypeActions;
2784
2785private:
2786 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
2787
2788 /// Targets can specify ISD nodes that they would like PerformDAGCombine
2789 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
2790 /// array.
2791 unsigned char
2792    TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
2793
2794 /// For operations that must be promoted to a specific type, this holds the
2795 /// destination type. This map should be sparse, so don't hold it as an
2796 /// array.
2797 ///
2798 /// Targets add entries to this map with AddPromotedToType(..), clients access
2799 /// this with getTypeToPromoteTo(..).
2800 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
2801 PromoteToType;
2802
2803 /// Stores the name of each libcall.
2804 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
2805
2806 /// The ISD::CondCode that should be used to test the result of each of the
2807 /// comparison libcall against zero.
2808 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
2809
2810 /// Stores the CallingConv that should be used for each libcall.
2811 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
2812
2813 /// Set default libcall names and calling conventions.
2814 void InitLibcalls(const Triple &TT);
2815
2816 /// The bits of IndexedModeActions used to store the legalisation actions.
2817 /// We store the data as | ML | MS | L | S |, each field taking 4 bits.
2818 enum IndexedModeActionsBits {
2819 IMAB_Store = 0,
2820 IMAB_Load = 4,
2821 IMAB_MaskedStore = 8,
2822 IMAB_MaskedLoad = 12
2823 };
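    // Worked example, not part of the header: with the | ML | MS | L | S | layout,
    // a Store action lands in bits [3:0] of each 16-bit entry, a Load action in
    // bits [7:4], a MaskedStore in [11:8] and a MaskedLoad in [15:12].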
2824
2825 void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
2826 LegalizeAction Action) {
2827    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
2828           (unsigned)Action < 0xf && "Table isn't big enough!");
2829 unsigned Ty = (unsigned)VT.SimpleTy;
2830 IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
2831 IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
2832 }
2833
2834 LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
2835 unsigned Shift) const {
2836    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
2837           "Table isn't big enough!");
2838 unsigned Ty = (unsigned)VT.SimpleTy;
2839 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
2840 }
2841
2842protected:
2843 /// Return true if the extension represented by \p I is free.
2844 /// \pre \p I is a sign, zero, or fp extension and
2845 /// is[Z|FP]ExtFree of the related types is not true.
2846 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
2847
2848 /// Depth that GatherAllAliases should continue looking for chain
2849 /// dependencies when trying to find a more preferable chain. As an
2850 /// approximation, this should be more than the number of consecutive stores
2851 /// expected to be merged.
2852 unsigned GatherAllAliasesMaxDepth;
2853
2854 /// \brief Specify maximum number of store instructions per memset call.