Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1153, column 10
Called C++ object pointer is null
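
As a rough illustration of the defect class this warning names (a member function invoked through a null object pointer), consider the hypothetical stand-in types below; this is only a minimal sketch, not code from the report or from LLVM:

    #include <cstdio>

    struct Node {
      unsigned getOpcode() const { return Opcode; } // dereferences 'this'
      unsigned Opcode = 0;
    };

    struct Handle {
      Node *N = nullptr; // may legitimately be null
      // If N is null when this is called, the call dereferences a null
      // pointer -- the pattern "Called C++ object pointer is null" reports.
      unsigned getOpcode() const { return N->getOpcode(); }
    };

    int main() {
      Handle H;                              // H.N is null here
      if (H.N)                               // guarding the access avoids the defect
        std::printf("%u\n", H.getOpcode());
      return 0;
    }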

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name RISCVISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-11/lib/clang/11.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/RISCV -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/RISCV -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-11/lib/clang/11.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/RISCV -fdebug-prefix-map=/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-03-09-184146-41876-1 -x c++ /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/RISCV/RISCVISelLowering.cpp

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/RISCV/RISCVISelLowering.cpp

1//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that RISCV uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "RISCVISelLowering.h"
15#include "RISCV.h"
16#include "RISCVMachineFunctionInfo.h"
17#include "RISCVRegisterInfo.h"
18#include "RISCVSubtarget.h"
19#include "RISCVTargetMachine.h"
20#include "Utils/RISCVMatInt.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/CodeGen/CallingConvLower.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineFunction.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineRegisterInfo.h"
28#include "llvm/CodeGen/SelectionDAGISel.h"
29#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30#include "llvm/CodeGen/ValueTypes.h"
31#include "llvm/IR/DiagnosticInfo.h"
32#include "llvm/IR/DiagnosticPrinter.h"
33#include "llvm/IR/IntrinsicsRISCV.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/ErrorHandling.h"
36#include "llvm/Support/raw_ostream.h"
37
38using namespace llvm;
39
40#define DEBUG_TYPE "riscv-lower"
41
42STATISTIC(NumTailCalls, "Number of tail calls");
43
44RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
45 const RISCVSubtarget &STI)
46 : TargetLowering(TM), Subtarget(STI) {
47
48 if (Subtarget.isRV32E())
49 report_fatal_error("Codegen not yet implemented for RV32E");
50
51 RISCVABI::ABI ABI = Subtarget.getTargetABI();
52 assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
53
54 if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
55 !Subtarget.hasStdExtF()) {
56 errs() << "Hard-float 'f' ABI can't be used for a target that "
57 "doesn't support the F instruction set extension (ignoring "
58 "target-abi)\n";
59 ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
60 } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
61 !Subtarget.hasStdExtD()) {
62 errs() << "Hard-float 'd' ABI can't be used for a target that "
63 "doesn't support the D instruction set extension (ignoring "
64 "target-abi)\n";
65 ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
66 }
67
68 switch (ABI) {
69 default:
70 report_fatal_error("Don't know how to lower this ABI");
71 case RISCVABI::ABI_ILP32:
72 case RISCVABI::ABI_ILP32F:
73 case RISCVABI::ABI_ILP32D:
74 case RISCVABI::ABI_LP64:
75 case RISCVABI::ABI_LP64F:
76 case RISCVABI::ABI_LP64D:
77 break;
78 }
79
80 MVT XLenVT = Subtarget.getXLenVT();
81
82 // Set up the register classes.
83 addRegisterClass(XLenVT, &RISCV::GPRRegClass);
84
85 if (Subtarget.hasStdExtF())
86 addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
87 if (Subtarget.hasStdExtD())
88 addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
89
90 // Compute derived properties from the register classes.
91 computeRegisterProperties(STI.getRegisterInfo());
92
93 setStackPointerRegisterToSaveRestore(RISCV::X2);
94
95 for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
96 setLoadExtAction(N, XLenVT, MVT::i1, Promote);
97
98 // TODO: add all necessary setOperationAction calls.
99 setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
100
101 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
102 setOperationAction(ISD::BR_CC, XLenVT, Expand);
103 setOperationAction(ISD::SELECT, XLenVT, Custom);
104 setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
105
106 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
107 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
108
109 setOperationAction(ISD::VASTART, MVT::Other, Custom);
110 setOperationAction(ISD::VAARG, MVT::Other, Expand);
111 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
112 setOperationAction(ISD::VAEND, MVT::Other, Expand);
113
114 for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
115 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
116
117 if (Subtarget.is64Bit()) {
118 setOperationAction(ISD::ADD, MVT::i32, Custom);
119 setOperationAction(ISD::SUB, MVT::i32, Custom);
120 setOperationAction(ISD::SHL, MVT::i32, Custom);
121 setOperationAction(ISD::SRA, MVT::i32, Custom);
122 setOperationAction(ISD::SRL, MVT::i32, Custom);
123 }
124
125 if (!Subtarget.hasStdExtM()) {
126 setOperationAction(ISD::MUL, XLenVT, Expand);
127 setOperationAction(ISD::MULHS, XLenVT, Expand);
128 setOperationAction(ISD::MULHU, XLenVT, Expand);
129 setOperationAction(ISD::SDIV, XLenVT, Expand);
130 setOperationAction(ISD::UDIV, XLenVT, Expand);
131 setOperationAction(ISD::SREM, XLenVT, Expand);
132 setOperationAction(ISD::UREM, XLenVT, Expand);
133 }
134
135 if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
136 setOperationAction(ISD::MUL, MVT::i32, Custom);
137 setOperationAction(ISD::SDIV, MVT::i32, Custom);
138 setOperationAction(ISD::UDIV, MVT::i32, Custom);
139 setOperationAction(ISD::UREM, MVT::i32, Custom);
140 }
141
142 setOperationAction(ISD::SDIVREM, XLenVT, Expand);
143 setOperationAction(ISD::UDIVREM, XLenVT, Expand);
144 setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
145 setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
146
147 setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
148 setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
149 setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
150
151 setOperationAction(ISD::ROTL, XLenVT, Expand);
152 setOperationAction(ISD::ROTR, XLenVT, Expand);
153 setOperationAction(ISD::BSWAP, XLenVT, Expand);
154 setOperationAction(ISD::CTTZ, XLenVT, Expand);
155 setOperationAction(ISD::CTLZ, XLenVT, Expand);
156 setOperationAction(ISD::CTPOP, XLenVT, Expand);
157
158 ISD::CondCode FPCCToExtend[] = {
159 ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
160 ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
161 ISD::SETGE, ISD::SETNE};
162
163 ISD::NodeType FPOpToExtend[] = {
164 ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
165 ISD::FP_TO_FP16};
166
167 if (Subtarget.hasStdExtF()) {
168 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
169 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
170 for (auto CC : FPCCToExtend)
171 setCondCodeAction(CC, MVT::f32, Expand);
172 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
173 setOperationAction(ISD::SELECT, MVT::f32, Custom);
174 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
175 for (auto Op : FPOpToExtend)
176 setOperationAction(Op, MVT::f32, Expand);
177 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
178 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
179 }
180
181 if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
182 setOperationAction(ISD::BITCAST, MVT::i32, Custom);
183
184 if (Subtarget.hasStdExtD()) {
185 setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
186 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
187 for (auto CC : FPCCToExtend)
188 setCondCodeAction(CC, MVT::f64, Expand);
189 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
190 setOperationAction(ISD::SELECT, MVT::f64, Custom);
191 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
192 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
193 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
194 for (auto Op : FPOpToExtend)
195 setOperationAction(Op, MVT::f64, Expand);
196 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
197 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
198 }
199
200 setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
201 setOperationAction(ISD::BlockAddress, XLenVT, Custom);
202 setOperationAction(ISD::ConstantPool, XLenVT, Custom);
203
204 setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
205
206 // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
207 // Unfortunately this can't be determined just from the ISA naming string.
208 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
209 Subtarget.is64Bit() ? Legal : Custom);
210
211 setOperationAction(ISD::TRAP, MVT::Other, Legal);
212 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
213
214 if (Subtarget.hasStdExtA()) {
215 setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
216 setMinCmpXchgSizeInBits(32);
217 } else {
218 setMaxAtomicSizeInBitsSupported(0);
219 }
220
221 setBooleanContents(ZeroOrOneBooleanContent);
222
223 // Function alignments.
224 const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
225 setMinFunctionAlignment(FunctionAlignment);
226 setPrefFunctionAlignment(FunctionAlignment);
227
228 // Effectively disable jump table generation.
229 setMinimumJumpTableEntries(INT_MAX);
230}
231
232EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
233 EVT VT) const {
234 if (!VT.isVector())
235 return getPointerTy(DL);
236 return VT.changeVectorElementTypeToInteger();
237}
238
239bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
240 const CallInst &I,
241 MachineFunction &MF,
242 unsigned Intrinsic) const {
243 switch (Intrinsic) {
244 default:
245 return false;
246 case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
247 case Intrinsic::riscv_masked_atomicrmw_add_i32:
248 case Intrinsic::riscv_masked_atomicrmw_sub_i32:
249 case Intrinsic::riscv_masked_atomicrmw_nand_i32:
250 case Intrinsic::riscv_masked_atomicrmw_max_i32:
251 case Intrinsic::riscv_masked_atomicrmw_min_i32:
252 case Intrinsic::riscv_masked_atomicrmw_umax_i32:
253 case Intrinsic::riscv_masked_atomicrmw_umin_i32:
254 case Intrinsic::riscv_masked_cmpxchg_i32:
255 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
256 Info.opc = ISD::INTRINSIC_W_CHAIN;
257 Info.memVT = MVT::getVT(PtrTy->getElementType());
258 Info.ptrVal = I.getArgOperand(0);
259 Info.offset = 0;
260 Info.align = Align(4);
261 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
262 MachineMemOperand::MOVolatile;
263 return true;
264 }
265}
266
267bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
268 const AddrMode &AM, Type *Ty,
269 unsigned AS,
270 Instruction *I) const {
271 // No global is ever allowed as a base.
272 if (AM.BaseGV)
273 return false;
274
275 // Require a 12-bit signed offset.
276 if (!isInt<12>(AM.BaseOffs))
277 return false;
278
279 switch (AM.Scale) {
280 case 0: // "r+i" or just "i", depending on HasBaseReg.
281 break;
282 case 1:
283 if (!AM.HasBaseReg) // allow "r+i".
284 break;
285 return false; // disallow "r+r" or "r+r+i".
286 default:
287 return false;
288 }
289
290 return true;
291}
292
293bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
294 return isInt<12>(Imm);
295}
296
297bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
298 return isInt<12>(Imm);
299}
300
301// On RV32, 64-bit integers are split into their high and low parts and held
302// in two different registers, so the trunc is free since the low register can
303// just be used.
304bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
305 if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
306 return false;
307 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
308 unsigned DestBits = DstTy->getPrimitiveSizeInBits();
309 return (SrcBits == 64 && DestBits == 32);
310}
311
312bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
313 if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
314 !SrcVT.isInteger() || !DstVT.isInteger())
315 return false;
316 unsigned SrcBits = SrcVT.getSizeInBits();
317 unsigned DestBits = DstVT.getSizeInBits();
318 return (SrcBits == 64 && DestBits == 32);
319}
320
321bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
322 // Zexts are free if they can be combined with a load.
323 if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1. Assuming 'LD' is null
2. Taking false branch
324 EVT MemVT = LD->getMemoryVT();
325 if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
326 (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
327 (LD->getExtensionType() == ISD::NON_EXTLOAD ||
328 LD->getExtensionType() == ISD::ZEXTLOAD))
329 return true;
330 }
331
332 return TargetLowering::isZExtFree(Val, VT2);
3. Value assigned to 'Val.Node'
4. Calling 'TargetLoweringBase::isZExtFree'
333}
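
Condensed, the path the analyzer reports is: dyn_cast<LoadSDNode>(Val) is assumed null (steps 1-2), so the load-specific early return is skipped and Val is passed to the base-class TargetLoweringBase::isZExtFree (steps 3-4), where a member call on the SDValue is modelled as dereferencing its null Node pointer at SelectionDAGNodes.h:1153. The sketch below mirrors that flow with simplified stand-in types; the shape of the accessor is an assumption about the header, not the actual LLVM classes:

    // Simplified stand-ins, not the real LLVM types.
    struct SDNode { unsigned Opcode = 0; unsigned getOpcode() const { return Opcode; } };

    struct SDValue {
      SDNode *Node = nullptr;
      // Assumed shape of the accessor flagged at SelectionDAGNodes.h:1153:
      // it forwards through Node, so a null Node is dereferenced here.
      unsigned getOpcode() const { return Node->getOpcode(); }
    };

    // Stands in for TargetLoweringBase::isZExtFree, which the analyzer models
    // as eventually querying the value's opcode.
    bool baseIsZExtFree(SDValue Val) { return Val.getOpcode() == 0; }

    bool riscvIsZExtFree(SDValue Val) {
      // Steps 1-2: the LoadSDNode dyn_cast is assumed to yield null, so the
      // load-specific early return does not fire.
      // Steps 3-4: Val (whose Node the analyzer treats as null) is handed to
      // the base implementation, where the dereference is reported.
      return baseIsZExtFree(Val);
    }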
334
335bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
336 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
337}
338
339bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
340 return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
341 (VT == MVT::f64 && Subtarget.hasStdExtD());
342}
343
344// Changes the condition code and swaps operands if necessary, so the SetCC
345// operation matches one of the comparisons supported directly in the RISC-V
346// ISA.
347static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
348 switch (CC) {
349 default:
350 break;
351 case ISD::SETGT:
352 case ISD::SETLE:
353 case ISD::SETUGT:
354 case ISD::SETULE:
355 CC = ISD::getSetCCSwappedOperands(CC);
356 std::swap(LHS, RHS);
357 break;
358 }
359}
360
361// Return the RISC-V branch opcode that matches the given DAG integer
362// condition code. The CondCode must be one of those supported by the RISC-V
363// ISA (see normaliseSetCC).
364static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
365 switch (CC) {
366 default:
367 llvm_unreachable("Unsupported CondCode");
368 case ISD::SETEQ:
369 return RISCV::BEQ;
370 case ISD::SETNE:
371 return RISCV::BNE;
372 case ISD::SETLT:
373 return RISCV::BLT;
374 case ISD::SETGE:
375 return RISCV::BGE;
376 case ISD::SETULT:
377 return RISCV::BLTU;
378 case ISD::SETUGE:
379 return RISCV::BGEU;
380 }
381}
382
383SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
384 SelectionDAG &DAG) const {
385 switch (Op.getOpcode()) {
386 default:
387 report_fatal_error("unimplemented operand");
388 case ISD::GlobalAddress:
389 return lowerGlobalAddress(Op, DAG);
390 case ISD::BlockAddress:
391 return lowerBlockAddress(Op, DAG);
392 case ISD::ConstantPool:
393 return lowerConstantPool(Op, DAG);
394 case ISD::GlobalTLSAddress:
395 return lowerGlobalTLSAddress(Op, DAG);
396 case ISD::SELECT:
397 return lowerSELECT(Op, DAG);
398 case ISD::VASTART:
399 return lowerVASTART(Op, DAG);
400 case ISD::FRAMEADDR:
401 return lowerFRAMEADDR(Op, DAG);
402 case ISD::RETURNADDR:
403 return lowerRETURNADDR(Op, DAG);
404 case ISD::SHL_PARTS:
405 return lowerShiftLeftParts(Op, DAG);
406 case ISD::SRA_PARTS:
407 return lowerShiftRightParts(Op, DAG, true);
408 case ISD::SRL_PARTS:
409 return lowerShiftRightParts(Op, DAG, false);
410 case ISD::BITCAST: {
411 assert(Subtarget.is64Bit() && Subtarget.hasStdExtF() &&
412 "Unexpected custom legalisation");
413 SDLoc DL(Op);
414 SDValue Op0 = Op.getOperand(0);
415 if (Op.getValueType() != MVT::f32 || Op0.getValueType() != MVT::i32)
416 return SDValue();
417 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
418 SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
419 return FPConv;
420 }
421 }
422}
423
424static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
425 SelectionDAG &DAG, unsigned Flags) {
426 return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
427}
428
429static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
430 SelectionDAG &DAG, unsigned Flags) {
431 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
432 Flags);
433}
434
435static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
436 SelectionDAG &DAG, unsigned Flags) {
437 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
438 N->getOffset(), Flags);
439}
440
441template <class NodeTy>
442SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
443 bool IsLocal) const {
444 SDLoc DL(N);
445 EVT Ty = getPointerTy(DAG.getDataLayout());
446
447 if (isPositionIndependent()) {
448 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
449 if (IsLocal)
450 // Use PC-relative addressing to access the symbol. This generates the
451 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
452 // %pcrel_lo(auipc)).
453 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
454
455 // Use PC-relative addressing to access the GOT for this symbol, then load
456 // the address from the GOT. This generates the pattern (PseudoLA sym),
457 // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
458 return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
459 }
460
461 switch (getTargetMachine().getCodeModel()) {
462 default:
463 report_fatal_error("Unsupported code model for lowering");
464 case CodeModel::Small: {
465 // Generate a sequence for accessing addresses within the first 2 GiB of
466 // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
467 SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
468 SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
469 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
470 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
471 }
472 case CodeModel::Medium: {
473 // Generate a sequence for accessing addresses within any 2GiB range within
474 // the address space. This generates the pattern (PseudoLLA sym), which
475 // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
476 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
477 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
478 }
479 }
480}
481
482SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
483 SelectionDAG &DAG) const {
484 SDLoc DL(Op);
485 EVT Ty = Op.getValueType();
486 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
487 int64_t Offset = N->getOffset();
488 MVT XLenVT = Subtarget.getXLenVT();
489
490 const GlobalValue *GV = N->getGlobal();
491 bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
492 SDValue Addr = getAddr(N, DAG, IsLocal);
493
494 // In order to maximise the opportunity for common subexpression elimination,
495 // emit a separate ADD node for the global address offset instead of folding
496 // it in the global address node. Later peephole optimisations may choose to
497 // fold it back in when profitable.
498 if (Offset != 0)
499 return DAG.getNode(ISD::ADD, DL, Ty, Addr,
500 DAG.getConstant(Offset, DL, XLenVT));
501 return Addr;
502}
503
504SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
505 SelectionDAG &DAG) const {
506 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
507
508 return getAddr(N, DAG);
509}
510
511SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
512 SelectionDAG &DAG) const {
513 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
514
515 return getAddr(N, DAG);
516}
517
518SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
519 SelectionDAG &DAG,
520 bool UseGOT) const {
521 SDLoc DL(N);
522 EVT Ty = getPointerTy(DAG.getDataLayout());
523 const GlobalValue *GV = N->getGlobal();
524 MVT XLenVT = Subtarget.getXLenVT();
525
526 if (UseGOT) {
527 // Use PC-relative addressing to access the GOT for this TLS symbol, then
528 // load the address from the GOT and add the thread pointer. This generates
529 // the pattern (PseudoLA_TLS_IE sym), which expands to
530 // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
531 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
532 SDValue Load =
533 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
534
535 // Add the thread pointer.
536 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
537 return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
538 }
539
540 // Generate a sequence for accessing the address relative to the thread
541 // pointer, with the appropriate adjustment for the thread pointer offset.
542 // This generates the pattern
543 // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
544 SDValue AddrHi =
545 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
546 SDValue AddrAdd =
547 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
548 SDValue AddrLo =
549 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
550
551 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
552 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
553 SDValue MNAdd = SDValue(
554 DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
555 0);
556 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
557}
558
559SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
560 SelectionDAG &DAG) const {
561 SDLoc DL(N);
562 EVT Ty = getPointerTy(DAG.getDataLayout());
563 IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
564 const GlobalValue *GV = N->getGlobal();
565
566 // Use a PC-relative addressing mode to access the global dynamic GOT address.
567 // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
568 // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
569 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
570 SDValue Load =
571 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
572
573 // Prepare argument list to generate call.
574 ArgListTy Args;
575 ArgListEntry Entry;
576 Entry.Node = Load;
577 Entry.Ty = CallTy;
578 Args.push_back(Entry);
579
580 // Setup call to __tls_get_addr.
581 TargetLowering::CallLoweringInfo CLI(DAG);
582 CLI.setDebugLoc(DL)
583 .setChain(DAG.getEntryNode())
584 .setLibCallee(CallingConv::C, CallTy,
585 DAG.getExternalSymbol("__tls_get_addr", Ty),
586 std::move(Args));
587
588 return LowerCallTo(CLI).first;
589}
590
591SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
592 SelectionDAG &DAG) const {
593 SDLoc DL(Op);
594 EVT Ty = Op.getValueType();
595 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
596 int64_t Offset = N->getOffset();
597 MVT XLenVT = Subtarget.getXLenVT();
598
599 TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
600
601 SDValue Addr;
602 switch (Model) {
603 case TLSModel::LocalExec:
604 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
605 break;
606 case TLSModel::InitialExec:
607 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
608 break;
609 case TLSModel::LocalDynamic:
610 case TLSModel::GeneralDynamic:
611 Addr = getDynamicTLSAddr(N, DAG);
612 break;
613 }
614
615 // In order to maximise the opportunity for common subexpression elimination,
616 // emit a separate ADD node for the global address offset instead of folding
617 // it in the global address node. Later peephole optimisations may choose to
618 // fold it back in when profitable.
619 if (Offset != 0)
620 return DAG.getNode(ISD::ADD, DL, Ty, Addr,
621 DAG.getConstant(Offset, DL, XLenVT));
622 return Addr;
623}
624
625SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
626 SDValue CondV = Op.getOperand(0);
627 SDValue TrueV = Op.getOperand(1);
628 SDValue FalseV = Op.getOperand(2);
629 SDLoc DL(Op);
630 MVT XLenVT = Subtarget.getXLenVT();
631
632 // If the result type is XLenVT and CondV is the output of a SETCC node
633 // which also operated on XLenVT inputs, then merge the SETCC node into the
634 // lowered RISCVISD::SELECT_CC to take advantage of the integer
635 // compare+branch instructions. i.e.:
636 // (select (setcc lhs, rhs, cc), truev, falsev)
637 // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
638 if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
639 CondV.getOperand(0).getSimpleValueType() == XLenVT) {
640 SDValue LHS = CondV.getOperand(0);
641 SDValue RHS = CondV.getOperand(1);
642 auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
643 ISD::CondCode CCVal = CC->get();
644
645 normaliseSetCC(LHS, RHS, CCVal);
646
647 SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
648 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
649 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
650 return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
651 }
652
653 // Otherwise:
654 // (select condv, truev, falsev)
655 // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
656 SDValue Zero = DAG.getConstant(0, DL, XLenVT);
657 SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
658
659 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
660 SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
661
662 return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
663}
664
665SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
666 MachineFunction &MF = DAG.getMachineFunction();
667 RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
668
669 SDLoc DL(Op);
670 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
671 getPointerTy(MF.getDataLayout()));
672
673 // vastart just stores the address of the VarArgsFrameIndex slot into the
674 // memory location argument.
675 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
676 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
677 MachinePointerInfo(SV));
678}
679
680SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
681 SelectionDAG &DAG) const {
682 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
683 MachineFunction &MF = DAG.getMachineFunction();
684 MachineFrameInfo &MFI = MF.getFrameInfo();
685 MFI.setFrameAddressIsTaken(true);
686 Register FrameReg = RI.getFrameRegister(MF);
687 int XLenInBytes = Subtarget.getXLen() / 8;
688
689 EVT VT = Op.getValueType();
690 SDLoc DL(Op);
691 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
692 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
693 while (Depth--) {
694 int Offset = -(XLenInBytes * 2);
695 SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
696 DAG.getIntPtrConstant(Offset, DL));
697 FrameAddr =
698 DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
699 }
700 return FrameAddr;
701}
702
703SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
704 SelectionDAG &DAG) const {
705 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
706 MachineFunction &MF = DAG.getMachineFunction();
707 MachineFrameInfo &MFI = MF.getFrameInfo();
708 MFI.setReturnAddressIsTaken(true);
709 MVT XLenVT = Subtarget.getXLenVT();
710 int XLenInBytes = Subtarget.getXLen() / 8;
711
712 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
713 return SDValue();
714
715 EVT VT = Op.getValueType();
716 SDLoc DL(Op);
717 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
718 if (Depth) {
719 int Off = -XLenInBytes;
720 SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
721 SDValue Offset = DAG.getConstant(Off, DL, VT);
722 return DAG.getLoad(VT, DL, DAG.getEntryNode(),
723 DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
724 MachinePointerInfo());
725 }
726
727 // Return the value of the return address register, marking it an implicit
728 // live-in.
729 Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
730 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
731}
732
733SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
734 SelectionDAG &DAG) const {
735 SDLoc DL(Op);
736 SDValue Lo = Op.getOperand(0);
737 SDValue Hi = Op.getOperand(1);
738 SDValue Shamt = Op.getOperand(2);
739 EVT VT = Lo.getValueType();
740
741 // if Shamt-XLEN < 0: // Shamt < XLEN
742 // Lo = Lo << Shamt
743 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
744 // else:
745 // Lo = 0
746 // Hi = Lo << (Shamt-XLEN)
747
748 SDValue Zero = DAG.getConstant(0, DL, VT);
749 SDValue One = DAG.getConstant(1, DL, VT);
750 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
751 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
752 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
753 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
754
755 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
756 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
757 SDValue ShiftRightLo =
758 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
759 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
760 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
761 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
762
763 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
764
765 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
766 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
767
768 SDValue Parts[2] = {Lo, Hi};
769 return DAG.getMergeValues(Parts, DL);
770}
771
772SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
773 bool IsSRA) const {
774 SDLoc DL(Op);
775 SDValue Lo = Op.getOperand(0);
776 SDValue Hi = Op.getOperand(1);
777 SDValue Shamt = Op.getOperand(2);
778 EVT VT = Lo.getValueType();
779
780 // SRA expansion:
781 // if Shamt-XLEN < 0: // Shamt < XLEN
782 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
783 // Hi = Hi >>s Shamt
784 // else:
785 // Lo = Hi >>s (Shamt-XLEN);
786 // Hi = Hi >>s (XLEN-1)
787 //
788 // SRL expansion:
789 // if Shamt-XLEN < 0: // Shamt < XLEN
790 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
791 // Hi = Hi >>u Shamt
792 // else:
793 // Lo = Hi >>u (Shamt-XLEN);
794 // Hi = 0;
795
796 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
797
798 SDValue Zero = DAG.getConstant(0, DL, VT);
799 SDValue One = DAG.getConstant(1, DL, VT);
800 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
801 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
802 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
803 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
804
805 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
806 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
807 SDValue ShiftLeftHi =
808 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
809 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
810 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
811 SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
812 SDValue HiFalse =
813 IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
814
815 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
816
817 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
818 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
819
820 SDValue Parts[2] = {Lo, Hi};
821 return DAG.getMergeValues(Parts, DL);
822}
823
824// Returns the opcode of the target-specific SDNode that implements the 32-bit
825// form of the given Opcode.
826static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
827 switch (Opcode) {
828 default:
829 llvm_unreachable("Unexpected opcode")::llvm::llvm_unreachable_internal("Unexpected opcode", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/RISCV/RISCVISelLowering.cpp"
, 829)
;
830 case ISD::SHL:
831 return RISCVISD::SLLW;
832 case ISD::SRA:
833 return RISCVISD::SRAW;
834 case ISD::SRL:
835 return RISCVISD::SRLW;
836 case ISD::SDIV:
837 return RISCVISD::DIVW;
838 case ISD::UDIV:
839 return RISCVISD::DIVUW;
840 case ISD::UREM:
841 return RISCVISD::REMUW;
842 }
843}
844
845// Converts the given 32-bit operation to a target-specific SelectionDAG node.
846// Because i32 isn't a legal type for RV64, these operations would otherwise
847// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
848// later on because the fact that the operation was originally of type i32 is
849// lost.
850static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
851 SDLoc DL(N);
852 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
853 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
854 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
855 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
856 // ReplaceNodeResults requires we maintain the same type for the return value.
857 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
858}
859
860// Converts the given 32-bit operation to an i64 operation with sign-extension
861// semantics to reduce the number of sign-extension instructions.
862static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
863 SDLoc DL(N);
864 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
865 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
866 SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
867 SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
868 DAG.getValueType(MVT::i32));
869 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
870}
871
872void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
873 SmallVectorImpl<SDValue> &Results,
874 SelectionDAG &DAG) const {
875 SDLoc DL(N);
876 switch (N->getOpcode()) {
877 default:
878 llvm_unreachable("Don't know how to custom type legalize this operation!");
879 case ISD::READCYCLECOUNTER: {
880 assert(!Subtarget.is64Bit() &&
881 "READCYCLECOUNTER only has custom type legalization on riscv32");
882
883 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
884 SDValue RCW =
885 DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
886
887 Results.push_back(
888 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
889 Results.push_back(RCW.getValue(2));
890 break;
891 }
892 case ISD::ADD:
893 case ISD::SUB:
894 case ISD::MUL:
895 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
896 "Unexpected custom legalisation");
897 if (N->getOperand(1).getOpcode() == ISD::Constant)
898 return;
899 Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
900 break;
901 case ISD::SHL:
902 case ISD::SRA:
903 case ISD::SRL:
904 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
905 "Unexpected custom legalisation");
906 if (N->getOperand(1).getOpcode() == ISD::Constant)
907 return;
908 Results.push_back(customLegalizeToWOp(N, DAG));
909 break;
910 case ISD::SDIV:
911 case ISD::UDIV:
912 case ISD::UREM:
913 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
914 Subtarget.hasStdExtM() && "Unexpected custom legalisation");
915 if (N->getOperand(0).getOpcode() == ISD::Constant ||
916 N->getOperand(1).getOpcode() == ISD::Constant)
917 return;
918 Results.push_back(customLegalizeToWOp(N, DAG));
919 break;
920 case ISD::BITCAST: {
921 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
922 Subtarget.hasStdExtF() && "Unexpected custom legalisation");
923 SDLoc DL(N);
924 SDValue Op0 = N->getOperand(0);
925 if (Op0.getValueType() != MVT::f32)
926 return;
927 SDValue FPConv =
928 DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
929 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
930 break;
931 }
932 }
933}
934
935SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
936 DAGCombinerInfo &DCI) const {
937 SelectionDAG &DAG = DCI.DAG;
938
939 switch (N->getOpcode()) {
940 default:
941 break;
942 case RISCVISD::SplitF64: {
943 SDValue Op0 = N->getOperand(0);
944 // If the input to SplitF64 is just BuildPairF64 then the operation is
945 // redundant. Instead, use BuildPairF64's operands directly.
946 if (Op0->getOpcode() == RISCVISD::BuildPairF64)
947 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
948
949 SDLoc DL(N);
950
951 // It's cheaper to materialise two 32-bit integers than to load a double
952 // from the constant pool and transfer it to integer registers through the
953 // stack.
954 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
955 APInt V = C->getValueAPF().bitcastToAPInt();
956 SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
957 SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
958 return DCI.CombineTo(N, Lo, Hi);
959 }
960
961 // This is a target-specific version of a DAGCombine performed in
962 // DAGCombiner::visitBITCAST. It performs the equivalent of:
963 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
964 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
965 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
966 !Op0.getNode()->hasOneUse())
967 break;
968 SDValue NewSplitF64 =
969 DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
970 Op0.getOperand(0));
971 SDValue Lo = NewSplitF64.getValue(0);
972 SDValue Hi = NewSplitF64.getValue(1);
973 APInt SignBit = APInt::getSignMask(32);
974 if (Op0.getOpcode() == ISD::FNEG) {
975 SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
976 DAG.getConstant(SignBit, DL, MVT::i32));
977 return DCI.CombineTo(N, Lo, NewHi);
978 }
979 assert(Op0.getOpcode() == ISD::FABS);
980 SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
981 DAG.getConstant(~SignBit, DL, MVT::i32));
982 return DCI.CombineTo(N, Lo, NewHi);
983 }
984 case RISCVISD::SLLW:
985 case RISCVISD::SRAW:
986 case RISCVISD::SRLW: {
987 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
988 SDValue LHS = N->getOperand(0);
989 SDValue RHS = N->getOperand(1);
990 APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
991 APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
992 if ((SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI)) ||
993 (SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)))
994 return SDValue();
995 break;
996 }
997 case RISCVISD::FMV_X_ANYEXTW_RV64: {
998 SDLoc DL(N);
999 SDValue Op0 = N->getOperand(0);
1000 // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
1001 // conversion is unnecessary and can be replaced with an ANY_EXTEND
1002 // of the FMV_W_X_RV64 operand.
1003 if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
1004 SDValue AExtOp =
1005 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0.getOperand(0));
1006 return DCI.CombineTo(N, AExtOp);
1007 }
1008
1009 // This is a target-specific version of a DAGCombine performed in
1010 // DAGCombiner::visitBITCAST. It performs the equivalent of:
1011 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
1012 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
1013 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1014 !Op0.getNode()->hasOneUse())
1015 break;
1016 SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
1017 Op0.getOperand(0));
1018 APInt SignBit = APInt::getSignMask(32).sext(64);
1019 if (Op0.getOpcode() == ISD::FNEG) {
1020 return DCI.CombineTo(N,
1021 DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
1022 DAG.getConstant(SignBit, DL, MVT::i64)));
1023 }
1024 assert(Op0.getOpcode() == ISD::FABS);
1025 return DCI.CombineTo(N,
1026 DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
1027 DAG.getConstant(~SignBit, DL, MVT::i64)));
1028 }
1029 }
1030
1031 return SDValue();
1032}
1033
1034bool RISCVTargetLowering::isDesirableToCommuteWithShift(
1035 const SDNode *N, CombineLevel Level) const {
1036 // The following folds are only desirable if `(OP _, c1 << c2)` can be
1037 // materialised in fewer instructions than `(OP _, c1)`:
1038 //
1039 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
1040 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
1041 SDValue N0 = N->getOperand(0);
1042 EVT Ty = N0.getValueType();
1043 if (Ty.isScalarInteger() &&
1044 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
1045 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
1046 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
1047 if (C1 && C2) {
1048 APInt C1Int = C1->getAPIntValue();
1049 APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
1050
1051 // We can materialise `c1 << c2` into an add immediate, so it's "free",
1052 // and the combine should happen, to potentially allow further combines
1053 // later.
1054 if (ShiftedC1Int.getMinSignedBits() <= 64 &&
1055 isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
1056 return true;
1057
1058 // We can materialise `c1` in an add immediate, so it's "free", and the
1059 // combine should be prevented.
1060 if (C1Int.getMinSignedBits() <= 64 &&
1061 isLegalAddImmediate(C1Int.getSExtValue()))
1062 return false;
1063
1064 // Neither constant will fit into an immediate, so find materialisation
1065 // costs.
1066 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
1067 Subtarget.is64Bit());
1068 int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
1069 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
1070
1071 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
1072 // combine should be prevented.
1073 if (C1Cost < ShiftedC1Cost)
1074 return false;
1075 }
1076 }
1077 return true;
1078}
1079
1080unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
1081 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
1082 unsigned Depth) const {
1083 switch (Op.getOpcode()) {
1084 default:
1085 break;
1086 case RISCVISD::SLLW:
1087 case RISCVISD::SRAW:
1088 case RISCVISD::SRLW:
1089 case RISCVISD::DIVW:
1090 case RISCVISD::DIVUW:
1091 case RISCVISD::REMUW:
1092 // TODO: As the result is sign-extended, this is conservatively correct. A
1093 // more precise answer could be calculated for SRAW depending on known
1094 // bits in the shift amount.
1095 return 33;
1096 }
1097
1098 return 1;
1099}
1100
1101static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
1102 MachineBasicBlock *BB) {
1103 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
1104
1105 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
1106 // Should the count have wrapped while it was being read, we need to try
1107 // again.
1108 // ...
1109 // read:
1110 // rdcycleh x3 # load high word of cycle
1111 // rdcycle x2 # load low word of cycle
1112 // rdcycleh x4 # load high word of cycle
1113 // bne x3, x4, read # check if high word reads match, otherwise try again
1114 // ...
1115
1116 MachineFunction &MF = *BB->getParent();
1117 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1118 MachineFunction::iterator It = ++BB->getIterator();
1119
1120 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1121 MF.insert(It, LoopMBB);
1122
1123 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1124 MF.insert(It, DoneMBB);
1125
1126 // Transfer the remainder of BB and its successor edges to DoneMBB.
1127 DoneMBB->splice(DoneMBB->begin(), BB,
1128 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1129 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
1130
1131 BB->addSuccessor(LoopMBB);
1132
1133 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1134 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1135 Register LoReg = MI.getOperand(0).getReg();
1136 Register HiReg = MI.getOperand(1).getReg();
1137 DebugLoc DL = MI.getDebugLoc();
1138
1139 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
1140 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
1141 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1142 .addReg(RISCV::X0);
1143 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
1144 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
1145 .addReg(RISCV::X0);
1146 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
1147 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1148 .addReg(RISCV::X0);
1149
1150 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
1151 .addReg(HiReg)
1152 .addReg(ReadAgainReg)
1153 .addMBB(LoopMBB);
1154
1155 LoopMBB->addSuccessor(LoopMBB);
1156 LoopMBB->addSuccessor(DoneMBB);
1157
1158 MI.eraseFromParent();
1159
1160 return DoneMBB;
1161}
1162
1163static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
1164 MachineBasicBlock *BB) {
1165 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
1166
1167 MachineFunction &MF = *BB->getParent();
1168 DebugLoc DL = MI.getDebugLoc();
1169 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1170 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1171 Register LoReg = MI.getOperand(0).getReg();
1172 Register HiReg = MI.getOperand(1).getReg();
1173 Register SrcReg = MI.getOperand(2).getReg();
1174 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
1175 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
1176
1177 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
1178 RI);
1179 MachineMemOperand *MMO =
1180 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
1181 MachineMemOperand::MOLoad, 8, 8);
1182 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
1183 .addFrameIndex(FI)
1184 .addImm(0)
1185 .addMemOperand(MMO);
1186 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
1187 .addFrameIndex(FI)
1188 .addImm(4)
1189 .addMemOperand(MMO);
1190 MI.eraseFromParent(); // The pseudo instruction is gone now.
1191 return BB;
1192}
1193
1194static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
1195 MachineBasicBlock *BB) {
1196 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
1197 "Unexpected instruction");
1198
1199 MachineFunction &MF = *BB->getParent();
1200 DebugLoc DL = MI.getDebugLoc();
1201 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1202 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1203 Register DstReg = MI.getOperand(0).getReg();
1204 Register LoReg = MI.getOperand(1).getReg();
1205 Register HiReg = MI.getOperand(2).getReg();
1206 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
1207 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
1208
1209 MachineMemOperand *MMO =
1210 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
1211 MachineMemOperand::MOStore, 8, 8);
1212 BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
1213 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
1214 .addFrameIndex(FI)
1215 .addImm(0)
1216 .addMemOperand(MMO);
1217 BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
1218 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
1219 .addFrameIndex(FI)
1220 .addImm(4)
1221 .addMemOperand(MMO);
1222 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
1223 MI.eraseFromParent(); // The pseudo instruction is gone now.
1224 return BB;
1225}
1226
1227static bool isSelectPseudo(MachineInstr &MI) {
1228 switch (MI.getOpcode()) {
1229 default:
1230 return false;
1231 case RISCV::Select_GPR_Using_CC_GPR:
1232 case RISCV::Select_FPR32_Using_CC_GPR:
1233 case RISCV::Select_FPR64_Using_CC_GPR:
1234 return true;
1235 }
1236}
1237
1238static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
1239 MachineBasicBlock *BB) {
1240 // To "insert" Select_* instructions, we actually have to insert the triangle
1241 // control-flow pattern. The incoming instructions know the destination vreg
1242 // to set, the condition code register to branch on, the true/false values to
1243 // select between, and the condcode to use to select the appropriate branch.
1244 //
1245 // We produce the following control flow:
1246 // HeadMBB
1247 // | \
1248 // | IfFalseMBB
1249 // | /
1250 // TailMBB
1251 //
1252 // When we find a sequence of selects we attempt to optimize their emission
1253 // by sharing the control flow. Currently we only handle cases where we have
1254 // multiple selects with the exact same condition (same LHS, RHS and CC).
1255 // The selects may be interleaved with other instructions if the other
1256 // instructions meet some requirements we deem safe:
1257 // - They are debug instructions. Otherwise,
1258 // - They do not have side-effects, do not access memory and their inputs do
1259 // not depend on the results of the select pseudo-instructions.
1260 // The TrueV/FalseV operands of the selects cannot depend on the result of
1261 // previous selects in the sequence.
1262 // These conditions could be further relaxed. See the X86 target for a
1263 // related approach and more information.
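// As an illustrative sketch (hypothetical virtual registers, not taken from a
// real test), two selects sharing the same condition, e.g.
//   %x = Select_GPR_Using_CC_GPR %lhs, %rhs, SETLT, %a, %b
//   %y = Select_GPR_Using_CC_GPR %lhs, %rhs, SETLT, %c, %d
// end up sharing one triangle:
//   HeadMBB:    BLT %lhs, %rhs, TailMBB
//   IfFalseMBB: (falls through)
//   TailMBB:    %x = PHI [ %a, HeadMBB ], [ %b, IfFalseMBB ]
//               %y = PHI [ %c, HeadMBB ], [ %d, IfFalseMBB ]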
1264 Register LHS = MI.getOperand(1).getReg();
1265 Register RHS = MI.getOperand(2).getReg();
1266 auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
1267
1268 SmallVector<MachineInstr *, 4> SelectDebugValues;
1269 SmallSet<Register, 4> SelectDests;
1270 SelectDests.insert(MI.getOperand(0).getReg());
1271
1272 MachineInstr *LastSelectPseudo = &MI;
1273
1274 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
1275 SequenceMBBI != E; ++SequenceMBBI) {
1276 if (SequenceMBBI->isDebugInstr())
1277 continue;
1278 else if (isSelectPseudo(*SequenceMBBI)) {
1279 if (SequenceMBBI->getOperand(1).getReg() != LHS ||
1280 SequenceMBBI->getOperand(2).getReg() != RHS ||
1281 SequenceMBBI->getOperand(3).getImm() != CC ||
1282 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
1283 SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
1284 break;
1285 LastSelectPseudo = &*SequenceMBBI;
1286 SequenceMBBI->collectDebugValues(SelectDebugValues);
1287 SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
1288 } else {
1289 if (SequenceMBBI->hasUnmodeledSideEffects() ||
1290 SequenceMBBI->mayLoadOrStore())
1291 break;
1292 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
1293 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
1294 }))
1295 break;
1296 }
1297 }
1298
1299 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
1300 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1301 DebugLoc DL = MI.getDebugLoc();
1302 MachineFunction::iterator I = ++BB->getIterator();
1303
1304 MachineBasicBlock *HeadMBB = BB;
1305 MachineFunction *F = BB->getParent();
1306 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
1307 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
1308
1309 F->insert(I, IfFalseMBB);
1310 F->insert(I, TailMBB);
1311
1312 // Transfer debug instructions associated with the selects to TailMBB.
1313 for (MachineInstr *DebugInstr : SelectDebugValues) {
1314 TailMBB->push_back(DebugInstr->removeFromParent());
1315 }
1316
1317 // Move all instructions after the sequence to TailMBB.
1318 TailMBB->splice(TailMBB->end(), HeadMBB,
1319 std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
1320 // Update machine-CFG edges by transferring all successors of the current
1321 // block to the new block which will contain the Phi nodes for the selects.
1322 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
1323 // Set the successors for HeadMBB.
1324 HeadMBB->addSuccessor(IfFalseMBB);
1325 HeadMBB->addSuccessor(TailMBB);
1326
1327 // Insert appropriate branch.
1328 unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
1329
1330 BuildMI(HeadMBB, DL, TII.get(Opcode))
1331 .addReg(LHS)
1332 .addReg(RHS)
1333 .addMBB(TailMBB);
1334
1335 // IfFalseMBB just falls through to TailMBB.
1336 IfFalseMBB->addSuccessor(TailMBB);
1337
1338 // Create PHIs for all of the select pseudo-instructions.
1339 auto SelectMBBI = MI.getIterator();
1340 auto SelectEnd = std::next(LastSelectPseudo->getIterator());
1341 auto InsertionPoint = TailMBB->begin();
1342 while (SelectMBBI != SelectEnd) {
1343 auto Next = std::next(SelectMBBI);
1344 if (isSelectPseudo(*SelectMBBI)) {
1345 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
1346 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
1347 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
1348 .addReg(SelectMBBI->getOperand(4).getReg())
1349 .addMBB(HeadMBB)
1350 .addReg(SelectMBBI->getOperand(5).getReg())
1351 .addMBB(IfFalseMBB);
1352 SelectMBBI->eraseFromParent();
1353 }
1354 SelectMBBI = Next;
1355 }
1356
1357 F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
1358 return TailMBB;
1359}
1360
1361MachineBasicBlock *
1362RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1363 MachineBasicBlock *BB) const {
1364 switch (MI.getOpcode()) {
1365 default:
1366 llvm_unreachable("Unexpected instr type to insert");
1367 case RISCV::ReadCycleWide:
1368 assert(!Subtarget.is64Bit() &&
1369 "ReadCycleWide is only to be used on riscv32");
1370 return emitReadCycleWidePseudo(MI, BB);
1371 case RISCV::Select_GPR_Using_CC_GPR:
1372 case RISCV::Select_FPR32_Using_CC_GPR:
1373 case RISCV::Select_FPR64_Using_CC_GPR:
1374 return emitSelectPseudo(MI, BB);
1375 case RISCV::BuildPairF64Pseudo:
1376 return emitBuildPairF64Pseudo(MI, BB);
1377 case RISCV::SplitF64Pseudo:
1378 return emitSplitF64Pseudo(MI, BB);
1379 }
1380}
1381
1382// Calling Convention Implementation.
1383// The expectations for frontend ABI lowering vary from target to target.
1384// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
1385// details, but this is a longer term goal. For now, we simply try to keep the
1386// role of the frontend as simple and well-defined as possible. The rules can
1387// be summarised as:
1388// * Never split up large scalar arguments. We handle them here.
1389// * If a hardfloat calling convention is being used, and the struct may be
1390// passed in a pair of registers (fp+fp, int+fp), and both registers are
1391// available, then pass as two separate arguments. If either the GPRs or FPRs
1392// are exhausted, then pass according to the rule below.
1393// * If a struct could never be passed in registers or directly in a stack
1394// slot (as it is larger than 2*XLEN and the floating point rules don't
1395// apply), then pass it using a pointer with the byval attribute.
1396// * If a struct is less than 2*XLEN, then coerce to either a two-element
1397// word-sized array or a 2*XLEN scalar (depending on alignment).
1398// * The frontend can determine whether a struct is returned by reference or
1399// not based on its size and fields. If it will be returned by reference, the
1400// frontend must modify the prototype so a pointer with the sret annotation is
1401// passed as the first argument. This is not necessary for large scalar
1402// returns.
1403// * Struct return values and varargs should be coerced to structs containing
1404// register-size fields in the same situations they would be for fixed
1405// arguments.
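// For example (an illustrative sketch of the frontend-visible effect on RV32;
// the exact IR types depend on the frontend):
//   struct A { int32_t x; int32_t y; };   // == 2*XLEN, 4-byte aligned
//     -> coerced to a two-element i32 array, passed in up to two GPRs.
//   struct B { double d; };               // == 2*XLEN, 8-byte aligned
//     -> coerced to a single i64 scalar under the soft-float ABIs.
//   struct C { float f; int32_t i; };     // one fp + one int field
//     -> under ilp32f/ilp32d with an FPR and a GPR free, passed as two
//        separate arguments; otherwise falls back to the integer rules.
//   struct D { int32_t v[4]; };           // larger than 2*XLEN
//     -> passed via a pointer with the byval attribute.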
1406
1407static const MCPhysReg ArgGPRs[] = {
1408 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
1409 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
1410};
1411static const MCPhysReg ArgFPR32s[] = {
1412 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
1413 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
1414};
1415static const MCPhysReg ArgFPR64s[] = {
1416 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
1417 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
1418};
1419
1420// Pass a 2*XLEN argument that has been split into two XLEN values through
1421// registers or the stack as necessary.
1422static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
1423 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
1424 MVT ValVT2, MVT LocVT2,
1425 ISD::ArgFlagsTy ArgFlags2) {
1426 unsigned XLenInBytes = XLen / 8;
1427 if (Register Reg = State.AllocateReg(ArgGPRs)) {
1428 // At least one half can be passed via register.
1429 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
1430 VA1.getLocVT(), CCValAssign::Full));
1431 } else {
1432 // Both halves must be passed on the stack, with proper alignment.
1433 unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
1434 State.addLoc(
1435 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
1436 State.AllocateStack(XLenInBytes, StackAlign),
1437 VA1.getLocVT(), CCValAssign::Full));
1438 State.addLoc(CCValAssign::getMem(
1439 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
1440 CCValAssign::Full));
1441 return false;
1442 }
1443
1444 if (Register Reg = State.AllocateReg(ArgGPRs)) {
1445 // The second half can also be passed via register.
1446 State.addLoc(
1447 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
1448 } else {
1449 // The second half is passed via the stack, without additional alignment.
1450 State.addLoc(CCValAssign::getMem(
1451 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
1452 CCValAssign::Full));
1453 }
1454
1455 return false;
1456}
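// A quick sketch of the outcomes above for an i64 argument on RV32, which
// legalisation splits into two i32 halves:
//   - two argument GPRs free -> both halves passed in the next two GPRs;
//   - one argument GPR free  -> first half in that GPR, second half in an
//                               XLEN-sized stack slot;
//   - no argument GPRs free  -> both halves on the stack, the first slot
//                               aligned to max(XLEN/8, the original alignment).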
1457
1458// Implements the RISC-V calling convention. Returns true upon failure.
1459static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
1460 MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
1461 ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
1462 bool IsRet, Type *OrigTy) {
1463 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
1464 assert(XLen == 32 || XLen == 64);
1465 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
1466
1467 // Any return value split into more than two values can't be returned
1468 // directly.
1469 if (IsRet && ValNo > 1)
1470 return true;
1471
1472 // UseGPRForF32 if targeting one of the soft-float ABIs, if passing a
1473 // variadic argument, or if no F32 argument registers are available.
1474 bool UseGPRForF32 = true;
1475 // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
1476 // variadic argument, or if no F64 argument registers are available.
1477 bool UseGPRForF64 = true;
1478
1479 switch (ABI) {
1480 default:
1481 llvm_unreachable("Unexpected ABI");
1482 case RISCVABI::ABI_ILP32:
1483 case RISCVABI::ABI_LP64:
1484 break;
1485 case RISCVABI::ABI_ILP32F:
1486 case RISCVABI::ABI_LP64F:
1487 UseGPRForF32 = !IsFixed;
1488 break;
1489 case RISCVABI::ABI_ILP32D:
1490 case RISCVABI::ABI_LP64D:
1491 UseGPRForF32 = !IsFixed;
1492 UseGPRForF64 = !IsFixed;
1493 break;
1494 }
1495
1496 if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s))
1497 UseGPRForF32 = true;
1498 if (State.getFirstUnallocated(ArgFPR64s) == array_lengthof(ArgFPR64s))
1499 UseGPRForF64 = true;
1500
1501 // From this point on, rely on UseGPRForF32, UseGPRForF64 and similar local
1502 // variables rather than directly checking against the target ABI.
1503
1504 if (UseGPRForF32 && ValVT == MVT::f32) {
1505 LocVT = XLenVT;
1506 LocInfo = CCValAssign::BCvt;
1507 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
1508 LocVT = MVT::i64;
1509 LocInfo = CCValAssign::BCvt;
1510 }
1511
1512 // If this is a variadic argument, the RISC-V calling convention requires
1513 // that it is assigned an 'even' or 'aligned' register if it has 8-byte
1514 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
1515 // be used regardless of whether the original argument was split during
1516 // legalisation or not. The argument will not be passed by registers if the
1517 // original type is larger than 2*XLEN, so the register alignment rule does
1518 // not apply.
1519 unsigned TwoXLenInBytes = (2 * XLen) / 8;
1520 if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
1521 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
1522 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
1523 // Skip 'odd' register if necessary.
1524 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
1525 State.AllocateReg(ArgGPRs);
1526 }
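// Sketch of the rule above for a hypothetical RV32 call such as
// printf("%f", 1.0): the format string occupies a0, so the first unallocated
// GPR index is 1 (odd). The variadic double is 8-byte sized and 8-byte
// aligned, so a1 is skipped and the value lands in the aligned pair a2/a3
// (handled by the f64-on-RV32 code below).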
1527
1528 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
1529 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
1530 State.getPendingArgFlags();
1531
1532 assert(PendingLocs.size() == PendingArgFlags.size() &&
1533 "PendingLocs and PendingArgFlags out of sync");
1534
1535 // Handle passing f64 on RV32D with a soft float ABI or when floating point
1536 // registers are exhausted.
1537 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
1538 assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
1539 "Can't lower f64 if it is split");
1540 // Depending on available argument GPRs, f64 may be passed in a pair of
1541 // GPRs, split between a GPR and the stack, or passed completely on the
1542 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
1543 // cases.
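// Concretely (an illustrative enumeration of the branches below):
//   - two GPRs free, e.g. a2/a3 -> low half in a2, high half in a3;
//   - only a7 free              -> low half in a7, high half in the next
//                                  4-byte stack slot;
//   - no GPRs free              -> the whole f64 in an 8-byte stack slot.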
1544 Register Reg = State.AllocateReg(ArgGPRs);
1545 LocVT = MVT::i32;
1546 if (!Reg) {
1547 unsigned StackOffset = State.AllocateStack(8, 8);
1548 State.addLoc(
1549 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1550 return false;
1551 }
1552 if (!State.AllocateReg(ArgGPRs))
1553 State.AllocateStack(4, 4);
1554 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1555 return false;
1556 }
1557
1558 // Split arguments might be passed indirectly, so keep track of the pending
1559 // values.
1560 if (ArgFlags.isSplit() || !PendingLocs.empty()) {
1561 LocVT = XLenVT;
1562 LocInfo = CCValAssign::Indirect;
1563 PendingLocs.push_back(
1564 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
1565 PendingArgFlags.push_back(ArgFlags);
1566 if (!ArgFlags.isSplitEnd()) {
1567 return false;
1568 }
1569 }
1570
1571 // If the split argument only had two elements, it should be passed directly
1572 // in registers or on the stack.
1573 if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
1574 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
1575 // Apply the normal calling convention rules to the first half of the
1576 // split argument.
1577 CCValAssign VA = PendingLocs[0];
1578 ISD::ArgFlagsTy AF = PendingArgFlags[0];
1579 PendingLocs.clear();
1580 PendingArgFlags.clear();
1581 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
1582 ArgFlags);
1583 }
1584
1585 // Allocate to a register if possible, or else a stack slot.
1586 Register Reg;
1587 if (ValVT == MVT::f32 && !UseGPRForF32)
1588 Reg = State.AllocateReg(ArgFPR32s, ArgFPR64s);
1589 else if (ValVT == MVT::f64 && !UseGPRForF64)
1590 Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s);
1591 else
1592 Reg = State.AllocateReg(ArgGPRs);
1593 unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);
1594
1595 // If we reach this point and PendingLocs is non-empty, we must be at the
1596 // end of a split argument that must be passed indirectly.
1597 if (!PendingLocs.empty()) {
1598 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
1599 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
1600
1601 for (auto &It : PendingLocs) {
1602 if (Reg)
1603 It.convertToReg(Reg);
1604 else
1605 It.convertToMem(StackOffset);
1606 State.addLoc(It);
1607 }
1608 PendingLocs.clear();
1609 PendingArgFlags.clear();
1610 return false;
1611 }
1612
1613 assert((!UseGPRForF32 || !UseGPRForF64 || LocVT == XLenVT) &&
1614 "Expected an XLenVT at this stage");
1615
1616 if (Reg) {
1617 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1618 return false;
1619 }
1620
1621 // When an f32 or f64 is passed on the stack, no bit-conversion is needed.
1622 if (ValVT == MVT::f32 || ValVT == MVT::f64) {
1623 LocVT = ValVT;
1624 LocInfo = CCValAssign::Full;
1625 }
1626 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1627 return false;
1628}
1629
1630void RISCVTargetLowering::analyzeInputArgs(
1631 MachineFunction &MF, CCState &CCInfo,
1632 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
1633 unsigned NumArgs = Ins.size();
1634 FunctionType *FType = MF.getFunction().getFunctionType();
1635
1636 for (unsigned i = 0; i != NumArgs; ++i) {
1637 MVT ArgVT = Ins[i].VT;
1638 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
1639
1640 Type *ArgTy = nullptr;
1641 if (IsRet)
1642 ArgTy = FType->getReturnType();
1643 else if (Ins[i].isOrigArg())
1644 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
1645
1646 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1647 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1648 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
1649 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
1650 << EVT(ArgVT).getEVTString() << '\n');
1651 llvm_unreachable(nullptr);
1652 }
1653 }
1654}
1655
1656void RISCVTargetLowering::analyzeOutputArgs(
1657 MachineFunction &MF, CCState &CCInfo,
1658 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
1659 CallLoweringInfo *CLI) const {
1660 unsigned NumArgs = Outs.size();
1661
1662 for (unsigned i = 0; i != NumArgs; i++) {
1663 MVT ArgVT = Outs[i].VT;
1664 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
1665 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
1666
1667 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1668 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1669 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
1670 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
1671 << EVT(ArgVT).getEVTString() << "\n");
1672 llvm_unreachable(nullptr);
1673 }
1674 }
1675}
1676
1677// Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
1678// values.
1679static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
1680 const CCValAssign &VA, const SDLoc &DL) {
1681 switch (VA.getLocInfo()) {
1682 default:
1683 llvm_unreachable("Unexpected CCValAssign::LocInfo");
1684 case CCValAssign::Full:
1685 break;
1686 case CCValAssign::BCvt:
1687 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
1688 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
1689 break;
1690 }
1691 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
1692 break;
1693 }
1694 return Val;
1695}
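// A sketch of the BCvt cases above, assuming an f32/f64 travelling in a GPR
// (e.g. under the lp64 ABI, or when the FPR argument registers ran out):
//   ValVT = f32, LocVT = i64 -> FMV_W_X_RV64 moves the low 32 bits back to f32
//   ValVT = f64, LocVT = i64 -> plain ISD::BITCAST back to f64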
1696
1697// The caller is responsible for loading the full value if the argument is
1698// passed with CCValAssign::Indirect.
1699static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
1700 const CCValAssign &VA, const SDLoc &DL) {
1701 MachineFunction &MF = DAG.getMachineFunction();
1702 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1703 EVT LocVT = VA.getLocVT();
1704 SDValue Val;
1705 const TargetRegisterClass *RC;
1706
1707 switch (LocVT.getSimpleVT().SimpleTy) {
1708 default:
1709 llvm_unreachable("Unexpected register type");
1710 case MVT::i32:
1711 case MVT::i64:
1712 RC = &RISCV::GPRRegClass;
1713 break;
1714 case MVT::f32:
1715 RC = &RISCV::FPR32RegClass;
1716 break;
1717 case MVT::f64:
1718 RC = &RISCV::FPR64RegClass;
1719 break;
1720 }
1721
1722 Register VReg = RegInfo.createVirtualRegister(RC);
1723 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1724 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
1725
1726 if (VA.getLocInfo() == CCValAssign::Indirect)
1727 return Val;
1728
1729 return convertLocVTToValVT(DAG, Val, VA, DL);
1730}
1731
1732static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
1733 const CCValAssign &VA, const SDLoc &DL) {
1734 EVT LocVT = VA.getLocVT();
1735
1736 switch (VA.getLocInfo()) {
1737 default:
1738 llvm_unreachable("Unexpected CCValAssign::LocInfo");
1739 case CCValAssign::Full:
1740 break;
1741 case CCValAssign::BCvt:
1742 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
1743 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
1744 break;
1745 }
1746 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
1747 break;
1748 }
1749 return Val;
1750}
1751
1752// The caller is responsible for loading the full value if the argument is
1753// passed with CCValAssign::Indirect.
1754static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
1755 const CCValAssign &VA, const SDLoc &DL) {
1756 MachineFunction &MF = DAG.getMachineFunction();
1757 MachineFrameInfo &MFI = MF.getFrameInfo();
1758 EVT LocVT = VA.getLocVT();
1759 EVT ValVT = VA.getValVT();
1760 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
1761 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
1762 VA.getLocMemOffset(), /*Immutable=*/true);
1763 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1764 SDValue Val;
1765
1766 ISD::LoadExtType ExtType;
1767 switch (VA.getLocInfo()) {
1768 default:
1769 llvm_unreachable("Unexpected CCValAssign::LocInfo");
1770 case CCValAssign::Full:
1771 case CCValAssign::Indirect:
1772 case CCValAssign::BCvt:
1773 ExtType = ISD::NON_EXTLOAD;
1774 break;
1775 }
1776 Val = DAG.getExtLoad(
1777 ExtType, DL, LocVT, Chain, FIN,
1778 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
1779 return Val;
1780}
1781
1782static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
1783 const CCValAssign &VA, const SDLoc &DL) {
1784 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
1785 "Unexpected VA");
1786 MachineFunction &MF = DAG.getMachineFunction();
1787 MachineFrameInfo &MFI = MF.getFrameInfo();
1788 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1789
1790 if (VA.isMemLoc()) {
1791 // f64 is passed on the stack.
1792 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
1793 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1794 return DAG.getLoad(MVT::f64, DL, Chain, FIN,
1795 MachinePointerInfo::getFixedStack(MF, FI));
1796 }
1797
1798 assert(VA.isRegLoc() && "Expected register VA assignment");
1799
1800 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1801 RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
1802 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
1803 SDValue Hi;
1804 if (VA.getLocReg() == RISCV::X17) {
1805 // Second half of f64 is passed on the stack.
1806 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
1807 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1808 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
1809 MachinePointerInfo::getFixedStack(MF, FI));
1810 } else {
1811 // Second half of f64 is passed in another GPR.
1812 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1813 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
1814 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
1815 }
1816 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
1817}
1818
1819// FastCC gives less than a 1% performance improvement on some particular
1820// benchmarks. Theoretically, though, it may have a benefit in some cases.
1821static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
1822 CCValAssign::LocInfo LocInfo,
1823 ISD::ArgFlagsTy ArgFlags, CCState &State) {
1824
1825 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
1826 // X5 and X6 might be used for save-restore libcall.
1827 static const MCPhysReg GPRList[] = {
1828 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
1829 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28,
1830 RISCV::X29, RISCV::X30, RISCV::X31};
1831 if (unsigned Reg = State.AllocateReg(GPRList)) {
1832 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1833 return false;
1834 }
1835 }
1836
1837 if (LocVT == MVT::f32) {
1838 static const MCPhysReg FPR32List[] = {
1839 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
1840 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
1841 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
1842 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
1843 if (unsigned Reg = State.AllocateReg(FPR32List)) {
1844 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1845 return false;
1846 }
1847 }
1848
1849 if (LocVT == MVT::f64) {
1850 static const MCPhysReg FPR64List[] = {
1851 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
1852 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
1853 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
1854 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
1855 if (unsigned Reg = State.AllocateReg(FPR64List)) {
1856 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1857 return false;
1858 }
1859 }
1860
1861 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
1862 unsigned Offset4 = State.AllocateStack(4, 4);
1863 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
1864 return false;
1865 }
1866
1867 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
1868 unsigned Offset5 = State.AllocateStack(8, 8);
1869 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
1870 return false;
1871 }
1872
1873 return true; // CC didn't match.
1874}
1875
1876// Transform physical registers into virtual registers.
1877SDValue RISCVTargetLowering::LowerFormalArguments(
1878 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1879 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1880 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1881
1882 switch (CallConv) {
1883 default:
1884 report_fatal_error("Unsupported calling convention");
1885 case CallingConv::C:
1886 case CallingConv::Fast:
1887 break;
1888 }
1889
1890 MachineFunction &MF = DAG.getMachineFunction();
1891
1892 const Function &Func = MF.getFunction();
1893 if (Func.hasFnAttribute("interrupt")) {
1894 if (!Func.arg_empty())
1895 report_fatal_error(
1896 "Functions with the interrupt attribute cannot have arguments!");
1897
1898 StringRef Kind =
1899 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1900
1901 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
1902 report_fatal_error(
1903 "Function interrupt attribute argument not supported!");
1904 }
1905
1906 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1907 MVT XLenVT = Subtarget.getXLenVT();
1908 unsigned XLenInBytes = Subtarget.getXLen() / 8;
1909 // Used with varargs to accumulate store chains.
1910 std::vector<SDValue> OutChains;
1911
1912 // Assign locations to all of the incoming arguments.
1913 SmallVector<CCValAssign, 16> ArgLocs;
1914 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1915
1916 if (CallConv == CallingConv::Fast)
1917 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
1918 else
1919 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
1920
1921 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1922 CCValAssign &VA = ArgLocs[i];
1923 SDValue ArgValue;
1924 // Passing f64 on RV32D with a soft float ABI must be handled as a special
1925 // case.
1926 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
1927 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
1928 else if (VA.isRegLoc())
1929 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
1930 else
1931 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
1932
1933 if (VA.getLocInfo() == CCValAssign::Indirect) {
1934 // If the original argument was split and passed by reference (e.g. i128
1935 // on RV32), we need to load all parts of it here (using the same
1936 // address).
1937 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
1938 MachinePointerInfo()));
1939 unsigned ArgIndex = Ins[i].OrigArgIndex;
1940 assert(Ins[i].PartOffset == 0);
1941 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
1942 CCValAssign &PartVA = ArgLocs[i + 1];
1943 unsigned PartOffset = Ins[i + 1].PartOffset;
1944 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1945 DAG.getIntPtrConstant(PartOffset, DL));
1946 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1947 MachinePointerInfo()));
1948 ++i;
1949 }
1950 continue;
1951 }
1952 InVals.push_back(ArgValue);
1953 }
1954
1955 if (IsVarArg) {
1956 ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
1957 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
1958 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1959 MachineFrameInfo &MFI = MF.getFrameInfo();
1960 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1961 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1962
1963 // Offset of the first variable argument from stack pointer, and size of
1964 // the vararg save area. For now, the varargs save area is either zero or
1965 // large enough to hold a0-a7.
1966 int VaArgOffset, VarArgsSaveSize;
1967
1968 // If all registers are allocated, then all varargs must be passed on the
1969 // stack and we don't need to save any argregs.
1970 if (ArgRegs.size() == Idx) {
1971 VaArgOffset = CCInfo.getNextStackOffset();
1972 VarArgsSaveSize = 0;
1973 } else {
1974 VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
1975 VaArgOffset = -VarArgsSaveSize;
1976 }
1977
1978 // Record the frame index of the first variable argument
1979 // which is a value necessary to VASTART.
1980 int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1981 RVFI->setVarArgsFrameIndex(FI);
1982
1983 // If saving an odd number of registers then create an extra stack slot to
1984 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
1985 // offsets to even-numbered registers remain 2*XLEN-aligned.
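// Worked example (a sketch, assuming RV32 with three fixed arguments):
// Idx = 3, so a3-a7 are saved: VarArgsSaveSize = 4 * 5 = 20 and
// VaArgOffset = -20. Since Idx is odd, the block below adds one more 4-byte
// slot at offset -24 and VarArgsSaveSize becomes 24, keeping the save area
// 2*XLEN-aligned.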
1986 if (Idx % 2) {
1987 MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
1988 VarArgsSaveSize += XLenInBytes;
1989 }
1990
1991 // Copy the integer registers that may have been used for passing varargs
1992 // to the vararg save area.
1993 for (unsigned I = Idx; I < ArgRegs.size();
1994 ++I, VaArgOffset += XLenInBytes) {
1995 const Register Reg = RegInfo.createVirtualRegister(RC);
1996 RegInfo.addLiveIn(ArgRegs[I], Reg);
1997 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
1998 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1999 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2000 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
2001 MachinePointerInfo::getFixedStack(MF, FI));
2002 cast<StoreSDNode>(Store.getNode())
2003 ->getMemOperand()
2004 ->setValue((Value *)nullptr);
2005 OutChains.push_back(Store);
2006 }
2007 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
2008 }
2009
2010 // All stores are grouped in one node to allow the matching between
2011 // the size of Ins and InVals. This only happens for vararg functions.
2012 if (!OutChains.empty()) {
2013 OutChains.push_back(Chain);
2014 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
2015 }
2016
2017 return Chain;
2018}
2019
2020/// isEligibleForTailCallOptimization - Check whether the call is eligible
2021/// for tail call optimization.
2022/// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
2023bool RISCVTargetLowering::isEligibleForTailCallOptimization(
2024 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
2025 const SmallVector<CCValAssign, 16> &ArgLocs) const {
2026
2027 auto &Callee = CLI.Callee;
2028 auto CalleeCC = CLI.CallConv;
2029 auto &Outs = CLI.Outs;
2030 auto &Caller = MF.getFunction();
2031 auto CallerCC = Caller.getCallingConv();
2032
2033 // Exception-handling functions need a special set of instructions to
2034 // indicate a return to the hardware. Tail-calling another function would
2035 // probably break this.
2036 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
2037 // should be expanded as new function attributes are introduced.
2038 if (Caller.hasFnAttribute("interrupt"))
2039 return false;
2040
2041 // Do not tail call opt if the stack is used to pass parameters.
2042 if (CCInfo.getNextStackOffset() != 0)
2043 return false;
2044
2045 // Do not tail call opt if any parameters need to be passed indirectly.
2046 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
2047 // passed indirectly. So the address of the value will be passed in a
2048 // register, or if not available, then the address is put on the stack. In
2049 // order to pass indirectly, space on the stack often needs to be allocated
2050 // in order to store the value. In this case the CCInfo.getNextStackOffset()
2051 // != 0 check is not enough and we need to check whether any CCValAssign in
2052 // ArgLocs is passed CCValAssign::Indirect.
2053 for (auto &VA : ArgLocs)
2054 if (VA.getLocInfo() == CCValAssign::Indirect)
2055 return false;
2056
2057 // Do not tail call opt if either caller or callee uses struct return
2058 // semantics.
2059 auto IsCallerStructRet = Caller.hasStructRetAttr();
2060 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
2061 if (IsCallerStructRet || IsCalleeStructRet)
2062 return false;
2063
2064 // Externally-defined functions with weak linkage should not be
2065 // tail-called. The behaviour of branch instructions in this situation (as
2066 // used for tail calls) is implementation-defined, so we cannot rely on the
2067 // linker replacing the tail call with a return.
2068 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2069 const GlobalValue *GV = G->getGlobal();
2070 if (GV->hasExternalWeakLinkage())
2071 return false;
2072 }
2073
2074 // The callee has to preserve all registers the caller needs to preserve.
2075 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2076 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2077 if (CalleeCC != CallerCC) {
2078 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2079 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2080 return false;
2081 }
2082
2083 // Byval parameters hand the function a pointer directly into the stack area
2084 // we want to reuse during a tail call. Working around this *is* possible
2085 // but less efficient and uglier in LowerCall.
2086 for (auto &Arg : Outs)
2087 if (Arg.Flags.isByVal())
2088 return false;
2089
2090 return true;
2091}
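// A couple of concrete cases the checks above reject (illustrative only):
//   - an i128 or fp128 argument on RV32 is assigned CCValAssign::Indirect,
//     so the call is never tail-called;
//   - any byval struct argument blocks the optimization, since the callee
//     would receive a pointer into the stack area a tail call must reuse.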
2092
2093// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
2094// and output parameter nodes.
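// Roughly, for a non-tail call the emitted node sequence looks like this
// sketch (glue operands omitted):
//   callseq_start -> stores for stack arguments -> CopyToReg for register
//   arguments -> RISCVISD::CALL -> callseq_end -> CopyFromReg for results
// Tail calls skip callseq_start/callseq_end and emit RISCVISD::TAIL instead.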
2095SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
2096 SmallVectorImpl<SDValue> &InVals) const {
2097 SelectionDAG &DAG = CLI.DAG;
2098 SDLoc &DL = CLI.DL;
2099 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2100 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2101 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2102 SDValue Chain = CLI.Chain;
2103 SDValue Callee = CLI.Callee;
2104 bool &IsTailCall = CLI.IsTailCall;
2105 CallingConv::ID CallConv = CLI.CallConv;
2106 bool IsVarArg = CLI.IsVarArg;
2107 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2108 MVT XLenVT = Subtarget.getXLenVT();
2109
2110 MachineFunction &MF = DAG.getMachineFunction();
2111
2112 // Analyze the operands of the call, assigning locations to each operand.
2113 SmallVector<CCValAssign, 16> ArgLocs;
2114 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2115
2116 if (CallConv == CallingConv::Fast)
2117 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
2118 else
2119 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
2120
2121 // Check if it's really possible to do a tail call.
2122 if (IsTailCall)
2123 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
2124
2125 if (IsTailCall)
2126 ++NumTailCalls;
2127 else if (CLI.CS && CLI.CS.isMustTailCall())
2128 report_fatal_error("failed to perform tail call elimination on a call "
2129 "site marked musttail");
2130
2131 // Get a count of how many bytes are to be pushed on the stack.
2132 unsigned NumBytes = ArgCCInfo.getNextStackOffset();
2133
2134 // Create local copies for byval args
2135 SmallVector<SDValue, 8> ByValArgs;
2136 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2137 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2138 if (!Flags.isByVal())
2139 continue;
2140
2141 SDValue Arg = OutVals[i];
2142 unsigned Size = Flags.getByValSize();
2143 Align Alignment = Flags.getNonZeroByValAlign();
2144
2145 int FI =
2146 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
2147 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2148 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
2149
2150 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
2151 /*IsVolatile=*/false,
2152 /*AlwaysInline=*/false, IsTailCall,
2153 MachinePointerInfo(), MachinePointerInfo());
2154 ByValArgs.push_back(FIPtr);
2155 }
2156
2157 if (!IsTailCall)
2158 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
2159
2160 // Copy argument values to their designated locations.
2161 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
2162 SmallVector<SDValue, 8> MemOpChains;
2163 SDValue StackPtr;
2164 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
2165 CCValAssign &VA = ArgLocs[i];
2166 SDValue ArgValue = OutVals[i];
2167 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2168
2169 // Handle passing f64 on RV32D with a soft float ABI as a special case.
2170 bool IsF64OnRV32DSoftABI =
2171 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
2172 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
2173 SDValue SplitF64 = DAG.getNode(
2174 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
2175 SDValue Lo = SplitF64.getValue(0);
2176 SDValue Hi = SplitF64.getValue(1);
2177
2178 Register RegLo = VA.getLocReg();
2179 RegsToPass.push_back(std::make_pair(RegLo, Lo));
2180
2181 if (RegLo == RISCV::X17) {
2182 // Second half of f64 is passed on the stack.
2183 // Work out the address of the stack slot.
2184 if (!StackPtr.getNode())
2185 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
2186 // Emit the store.
2187 MemOpChains.push_back(
2188 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
2189 } else {
2190 // Second half of f64 is passed in another GPR.
2191 assert(RegLo < RISCV::X31 && "Invalid register pair");
2192 Register RegHigh = RegLo + 1;
2193 RegsToPass.push_back(std::make_pair(RegHigh, Hi));
2194 }
2195 continue;
2196 }
2197
2198 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
2199 // as any other MemLoc.
2200
2201 // Promote the value if needed.
2202 // For now, only handle fully promoted and indirect arguments.
2203 if (VA.getLocInfo() == CCValAssign::Indirect) {
2204 // Store the argument in a stack slot and pass its address.
2205 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
2206 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2207 MemOpChains.push_back(
2208 DAG.getStore(Chain, DL, ArgValue, SpillSlot,
2209 MachinePointerInfo::getFixedStack(MF, FI)));
2210 // If the original argument was split (e.g. i128), we need
2211 // to store all parts of it here (and pass just one address).
2212 unsigned ArgIndex = Outs[i].OrigArgIndex;
2213 assert(Outs[i].PartOffset == 0);
2214 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
2215 SDValue PartValue = OutVals[i + 1];
2216 unsigned PartOffset = Outs[i + 1].PartOffset;
2217 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
2218 DAG.getIntPtrConstant(PartOffset, DL));
2219 MemOpChains.push_back(
2220 DAG.getStore(Chain, DL, PartValue, Address,
2221 MachinePointerInfo::getFixedStack(MF, FI)));
2222 ++i;
2223 }
2224 ArgValue = SpillSlot;
2225 } else {
2226 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
2227 }
2228
2229 // Use local copy if it is a byval arg.
2230 if (Flags.isByVal())
2231 ArgValue = ByValArgs[j++];
2232
2233 if (VA.isRegLoc()) {
2234 // Queue up the argument copies and emit them at the end.
2235 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
2236 } else {
2237 assert(VA.isMemLoc() && "Argument not register or memory");
2238 assert(!IsTailCall && "Tail call not allowed if stack is used "
2239 "for passing parameters");
2240
2241 // Work out the address of the stack slot.
2242 if (!StackPtr.getNode())
2243 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
2244 SDValue Address =
2245 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
2246 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
2247
2248 // Emit the store.
2249 MemOpChains.push_back(
2250 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
2251 }
2252 }
2253
2254 // Join the stores, which are independent of one another.
2255 if (!MemOpChains.empty())
2256 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2257
2258 SDValue Glue;
2259
2260 // Build a sequence of copy-to-reg nodes, chained and glued together.
2261 for (auto &Reg : RegsToPass) {
2262 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
2263 Glue = Chain.getValue(1);
2264 }
2265
2266 // Validate that none of the argument registers have been marked as
2267 // reserved; if so, report an error. Do the same for the return address if this
2268 // is not a tailcall.
2269 validateCCReservedRegs(RegsToPass, MF);
2270 if (!IsTailCall &&
2271 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
2272 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2273 MF.getFunction(),
2274 "Return address register required, but has been reserved."});
2275
2276 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
2277 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
2278 // split it and then direct call can be matched by PseudoCALL.
2279 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
2280 const GlobalValue *GV = S->getGlobal();
2281
2282 unsigned OpFlags = RISCVII::MO_CALL;
2283 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
2284 OpFlags = RISCVII::MO_PLT;
2285
2286 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
2287 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2288 unsigned OpFlags = RISCVII::MO_CALL;
2289
2290 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
2291 nullptr))
2292 OpFlags = RISCVII::MO_PLT;
2293
2294 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
2295 }
2296
2297 // The first call operand is the chain and the second is the target address.
2298 SmallVector<SDValue, 8> Ops;
2299 Ops.push_back(Chain);
2300 Ops.push_back(Callee);
2301
2302 // Add argument registers to the end of the list so that they are
2303 // known live into the call.
2304 for (auto &Reg : RegsToPass)
2305 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2306
2307 if (!IsTailCall) {
2308 // Add a register mask operand representing the call-preserved registers.
2309 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2310 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2311 assert(Mask && "Missing call preserved mask for calling convention");
2312 Ops.push_back(DAG.getRegisterMask(Mask));
2313 }
2314
2315 // Glue the call to the argument copies, if any.
2316 if (Glue.getNode())
2317 Ops.push_back(Glue);
2318
2319 // Emit the call.
2320 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2321
2322 if (IsTailCall) {
2323 MF.getFrameInfo().setHasTailCall();
2324 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
2325 }
2326
2327 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
2328 Glue = Chain.getValue(1);
2329
2330 // Mark the end of the call, which is glued to the call itself.
2331 Chain = DAG.getCALLSEQ_END(Chain,
2332 DAG.getConstant(NumBytes, DL, PtrVT, true),
2333 DAG.getConstant(0, DL, PtrVT, true),
2334 Glue, DL);
2335 Glue = Chain.getValue(1);
2336
2337 // Assign locations to each value returned by this call.
2338 SmallVector<CCValAssign, 16> RVLocs;
2339 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
2340 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
2341
2342 // Copy all of the result registers out of their specified physreg.
2343 for (auto &VA : RVLocs) {
2344 // Copy the value out
2345 SDValue RetValue =
2346 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
2347 // Glue the RetValue to the end of the call sequence
2348 Chain = RetValue.getValue(1);
2349 Glue = RetValue.getValue(2);
2350
2351 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
2352 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
2353 SDValue RetValue2 =
2354 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
2355 Chain = RetValue2.getValue(1);
2356 Glue = RetValue2.getValue(2);
2357 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
2358 RetValue2);
2359 }
2360
2361 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
2362
2363 InVals.push_back(RetValue);
2364 }
2365
2366 return Chain;
2367}
2368
2369bool RISCVTargetLowering::CanLowerReturn(
2370 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
2371 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2372 SmallVector<CCValAssign, 16> RVLocs;
2373 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2374 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2375 MVT VT = Outs[i].VT;
2376 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2377 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
2378 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
2379 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
2380 return false;
2381 }
2382 return true;
2383}
2384
2385SDValue
2386RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2387 bool IsVarArg,
2388 const SmallVectorImpl<ISD::OutputArg> &Outs,
2389 const SmallVectorImpl<SDValue> &OutVals,
2390 const SDLoc &DL, SelectionDAG &DAG) const {
2391 const MachineFunction &MF = DAG.getMachineFunction();
2392 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
2393
2394 // Stores the assignment of the return value to a location.
2395 SmallVector<CCValAssign, 16> RVLocs;
2396
2397 // Info about the registers and stack slot.
2398 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2399 *DAG.getContext());
2400
2401 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
2402 nullptr);
2403
2404 SDValue Glue;
2405 SmallVector<SDValue, 4> RetOps(1, Chain);
2406
2407 // Copy the result values into the output registers.
2408 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
2409 SDValue Val = OutVals[i];
2410 CCValAssign &VA = RVLocs[i];
2411 assert(VA.isRegLoc() && "Can only return in registers!");
2412
2413 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
2414 // Handle returning f64 on RV32D with a soft float ABI.
2415 assert(VA.isRegLoc() && "Expected return via registers");
2416 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
2417 DAG.getVTList(MVT::i32, MVT::i32), Val);
2418 SDValue Lo = SplitF64.getValue(0);
2419 SDValue Hi = SplitF64.getValue(1);
2420 Register RegLo = VA.getLocReg();
2421 assert(RegLo < RISCV::X31 && "Invalid register pair");
2422 Register RegHi = RegLo + 1;
2423
2424 if (STI.isRegisterReservedByUser(RegLo) ||
2425 STI.isRegisterReservedByUser(RegHi))
2426 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2427 MF.getFunction(),
2428 "Return value register required, but has been reserved."});
2429
2430 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
2431 Glue = Chain.getValue(1);
2432 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
2433 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
2434 Glue = Chain.getValue(1);
2435 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
2436 } else {
2437 // Handle a 'normal' return.
2438 Val = convertValVTToLocVT(DAG, Val, VA, DL);
2439 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
2440
2441 if (STI.isRegisterReservedByUser(VA.getLocReg()))
2442 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2443 MF.getFunction(),
2444 "Return value register required, but has been reserved."});
2445
2446 // Guarantee that all emitted copies are stuck together.
2447 Glue = Chain.getValue(1);
2448 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2449 }
2450 }
2451
2452 RetOps[0] = Chain; // Update chain.
2453
2454 // Add the glue node if we have it.
2455 if (Glue.getNode()) {
2456 RetOps.push_back(Glue);
2457 }
2458
2459 // Interrupt service routines use different return instructions.
2460 const Function &Func = DAG.getMachineFunction().getFunction();
2461 if (Func.hasFnAttribute("interrupt")) {
2462 if (!Func.getReturnType()->isVoidTy())
2463 report_fatal_error(
2464 "Functions with the interrupt attribute must have void return type!");
2465
2466 MachineFunction &MF = DAG.getMachineFunction();
2467 StringRef Kind =
2468 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
2469
2470 unsigned RetOpc;
2471 if (Kind == "user")
2472 RetOpc = RISCVISD::URET_FLAG;
2473 else if (Kind == "supervisor")
2474 RetOpc = RISCVISD::SRET_FLAG;
2475 else
2476 RetOpc = RISCVISD::MRET_FLAG;
2477
2478 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
2479 }
2480
2481 return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
2482}
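// A minimal sketch of a handler that takes the MRET_FLAG path above, assuming
// Clang's RISC-V "interrupt" function attribute; "user" and "supervisor"
// select URET_FLAG/SRET_FLAG instead. A non-void return type would hit the
// report_fatal_error in LowerReturn.
__attribute__((interrupt("machine"))) void machineTimerHandler(void) {
  // Handler body; control returns via mret.
}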
2483
2484void RISCVTargetLowering::validateCCReservedRegs(
2485 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
2486 MachineFunction &MF) const {
2487 const Function &F = MF.getFunction();
2488 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
2489
2490 if (std::any_of(std::begin(Regs), std::end(Regs), [&STI](auto Reg) {
2491 return STI.isRegisterReservedByUser(Reg.first);
2492 }))
2493 F.getContext().diagnose(DiagnosticInfoUnsupported{
2494 F, "Argument register required, but has been reserved."});
2495}
2496
2497bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2498 return CI->isTailCall();
2499}
2500
2501const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
2502 switch ((RISCVISD::NodeType)Opcode) {
2503 case RISCVISD::FIRST_NUMBER:
2504 break;
2505 case RISCVISD::RET_FLAG:
2506 return "RISCVISD::RET_FLAG";
2507 case RISCVISD::URET_FLAG:
2508 return "RISCVISD::URET_FLAG";
2509 case RISCVISD::SRET_FLAG:
2510 return "RISCVISD::SRET_FLAG";
2511 case RISCVISD::MRET_FLAG:
2512 return "RISCVISD::MRET_FLAG";
2513 case RISCVISD::CALL:
2514 return "RISCVISD::CALL";
2515 case RISCVISD::SELECT_CC:
2516 return "RISCVISD::SELECT_CC";
2517 case RISCVISD::BuildPairF64:
2518 return "RISCVISD::BuildPairF64";
2519 case RISCVISD::SplitF64:
2520 return "RISCVISD::SplitF64";
2521 case RISCVISD::TAIL:
2522 return "RISCVISD::TAIL";
2523 case RISCVISD::SLLW:
2524 return "RISCVISD::SLLW";
2525 case RISCVISD::SRAW:
2526 return "RISCVISD::SRAW";
2527 case RISCVISD::SRLW:
2528 return "RISCVISD::SRLW";
2529 case RISCVISD::DIVW:
2530 return "RISCVISD::DIVW";
2531 case RISCVISD::DIVUW:
2532 return "RISCVISD::DIVUW";
2533 case RISCVISD::REMUW:
2534 return "RISCVISD::REMUW";
2535 case RISCVISD::FMV_W_X_RV64:
2536 return "RISCVISD::FMV_W_X_RV64";
2537 case RISCVISD::FMV_X_ANYEXTW_RV64:
2538 return "RISCVISD::FMV_X_ANYEXTW_RV64";
2539 case RISCVISD::READ_CYCLE_WIDE:
2540 return "RISCVISD::READ_CYCLE_WIDE";
2541 }
2542 return nullptr;
2543}
2544
2545/// getConstraintType - Given a constraint letter, return the type of
2546/// constraint it is for this target.
2547RISCVTargetLowering::ConstraintType
2548RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
2549 if (Constraint.size() == 1) {
2550 switch (Constraint[0]) {
2551 default:
2552 break;
2553 case 'f':
2554 return C_RegisterClass;
2555 case 'I':
2556 case 'J':
2557 case 'K':
2558 return C_Immediate;
2559 case 'A':
2560 return C_Memory;
2561 }
2562 }
2563 return TargetLowering::getConstraintType(Constraint);
2564}
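// Minimal GNU extended-asm sketches exercising the constraint letters
// classified above, assuming a RISC-V target: 'I' is a 12-bit signed
// immediate and 'f' is a floating-point register operand.
static long addImmediate(long X) {
  long R;
  asm("addi %0, %1, %2" : "=r"(R) : "r"(X), "I"(1023)); // 1023 fits in 12 bits
  return R;
}

static double fpAbsolute(double X) {
  double R;
  asm("fabs.d %0, %1" : "=f"(R) : "f"(X)); // needs the D extension
  return R;
}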
2565
2566std::pair<unsigned, const TargetRegisterClass *>
2567RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2568 StringRef Constraint,
2569 MVT VT) const {
2570 // First, see if this is a constraint that directly corresponds to a
2571 // RISCV register class.
2572 if (Constraint.size() == 1) {
2573 switch (Constraint[0]) {
2574 case 'r':
2575 return std::make_pair(0U, &RISCV::GPRRegClass);
2576 case 'f':
2577 if (Subtarget.hasStdExtF() && VT == MVT::f32)
2578 return std::make_pair(0U, &RISCV::FPR32RegClass);
2579 if (Subtarget.hasStdExtD() && VT == MVT::f64)
2580 return std::make_pair(0U, &RISCV::FPR64RegClass);
2581 break;
2582 default:
2583 break;
2584 }
2585 }
2586
2587 // Clang will correctly decode the usage of register name aliases into their
2588 // official names. However, other frontends like `rustc` do not. This allows
2589 // users of these frontends to use the ABI names for registers in LLVM-style
2590 // register constraints.
2591 Register XRegFromAlias = StringSwitch<Register>(Constraint.lower())
2592 .Case("{zero}", RISCV::X0)
2593 .Case("{ra}", RISCV::X1)
2594 .Case("{sp}", RISCV::X2)
2595 .Case("{gp}", RISCV::X3)
2596 .Case("{tp}", RISCV::X4)
2597 .Case("{t0}", RISCV::X5)
2598 .Case("{t1}", RISCV::X6)
2599 .Case("{t2}", RISCV::X7)
2600 .Cases("{s0}", "{fp}", RISCV::X8)
2601 .Case("{s1}", RISCV::X9)
2602 .Case("{a0}", RISCV::X10)
2603 .Case("{a1}", RISCV::X11)
2604 .Case("{a2}", RISCV::X12)
2605 .Case("{a3}", RISCV::X13)
2606 .Case("{a4}", RISCV::X14)
2607 .Case("{a5}", RISCV::X15)
2608 .Case("{a6}", RISCV::X16)
2609 .Case("{a7}", RISCV::X17)
2610 .Case("{s2}", RISCV::X18)
2611 .Case("{s3}", RISCV::X19)
2612 .Case("{s4}", RISCV::X20)
2613 .Case("{s5}", RISCV::X21)
2614 .Case("{s6}", RISCV::X22)
2615 .Case("{s7}", RISCV::X23)
2616 .Case("{s8}", RISCV::X24)
2617 .Case("{s9}", RISCV::X25)
2618 .Case("{s10}", RISCV::X26)
2619 .Case("{s11}", RISCV::X27)
2620 .Case("{t3}", RISCV::X28)
2621 .Case("{t4}", RISCV::X29)
2622 .Case("{t5}", RISCV::X30)
2623 .Case("{t6}", RISCV::X31)
2624 .Default(RISCV::NoRegister);
2625 if (XRegFromAlias != RISCV::NoRegister)
2626 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
2627
2628 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
2629 // TableGen record rather than the AsmName to choose registers for InlineAsm
2630 // constraints, and since we want to match those names to the widest floating
2631 // point register type available, manually select floating point registers here.
2632 //
2633 // The second case is the ABI name of the register, so that frontends can also
2634 // use the ABI names in register constraint lists.
2635 if (Subtarget.hasStdExtF() || Subtarget.hasStdExtD()) {
2636 std::pair<Register, Register> FReg =
2637 StringSwitch<std::pair<Register, Register>>(Constraint.lower())
2638 .Cases("{f0}", "{ft0}", {RISCV::F0_F, RISCV::F0_D})
2639 .Cases("{f1}", "{ft1}", {RISCV::F1_F, RISCV::F1_D})
2640 .Cases("{f2}", "{ft2}", {RISCV::F2_F, RISCV::F2_D})
2641 .Cases("{f3}", "{ft3}", {RISCV::F3_F, RISCV::F3_D})
2642 .Cases("{f4}", "{ft4}", {RISCV::F4_F, RISCV::F4_D})
2643 .Cases("{f5}", "{ft5}", {RISCV::F5_F, RISCV::F5_D})
2644 .Cases("{f6}", "{ft6}", {RISCV::F6_F, RISCV::F6_D})
2645 .Cases("{f7}", "{ft7}", {RISCV::F7_F, RISCV::F7_D})
2646 .Cases("{f8}", "{fs0}", {RISCV::F8_F, RISCV::F8_D})
2647 .Cases("{f9}", "{fs1}", {RISCV::F9_F, RISCV::F9_D})
2648 .Cases("{f10}", "{fa0}", {RISCV::F10_F, RISCV::F10_D})
2649 .Cases("{f11}", "{fa1}", {RISCV::F11_F, RISCV::F11_D})
2650 .Cases("{f12}", "{fa2}", {RISCV::F12_F, RISCV::F12_D})
2651 .Cases("{f13}", "{fa3}", {RISCV::F13_F, RISCV::F13_D})
2652 .Cases("{f14}", "{fa4}", {RISCV::F14_F, RISCV::F14_D})
2653 .Cases("{f15}", "{fa5}", {RISCV::F15_F, RISCV::F15_D})
2654 .Cases("{f16}", "{fa6}", {RISCV::F16_F, RISCV::F16_D})
2655 .Cases("{f17}", "{fa7}", {RISCV::F17_F, RISCV::F17_D})
2656 .Cases("{f18}", "{fs2}", {RISCV::F18_F, RISCV::F18_D})
2657 .Cases("{f19}", "{fs3}", {RISCV::F19_F, RISCV::F19_D})
2658 .Cases("{f20}", "{fs4}", {RISCV::F20_F, RISCV::F20_D})
2659 .Cases("{f21}", "{fs5}", {RISCV::F21_F, RISCV::F21_D})
2660 .Cases("{f22}", "{fs6}", {RISCV::F22_F, RISCV::F22_D})
2661 .Cases("{f23}", "{fs7}", {RISCV::F23_F, RISCV::F23_D})
2662 .Cases("{f24}", "{fs8}", {RISCV::F24_F, RISCV::F24_D})
2663 .Cases("{f25}", "{fs9}", {RISCV::F25_F, RISCV::F25_D})
2664 .Cases("{f26}", "{fs10}", {RISCV::F26_F, RISCV::F26_D})
2665 .Cases("{f27}", "{fs11}", {RISCV::F27_F, RISCV::F27_D})
2666 .Cases("{f28}", "{ft8}", {RISCV::F28_F, RISCV::F28_D})
2667 .Cases("{f29}", "{ft9}", {RISCV::F29_F, RISCV::F29_D})
2668 .Cases("{f30}", "{ft10}", {RISCV::F30_F, RISCV::F30_D})
2669 .Cases("{f31}", "{ft11}", {RISCV::F31_F, RISCV::F31_D})
2670 .Default({RISCV::NoRegister, RISCV::NoRegister});
2671 if (FReg.first != RISCV::NoRegister)
2672 return Subtarget.hasStdExtD()
2673 ? std::make_pair(FReg.second, &RISCV::FPR64RegClass)
2674 : std::make_pair(FReg.first, &RISCV::FPR32RegClass);
2675 }
2676
2677 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
2678}
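// A minimal sketch of how the ABI-name handling above is exercised, assuming a
// riscv64 target and GNU extended asm: an explicit register variable makes the
// frontend emit a register constraint such as "{a0}" or "{a7}", which the
// StringSwitch above resolves to RISCV::X10 / RISCV::X17.
static long rawEcall(long SyscallNo) {
  register long A7 asm("a7") = SyscallNo;
  register long A0 asm("a0");
  asm volatile("ecall" : "=r"(A0) : "r"(A7) : "memory");
  return A0;
}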
2679
2680unsigned
2681RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
2682 // Currently only support length 1 constraints.
2683 if (ConstraintCode.size() == 1) {
2684 switch (ConstraintCode[0]) {
2685 case 'A':
2686 return InlineAsm::Constraint_A;
2687 default:
2688 break;
2689 }
2690 }
2691
2692 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
2693}
2694
2695void RISCVTargetLowering::LowerAsmOperandForConstraint(
2696 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
2697 SelectionDAG &DAG) const {
2698 // Currently only support length 1 constraints.
2699 if (Constraint.length() == 1) {
2700 switch (Constraint[0]) {
2701 case 'I':
2702 // Validate & create a 12-bit signed immediate operand.
2703 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2704 uint64_t CVal = C->getSExtValue();
2705 if (isInt<12>(CVal))
2706 Ops.push_back(
2707 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
2708 }
2709 return;
2710 case 'J':
2711 // Validate & create an integer zero operand.
2712 if (auto *C = dyn_cast<ConstantSDNode>(Op))
2713 if (C->getZExtValue() == 0)
2714 Ops.push_back(
2715 DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
2716 return;
2717 case 'K':
2718 // Validate & create a 5-bit unsigned immediate operand.
2719 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2720 uint64_t CVal = C->getZExtValue();
2721 if (isUInt<5>(CVal))
2722 Ops.push_back(
2723 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
2724 }
2725 return;
2726 default:
2727 break;
2728 }
2729 }
2730 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2731}
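// Range sketch for the immediate constraints validated above, using the
// llvm::isInt / llvm::isUInt helpers already visible in this file:
// 'I' accepts 12-bit signed immediates and 'K' accepts 5-bit unsigned ones.
static bool constraintRangesHold() {
  return isInt<12>(2047) && !isInt<12>(2048) && // 'I': [-2048, 2047]
         isUInt<5>(31) && !isUInt<5>(32);       // 'K': [0, 31]
}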
2732
2733Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
2734 Instruction *Inst,
2735 AtomicOrdering Ord) const {
2736 if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
2737 return Builder.CreateFence(Ord);
2738 if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
2739 return Builder.CreateFence(AtomicOrdering::Release);
2740 return nullptr;
2741}
2742
2743Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
2744 Instruction *Inst,
2745 AtomicOrdering Ord) const {
2746 if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
2747 return Builder.CreateFence(AtomicOrdering::Acquire);
2748 return nullptr;
2749}
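// Comment-only summary of the fence placement implemented by the two hooks
// above (derived directly from the code, stated informally):
//   seq_cst load  -> leading seq_cst fence, trailing acquire fence
//   acquire load  -> trailing acquire fence only
//   release store -> leading release fence only
//   seq_cst store -> leading release fence only (isReleaseOrStronger matches)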
2750
2751TargetLowering::AtomicExpansionKind
2752RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
2753 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
2754 // point operations can't be used in an lr/sc sequence without breaking the
2755 // forward-progress guarantee.
2756 if (AI->isFloatingPointOperation())
2757 return AtomicExpansionKind::CmpXChg;
2758
2759 unsigned Size = AI->getType()->getPrimitiveSizeInBits();
2760 if (Size == 8 || Size == 16)
2761 return AtomicExpansionKind::MaskedIntrinsic;
2762 return AtomicExpansionKind::None;
2763}
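// An illustrative C++ sketch of the compare-exchange expansion the comment
// above refers to (not the actual AtomicExpandPass output): the FP add happens
// between the load and the cmpxchg, so no floating-point instruction ever has
// to sit inside an LR/SC loop.
#include <atomic>
#include <cstdint>
#include <cstring>

static float atomicFAdd(std::atomic<uint32_t> &Bits, float Operand) {
  uint32_t Old = Bits.load(std::memory_order_relaxed);
  for (;;) {
    float OldF;
    std::memcpy(&OldF, &Old, sizeof(float));
    float NewF = OldF + Operand;
    uint32_t New;
    std::memcpy(&New, &NewF, sizeof(uint32_t));
    if (Bits.compare_exchange_weak(Old, New, std::memory_order_seq_cst))
      return NewF;
  }
}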
2764
2765static Intrinsic::ID
2766getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
2767 if (XLen == 32) {
2768 switch (BinOp) {
2769 default:
2770 llvm_unreachable("Unexpected AtomicRMW BinOp");
2771 case AtomicRMWInst::Xchg:
2772 return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
2773 case AtomicRMWInst::Add:
2774 return Intrinsic::riscv_masked_atomicrmw_add_i32;
2775 case AtomicRMWInst::Sub:
2776 return Intrinsic::riscv_masked_atomicrmw_sub_i32;
2777 case AtomicRMWInst::Nand:
2778 return Intrinsic::riscv_masked_atomicrmw_nand_i32;
2779 case AtomicRMWInst::Max:
2780 return Intrinsic::riscv_masked_atomicrmw_max_i32;
2781 case AtomicRMWInst::Min:
2782 return Intrinsic::riscv_masked_atomicrmw_min_i32;
2783 case AtomicRMWInst::UMax:
2784 return Intrinsic::riscv_masked_atomicrmw_umax_i32;
2785 case AtomicRMWInst::UMin:
2786 return Intrinsic::riscv_masked_atomicrmw_umin_i32;
2787 }
2788 }
2789
2790 if (XLen == 64) {
2791 switch (BinOp) {
2792 default:
2793 llvm_unreachable("Unexpected AtomicRMW BinOp");
2794 case AtomicRMWInst::Xchg:
2795 return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
2796 case AtomicRMWInst::Add:
2797 return Intrinsic::riscv_masked_atomicrmw_add_i64;
2798 case AtomicRMWInst::Sub:
2799 return Intrinsic::riscv_masked_atomicrmw_sub_i64;
2800 case AtomicRMWInst::Nand:
2801 return Intrinsic::riscv_masked_atomicrmw_nand_i64;
2802 case AtomicRMWInst::Max:
2803 return Intrinsic::riscv_masked_atomicrmw_max_i64;
2804 case AtomicRMWInst::Min:
2805 return Intrinsic::riscv_masked_atomicrmw_min_i64;
2806 case AtomicRMWInst::UMax:
2807 return Intrinsic::riscv_masked_atomicrmw_umax_i64;
2808 case AtomicRMWInst::UMin:
2809 return Intrinsic::riscv_masked_atomicrmw_umin_i64;
2810 }
2811 }
2812
2813 llvm_unreachable("Unexpected XLen\n");
2814}
2815
2816Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
2817 IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
2818 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
2819 unsigned XLen = Subtarget.getXLen();
2820 Value *Ordering =
2821 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
2822 Type *Tys[] = {AlignedAddr->getType()};
2823 Function *LrwOpScwLoop = Intrinsic::getDeclaration(
2824 AI->getModule(),
2825 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
2826
2827 if (XLen == 64) {
2828 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
2829 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
2830 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
2831 }
2832
2833 Value *Result;
2834
2835 // Must pass the shift amount needed to sign extend the loaded value prior
2836 // to performing a signed comparison for min/max. ShiftAmt is the number of
2837 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
2838 // is the number of bits to left+right shift the value in order to
2839 // sign-extend.
2840 if (AI->getOperation() == AtomicRMWInst::Min ||
2841 AI->getOperation() == AtomicRMWInst::Max) {
2842 const DataLayout &DL = AI->getModule()->getDataLayout();
2843 unsigned ValWidth =
2844 DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
2845 Value *SextShamt =
2846 Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
2847 Result = Builder.CreateCall(LrwOpScwLoop,
2848 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
2849 } else {
2850 Result =
2851 Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
2852 }
2853
2854 if (XLen == 64)
2855 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
2856 return Result;
2857}
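// Worked example for the SextShamt computation above, with illustrative
// values: an i8 atomicrmw max on RV32 whose byte sits at bit offset 16 of the
// aligned word gives XLen = 32, ValWidth = 8, ShiftAmt = 16, so
// SextShamt = (32 - 8) - 16 = 8: shifting the loaded word left by 8 and then
// arithmetically right by 8 sign-extends that field in place for the signed
// min/max comparison.
static_assert((32 - 8) - 16 == 8, "SextShamt arithmetic for the example above");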
2858
2859TargetLowering::AtomicExpansionKind
2860RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
2861 AtomicCmpXchgInst *CI) const {
2862 unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
2863 if (Size == 8 || Size == 16)
2864 return AtomicExpansionKind::MaskedIntrinsic;
2865 return AtomicExpansionKind::None;
2866}
2867
2868Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
2869 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2870 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2871 unsigned XLen = Subtarget.getXLen();
2872 Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
2873 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
2874 if (XLen == 64) {
2875 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
2876 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
2877 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
2878 CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
2879 }
2880 Type *Tys[] = {AlignedAddr->getType()};
2881 Function *MaskedCmpXchg =
2882 Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
2883 Value *Result = Builder.CreateCall(
2884 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
2885 if (XLen == 64)
2886 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
2887 return Result;
2888}
2889
2890unsigned RISCVTargetLowering::getExceptionPointerRegister(
2891 const Constant *PersonalityFn) const {
2892 return RISCV::X10;
2893}
2894
2895unsigned RISCVTargetLowering::getExceptionSelectorRegister(
2896 const Constant *PersonalityFn) const {
2897 return RISCV::X11;
2898}
2899
2900bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
2901 // Return false to suppress unnecessary extensions when a libcall
2902 // argument or return value has f32 type under the LP64 ABI.
2903 RISCVABI::ABI ABI = Subtarget.getTargetABI();
2904 if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
2905 return false;
2906
2907 return true;
2908}
2909
2910#define GET_REGISTER_MATCHER
2911#include "RISCVGenAsmMatcher.inc"
2912
2913Register
2914RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
2915 const MachineFunction &MF) const {
2916 Register Reg = MatchRegisterAltName(RegName);
2917 if (Reg == RISCV::NoRegister)
2918 Reg = MatchRegisterName(RegName);
2919 if (Reg == RISCV::NoRegister)
2920 report_fatal_error(
2921 Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
2922 BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
2923 if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
2924 report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
2925 StringRef(RegName) + "\"."));
2926 return Reg;
2927}

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/CodeGen/TargetLowering.h

1//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file describes how to lower LLVM code to machine code. This has three
11/// main components:
12///
13/// 1. Which ValueTypes are natively supported by the target.
14/// 2. Which operations are supported for supported ValueTypes.
15/// 3. Cost thresholds for alternative implementations of certain operations.
16///
17/// In addition it has a few other components, like information about FP
18/// immediates.
19///
20//===----------------------------------------------------------------------===//
21
22#ifndef LLVM_CODEGEN_TARGETLOWERING_H
23#define LLVM_CODEGEN_TARGETLOWERING_H
24
25#include "llvm/ADT/APInt.h"
26#include "llvm/ADT/ArrayRef.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/STLExtras.h"
29#include "llvm/ADT/SmallVector.h"
30#include "llvm/ADT/StringRef.h"
31#include "llvm/Analysis/ProfileSummaryInfo.h"
32#include "llvm/CodeGen/DAGCombine.h"
33#include "llvm/CodeGen/ISDOpcodes.h"
34#include "llvm/CodeGen/RuntimeLibcalls.h"
35#include "llvm/CodeGen/SelectionDAG.h"
36#include "llvm/CodeGen/SelectionDAGNodes.h"
37#include "llvm/CodeGen/TargetCallingConv.h"
38#include "llvm/CodeGen/ValueTypes.h"
39#include "llvm/IR/Attributes.h"
40#include "llvm/IR/CallSite.h"
41#include "llvm/IR/CallingConv.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DerivedTypes.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/IRBuilder.h"
46#include "llvm/IR/InlineAsm.h"
47#include "llvm/IR/Instruction.h"
48#include "llvm/IR/Instructions.h"
49#include "llvm/IR/Type.h"
50#include "llvm/MC/MCRegisterInfo.h"
51#include "llvm/Support/Alignment.h"
52#include "llvm/Support/AtomicOrdering.h"
53#include "llvm/Support/Casting.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/MachineValueType.h"
56#include "llvm/Target/TargetMachine.h"
57#include "llvm/Transforms/Utils/SizeOpts.h"
58#include <algorithm>
59#include <cassert>
60#include <climits>
61#include <cstdint>
62#include <iterator>
63#include <map>
64#include <string>
65#include <utility>
66#include <vector>
67
68namespace llvm {
69
70class BranchProbability;
71class CCState;
72class CCValAssign;
73class Constant;
74class FastISel;
75class FunctionLoweringInfo;
76class GlobalValue;
77class GISelKnownBits;
78class IntrinsicInst;
79struct KnownBits;
80class LegacyDivergenceAnalysis;
81class LLVMContext;
82class MachineBasicBlock;
83class MachineFunction;
84class MachineInstr;
85class MachineJumpTableInfo;
86class MachineLoop;
87class MachineRegisterInfo;
88class MCContext;
89class MCExpr;
90class Module;
91class TargetRegisterClass;
92class TargetLibraryInfo;
93class TargetRegisterInfo;
94class Value;
95
96namespace Sched {
97
98 enum Preference {
99 None, // No preference
100 Source, // Follow source order.
101 RegPressure, // Scheduling for lowest register pressure.
102 Hybrid, // Scheduling for both latency and register pressure.
103 ILP, // Scheduling for ILP in low register pressure mode.
104 VLIW // Scheduling for VLIW targets.
105 };
106
107} // end namespace Sched
108
109// MemOp models a memory operation, either memset or memcpy/memmove.
110struct MemOp {
111private:
112 // Shared
113 uint64_t Size;
114 bool DstAlignCanChange; // true if destination alignment can satisfy any
115 // constraint.
116 Align DstAlign; // Specified alignment of the memory operation.
117
118 bool AllowOverlap;
119 // memset only
120 bool IsMemset; // If set, this memory operation is a memset.
121 bool ZeroMemset; // If set, clears out memory with zeros.
122 // memcpy only
123 bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
124 // constant so it does not need to be loaded.
125 Align SrcAlign; // Inferred alignment of the source or default value if the
126 // memory operation does not need to load the value.
127public:
128 static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
129 Align SrcAlign, bool IsVolatile,
130 bool MemcpyStrSrc = false) {
131 MemOp Op;
132 Op.Size = Size;
133 Op.DstAlignCanChange = DstAlignCanChange;
134 Op.DstAlign = DstAlign;
135 Op.AllowOverlap = !IsVolatile;
136 Op.IsMemset = false;
137 Op.ZeroMemset = false;
138 Op.MemcpyStrSrc = MemcpyStrSrc;
139 Op.SrcAlign = SrcAlign;
140 return Op;
141 }
142
143 static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
144 bool IsZeroMemset, bool IsVolatile) {
145 MemOp Op;
146 Op.Size = Size;
147 Op.DstAlignCanChange = DstAlignCanChange;
148 Op.DstAlign = DstAlign;
149 Op.AllowOverlap = !IsVolatile;
150 Op.IsMemset = true;
151 Op.ZeroMemset = IsZeroMemset;
152 Op.MemcpyStrSrc = false;
153 return Op;
154 }
155
156 uint64_t size() const { return Size; }
157 Align getDstAlign() const {
158 assert(!DstAlignCanChange);
159 return DstAlign;
160 }
161 bool isFixedDstAlign() const { return !DstAlignCanChange; }
162 bool allowOverlap() const { return AllowOverlap; }
163 bool isMemset() const { return IsMemset; }
164 bool isMemcpy() const { return !IsMemset; }
165 bool isMemcpyWithFixedDstAlign() const {
166 return isMemcpy() && !DstAlignCanChange;
167 }
168 bool isZeroMemset() const { return isMemset() && ZeroMemset; }
169 bool isMemcpyStrSrc() const {
170 assert(isMemcpy() && "Must be a memcpy");
171 return MemcpyStrSrc;
172 }
173 Align getSrcAlign() const {
174 assert(isMemcpy() && "Must be a memcpy");
175 return SrcAlign;
176 }
177 bool isSrcAligned(Align AlignCheck) const {
178 return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
179 }
180 bool isDstAligned(Align AlignCheck) const {
181 return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
182 }
183 bool isAligned(Align AlignCheck) const {
184 return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
185 }
186};
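// A minimal usage sketch for the MemOp helpers above (illustrative values
// only): describe a 32-byte, non-volatile memcpy with a fixed 8-byte
// destination alignment and a 4-byte-aligned source, then query the accessors.
static inline bool memOpExample() {
  MemOp Op = MemOp::Copy(/*Size=*/32, /*DstAlignCanChange=*/false,
                         /*DstAlign=*/Align(8), /*SrcAlign=*/Align(4),
                         /*IsVolatile=*/false);
  return Op.isMemcpyWithFixedDstAlign() && Op.isDstAligned(Align(8)) &&
         Op.isSrcAligned(Align(4)); // all hold for these values
}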
187
188/// This base class for TargetLowering contains the SelectionDAG-independent
189/// parts that can be used from the rest of CodeGen.
190class TargetLoweringBase {
191public:
192 /// This enum indicates whether operations are valid for a target, and if not,
193 /// what action should be used to make them valid.
194 enum LegalizeAction : uint8_t {
195 Legal, // The target natively supports this operation.
196 Promote, // This operation should be executed in a larger type.
197 Expand, // Try to expand this to other ops, otherwise use a libcall.
198 LibCall, // Don't try to expand this to other ops, always use a libcall.
199 Custom // Use the LowerOperation hook to implement custom lowering.
200 };
201
202 /// This enum indicates whether a type is legal for a target, and if not,
203 /// what action should be used to make it legal.
204 enum LegalizeTypeAction : uint8_t {
205 TypeLegal, // The target natively supports this type.
206 TypePromoteInteger, // Replace this integer with a larger one.
207 TypeExpandInteger, // Split this integer into two of half the size.
208 TypeSoftenFloat, // Convert this float to a same size integer type.
209 TypeExpandFloat, // Split this float into two of half the size.
210 TypeScalarizeVector, // Replace this one-element vector with its element.
211 TypeSplitVector, // Split this vector into two of half the size.
212 TypeWidenVector, // This vector should be widened into a larger vector.
213 TypePromoteFloat, // Replace this float with a larger one.
214 TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
215 };
216
217 /// LegalizeKind holds the legalization kind that needs to happen to EVT
218 /// in order to type-legalize it.
219 using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
220
221 /// Enum that describes how the target represents true/false values.
222 enum BooleanContent {
223 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
224 ZeroOrOneBooleanContent, // All bits zero except for bit 0.
225 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
226 };
227
228 /// Enum that describes what type of support for selects the target has.
229 enum SelectSupportKind {
230 ScalarValSelect, // The target supports scalar selects (ex: cmov).
231 ScalarCondVectorVal, // The target supports selects with a scalar condition
232 // and vector values (ex: cmov).
233 VectorMaskSelect // The target supports vector selects with a vector
234 // mask (ex: x86 blends).
235 };
236
237 /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
238 /// to, if at all. Exists because different targets have different levels of
239 /// support for these atomic instructions, and also have different options
240 /// w.r.t. what they should expand to.
241 enum class AtomicExpansionKind {
242 None, // Don't expand the instruction.
243 LLSC, // Expand the instruction into loadlinked/storeconditional; used
244 // by ARM/AArch64.
245 LLOnly, // Expand the (load) instruction into just a load-linked, which has
246 // greater atomic guarantees than a normal load.
247 CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
248 MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
249 };
250
251 /// Enum that specifies when a multiplication should be expanded.
252 enum class MulExpansionKind {
253 Always, // Always expand the instruction.
254 OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
255 // or custom.
256 };
257
258 /// Enum that specifies when a float negation is beneficial.
259 enum class NegatibleCost {
260 Expensive = 0, // Negated expression is more expensive.
261 Neutral = 1, // Negated expression has the same cost.
262 Cheaper = 2 // Negated expression is cheaper.
263 };
264
265 class ArgListEntry {
266 public:
267 Value *Val = nullptr;
268 SDValue Node = SDValue();
269 Type *Ty = nullptr;
270 bool IsSExt : 1;
271 bool IsZExt : 1;
272 bool IsInReg : 1;
273 bool IsSRet : 1;
274 bool IsNest : 1;
275 bool IsByVal : 1;
276 bool IsInAlloca : 1;
277 bool IsReturned : 1;
278 bool IsSwiftSelf : 1;
279 bool IsSwiftError : 1;
280 bool IsCFGuardTarget : 1;
281 uint16_t Alignment = 0;
282 Type *ByValType = nullptr;
283
284 ArgListEntry()
285 : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
286 IsNest(false), IsByVal(false), IsInAlloca(false), IsReturned(false),
287 IsSwiftSelf(false), IsSwiftError(false), IsCFGuardTarget(false) {}
288
289 void setAttributes(const CallBase *Call, unsigned ArgIdx);
290
291 void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx) {
292 return setAttributes(cast<CallBase>(CS->getInstruction()), ArgIdx);
293 }
294 };
295 using ArgListTy = std::vector<ArgListEntry>;
296
297 virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
298 ArgListTy &Args) const {};
299
300 static ISD::NodeType getExtendForContent(BooleanContent Content) {
301 switch (Content) {
302 case UndefinedBooleanContent:
303 // Extend by adding rubbish bits.
304 return ISD::ANY_EXTEND;
305 case ZeroOrOneBooleanContent:
306 // Extend by adding zero bits.
307 return ISD::ZERO_EXTEND;
308 case ZeroOrNegativeOneBooleanContent:
309 // Extend by copying the sign bit.
310 return ISD::SIGN_EXTEND;
311 }
312 llvm_unreachable("Invalid content kind");
313 }
314
315 explicit TargetLoweringBase(const TargetMachine &TM);
316 TargetLoweringBase(const TargetLoweringBase &) = delete;
317 TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
318 virtual ~TargetLoweringBase() = default;
319
320 /// Return true if the target supports strict floating-point operations.
321 bool isStrictFPEnabled() const {
322 return IsStrictFPEnabled;
323 }
324
325protected:
326 /// Initialize all of the actions to default values.
327 void initActions();
328
329public:
330 const TargetMachine &getTargetMachine() const { return TM; }
331
332 virtual bool useSoftFloat() const { return false; }
333
334 /// Return the pointer type for the given address space, defaults to
335 /// the pointer type from the data layout.
336 /// FIXME: The default needs to be removed once all the code is updated.
337 virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
338 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
339 }
340
341 /// Return the in-memory pointer type for the given address space, defaults to
342 /// the pointer type from the data layout. FIXME: The default needs to be
343 /// removed once all the code is updated.
344 MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
345 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
346 }
347
348 /// Return the type for frame index, which is determined by
349 /// the alloca address space specified through the data layout.
350 MVT getFrameIndexTy(const DataLayout &DL) const {
351 return getPointerTy(DL, DL.getAllocaAddrSpace());
352 }
353
354 /// Return the type for operands of fence.
355 /// TODO: Let fence operands be of i32 type and remove this.
356 virtual MVT getFenceOperandTy(const DataLayout &DL) const {
357 return getPointerTy(DL);
358 }
359
360 /// EVT is not used in-tree, but is used by out-of-tree targets.
361 /// Documentation for this function would be nice...
362 virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
363
364 EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
365 bool LegalTypes = true) const;
366
367 /// Returns the type to be used for the index operand of:
368 /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
369 /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
370 virtual MVT getVectorIdxTy(const DataLayout &DL) const {
371 return getPointerTy(DL);
372 }
373
374 /// This callback is used to inspect load/store instructions and add
375 /// target-specific MachineMemOperand flags to them. The default
376 /// implementation does nothing.
377 virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const {
378 return MachineMemOperand::MONone;
379 }
380
381 MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI,
382 const DataLayout &DL) const;
383 MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
384 const DataLayout &DL) const;
385 MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
386 const DataLayout &DL) const;
387
388 virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
389 return true;
390 }
391
392 /// Return true if it is profitable to convert a select of FP constants into
393 /// a constant pool load whose address depends on the select condition. The
394 /// parameter may be used to differentiate a select with FP compare from
395 /// integer compare.
396 virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
397 return true;
398 }
399
400 /// Return true if multiple condition registers are available.
401 bool hasMultipleConditionRegisters() const {
402 return HasMultipleConditionRegisters;
403 }
404
405 /// Return true if the target has BitExtract instructions.
406 bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
407
408 /// Return the preferred vector type legalization action.
409 virtual TargetLoweringBase::LegalizeTypeAction
410 getPreferredVectorAction(MVT VT) const {
411 // The default action for one element vectors is to scalarize
412 if (VT.getVectorNumElements() == 1)
413 return TypeScalarizeVector;
414 // The default action for an odd-width vector is to widen.
415 if (!VT.isPow2VectorType())
416 return TypeWidenVector;
417 // The default action for other vectors is to promote
418 return TypePromoteInteger;
419 }
420
421 // Return true if the half type should be passed around as i16, but promoted
422 // to float around arithmetic. The default behavior is to pass around as
423 // float and convert around loads/stores/bitcasts and other places where
424 // the size matters.
425 virtual bool softPromoteHalfType() const { return false; }
426
427 // There are two general methods for expanding a BUILD_VECTOR node:
428 // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
429 // them together.
430 // 2. Build the vector on the stack and then load it.
431 // If this function returns true, then method (1) will be used, subject to
432 // the constraint that all of the necessary shuffles are legal (as determined
433 // by isShuffleMaskLegal). If this function returns false, then method (2) is
434 // always used. The vector type, and the number of defined values, are
435 // provided.
436 virtual bool
437 shouldExpandBuildVectorWithShuffles(EVT /* VT */,
438 unsigned DefinedValues) const {
439 return DefinedValues < 3;
440 }
441
442 /// Return true if integer divide is usually cheaper than a sequence of
443 /// several shifts, adds, and multiplies for this target.
444 /// The definition of "cheaper" may depend on whether we're optimizing
445 /// for speed or for size.
446 virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }
447
448 /// Return true if the target can handle a standalone remainder operation.
449 virtual bool hasStandaloneRem(EVT VT) const {
450 return true;
451 }
452
453 /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
454 virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
455 // Default behavior is to replace SQRT(X) with X*RSQRT(X).
456 return false;
457 }
458
459 /// Reciprocal estimate status values used by the functions below.
460 enum ReciprocalEstimate : int {
461 Unspecified = -1,
462 Disabled = 0,
463 Enabled = 1
464 };
465
466 /// Return a ReciprocalEstimate enum value for a square root of the given type
467 /// based on the function's attributes. If the operation is not overridden by
468 /// the function's attributes, "Unspecified" is returned and target defaults
469 /// are expected to be used for instruction selection.
470 int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
471
472 /// Return a ReciprocalEstimate enum value for a division of the given type
473 /// based on the function's attributes. If the operation is not overridden by
474 /// the function's attributes, "Unspecified" is returned and target defaults
475 /// are expected to be used for instruction selection.
476 int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
477
478 /// Return the refinement step count for a square root of the given type based
479 /// on the function's attributes. If the operation is not overridden by
480 /// the function's attributes, "Unspecified" is returned and target defaults
481 /// are expected to be used for instruction selection.
482 int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
483
484 /// Return the refinement step count for a division of the given type based
485 /// on the function's attributes. If the operation is not overridden by
486 /// the function's attributes, "Unspecified" is returned and target defaults
487 /// are expected to be used for instruction selection.
488 int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
489
490 /// Returns true if target has indicated at least one type should be bypassed.
491 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
492
493 /// Returns map of slow types for division or remainder with corresponding
494 /// fast types
495 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
496 return BypassSlowDivWidths;
497 }
498
499 /// Return true if Flow Control is an expensive operation that should be
500 /// avoided.
501 bool isJumpExpensive() const { return JumpIsExpensive; }
502
503 /// Return true if selects are only cheaper than branches if the branch is
504 /// unlikely to be predicted right.
505 bool isPredictableSelectExpensive() const {
506 return PredictableSelectIsExpensive;
507 }
508
509 /// If a branch or a select condition is skewed in one direction by more than
510 /// this factor, it is very likely to be predicted correctly.
511 virtual BranchProbability getPredictableBranchThreshold() const;
512
513 /// Return true if the following transform is beneficial:
514 /// fold (conv (load x)) -> (load (conv*)x)
515 /// On architectures that don't natively support some vector loads
516 /// efficiently, casting the load to a smaller vector of larger types and
517 /// loading is more efficient, however, this can be undone by optimizations in
518 /// dag combiner.
519 virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
520 const SelectionDAG &DAG,
521 const MachineMemOperand &MMO) const {
522 // Don't do if we could do an indexed load on the original type, but not on
523 // the new one.
524 if (!LoadVT.isSimple() || !BitcastVT.isSimple())
525 return true;
526
527 MVT LoadMVT = LoadVT.getSimpleVT();
528
529 // Don't bother doing this if it's just going to be promoted again later, as
530 // doing so might interfere with other combines.
531 if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
532 getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
533 return false;
534
535 bool Fast = false;
536 return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
537 MMO, &Fast) && Fast;
538 }
539
540 /// Return true if the following transform is beneficial:
541 /// (store (y (conv x)), y*)) -> (store x, (x*))
542 virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
543 const SelectionDAG &DAG,
544 const MachineMemOperand &MMO) const {
545 // Default to the same logic as loads.
546 return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
547 }
548
549 /// Return true if it is expected to be cheaper to do a store of a non-zero
550 /// vector constant with the given size and type for the address space than to
551 /// store the individual scalar element constants.
552 virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
553 unsigned NumElem,
554 unsigned AddrSpace) const {
555 return false;
556 }
557
558 /// Allow store merging for the specified type after legalization in addition
559 /// to before legalization. This may transform stores that do not exist
560 /// earlier (for example, stores created from intrinsics).
561 virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
562 return true;
563 }
564
565 /// Returns true if it's reasonable to merge stores to MemVT size.
566 virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
567 const SelectionDAG &DAG) const {
568 return true;
569 }
570
571 /// Return true if it is cheap to speculate a call to intrinsic cttz.
572 virtual bool isCheapToSpeculateCttz() const {
573 return false;
574 }
575
576 /// Return true if it is cheap to speculate a call to intrinsic ctlz.
577 virtual bool isCheapToSpeculateCtlz() const {
578 return false;
579 }
580
581 /// Return true if ctlz instruction is fast.
582 virtual bool isCtlzFast() const {
583 return false;
584 }
585
586 /// Return true if instruction generated for equality comparison is folded
587 /// with instruction generated for signed comparison.
588 virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }
589
590 /// Return true if it is safe to transform an integer-domain bitwise operation
591 /// into the equivalent floating-point operation. This should be set to true
592 /// if the target has IEEE-754-compliant fabs/fneg operations for the input
593 /// type.
594 virtual bool hasBitPreservingFPLogic(EVT VT) const {
595 return false;
596 }
597
598 /// Return true if it is cheaper to split the store of a merged int val
599 /// from a pair of smaller values into multiple stores.
600 virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
601 return false;
602 }
603
604 /// Return if the target supports combining a
605 /// chain like:
606 /// \code
607 /// %andResult = and %val1, #mask
608 /// %icmpResult = icmp %andResult, 0
609 /// \endcode
610 /// into a single machine instruction of a form like:
611 /// \code
612 /// cc = test %register, #mask
613 /// \endcode
614 virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
615 return false;
616 }
617
618 /// Use bitwise logic to make pairs of compares more efficient. For example:
619 /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
620 /// This should be true when it takes more than one instruction to lower
621 /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
622 /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
623 virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
624 return false;
625 }
626
627 /// Return the preferred operand type if the target has a quick way to compare
628 /// integer values of the given size. Assume that any legal integer type can
629 /// be compared efficiently. Targets may override this to allow illegal wide
630 /// types to return a vector type if there is support to compare that type.
631 virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
632 MVT VT = MVT::getIntegerVT(NumBits);
633 return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
634 }
635
636 /// Return true if the target should transform:
637 /// (X & Y) == Y ---> (~X & Y) == 0
638 /// (X & Y) != Y ---> (~X & Y) != 0
639 ///
640 /// This may be profitable if the target has a bitwise and-not operation that
641 /// sets comparison flags. A target may want to limit the transformation based
642 /// on the type of Y or if Y is a constant.
643 ///
644 /// Note that the transform will not occur if Y is known to be a power-of-2
645 /// because a mask and compare of a single bit can be handled by inverting the
646 /// predicate, for example:
647 /// (X & 8) == 8 ---> (X & 8) != 0
648 virtual bool hasAndNotCompare(SDValue Y) const {
649 return false;
650 }
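// Worked instance of the transform documented above, with concrete values
// (illustrative only): for X = 0b1010 and Y = 0b0010 both forms agree,
//   (X & Y) == Y   ->  (0b0010 == 0b0010)            -> true
//   (~X & Y) == 0  ->  ((...11110101 & 0b0010) == 0) -> true
static_assert(((0b1010 & 0b0010) == 0b0010) == ((~0b1010 & 0b0010) == 0),
              "the two comparison forms agree for this example");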
651
652 /// Return true if the target has a bitwise and-not operation:
653 /// X = ~A & B
654 /// This can be used to simplify select or other instructions.
655 virtual bool hasAndNot(SDValue X) const {
656 // If the target has the more complex version of this operation, assume that
657 // it has this operation too.
658 return hasAndNotCompare(X);
659 }
660
661 /// Return true if the target has a bit-test instruction:
662 /// (X & (1 << Y)) ==/!= 0
663 /// This knowledge can be used to prevent breaking the pattern,
664 /// or creating it if it could be recognized.
665 virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }
666
667 /// There are two ways to clear extreme bits (either low or high):
668 /// Mask: x & (-1 << y) (the instcombine canonical form)
669 /// Shifts: x >> y << y
670 /// Return true if the variant with 2 variable shifts is preferred.
671 /// Return false if there is no preference.
672 virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
673 // By default, let's assume that no one prefers shifts.
674 return false;
675 }
676
677 /// Return true if it is profitable to fold a pair of shifts into a mask.
678 /// This is usually true on most targets. But some targets, like Thumb1,
679 /// have immediate shift instructions, but no immediate "and" instruction;
680 /// this makes the fold unprofitable.
681 virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
682 CombineLevel Level) const {
683 return true;
684 }
685
686 /// Should we transform the IR-optimal check for whether a given truncation
687 /// down into KeptBits would be truncating or not:
688 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
689 /// Into its more traditional form:
690 /// ((%x << C) a>> C) dstcond %x
691 /// Return true if we should transform.
692 /// Return false if there is no preference.
693 virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
694 unsigned KeptBits) const {
695 // By default, let's assume that no one prefers shifts.
696 return false;
697 }
698
699 /// Given the pattern
700 /// (X & (C l>>/<< Y)) ==/!= 0
701 /// return true if it should be transformed into:
702 /// ((X <</l>> Y) & C) ==/!= 0
703 /// WARNING: if 'X' is a constant, the fold may deadlock!
704 /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
705 /// here because it can end up being not linked in.
706 virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
707 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
708 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
709 SelectionDAG &DAG) const {
710 if (hasBitTest(X, Y)) {
711 // One interesting pattern that we'd want to form is 'bit test':
712 // ((1 << Y) & C) ==/!= 0
713 // But we also need to be careful not to try to reverse that fold.
714
715 // Is this '1 << Y' ?
716 if (OldShiftOpcode == ISD::SHL && CC->isOne())
717 return false; // Keep the 'bit test' pattern.
718
719 // Will it be '1 << Y' after the transform ?
720 if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
721 return true; // Do form the 'bit test' pattern.
722 }
723
724 // If 'X' is a constant, and we transform, then we will immediately
725 // try to undo the fold, thus causing endless combine loop.
726 // So by default, let's assume everyone prefers the fold
727 // iff 'X' is not a constant.
728 return !XC;
729 }
730
731 /// These two forms are equivalent:
732 /// sub %y, (xor %x, -1)
733 /// add (add %x, 1), %y
734 /// The variant with two add's is IR-canonical.
735 /// Some targets may prefer one to the other.
736 virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
737 // By default, let's assume that everyone prefers the form with two add's.
738 return true;
739 }
740
741 /// Return true if the target wants to use the optimization that
742 /// turns ext(promotableInst1(...(promotableInstN(load)))) into
743 /// promotedInst1(...(promotedInstN(ext(load)))).
744 bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
745
746 /// Return true if the target can combine store(extractelement VectorTy,
747 /// Idx).
748 /// \p Cost[out] gives the cost of that transformation when this is true.
749 virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
750 unsigned &Cost) const {
751 return false;
752 }
753
754 /// Return true if inserting a scalar into a variable element of an undef
755 /// vector is more efficiently handled by splatting the scalar instead.
756 virtual bool shouldSplatInsEltVarIndex(EVT) const {
757 return false;
758 }
759
760 /// Return true if the target always benefits from combining into FMA for a
761 /// given value type. This must typically return false on targets where FMA
762 /// takes more cycles to execute than FADD.
763 virtual bool enableAggressiveFMAFusion(EVT VT) const {
764 return false;
765 }
766
767 /// Return the ValueType of the result of SETCC operations.
768 virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
769 EVT VT) const;
770
771 /// Return the ValueType for comparison libcalls. Comparison libcalls include
772 /// floating point comparison calls, and Ordered/Unordered check calls on
773 /// floating point numbers.
774 virtual
775 MVT::SimpleValueType getCmpLibcallReturnType() const;
776
777 /// For targets without i1 registers, this gives the nature of the high-bits
778 /// of boolean values held in types wider than i1.
779 ///
780 /// "Boolean values" are special true/false values produced by nodes like
781 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
782 /// Not to be confused with general values promoted from i1. Some cpus
783 /// distinguish between vectors of boolean and scalars; the isVec parameter
784 /// selects between the two kinds. For example on X86 a scalar boolean should
785 /// be zero extended from i1, while the elements of a vector of booleans
786 /// should be sign extended from i1.
787 ///
788 /// Some cpus also treat floating point types the same way as they treat
789 /// vectors instead of the way they treat scalars.
790 BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
791 if (isVec)
792 return BooleanVectorContents;
793 return isFloat ? BooleanFloatContents : BooleanContents;
794 }
795
796 BooleanContent getBooleanContents(EVT Type) const {
797 return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
798 }
799
800 /// Return target scheduling preference.
801 Sched::Preference getSchedulingPreference() const {
802 return SchedPreferenceInfo;
803 }
804
805 /// Some schedulers, e.g. hybrid, can switch to different scheduling heuristics
806 /// for different nodes. This function returns the preference (or none) for
807 /// the given node.
808 virtual Sched::Preference getSchedulingPreference(SDNode *) const {
809 return Sched::None;
810 }
811
812 /// Return the register class that should be used for the specified value
813 /// type.
814 virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const {
815 (void)isDivergent;
816 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
817 assert(RC && "This value type is not natively supported!");
818 return RC;
819 }
820
821 /// Allows target to decide about the register class of the
822 /// specific value that is live outside the defining block.
823 /// Returns true if the value needs uniform register class.
824 virtual bool requiresUniformRegister(MachineFunction &MF,
825 const Value *) const {
826 return false;
827 }
828
829 /// Return the 'representative' register class for the specified value
830 /// type.
831 ///
832 /// The 'representative' register class is the largest legal super-reg
833 /// register class for the register class of the value type. For example, on
834 /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
835 /// register class is GR64 on x86_64.
836 virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
837 const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
838 return RC;
839 }
840
841 /// Return the cost of the 'representative' register class for the specified
842 /// value type.
843 virtual uint8_t getRepRegClassCostFor(MVT VT) const {
844 return RepRegClassCostForVT[VT.SimpleTy];
845 }
846
847 /// Return true if SHIFT instructions should be expanded to SHIFT_PARTS
848 /// instructions, and false if a library call is preferred (e.g for code-size
849 /// reasons).
850 virtual bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
851 return true;
852 }
853
854 /// Return true if the target has native support for the specified value type.
855 /// This means that it has a register that directly holds it without
856 /// promotions or expansions.
857 bool isTypeLegal(EVT VT) const {
858 assert(!VT.isSimple() ||
859        (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
860 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
861 }
862
863 class ValueTypeActionImpl {
864 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
865 /// that indicates how instruction selection should deal with the type.
866 LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];
867
868 public:
869 ValueTypeActionImpl() {
870 std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
871 TypeLegal);
872 }
873
874 LegalizeTypeAction getTypeAction(MVT VT) const {
875 return ValueTypeActions[VT.SimpleTy];
876 }
877
878 void setTypeAction(MVT VT, LegalizeTypeAction Action) {
879 ValueTypeActions[VT.SimpleTy] = Action;
880 }
881 };
882
883 const ValueTypeActionImpl &getValueTypeActions() const {
884 return ValueTypeActions;
885 }
886
887 /// Return how we should legalize values of this type, either it is already
888 /// legal (return 'Legal') or we need to promote it to a larger type (return
889 /// 'Promote'), or we need to expand it into multiple registers of smaller
890 /// integer type (return 'Expand'). 'Custom' is not an option.
891 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
892 return getTypeConversion(Context, VT).first;
893 }
894 LegalizeTypeAction getTypeAction(MVT VT) const {
895 return ValueTypeActions.getTypeAction(VT);
896 }
897
898 /// For types supported by the target, this is an identity function. For
899 /// types that must be promoted to larger types, this returns the larger type
900 /// to promote to. For integer types that are larger than the largest integer
901 /// register, this contains one step in the expansion to get to the smaller
902 /// register. For illegal floating point types, this returns the integer type
903 /// to transform to.
904 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
905 return getTypeConversion(Context, VT).second;
906 }
907
908 /// For types supported by the target, this is an identity function. For
909 /// types that must be expanded (i.e. integer types that are larger than the
910 /// largest integer register or illegal floating point types), this returns
911 /// the largest legal type it will be expanded to.
912 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
913 assert(!VT.isVector());
914 while (true) {
915 switch (getTypeAction(Context, VT)) {
916 case TypeLegal:
917 return VT;
918 case TypeExpandInteger:
919 VT = getTypeToTransformTo(Context, VT);
920 break;
921 default:
922 llvm_unreachable("Type is not legal nor is it to be expanded!");
923 }
924 }
925 }
926
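// Illustrative sketch, not part of TargetLowering.h: walking the legalization
// queries above for an integer type wider than the target's registers. "Ctx"
// and "TLI" are assumed to be available in the caller; i128 on a 64-bit
// target is only an example input.
void describeI128Legalization(LLVMContext &Ctx, const TargetLoweringBase &TLI) {
  EVT VT = EVT::getIntegerVT(Ctx, 128);
  if (TLI.getTypeAction(Ctx, VT) == TargetLoweringBase::TypeExpandInteger) {
    EVT OneStep = TLI.getTypeToTransformTo(Ctx, VT); // one expansion step
    EVT Final = TLI.getTypeToExpandTo(Ctx, VT);      // largest legal piece
    (void)OneStep;                                   // e.g. i64 on a 64-bit target
    (void)Final;                                     // e.g. i64 on a 64-bit target
  }
}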
927 /// Vector types are broken down into some number of legal first class types.
928 /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
929 /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
930 /// turns into 4 EVT::i32 values with both PPC and X86.
931 ///
932 /// This method returns the number of registers needed, and the VT for each
933 /// register. It also returns the VT and quantity of the intermediate values
934 /// before they are promoted/expanded.
935 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
936 EVT &IntermediateVT,
937 unsigned &NumIntermediates,
938 MVT &RegisterVT) const;
939
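// Illustrative sketch, not part of TargetLowering.h: querying the breakdown
// described above. The v8f32 input and the "2 x v4f32" outcome mentioned in
// the comment are assumptions about a typical SSE-class target.
unsigned countV8F32Parts(LLVMContext &Ctx, const TargetLoweringBase &TLI) {
  EVT VT = EVT::getVectorVT(Ctx, MVT::f32, 8);  // v8f32
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  return TLI.getVectorTypeBreakdown(Ctx, VT, IntermediateVT, NumIntermediates,
                                    RegisterVT);
}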
940 /// Certain targets such as MIPS require that some types such as vectors are
941 /// always broken down into scalars in some contexts. This occurs even if the
942 /// vector type is legal.
943 virtual unsigned getVectorTypeBreakdownForCallingConv(
944 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
945 unsigned &NumIntermediates, MVT &RegisterVT) const {
946 return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
947 RegisterVT);
948 }
949
950 struct IntrinsicInfo {
951 unsigned opc = 0; // target opcode
952 EVT memVT; // memory VT
953
954 // value representing memory location
955 PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;
956
957 int offset = 0; // offset off of ptrVal
958 uint64_t size = 0; // the size of the memory location
959 // (taken from memVT if zero)
960 MaybeAlign align = Align(1); // alignment
961
962 MachineMemOperand::Flags flags = MachineMemOperand::MONone;
963 IntrinsicInfo() = default;
964 };
965
966 /// Given an intrinsic, checks if on the target the intrinsic will need to map
967 /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
968 /// true and stores the intrinsic information into the IntrinsicInfo that was
969 /// passed to the function.
970 virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
971 MachineFunction &,
972 unsigned /*Intrinsic*/) const {
973 return false;
974 }
975
976 /// Returns true if the target can instruction select the specified FP
977 /// immediate natively. If false, the legalizer will materialize the FP
978 /// immediate as a load from a constant pool.
979 virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
980 bool ForCodeSize = false) const {
981 return false;
982 }
983
984 /// Targets can use this to indicate that they only support *some*
985 /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
986 /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
987 /// legal.
988 virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
989 return true;
990 }
991
992 /// Returns true if the operation can trap for the value type.
993 ///
994 /// VT must be a legal type. By default, we optimistically assume most
995 /// operations don't trap except for integer divide and remainder.
996 virtual bool canOpTrap(unsigned Op, EVT VT) const;
997
998 /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
999 /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
1000 /// constant pool entry.
1001 virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
1002 EVT /*VT*/) const {
1003 return false;
1004 }
1005
1006 /// Return how this operation should be treated: either it is legal, needs to
1007 /// be promoted to a larger size, needs to be expanded to some other code
1008 /// sequence, or the target has a custom expander for it.
1009 LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
1010 if (VT.isExtended()) return Expand;
1011 // If a target-specific SDNode requires legalization, require the target
1012 // to provide custom legalization for it.
1013 if (Op >= array_lengthof(OpActions[0])) return Custom;
1014 return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
1015 }
1016
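// Illustrative sketch, not part of TargetLowering.h: the query pattern that
// the isOperationLegal*/isOperationCustom helpers below wrap. ISD::SDIV and
// MVT::i64 are arbitrary choices; the answer is entirely target-dependent.
bool sdivI64NeedsExpansion(const TargetLoweringBase &TLI) {
  return TLI.getOperationAction(ISD::SDIV, MVT::i64) ==
         TargetLoweringBase::Expand;
}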
1017 /// Custom method defined by each target to indicate if an operation which
1018 /// may require a scale is supported natively by the target.
1019 /// If not, the operation is illegal.
1020 virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
1021 unsigned Scale) const {
1022 return false;
1023 }
1024
1025 /// Some fixed point operations may be natively supported by the target but
1026 /// only for specific scales. This method allows for checking
1027 /// if the width is supported by the target for a given operation that may
1028 /// depend on scale.
1029 LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
1030 unsigned Scale) const {
1031 auto Action = getOperationAction(Op, VT);
1032 if (Action != Legal)
1033 return Action;
1034
1035 // This operation is supported in this type but may only work on specific
1036 // scales.
1037 bool Supported;
1038 switch (Op) {
1039 default:
1040 llvm_unreachable("Unexpected fixed point operation.");
1041 case ISD::SMULFIX:
1042 case ISD::SMULFIXSAT:
1043 case ISD::UMULFIX:
1044 case ISD::UMULFIXSAT:
1045 case ISD::SDIVFIX:
1046 case ISD::SDIVFIXSAT:
1047 case ISD::UDIVFIX:
1048 case ISD::UDIVFIXSAT:
1049 Supported = isSupportedFixedPointOperation(Op, VT, Scale);
1050 break;
1051 }
1052
1053 return Supported ? Action : Expand;
1054 }
1055
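// Illustrative sketch, not part of TargetLowering.h: asking whether a scaled
// multiply must be expanded. The i32 type and scale of 15 are arbitrary; with
// the default isSupportedFixedPointOperation hook, a Legal base operation
// still reports Expand here.
bool smulfixI32NeedsExpansion(const TargetLoweringBase &TLI) {
  return TLI.getFixedPointOperationAction(ISD::SMULFIX, MVT::i32, 15) ==
         TargetLoweringBase::Expand;
}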
1056 // If Op is a strict floating-point operation, return the result
1057 // of getOperationAction for the equivalent non-strict operation.
1058 LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
1059 unsigned EqOpc;
1060 switch (Op) {
1061 default: llvm_unreachable("Unexpected FP pseudo-opcode");
1062#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1063 case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
1064#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1065 case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
1066#include "llvm/IR/ConstrainedOps.def"
1067 }
1068
1069 return getOperationAction(EqOpc, VT);
1070 }
1071
1072 /// Return true if the specified operation is legal on this target or can be
1073 /// made legal with custom lowering. This is used to help guide high-level
1074 /// lowering decisions.
1075 bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
1076 return (VT == MVT::Other || isTypeLegal(VT)) &&
1077 (getOperationAction(Op, VT) == Legal ||
1078 getOperationAction(Op, VT) == Custom);
1079 }
1080
1081 /// Return true if the specified operation is legal on this target or can be
1082 /// made legal using promotion. This is used to help guide high-level lowering
1083 /// decisions.
1084 bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
1085 return (VT == MVT::Other || isTypeLegal(VT)) &&
1086 (getOperationAction(Op, VT) == Legal ||
1087 getOperationAction(Op, VT) == Promote);
1088 }
1089
1090 /// Return true if the specified operation is legal on this target or can be
1091 /// made legal with custom lowering or using promotion. This is used to help
1092 /// guide high-level lowering decisions.
1093 bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
1094 return (VT == MVT::Other || isTypeLegal(VT)) &&
1095 (getOperationAction(Op, VT) == Legal ||
1096 getOperationAction(Op, VT) == Custom ||
1097 getOperationAction(Op, VT) == Promote);
1098 }
1099
1100 /// Return true if the operation uses custom lowering, regardless of whether
1101 /// the type is legal or not.
1102 bool isOperationCustom(unsigned Op, EVT VT) const {
1103 return getOperationAction(Op, VT) == Custom;
1104 }
1105
1106 /// Return true if lowering to a jump table is allowed.
1107 virtual bool areJTsAllowed(const Function *Fn) const {
1108 if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
1109 return false;
1110
1111 return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1112 isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
1113 }
1114
1115 /// Check whether the range [Low,High] fits in a machine word.
1116 bool rangeFitsInWord(const APInt &Low, const APInt &High,
1117 const DataLayout &DL) const {
1118 // FIXME: Using the pointer type doesn't seem ideal.
1119 uint64_t BW = DL.getIndexSizeInBits(0u);
1120 uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
1121 return Range <= BW;
1122 }
1123
1124 /// Return true if lowering to a jump table is suitable for a set of case
1125 /// clusters which may contain \p NumCases cases spanning a range of \p Range values.
1126 virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
1127 uint64_t Range, ProfileSummaryInfo *PSI,
1128 BlockFrequencyInfo *BFI) const;
1129
1130 /// Return true if lowering to a bit test is suitable for a set of case
1131 /// clusters which contains \p NumDests unique destinations, \p Low and
1132 /// \p High as its lowest and highest case values, and expects \p NumCmps
1133 /// case value comparisons. Check if the number of destinations, comparison
1134 /// metric, and range are all suitable.
1135 bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
1136 const APInt &Low, const APInt &High,
1137 const DataLayout &DL) const {
1138 // FIXME: I don't think NumCmps is the correct metric: a single case and a
1139 // range of cases both require only one branch to lower. Just looking at the
1140 // number of clusters and destinations should be enough to decide whether to
1141 // build bit tests.
1142
1143 // To lower a range with bit tests, the range must fit the bitwidth of a
1144 // machine word.
1145 if (!rangeFitsInWord(Low, High, DL))
1146 return false;
1147
1148 // Decide whether it's profitable to lower this range with bit tests. Each
1149 // destination requires a bit test and branch, and there is an overall range
1150 // check branch. For a small number of clusters, separate comparisons might
1151 // be cheaper, and for many destinations, splitting the range might be
1152 // better.
1153 return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
1154 (NumDests == 3 && NumCmps >= 6);
1155 }
1156
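// Worked example for the heuristic above, not part of TargetLowering.h: a
// switch whose cases {0, 3, 5, 9} all reach one destination has
// NumDests == 1 and NumCmps == 4, and the range [0, 9] fits in a machine
// word (assuming a 64-bit index width in "DL"), so bit tests are suitable.
bool bitTestExample(const TargetLoweringBase &TLI, const DataLayout &DL) {
  APInt Low(64, 0), High(64, 9);
  return TLI.isSuitableForBitTests(/*NumDests=*/1, /*NumCmps=*/4, Low, High,
                                   DL);
}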
1157 /// Return true if the specified operation is illegal on this target or
1158 /// unlikely to be made legal with custom lowering. This is used to help guide
1159 /// high-level lowering decisions.
1160 bool isOperationExpand(unsigned Op, EVT VT) const {
1161 return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
1162 }
1163
1164 /// Return true if the specified operation is legal on this target.
1165 bool isOperationLegal(unsigned Op, EVT VT) const {
1166 return (VT == MVT::Other || isTypeLegal(VT)) &&
1167 getOperationAction(Op, VT) == Legal;
1168 }
1169
1170 /// Return how this load with extension should be treated: either it is legal,
1171 /// needs to be promoted to a larger size, needs to be expanded to some other
1172 /// code sequence, or the target has a custom expander for it.
1173 LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
1174 EVT MemVT) const {
1175 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1176 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1177 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1178 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
1179        MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
1180 unsigned Shift = 4 * ExtType;
1181 return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
1182 }
1183
1184 /// Return true if the specified load with extension is legal on this target.
1185 bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
1186 return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
1187 }
1188
1189 /// Return true if the specified load with extension is legal or custom
1190 /// on this target.
1191 bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
1192 return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
1193 getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
1194 }
1195
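// Illustrative sketch, not part of TargetLowering.h: checking whether a
// sign-extending i8 -> i32 load is natively supported. The extension kind
// and the types are arbitrary examples.
bool hasNativeSextI8ToI32Load(const TargetLoweringBase &TLI) {
  return TLI.isLoadExtLegal(ISD::SEXTLOAD, MVT::i32, MVT::i8);
}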
1196 /// Return how this store with truncation should be treated: either it is
1197 /// legal, needs to be promoted to a larger size, needs to be expanded to some
1198 /// other code sequence, or the target has a custom expander for it.
1199 LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
1200 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1201 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1202 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1203 assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
1204        "Table isn't big enough!");
1205 return TruncStoreActions[ValI][MemI];
1206 }
1207
1208 /// Return true if the specified store with truncation is legal on this
1209 /// target.
1210 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
1211 return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
1212 }
1213
1214 /// Return true if the specified store with truncation can be handled on this
1215 /// target, i.e. it is either legal or custom.
1216 bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
1217 return isTypeLegal(ValVT) &&
1218 (getTruncStoreAction(ValVT, MemVT) == Legal ||
1219 getTruncStoreAction(ValVT, MemVT) == Custom);
1220 }
1221
1222 /// Return how the indexed load should be treated: either it is legal, needs
1223 /// to be promoted to a larger size, needs to be expanded to some other code
1224 /// sequence, or the target has a custom expander for it.
1225 LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
1226 return getIndexedModeAction(IdxMode, VT, IMAB_Load);
1227 }
1228
1229 /// Return true if the specified indexed load is legal on this target.
1230 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
1231 return VT.isSimple() &&
1232 (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1233 getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1234 }
1235
1236 /// Return how the indexed store should be treated: either it is legal, needs
1237 /// to be promoted to a larger size, needs to be expanded to some other code
1238 /// sequence, or the target has a custom expander for it.
1239 LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
1240 return getIndexedModeAction(IdxMode, VT, IMAB_Store);
1241 }
1242
1243 /// Return true if the specified indexed store is legal on this target.
1244 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
1245 return VT.isSimple() &&
1246 (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1247 getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1248 }
1249
1250 /// Return how the indexed masked load should be treated: either it is legal, needs
1251 /// to be promoted to a larger size, needs to be expanded to some other code
1252 /// sequence, or the target has a custom expander for it.
1253 LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
1254 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
1255 }
1256
1257 /// Return true if the specified indexed masked load is legal on this target.
1258 bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
1259 return VT.isSimple() &&
1260 (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1261 getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1262 }
1263
1264 /// Return how the indexed masked store should be treated: either it is legal, needs
1265 /// to be promoted to a larger size, needs to be expanded to some other code
1266 /// sequence, or the target has a custom expander for it.
1267 LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
1268 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
1269 }
1270
1271 /// Return true if the specified indexed masked store is legal on this target.
1272 bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
1273 return VT.isSimple() &&
1274 (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1275 getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1276 }
1277
1278 /// Return how the condition code should be treated: either it is legal, needs
1279 /// to be expanded to some other code sequence, or the target has a custom
1280 /// expander for it.
1281 LegalizeAction
1282 getCondCodeAction(ISD::CondCode CC, MVT VT) const {
1283 assert((unsigned)CC < array_lengthof(CondCodeActions) &&
1284        ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
1285        "Table isn't big enough!");
1286 // See setCondCodeAction for how this is encoded.
1287 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1288 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
1289 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
1290 assert(Action != Promote && "Can't promote condition code!");
1291 return Action;
1292 }
1293
1294 /// Return true if the specified condition code is legal on this target.
1295 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
1296 return getCondCodeAction(CC, VT) == Legal;
1297 }
1298
1299 /// Return true if the specified condition code is legal or custom on this
1300 /// target.
1301 bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
1302 return getCondCodeAction(CC, VT) == Legal ||
1303 getCondCodeAction(CC, VT) == Custom;
1304 }
1305
1306 /// If the action for this operation is to promote, this method returns the
1307 /// ValueType to promote to.
1308 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
1309 assert(getOperationAction(Op, VT) == Promote &&
1310        "This operation isn't promoted!");
1311
1312 // See if this has an explicit type specified.
1313 std::map<std::pair<unsigned, MVT::SimpleValueType>,
1314 MVT::SimpleValueType>::const_iterator PTTI =
1315 PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
1316 if (PTTI != PromoteToType.end()) return PTTI->second;
1317
1318 assert((VT.isInteger() || VT.isFloatingPoint()) &&
1319        "Cannot autopromote this type, add it with AddPromotedToType.");
1320
1321 MVT NVT = VT;
1322 do {
1323 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
1324 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
1325        "Didn't find type to promote to!");
1326 } while (!isTypeLegal(NVT) ||
1327 getOperationAction(Op, NVT) == Promote);
1328 return NVT;
1329 }
1330
1331 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
1332 /// operations except for the pointer size. If AllowUnknown is true, this
1333 /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
1334 /// otherwise it will assert.
1335 EVT getValueType(const DataLayout &DL, Type *Ty,
1336 bool AllowUnknown = false) const {
1337 // Lower scalar pointers to native pointer types.
1338 if (auto *PTy = dyn_cast<PointerType>(Ty))
1339 return getPointerTy(DL, PTy->getAddressSpace());
1340
1341 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1342 Type *EltTy = VTy->getElementType();
1343 // Lower vectors of pointers to native pointer types.
1344 if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1345 EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
1346 EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1347 }
1348 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1349 VTy->getElementCount());
1350 }
1351
1352 return EVT::getEVT(Ty, AllowUnknown);
1353 }
1354
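// Illustrative sketch, not part of TargetLowering.h: the pointer lowering
// described above. "DL" and "Ctx" are assumed to come from the module being
// compiled; the resulting EVTs depend on the target's pointer width.
void showPointerLowering(const TargetLoweringBase &TLI, const DataLayout &DL,
                         LLVMContext &Ctx) {
  Type *PtrTy = Type::getInt8PtrTy(Ctx);
  EVT ScalarVT = TLI.getValueType(DL, PtrTy);   // e.g. MVT::i64 on a 64-bit target
  Type *VecTy = VectorType::get(PtrTy, 4);      // <4 x i8*>
  EVT VectorVT = TLI.getValueType(DL, VecTy);   // e.g. v4i64 on the same target
  (void)ScalarVT;
  (void)VectorVT;
}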
1355 EVT getMemValueType(const DataLayout &DL, Type *Ty,
1356 bool AllowUnknown = false) const {
1357 // Lower scalar pointers to native pointer types.
1358 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
1359 return getPointerMemTy(DL, PTy->getAddressSpace());
1360 else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1361 Type *Elm = VTy->getElementType();
1362 if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
1363 EVT PointerTy(getPointerMemTy(DL, PT->getAddressSpace()));
1364 Elm = PointerTy.getTypeForEVT(Ty->getContext());
1365 }
1366 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
1367 VTy->getElementCount());
1368 }
1369
1370 return getValueType(DL, Ty, AllowUnknown);
1371 }
1372
1373
1374 /// Return the MVT corresponding to this LLVM type. See getValueType.
1375 MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
1376 bool AllowUnknown = false) const {
1377 return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1378 }
1379
1380 /// Return the desired alignment for ByVal or InAlloca aggregate function
1381 /// arguments in the caller parameter area. This is the actual alignment, not
1382 /// its logarithm.
1383 virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1384
1385 /// Return the type of registers that this ValueType will eventually require.
1386 MVT getRegisterType(MVT VT) const {
1387 assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
1388 return RegisterTypeForVT[VT.SimpleTy];
1389 }
1390
1391 /// Return the type of registers that this ValueType will eventually require.
1392 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1393 if (VT.isSimple()) {
1394 assert((unsigned)VT.getSimpleVT().SimpleTy <
1395        array_lengthof(RegisterTypeForVT));
1396 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
1397 }
1398 if (VT.isVector()) {
1399 EVT VT1;
1400 MVT RegisterVT;
1401 unsigned NumIntermediates;
1402 (void)getVectorTypeBreakdown(Context, VT, VT1,
1403 NumIntermediates, RegisterVT);
1404 return RegisterVT;
1405 }
1406 if (VT.isInteger()) {
1407 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1408 }
1409 llvm_unreachable("Unsupported extended type!");
1410 }
1411
1412 /// Return the number of registers that this ValueType will eventually
1413 /// require.
1414 ///
1415 /// This is one for any types promoted to live in larger registers, but may be
1416 /// more than one for types (like i64) that are split into pieces. For types
1417 /// like i140, which are first promoted then expanded, it is the number of
1418 /// registers needed to hold all the bits of the original type. For an i140
1419 /// on a 32 bit machine this means 5 registers.
1420 unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
1421 if (VT.isSimple()) {
1422 assert((unsigned)VT.getSimpleVT().SimpleTy <
1423        array_lengthof(NumRegistersForVT));
1424 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1425 }
1426 if (VT.isVector()) {
1427 EVT VT1;
1428 MVT VT2;
1429 unsigned NumIntermediates;
1430 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1431 }
1432 if (VT.isInteger()) {
1433 unsigned BitWidth = VT.getSizeInBits();
1434 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1435 return (BitWidth + RegWidth - 1) / RegWidth;
1436 }
1437 llvm_unreachable("Unsupported extended type!");
1438 }
1439
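// Worked check of the i140 example above, not part of TargetLowering.h: on a
// target whose widest legal integer register is 32 bits (an assumption),
// i140 needs ceil(140 / 32) == 5 registers. "Ctx" is assumed.
unsigned registersForI140(LLVMContext &Ctx, const TargetLoweringBase &TLI) {
  return TLI.getNumRegisters(Ctx, EVT::getIntegerVT(Ctx, 140));
}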
1440 /// Certain combinations of ABIs, Targets and features require that types
1441 /// are legal for some operations and not for other operations.
1442 /// For MIPS all vector types must be passed through the integer register set.
1443 virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
1444 CallingConv::ID CC, EVT VT) const {
1445 return getRegisterType(Context, VT);
1446 }
1447
1448 /// Certain targets require unusual breakdowns of certain types. For MIPS,
1449 /// this occurs when a vector type is used, as vectors are passed through the
1450 /// integer register set.
1451 virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1452 CallingConv::ID CC,
1453 EVT VT) const {
1454 return getNumRegisters(Context, VT);
1455 }
1456
1457 /// Certain targets have context-sensitive alignment requirements, where one
1458 /// type has the alignment requirement of another type.
1459 virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
1460 DataLayout DL) const {
1461 return Align(DL.getABITypeAlignment(ArgTy));
1462 }
1463
1464 /// If true, then instruction selection should seek to shrink the FP constant
1465 /// of the specified type to a smaller type in order to save space and / or
1466 /// reduce runtime.
1467 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1468
1469 /// Return true if it is profitable to reduce a load to a smaller type.
1470 /// Example: (i16 (trunc (i32 (load x))) -> i16 load x
1471 virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
1472 EVT NewVT) const {
1473 // By default, assume that it is cheaper to extract a subvector from a wide
1474 // vector load rather than creating multiple narrow vector loads.
1475 if (NewVT.isVector() && !Load->hasOneUse())
1476 return false;
1477
1478 return true;
1479 }
1480
1481 /// When splitting a value of the specified type into parts, does the Lo
1482 /// or Hi part come first? This usually follows the endianness, except
1483 /// for ppcf128, where the Hi part always comes first.
1484 bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1485 return DL.isBigEndian() || VT == MVT::ppcf128;
1486 }
1487
1488 /// If true, the target has custom DAG combine transformations that it can
1489 /// perform for the specified node.
1490 bool hasTargetDAGCombine(ISD::NodeType NT) const {
1491 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1492 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1493 }
1494
1495 unsigned getGatherAllAliasesMaxDepth() const {
1496 return GatherAllAliasesMaxDepth;
1497 }
1498
1499 /// Returns the size of the platform's va_list object.
1500 virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1501 return getPointerTy(DL).getSizeInBits();
1502 }
1503
1504 /// Get maximum # of store operations permitted for llvm.memset
1505 ///
1506 /// This function returns the maximum number of store operations permitted
1507 /// to replace a call to llvm.memset. The value is set by the target at the
1508 /// performance threshold for such a replacement. If OptSize is true,
1509 /// return the limit for functions that have OptSize attribute.
1510 unsigned getMaxStoresPerMemset(bool OptSize) const {
1511 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1512 }
1513
1514 /// Get maximum # of store operations permitted for llvm.memcpy
1515 ///
1516 /// This function returns the maximum number of store operations permitted
1517 /// to replace a call to llvm.memcpy. The value is set by the target at the
1518 /// performance threshold for such a replacement. If OptSize is true,
1519 /// return the limit for functions that have OptSize attribute.
1520 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1521 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1522 }
1523
1524 /// \brief Get maximum # of store operations to be glued together
1525 ///
1526 /// This function returns the maximum number of store operations permitted
1527 /// to glue together during lowering of llvm.memcpy. The value is set by
1528 /// the target at the performance threshold for such a replacement.
1529 virtual unsigned getMaxGluedStoresPerMemcpy() const {
1530 return MaxGluedStoresPerMemcpy;
1531 }
1532
1533 /// Get maximum # of load operations permitted for memcmp
1534 ///
1535 /// This function returns the maximum number of load operations permitted
1536 /// to replace a call to memcmp. The value is set by the target at the
1537 /// performance threshold for such a replacement. If OptSize is true,
1538 /// return the limit for functions that have OptSize attribute.
1539 unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1540 return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1541 }
1542
1543 /// Get maximum # of store operations permitted for llvm.memmove
1544 ///
1545 /// This function returns the maximum number of store operations permitted
1546 /// to replace a call to llvm.memmove. The value is set by the target at the
1547 /// performance threshold for such a replacement. If OptSize is true,
1548 /// return the limit for functions that have OptSize attribute.
1549 unsigned getMaxStoresPerMemmove(bool OptSize) const {
1550 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1551 }
1552
1553 /// Determine if the target supports unaligned memory accesses.
1554 ///
1555 /// This function returns true if the target allows unaligned memory accesses
1556 /// of the specified type in the given address space. If true, it also returns
1557 /// whether the unaligned memory access is "fast" in the last argument by
1558 /// reference. This is used, for example, in situations where an array
1559 /// copy/move/set is converted to a sequence of store operations. Its use
1560 /// helps to ensure that such replacements don't generate code that causes an
1561 /// alignment error (trap) on the target machine.
1562 virtual bool allowsMisalignedMemoryAccesses(
1563 EVT, unsigned AddrSpace = 0, unsigned Align = 1,
1564 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1565 bool * /*Fast*/ = nullptr) const {
1566 return false;
1567 }
1568
1569 /// LLT handling variant.
1570 virtual bool allowsMisalignedMemoryAccesses(
1571 LLT, unsigned AddrSpace = 0, unsigned Align = 1,
1572 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1573 bool * /*Fast*/ = nullptr) const {
1574 return false;
1575 }
1576
1577 /// This function returns true if the memory access is aligned or if the
1578 /// target allows this specific unaligned memory access. If the access is
1579 /// allowed, the optional final parameter returns if the access is also fast
1580 /// (as defined by the target).
1581 bool allowsMemoryAccessForAlignment(
1582 LLVMContext &Context, const DataLayout &DL, EVT VT,
1583 unsigned AddrSpace = 0, unsigned Alignment = 1,
1584 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1585 bool *Fast = nullptr) const;
1586
1587 /// Return true if the memory access of this type is aligned or if the target
1588 /// allows this specific unaligned access for the given MachineMemOperand.
1589 /// If the access is allowed, the optional final parameter returns if the
1590 /// access is also fast (as defined by the target).
1591 bool allowsMemoryAccessForAlignment(LLVMContext &Context,
1592 const DataLayout &DL, EVT VT,
1593 const MachineMemOperand &MMO,
1594 bool *Fast = nullptr) const;
1595
1596 /// Return true if the target supports a memory access of this type for the
1597 /// given address space and alignment. If the access is allowed, the optional
1598 /// final parameter returns if the access is also fast (as defined by the
1599 /// target).
1600 virtual bool
1601 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1602 unsigned AddrSpace = 0, unsigned Alignment = 1,
1603 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1604 bool *Fast = nullptr) const;
1605
1606 /// Return true if the target supports a memory access of this type for the
1607 /// given MachineMemOperand. If the access is allowed, the optional
1608 /// final parameter returns if the access is also fast (as defined by the
1609 /// target).
1610 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1611 const MachineMemOperand &MMO,
1612 bool *Fast = nullptr) const;
1613
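// Illustrative sketch, not part of TargetLowering.h: asking whether a
// byte-aligned i32 access in address space 0 is allowed and fast. Both
// answers are target-specific; the argument values are examples only.
bool unalignedI32IsFast(const TargetLoweringBase &TLI, LLVMContext &Ctx,
                        const DataLayout &DL) {
  bool Fast = false;
  return TLI.allowsMemoryAccess(Ctx, DL, MVT::i32, /*AddrSpace=*/0,
                                /*Alignment=*/1, MachineMemOperand::MONone,
                                &Fast) &&
         Fast;
}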
1614 /// Returns the target specific optimal type for load and store operations as
1615 /// a result of memset, memcpy, and memmove lowering.
1616 /// It returns EVT::Other if the type should be determined using generic
1617 /// target-independent logic.
1618 virtual EVT
1619 getOptimalMemOpType(const MemOp &Op,
1620 const AttributeList & /*FuncAttributes*/) const {
1621 return MVT::Other;
1622 }
1623
1624 /// LLT returning variant.
1625 virtual LLT
1626 getOptimalMemOpLLT(const MemOp &Op,
1627 const AttributeList & /*FuncAttributes*/) const {
1628 return LLT();
1629 }
1630
1631 /// Returns true if it's safe to use load / store of the specified type to
1632 /// expand memcpy / memset inline.
1633 ///
1634 /// This is mostly true for all types except for some special cases. For
1635 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1636 /// fstpl which also does type conversion. Note the specified type doesn't
1637 /// have to be legal as the hook is used before type legalization.
1638 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1639
1640 /// Return lower limit for number of blocks in a jump table.
1641 virtual unsigned getMinimumJumpTableEntries() const;
1642
1643 /// Return lower limit of the density in a jump table.
1644 unsigned getMinimumJumpTableDensity(bool OptForSize) const;
1645
1646 /// Return upper limit for number of entries in a jump table.
1647 /// Zero if no limit.
1648 unsigned getMaximumJumpTableSize() const;
1649
1650 virtual bool isJumpTableRelative() const {
1651 return TM.isPositionIndependent();
1652 }
1653
1654 /// If a physical register, this specifies the register that
1655 /// llvm.savestack/llvm.restorestack should save and restore.
1656 unsigned getStackPointerRegisterToSaveRestore() const {
1657 return StackPointerRegisterToSaveRestore;
1658 }
1659
1660 /// If a physical register, this returns the register that receives the
1661 /// exception address on entry to an EH pad.
1662 virtual unsigned
1663 getExceptionPointerRegister(const Constant *PersonalityFn) const {
1664 // 0 is guaranteed to be the NoRegister value on all targets
1665 return 0;
1666 }
1667
1668 /// If a physical register, this returns the register that receives the
1669 /// exception typeid on entry to a landing pad.
1670 virtual unsigned
1671 getExceptionSelectorRegister(const Constant *PersonalityFn) const {
1672 // 0 is guaranteed to be the NoRegister value on all targets
1673 return 0;
1674 }
1675
1676 virtual bool needsFixedCatchObjects() const {
1677 report_fatal_error("Funclet EH is not implemented for this target");
1678 }
1679
1680 /// Return the minimum stack alignment of an argument.
1681 Align getMinStackArgumentAlignment() const {
1682 return MinStackArgumentAlignment;
1683 }
1684
1685 /// Return the minimum function alignment.
1686 Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
1687
1688 /// Return the preferred function alignment.
1689 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
1690
1691 /// Return the preferred loop alignment.
1692 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
1693 return PrefLoopAlignment;
1694 }
1695
1696 /// Should loops be aligned even when the function is marked OptSize (but not
1697 /// MinSize).
1698 virtual bool alignLoopsWithOptSize() const {
1699 return false;
1700 }
1701
1702 /// If the target has a standard location for the stack protector guard,
1703 /// returns the address of that location. Otherwise, returns nullptr.
1704 /// DEPRECATED: please override useLoadStackGuardNode and customize
1705 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
1706 virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
1707
1708 /// Inserts necessary declarations for SSP (stack protection) purpose.
1709 /// Should be used only when getIRStackGuard returns nullptr.
1710 virtual void insertSSPDeclarations(Module &M) const;
1711
1712 /// Return the variable that's previously inserted by insertSSPDeclarations,
1713 /// if any, otherwise return nullptr. Should be used only when
1714 /// getIRStackGuard returns nullptr.
1715 virtual Value *getSDagStackGuard(const Module &M) const;
1716
1717 /// If this function returns true, stack protection checks should XOR the
1718 /// frame pointer (or whichever pointer is used to address locals) into the
1719 /// stack guard value before checking it. getIRStackGuard must return nullptr
1720 /// if this returns true.
1721 virtual bool useStackGuardXorFP() const { return false; }
1722
1723 /// If the target has a standard stack protection check function that
1724 /// performs validation and error handling, returns the function. Otherwise,
1725 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1726 /// Should be used only when getIRStackGuard returns nullptr.
1727 virtual Function *getSSPStackGuardCheck(const Module &M) const;
1728
1729protected:
1730 Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
1731 bool UseTLS) const;
1732
1733public:
1734 /// Returns the target-specific address of the unsafe stack pointer.
1735 virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
1736
1737 /// Returns the name of the symbol used to emit stack probes or the empty
1738 /// string if not applicable.
1739 virtual bool hasStackProbeSymbol(MachineFunction &MF) const { return false; }
1740
1741 virtual bool hasInlineStackProbe(MachineFunction &MF) const { return false; }
1742
1743 virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
1744 return "";
1745 }
1746
1747 /// Returns true if a cast between SrcAS and DestAS is a noop.
1748 virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1749 return false;
1750 }
1751
1752 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1753 /// are happy to sink it into basic blocks. A cast may be free, but not
1754 /// necessarily a no-op, e.g. a free truncate from a 64-bit to a 32-bit pointer.
1755 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1756 return isNoopAddrSpaceCast(SrcAS, DestAS);
1757 }
1758
1759 /// Return true if the pointer arguments to CI should be aligned by aligning
1760 /// the object whose address is being passed. If so then MinSize is set to the
1761 /// minimum size the object must be to be aligned and PrefAlign is set to the
1762 /// preferred alignment.
1763 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
1764 unsigned & /*PrefAlign*/) const {
1765 return false;
1766 }
1767
1768 //===--------------------------------------------------------------------===//
1769 /// \name Helpers for TargetTransformInfo implementations
1770 /// @{
1771
1772 /// Get the ISD node that corresponds to the Instruction class opcode.
1773 int InstructionOpcodeToISD(unsigned Opcode) const;
1774
1775 /// Estimate the cost of type-legalization and the legalized type.
1776 std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
1777 Type *Ty) const;
1778
1779 /// @}
1780
1781 //===--------------------------------------------------------------------===//
1782 /// \name Helpers for atomic expansion.
1783 /// @{
1784
1785 /// Returns the maximum atomic operation size (in bits) supported by
1786 /// the backend. Atomic operations greater than this size (as well
1787 /// as ones that are not naturally aligned), will be expanded by
1788 /// AtomicExpandPass into an __atomic_* library call.
1789 unsigned getMaxAtomicSizeInBitsSupported() const {
1790 return MaxAtomicSizeInBitsSupported;
1791 }
1792
1793 /// Returns the size of the smallest cmpxchg or ll/sc instruction
1794 /// the backend supports. Any smaller operations are widened in
1795 /// AtomicExpandPass.
1796 ///
1797 /// Note that *unlike* operations above the maximum size, atomic ops
1798 /// are still natively supported below the minimum; they just
1799 /// require a more complex expansion.
1800 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
1801
1802 /// Whether the target supports unaligned atomic operations.
1803 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
1804
1805 /// Whether AtomicExpandPass should automatically insert fences and reduce
1806 /// ordering for this atomic. This should be true for most architectures with
1807 /// weak memory ordering. Defaults to false.
1808 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
1809 return false;
1810 }
1811
1812 /// Perform a load-linked operation on Addr, returning a "Value *" with the
1813 /// corresponding pointee type. This may entail some non-trivial operations to
1814 /// truncate or reconstruct types that will be illegal in the backend. See
1815 /// ARMISelLowering for an example implementation.
1816 virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
1817 AtomicOrdering Ord) const {
1818 llvm_unreachable("Load linked unimplemented on this target");
1819 }
1820
1821 /// Perform a store-conditional operation to Addr. Return the status of the
1822 /// store. This should be 0 if the store succeeded, non-zero otherwise.
1823 virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
1824 Value *Addr, AtomicOrdering Ord) const {
1825 llvm_unreachable("Store conditional unimplemented on this target");
1826 }
1827
1828 /// Perform a masked atomicrmw using a target-specific intrinsic. This
1829 /// represents the core LL/SC loop which will be lowered at a late stage by
1830 /// the backend.
1831 virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
1832 AtomicRMWInst *AI,
1833 Value *AlignedAddr, Value *Incr,
1834 Value *Mask, Value *ShiftAmt,
1835 AtomicOrdering Ord) const {
1836 llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
1837 }
1838
1839 /// Perform a masked cmpxchg using a target-specific intrinsic. This
1840 /// represents the core LL/SC loop which will be lowered at a late stage by
1841 /// the backend.
1842 virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
1843 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
1844 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
1845 llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
1846 }
1847
1848 /// Inserts in the IR a target-specific intrinsic specifying a fence.
1849 /// It is called by AtomicExpandPass before expanding an
1850 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
1851 /// if shouldInsertFencesForAtomic returns true.
1852 ///
1853 /// Inst is the original atomic instruction, prior to other expansions that
1854 /// may be performed.
1855 ///
1856 /// This function should either return a nullptr, or a pointer to an IR-level
1857 /// Instruction*. Even complex fence sequences can be represented by a
1858 /// single Instruction* through an intrinsic to be lowered later.
1859 /// Backends should override this method to produce target-specific intrinsic
1860 /// for their fences.
1861 /// FIXME: Please note that the default implementation here in terms of
1862 /// IR-level fences exists for historical/compatibility reasons and is
1863 /// *unsound* ! Fences cannot, in general, be used to restore sequential
1864 /// consistency. Consider the following example:
1865 /// atomic<int> x = y = 0;
1866 /// int r1, r2, r3, r4;
1867 /// Thread 0:
1868 /// x.store(1);
1869 /// Thread 1:
1870 /// y.store(1);
1871 /// Thread 2:
1872 /// r1 = x.load();
1873 /// r2 = y.load();
1874 /// Thread 3:
1875 /// r3 = y.load();
1876 /// r4 = x.load();
1877 /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
1878 /// seq_cst. But if they are lowered to monotonic accesses, no amount of
1879 /// IR-level fences can prevent it.
1880 /// @{
1881 virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
1882 AtomicOrdering Ord) const {
1883 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
1884 return Builder.CreateFence(Ord);
1885 else
1886 return nullptr;
1887 }
1888
1889 virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
1890 Instruction *Inst,
1891 AtomicOrdering Ord) const {
1892 if (isAcquireOrStronger(Ord))
1893 return Builder.CreateFence(Ord);
1894 else
1895 return nullptr;
1896 }
1897 /// @}
1898
1899 // Emits code that executes when the comparison result in the ll/sc
1900 // expansion of a cmpxchg instruction is such that the store-conditional will
1901 // not execute. This makes it possible to balance out the load-linked with
1902 // a dedicated instruction, if desired.
1903 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
1904 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
1905 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
1906
1907 /// Returns true if the given (atomic) store should be expanded by the
1908 /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
1909 virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
1910 return false;
1911 }
1912
1913 /// Returns true if arguments should be sign-extended in lib calls.
1914 virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
1915 return IsSigned;
1916 }
1917
1918 /// Returns true if arguments should be extended in lib calls.
1919 virtual bool shouldExtendTypeInLibCall(EVT Type) const {
1920 return true;
1921 }
1922
1923 /// Returns how the given (atomic) load should be expanded by the
1924 /// IR-level AtomicExpand pass.
1925 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
1926 return AtomicExpansionKind::None;
1927 }
1928
1929 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
1930 /// AtomicExpand pass.
1931 virtual AtomicExpansionKind
1932 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
1933 return AtomicExpansionKind::None;
1934 }
1935
1936 /// Returns how the IR-level AtomicExpand pass should expand the given
1937 /// AtomicRMW, if at all. Default is to never expand.
1938 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
1939 return RMW->isFloatingPointOperation() ?
1940 AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
1941 }
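
A hedged sketch of how an LL/SC target might override this hook; MyTargetLowering and the size test are illustrative assumptions (a real backend such as RISCV additionally routes sub-word widths through the masked-intrinsic path).

// Sketch only: expand word-sized RMW operations to an LL/SC loop and let
// everything else fall back to a cmpxchg loop.
TargetLowering::AtomicExpansionKind
MyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  unsigned Size = RMW->getType()->getPrimitiveSizeInBits();
  if (Size == 32 || Size == 64)
    return AtomicExpansionKind::LLSC;
  return AtomicExpansionKind::CmpXChg;
}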
1942
1943 /// On some platforms, an AtomicRMW that never actually modifies the value
1944 /// (such as fetch_add of 0) can be turned into a fence followed by an
1945 /// atomic load. This may sound useless, but it makes it possible for the
1946 /// processor to keep the cacheline shared, dramatically improving
1947 /// performance. And such idempotent RMWs are useful for implementing some
1948 /// kinds of locks, see for example (justification + benchmarks):
1949 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
1950 /// This method tries doing that transformation, returning the atomic load if
1951 /// it succeeds, and nullptr otherwise.
1952 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
1953 /// another round of expansion.
1954 virtual LoadInst *
1955 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
1956 return nullptr;
1957 }
1958
1959 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
1960 /// SIGN_EXTEND, or ANY_EXTEND).
1961 virtual ISD::NodeType getExtendForAtomicOps() const {
1962 return ISD::ZERO_EXTEND;
1963 }
1964
1965 /// @}
1966
1967 /// Returns true if we should normalize
1968 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
1969  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
1970 /// that it saves us from materializing N0 and N1 in an integer register.
1971 /// Targets that are able to perform and/or on flags should return false here.
1972 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
1973 EVT VT) const {
1974 // If a target has multiple condition registers, then it likely has logical
1975 // operations on those registers.
1976 if (hasMultipleConditionRegisters())
1977 return false;
1978 // Only do the transform if the value won't be split into multiple
1979 // registers.
1980 LegalizeTypeAction Action = getTypeAction(Context, VT);
1981 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
1982 Action != TypeSplitVector;
1983 }
1984
1985 virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
1986
1987 /// Return true if a select of constants (select Cond, C1, C2) should be
1988 /// transformed into simple math ops with the condition value. For example:
1989 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
1990 virtual bool convertSelectOfConstantsToMath(EVT VT) const {
1991 return false;
1992 }
1993
1994 /// Return true if it is profitable to transform an integer
1995 /// multiplication-by-constant into simpler operations like shifts and adds.
1996 /// This may be true if the target does not directly support the
1997 /// multiplication operation for the specified type or the sequence of simpler
1998 /// ops is faster than the multiply.
1999 virtual bool decomposeMulByConstant(LLVMContext &Context,
2000 EVT VT, SDValue C) const {
2001 return false;
2002 }
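
A hedged sketch of an override that accepts multiplications by 2^n, 2^n + 1 or 2^n - 1, which decompose into a shift plus at most one add or sub; MyTargetLowering and the exact profitability test are assumptions for illustration.

// Sketch only: permit decomposition when the constant multiplier is a power
// of two, or one away from a power of two.
bool MyTargetLowering::decomposeMulByConstant(LLVMContext &Context,
                                              EVT VT, SDValue C) const {
  auto *ConstNode = dyn_cast<ConstantSDNode>(C);
  if (!ConstNode || !VT.isScalarInteger())
    return false;
  const APInt &Imm = ConstNode->getAPIntValue();
  return Imm.isPowerOf2() || (Imm - 1).isPowerOf2() || (Imm + 1).isPowerOf2();
}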
2003
2004 /// Return true if it is more correct/profitable to use strict FP_TO_INT
2005 /// conversion operations - canonicalizing the FP source value instead of
2006 /// converting all cases and then selecting based on value.
2007 /// This may be true if the target throws exceptions for out of bounds
2008 /// conversions or has fast FP CMOV.
2009 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
2010 bool IsSigned) const {
2011 return false;
2012 }
2013
2014 //===--------------------------------------------------------------------===//
2015 // TargetLowering Configuration Methods - These methods should be invoked by
2016 // the derived class constructor to configure this object for the target.
2017 //
2018protected:
2019 /// Specify how the target extends the result of integer and floating point
2020 /// boolean values from i1 to a wider type. See getBooleanContents.
2021 void setBooleanContents(BooleanContent Ty) {
2022 BooleanContents = Ty;
2023 BooleanFloatContents = Ty;
2024 }
2025
2026 /// Specify how the target extends the result of integer and floating point
2027 /// boolean values from i1 to a wider type. See getBooleanContents.
2028 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
2029 BooleanContents = IntTy;
2030 BooleanFloatContents = FloatTy;
2031 }
2032
2033 /// Specify how the target extends the result of a vector boolean value from a
2034 /// vector of i1 to a wider type. See getBooleanContents.
2035 void setBooleanVectorContents(BooleanContent Ty) {
2036 BooleanVectorContents = Ty;
2037 }
2038
2039 /// Specify the target scheduling preference.
2040 void setSchedulingPreference(Sched::Preference Pref) {
2041 SchedPreferenceInfo = Pref;
2042 }
2043
2044  /// Indicate the minimum number of entries required to generate a jump table.
2045 void setMinimumJumpTableEntries(unsigned Val);
2046
2047 /// Indicate the maximum number of entries in jump tables.
2048 /// Set to zero to generate unlimited jump tables.
2049 void setMaximumJumpTableSize(unsigned);
2050
2051 /// If set to a physical register, this specifies the register that
2052  /// llvm.stacksave/llvm.stackrestore should save and restore.
2053 void setStackPointerRegisterToSaveRestore(unsigned R) {
2054 StackPointerRegisterToSaveRestore = R;
2055 }
2056
2057 /// Tells the code generator that the target has multiple (allocatable)
2058 /// condition registers that can be used to store the results of comparisons
2059 /// for use by selects and conditional branches. With multiple condition
2060 /// registers, the code generator will not aggressively sink comparisons into
2061 /// the blocks of their users.
2062 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
2063 HasMultipleConditionRegisters = hasManyRegs;
2064 }
2065
2066 /// Tells the code generator that the target has BitExtract instructions.
2067 /// The code generator will aggressively sink "shift"s into the blocks of
2068 /// their users if the users will generate "and" instructions which can be
2069 /// combined with "shift" to BitExtract instructions.
2070 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2071 HasExtractBitsInsn = hasExtractInsn;
2072 }
2073
2074 /// Tells the code generator not to expand logic operations on comparison
2075 /// predicates into separate sequences that increase the amount of flow
2076 /// control.
2077 void setJumpIsExpensive(bool isExpensive = true);
2078
2079 /// Tells the code generator which bitwidths to bypass.
2080 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
2081 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2082 }
2083
2084 /// Add the specified register class as an available regclass for the
2085 /// specified value type. This indicates the selector can handle values of
2086 /// that class natively.
2087 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2088    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
2089 RegClassForVT[VT.SimpleTy] = RC;
2090 }
2091
2092 /// Return the largest legal super-reg register class of the register class
2093 /// for the specified type and its associated "cost".
2094 virtual std::pair<const TargetRegisterClass *, uint8_t>
2095 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2096
2097 /// Once all of the register classes are added, this allows us to compute
2098 /// derived properties we expose.
2099 void computeRegisterProperties(const TargetRegisterInfo *TRI);
2100
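
Typical usage of the two hooks above from a target constructor, sketched with made-up names; MyTargetLowering, MySubtarget and the register-class identifiers are assumptions for illustration, not quoted from RISCVISelLowering.cpp.

// Sketch only: register the natively supported types, then derive the
// remaining register and legalization properties.
MyTargetLowering::MyTargetLowering(const TargetMachine &TM,
                                   const MySubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  addRegisterClass(MVT::i32, &MyTarget::GPRRegClass); // hypothetical class
  if (Subtarget.hasFP())                              // hypothetical predicate
    addRegisterClass(MVT::f32, &MyTarget::FPRRegClass);
  computeRegisterProperties(Subtarget.getRegisterInfo());
}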
2101 /// Indicate that the specified operation does not work with the specified
2102 /// type and indicate what to do about it. Note that VT may refer to either
2103 /// the type of a result or that of an operand of Op.
2104 void setOperationAction(unsigned Op, MVT VT,
2105 LegalizeAction Action) {
2106    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
2107 OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2108 }
2109
2110 /// Indicate that the specified load with extension does not work with the
2111 /// specified type and indicate what to do about it.
2112 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2113 LegalizeAction Action) {
2114    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2115           MemVT.isValid() && "Table isn't big enough!");
2116    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2117 unsigned Shift = 4 * ExtType;
2118 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2119 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2120 }
2121
2122 /// Indicate that the specified truncating store does not work with the
2123 /// specified type and indicate what to do about it.
2124 void setTruncStoreAction(MVT ValVT, MVT MemVT,
2125 LegalizeAction Action) {
2126    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2127 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2128 }
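
Typical calls a backend makes with these two hooks during initialization; the particular type choices below are only illustrative, not any target's actual tables.

// Sketch only: i1 extending loads are promoted to a wider memory type, and
// truncating f64 -> f32 stores are expanded into an FP round plus a store.
for (MVT VT : MVT::integer_valuetypes()) {
  setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i1, Promote);
}
setTruncStoreAction(MVT::f64, MVT::f32, Expand);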
2129
2130 /// Indicate that the specified indexed load does or does not work with the
2131  /// specified type and indicate what to do about it.
2132 ///
2133 /// NOTE: All indexed mode loads are initialized to Expand in
2134 /// TargetLowering.cpp
2135 void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action) {
2136 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2137 }
2138
2139 /// Indicate that the specified indexed store does or does not work with the
2140 /// specified type and indicate what to do about it.
2141 ///
2142 /// NOTE: All indexed mode stores are initialized to Expand in
2143 /// TargetLowering.cpp
2144 void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action) {
2145 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2146 }
2147
2148 /// Indicate that the specified indexed masked load does or does not work with
2149 /// the specified type and indicate what to do about it.
2150 ///
2151 /// NOTE: All indexed mode masked loads are initialized to Expand in
2152 /// TargetLowering.cpp
2153 void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2154 LegalizeAction Action) {
2155 setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2156 }
2157
2158 /// Indicate that the specified indexed masked store does or does not work
2159 /// with the specified type and indicate what to do about it.
2160 ///
2161 /// NOTE: All indexed mode masked stores are initialized to Expand in
2162 /// TargetLowering.cpp
2163 void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2164 LegalizeAction Action) {
2165 setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2166 }
2167
2168 /// Indicate that the specified condition code is or isn't supported on the
2169 /// target and indicate what to do about it.
2170 void setCondCodeAction(ISD::CondCode CC, MVT VT,
2171 LegalizeAction Action) {
2172    assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
2173           "Table isn't big enough!");
2174    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2175    /// The lower 3 bits of the SimpleTy select the Nth 4-bit group within the
2176    /// 32-bit value, and the upper 29 bits index into the second dimension of
2177    /// the array to select which 32-bit value to use.
2178 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2179 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2180 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2181 }
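
A small worked example of the bit packing performed above, under an assumed SimpleTy value chosen only for illustration.

// Sketch only, worked through for an assumed VT.SimpleTy of 13:
//   Shift = 4 * (13 & 0x7) = 20                 // 4-bit slot inside the word
//   Word  = CondCodeActions[CC][13 >> 3]        // second-dimension index 1
//   Word  = (Word & ~(0xFu << 20)) | ((uint32_t)Action << 20);
// Eight SimpleTy values therefore share each 32-bit word, four bits apiece.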
2182
2183 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2184 /// to trying a larger integer/fp until it can find one that works. If that
2185 /// default is insufficient, this method can be used by the target to override
2186 /// the default.
2187 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2188 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2189 }
2190
2191 /// Convenience method to set an operation to Promote and specify the type
2192 /// in a single call.
2193 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2194 setOperationAction(Opc, OrigVT, Promote);
2195 AddPromotedToType(Opc, OrigVT, DestVT);
2196 }
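
An illustrative pair of calls showing the convenience wrapper versus its two-step equivalent; the opcode and types are chosen only for the example.

// Sketch only: these two forms are equivalent ways of saying "legalize
// ISD::CTLZ on i8 by promoting it to i32".
setOperationAction(ISD::CTLZ, MVT::i8, Promote);
AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);

setOperationPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);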
2197
2198 /// Targets should invoke this method for each target independent node that
2199 /// they want to provide a custom DAG combiner for by implementing the
2200 /// PerformDAGCombine virtual method.
2201 void setTargetDAGCombine(ISD::NodeType NT) {
2202    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
2203 TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
2204 }
2205
2206 /// Set the target's minimum function alignment.
2207 void setMinFunctionAlignment(Align Alignment) {
2208 MinFunctionAlignment = Alignment;
2209 }
2210
2211 /// Set the target's preferred function alignment. This should be set if
2212  /// there is a performance benefit to higher-than-minimum alignment.
2213 void setPrefFunctionAlignment(Align Alignment) {
2214 PrefFunctionAlignment = Alignment;
2215 }
2216
2217 /// Set the target's preferred loop alignment. Default alignment is one, it
2218 /// means the target does not care about loop alignment. The target may also
2219 /// override getPrefLoopAlignment to provide per-loop values.
2220 void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2221
2222 /// Set the minimum stack alignment of an argument.
2223 void setMinStackArgumentAlignment(Align Alignment) {
2224 MinStackArgumentAlignment = Alignment;
2225 }
2226
2227 /// Set the maximum atomic operation size supported by the
2228 /// backend. Atomic operations greater than this size (as well as
2229 /// ones that are not naturally aligned), will be expanded by
2230 /// AtomicExpandPass into an __atomic_* library call.
2231 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2232 MaxAtomicSizeInBitsSupported = SizeInBits;
2233 }
2234
2235 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2236 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2237 MinCmpXchgSizeInBits = SizeInBits;
2238 }
2239
2240 /// Sets whether unaligned atomic operations are supported.
2241 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2242 SupportsUnalignedAtomics = UnalignedSupported;
2243 }
2244
2245public:
2246 //===--------------------------------------------------------------------===//
2247 // Addressing mode description hooks (used by LSR etc).
2248 //
2249
2250 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2251 /// instructions reading the address. This allows as much computation as
2252 /// possible to be done in the address mode for that operand. This hook lets
2253 /// targets also pass back when this should be done on intrinsics which
2254 /// load/store.
2255 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2256 SmallVectorImpl<Value*> &/*Ops*/,
2257 Type *&/*AccessTy*/) const {
2258 return false;
2259 }
2260
2261 /// This represents an addressing mode of:
2262 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2263 /// If BaseGV is null, there is no BaseGV.
2264 /// If BaseOffs is zero, there is no base offset.
2265 /// If HasBaseReg is false, there is no base register.
2266 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
2267 /// no scale.
2268 struct AddrMode {
2269 GlobalValue *BaseGV = nullptr;
2270 int64_t BaseOffs = 0;
2271 bool HasBaseReg = false;
2272 int64_t Scale = 0;
2273 AddrMode() = default;
2274 };
2275
2276 /// Return true if the addressing mode represented by AM is legal for this
2277 /// target, for a load/store of the specified type.
2278 ///
2279 /// The type may be VoidTy, in which case only return true if the addressing
2280 /// mode is legal for a load/store of any legal type. TODO: Handle
2281 /// pre/postinc as well.
2282 ///
2283 /// If the address space cannot be determined, it will be -1.
2284 ///
2285 /// TODO: Remove default argument
2286 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2287 Type *Ty, unsigned AddrSpace,
2288 Instruction *I = nullptr) const;
2289
2290 /// Return the cost of the scaling factor used in the addressing mode
2291 /// represented by AM for this target, for a load/store of the specified type.
2292 ///
2293 /// If the AM is supported, the return value must be >= 0.
2294 /// If the AM is not supported, it returns a negative value.
2295 /// TODO: Handle pre/postinc as well.
2296 /// TODO: Remove default argument
2297 virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
2298 Type *Ty, unsigned AS = 0) const {
2299 // Default: assume that any scaling factor used in a legal AM is free.
2300 if (isLegalAddressingMode(DL, AM, Ty, AS))
2301 return 0;
2302 return -1;
2303 }
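
A hedged sketch of querying the two hooks above from optimization code, for an address of the form base + 4*index + 16; the variables DL, TLI, Int32Ty and AS are assumed to already be in scope.

// Sketch only: describe "baseReg + 4*indexReg + 16" and ask the target
// whether that addressing mode is legal for an i32 load/store.
TargetLowering::AddrMode AM;
AM.BaseGV = nullptr;   // no global base
AM.BaseOffs = 16;      // constant displacement
AM.HasBaseReg = true;  // a base register is present
AM.Scale = 4;          // scaled index register
bool Legal = TLI.isLegalAddressingMode(DL, AM, Int32Ty, AS);
int Cost = Legal ? TLI.getScalingFactorCost(DL, AM, Int32Ty, AS) : -1;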
2304
2305 /// Return true if the specified immediate is legal icmp immediate, that is
2306 /// the target has icmp instructions which can compare a register against the
2307 /// immediate without having to materialize the immediate into a register.
2308 virtual bool isLegalICmpImmediate(int64_t) const {
2309 return true;
2310 }
2311
2312 /// Return true if the specified immediate is legal add immediate, that is the
2313 /// target has add instructions which can add a register with the immediate
2314 /// without having to materialize the immediate into a register.
2315 virtual bool isLegalAddImmediate(int64_t) const {
2316 return true;
2317 }
2318
2319 /// Return true if the specified immediate is legal for the value input of a
2320 /// store instruction.
2321 virtual bool isLegalStoreImmediate(int64_t Value) const {
2322 // Default implementation assumes that at least 0 works since it is likely
2323 // that a zero register exists or a zero immediate is allowed.
2324 return Value == 0;
2325 }
2326
2327 /// Return true if it's significantly cheaper to shift a vector by a uniform
2328 /// scalar than by an amount which will vary across each lane. On x86, for
2329 /// example, there is a "psllw" instruction for the former case, but no simple
2330 /// instruction for a general "a << b" operation on vectors.
2331 virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
2332 return false;
2333 }
2334
2335 /// Returns true if the opcode is a commutative binary operation.
2336 virtual bool isCommutativeBinOp(unsigned Opcode) const {
2337 // FIXME: This should get its info from the td file.
2338 switch (Opcode) {
2339 case ISD::ADD:
2340 case ISD::SMIN:
2341 case ISD::SMAX:
2342 case ISD::UMIN:
2343 case ISD::UMAX:
2344 case ISD::MUL:
2345 case ISD::MULHU:
2346 case ISD::MULHS:
2347 case ISD::SMUL_LOHI:
2348 case ISD::UMUL_LOHI:
2349 case ISD::FADD:
2350 case ISD::FMUL:
2351 case ISD::AND:
2352 case ISD::OR:
2353 case ISD::XOR:
2354 case ISD::SADDO:
2355 case ISD::UADDO:
2356 case ISD::ADDC:
2357 case ISD::ADDE:
2358 case ISD::SADDSAT:
2359 case ISD::UADDSAT:
2360 case ISD::FMINNUM:
2361 case ISD::FMAXNUM:
2362 case ISD::FMINNUM_IEEE:
2363 case ISD::FMAXNUM_IEEE:
2364 case ISD::FMINIMUM:
2365 case ISD::FMAXIMUM:
2366 return true;
2367 default: return false;
2368 }
2369 }
2370
2371 /// Return true if the node is a math/logic binary operator.
2372 virtual bool isBinOp(unsigned Opcode) const {
2373 // A commutative binop must be a binop.
2374 if (isCommutativeBinOp(Opcode))
2375 return true;
2376 // These are non-commutative binops.
2377 switch (Opcode) {
2378 case ISD::SUB:
2379 case ISD::SHL:
2380 case ISD::SRL:
2381 case ISD::SRA:
2382 case ISD::SDIV:
2383 case ISD::UDIV:
2384 case ISD::SREM:
2385 case ISD::UREM:
2386 case ISD::FSUB:
2387 case ISD::FDIV:
2388 case ISD::FREM:
2389 return true;
2390 default:
2391 return false;
2392 }
2393 }
2394
2395 /// Return true if it's free to truncate a value of type FromTy to type
2396 /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
2397 /// by referencing its sub-register AX.
2398 /// Targets must return false when FromTy <= ToTy.
2399 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2400 return false;
2401 }
2402
2403 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2404 /// whether a call is in tail position. Typically this means that both results
2405 /// would be assigned to the same register or stack slot, but it could mean
2406 /// the target performs adequate checks of its own before proceeding with the
2407 /// tail call. Targets must return false when FromTy <= ToTy.
2408 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2409 return false;
2410 }
2411
2412 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
2413 return false;
2414 }
2415
2416 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2417
2418 /// Return true if the extension represented by \p I is free.
2419  /// Unlike the is[Z|FP]ExtFree family, which is based on types,
2420 /// this method can use the context provided by \p I to decide
2421 /// whether or not \p I is free.
2422 /// This method extends the behavior of the is[Z|FP]ExtFree family.
2423  /// In other words, if is[Z|FP]ExtFree returns true, then this method
2424 /// returns true as well. The converse is not true.
2425 /// The target can perform the adequate checks by overriding isExtFreeImpl.
2426 /// \pre \p I must be a sign, zero, or fp extension.
2427 bool isExtFree(const Instruction *I) const {
2428 switch (I->getOpcode()) {
2429 case Instruction::FPExt:
2430 if (isFPExtFree(EVT::getEVT(I->getType()),
2431 EVT::getEVT(I->getOperand(0)->getType())))
2432 return true;
2433 break;
2434 case Instruction::ZExt:
2435 if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
2436 return true;
2437 break;
2438 case Instruction::SExt:
2439 break;
2440 default:
2441 llvm_unreachable("Instruction is not an extension")::llvm::llvm_unreachable_internal("Instruction is not an extension"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/CodeGen/TargetLowering.h"
, 2441)
;
2442 }
2443 return isExtFreeImpl(I);
2444 }
2445
2446 /// Return true if \p Load and \p Ext can form an ExtLoad.
2447 /// For example, in AArch64
2448 /// %L = load i8, i8* %ptr
2449 /// %E = zext i8 %L to i32
2450 /// can be lowered into one load instruction
2451 /// ldrb w0, [x0]
2452 bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
2453 const DataLayout &DL) const {
2454 EVT VT = getValueType(DL, Ext->getType());
2455 EVT LoadVT = getValueType(DL, Load->getType());
2456
2457 // If the load has other users and the truncate is not free, the ext
2458 // probably isn't free.
2459 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
2460 !isTruncateFree(Ext->getType(), Load->getType()))
2461 return false;
2462
2463 // Check whether the target supports casts folded into loads.
2464 unsigned LType;
2465 if (isa<ZExtInst>(Ext))
2466 LType = ISD::ZEXTLOAD;
2467 else {
2468      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
2469 LType = ISD::SEXTLOAD;
2470 }
2471
2472 return isLoadExtLegal(LType, VT, LoadVT);
2473 }
2474
2475 /// Return true if any actual instruction that defines a value of type FromTy
2476 /// implicitly zero-extends the value to ToTy in the result register.
2477 ///
2478 /// The function should return true when it is likely that the truncate can
2479 /// be freely folded with an instruction defining a value of FromTy. If
2480 /// the defining instruction is unknown (because you're looking at a
2481 /// function argument, PHI, etc.) then the target may require an
2482 /// explicit truncate, which is not necessarily free, but this function
2483 /// does not deal with those cases.
2484 /// Targets must return false when FromTy >= ToTy.
2485 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
2486 return false;
2487 }
2488
2489 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
2490 return false;
2491 }
2492
2493 /// Return true if sign-extension from FromTy to ToTy is cheaper than
2494 /// zero-extension.
2495 virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
2496 return false;
2497 }
2498
2499 /// Return true if sinking I's operands to the same basic block as I is
2500 /// profitable, e.g. because the operands can be folded into a target
2501 /// instruction during instruction selection. After calling the function
2502 /// \p Ops contains the Uses to sink ordered by dominance (dominating users
2503 /// come first).
2504 virtual bool shouldSinkOperands(Instruction *I,
2505 SmallVectorImpl<Use *> &Ops) const {
2506 return false;
2507 }
2508
2509 /// Return true if the target supplies and combines to a paired load
2510 /// two loaded values of type LoadedType next to each other in memory.
2511 /// RequiredAlignment gives the minimal alignment constraints that must be met
2512 /// to be able to select this paired load.
2513 ///
2514 /// This information is *not* used to generate actual paired loads, but it is
2515 /// used to generate a sequence of loads that is easier to combine into a
2516 /// paired load.
2517 /// For instance, something like this:
2518 /// a = load i64* addr
2519 /// b = trunc i64 a to i32
2520 /// c = lshr i64 a, 32
2521 /// d = trunc i64 c to i32
2522 /// will be optimized into:
2523 /// b = load i32* addr1
2524 /// d = load i32* addr2
2525 /// Where addr1 = addr2 +/- sizeof(i32).
2526 ///
2527 /// In other words, unless the target performs a post-isel load combining,
2528 /// this information should not be provided because it will generate more
2529 /// loads.
2530 virtual bool hasPairedLoad(EVT /*LoadedType*/,
2531 unsigned & /*RequiredAlignment*/) const {
2532 return false;
2533 }
2534
2535 /// Return true if the target has a vector blend instruction.
2536 virtual bool hasVectorBlend() const { return false; }
2537
2538 /// Get the maximum supported factor for interleaved memory accesses.
2539 /// Default to be the minimum interleave factor: 2.
2540 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
2541
2542 /// Lower an interleaved load to target specific intrinsics. Return
2543 /// true on success.
2544 ///
2545 /// \p LI is the vector load instruction.
2546 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
2547 /// \p Indices is the corresponding indices for each shufflevector.
2548 /// \p Factor is the interleave factor.
2549 virtual bool lowerInterleavedLoad(LoadInst *LI,
2550 ArrayRef<ShuffleVectorInst *> Shuffles,
2551 ArrayRef<unsigned> Indices,
2552 unsigned Factor) const {
2553 return false;
2554 }
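
For reference, the kind of IR pattern this hook is handed by the InterleavedAccess pass, sketched for a factor-2 de-interleave; the value names are illustrative.

// Sketch only (LLVM IR shown in a comment): a factor-2 interleaved load.
//   %wide = load <8 x i32>, <8 x i32>* %ptr
//   %even = shufflevector <8 x i32> %wide, <8 x i32> undef,
//                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
//   %odd  = shufflevector <8 x i32> %wide, <8 x i32> undef,
//                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>
// A call lowerInterleavedLoad(LI = %wide's load, Shuffles = {%even, %odd},
// Indices = {0, 1}, Factor = 2) may replace the pattern with a target
// intrinsic (e.g. AArch64's ld2), returning true on success.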
2555
2556 /// Lower an interleaved store to target specific intrinsics. Return
2557 /// true on success.
2558 ///
2559 /// \p SI is the vector store instruction.
2560 /// \p SVI is the shufflevector to RE-interleave the stored vector.
2561 /// \p Factor is the interleave factor.
2562 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
2563 unsigned Factor) const {
2564 return false;
2565 }
2566
2567 /// Return true if zero-extending the specific node Val to type VT2 is free
2568 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2569 /// because it's folded such as X86 zero-extending loads).
2570 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2571 return isZExtFree(Val.getValueType(), VT2);
5. Calling 'SDValue::getValueType'
2572 }
2573
2574 /// Return true if an fpext operation is free (for instance, because
2575 /// single-precision floating-point numbers are implicitly extended to
2576 /// double-precision).
2577 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
2578    assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
2579           "invalid fpext types");
2580 return false;
2581 }
2582
2583 /// Return true if an fpext operation input to an \p Opcode operation is free
2584 /// (for instance, because half-precision floating-point numbers are
2585 /// implicitly extended to float-precision) for an FMA instruction.
2586 virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
2587 EVT DestVT, EVT SrcVT) const {
2588    assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
2589           "invalid fpext types");
2590 return isFPExtFree(DestVT, SrcVT);
2591 }
2592
2593 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
2594 /// extend node) is profitable.
2595 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
2596
2597 /// Return true if an fneg operation is free to the point where it is never
2598 /// worthwhile to replace it with a bitwise operation.
2599 virtual bool isFNegFree(EVT VT) const {
2600    assert(VT.isFloatingPoint());
2601 return false;
2602 }
2603
2604 /// Return true if an fabs operation is free to the point where it is never
2605 /// worthwhile to replace it with a bitwise operation.
2606 virtual bool isFAbsFree(EVT VT) const {
2607    assert(VT.isFloatingPoint());
2608 return false;
2609 }
2610
2611 /// Return true if an FMA operation is faster than a pair of fmul and fadd
2612 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2613 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
2614 ///
2615 /// NOTE: This may be called before legalization on types for which FMAs are
2616 /// not legal, but should return true if those types will eventually legalize
2617 /// to types that support FMAs. After legalization, it will only be called on
2618 /// types that support FMAs (via Legal or Custom actions)
2619 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
2620 EVT) const {
2621 return false;
2622 }
2623
2624 /// IR version
2625 virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
2626 return false;
2627 }
2628
2629 /// Returns true if the FADD or FSUB node passed could legally be combined with
2630 /// an fmul to form an ISD::FMAD.
2631 virtual bool isFMADLegalForFAddFSub(const SelectionDAG &DAG,
2632 const SDNode *N) const {
2633    assert(N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB);
2634 return isOperationLegal(ISD::FMAD, N->getValueType(0));
2635 }
2636
2637 /// Return true if it's profitable to narrow operations of type VT1 to
2638 /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
2639 /// i32 to i16.
2640 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
2641 return false;
2642 }
2643
2644 /// Return true if it is beneficial to convert a load of a constant to
2645 /// just the constant itself.
2646 /// On some targets it might be more efficient to use a combination of
2647 /// arithmetic instructions to materialize the constant instead of loading it
2648 /// from a constant pool.
2649 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
2650 Type *Ty) const {
2651 return false;
2652 }
2653
2654 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
2655 /// from this source type with this index. This is needed because
2656 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
2657 /// the first element, and only the target knows which lowering is cheap.
2658 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2659 unsigned Index) const {
2660 return false;
2661 }
2662
2663 /// Try to convert an extract element of a vector binary operation into an
2664 /// extract element followed by a scalar operation.
2665 virtual bool shouldScalarizeBinop(SDValue VecOp) const {
2666 return false;
2667 }
2668
2669 /// Return true if extraction of a scalar element from the given vector type
2670 /// at the given index is cheap. For example, if scalar operations occur on
2671 /// the same register file as vector operations, then an extract element may
2672 /// be a sub-register rename rather than an actual instruction.
2673 virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
2674 return false;
2675 }
2676
2677 /// Try to convert math with an overflow comparison into the corresponding DAG
2678 /// node operation. Targets may want to override this independently of whether
2679 /// the operation is legal/custom for the given type because it may obscure
2680 /// matching of other patterns.
2681 virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
2682 bool MathUsed) const {
2683 // TODO: The default logic is inherited from code in CodeGenPrepare.
2684 // The opcode should not make a difference by default?
2685 if (Opcode != ISD::UADDO)
2686 return false;
2687
2688 // Allow the transform as long as we have an integer type that is not
2689 // obviously illegal and unsupported and if the math result is used
2690 // besides the overflow check. On some targets (e.g. SPARC), it is
2691    // not profitable to form an overflow op if the math result has no
2692 // concrete users.
2693 if (VT.isVector())
2694 return false;
2695 return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
2696 }
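
The transform being gated here, sketched at the IR level; CodeGenPrepare performs the rewrite, and the value names are illustrative.

// Sketch only (LLVM IR shown in a comment): the add-plus-compare idiom
//   %s  = add i32 %a, %b
//   %ov = icmp ult i32 %s, %a        ; unsigned overflow check
// may, when this hook returns true, be rewritten into
//   %r  = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %s  = extractvalue { i32, i1 } %r, 0
//   %ov = extractvalue { i32, i1 } %r, 1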
2697
2698 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
2699 // even if the vector itself has multiple uses.
2700 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
2701 return false;
2702 }
2703
2704 // Return true if CodeGenPrepare should consider splitting large offset of a
2705 // GEP to make the GEP fit into the addressing mode and can be sunk into the
2706 // same blocks of its users.
2707 virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
2708
2709 /// Return true if creating a shift of the type by the given
2710 /// amount is not profitable.
2711 virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
2712 return false;
2713 }
2714
2715 //===--------------------------------------------------------------------===//
2716 // Runtime Library hooks
2717 //
2718
2719 /// Rename the default libcall routine name for the specified libcall.
2720 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
2721 LibcallRoutineNames[Call] = Name;
2722 }
2723
2724 /// Get the libcall routine name for the specified libcall.
2725 const char *getLibcallName(RTLIB::Libcall Call) const {
2726 return LibcallRoutineNames[Call];
2727 }
2728
2729 /// Override the default CondCode to be used to test the result of the
2730 /// comparison libcall against zero.
2731 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
2732 CmpLibcallCCs[Call] = CC;
2733 }
2734
2735 /// Get the CondCode that's to be used to test the result of the comparison
2736 /// libcall against zero.
2737 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
2738 return CmpLibcallCCs[Call];
2739 }
2740
2741 /// Set the CallingConv that should be used for the specified libcall.
2742 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
2743 LibcallCallingConvs[Call] = CC;
2744 }
2745
2746 /// Get the CallingConv that should be used for the specified libcall.
2747 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
2748 return LibcallCallingConvs[Call];
2749 }
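
Illustrative constructor calls for the runtime-library hooks above, loosely modelled on how an ARM EABI configuration uses them; the specific libcall, name and calling convention are only an example.

// Sketch only: point an FP equality comparison at an EABI helper, test its
// result against zero with SETNE, and give it the AAPCS calling convention.
setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);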
2750
2751 /// Execute target specific actions to finalize target lowering.
2752 /// This is used to set extra flags in MachineFrameInformation and freezing
2753 /// the set of reserved registers.
2754 /// The default implementation just freezes the set of reserved registers.
2755 virtual void finalizeLowering(MachineFunction &MF) const;
2756
2757 //===----------------------------------------------------------------------===//
2758 // GlobalISel Hooks
2759 //===----------------------------------------------------------------------===//
2760 /// Check whether or not \p MI needs to be moved close to its uses.
2761 virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
2762
2763
2764private:
2765 const TargetMachine &TM;
2766
2767 /// Tells the code generator that the target has multiple (allocatable)
2768 /// condition registers that can be used to store the results of comparisons
2769 /// for use by selects and conditional branches. With multiple condition
2770 /// registers, the code generator will not aggressively sink comparisons into
2771 /// the blocks of their users.
2772 bool HasMultipleConditionRegisters;
2773
2774 /// Tells the code generator that the target has BitExtract instructions.
2775 /// The code generator will aggressively sink "shift"s into the blocks of
2776 /// their users if the users will generate "and" instructions which can be
2777 /// combined with "shift" to BitExtract instructions.
2778 bool HasExtractBitsInsn;
2779
2780 /// Tells the code generator to bypass slow divide or remainder
2781 /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
2782 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
2783 /// div/rem when the operands are positive and less than 256.
2784 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
2785
2786 /// Tells the code generator that it shouldn't generate extra flow control
2787 /// instructions and should attempt to combine flow control instructions via
2788 /// predication.
2789 bool JumpIsExpensive;
2790
2791 /// Information about the contents of the high-bits in boolean values held in
2792 /// a type wider than i1. See getBooleanContents.
2793 BooleanContent BooleanContents;
2794
2795 /// Information about the contents of the high-bits in boolean values held in
2796 /// a type wider than i1. See getBooleanContents.
2797 BooleanContent BooleanFloatContents;
2798
2799 /// Information about the contents of the high-bits in boolean vector values
2800 /// when the element type is wider than i1. See getBooleanContents.
2801 BooleanContent BooleanVectorContents;
2802
2803 /// The target scheduling preference: shortest possible total cycles or lowest
2804 /// register usage.
2805 Sched::Preference SchedPreferenceInfo;
2806
2807 /// The minimum alignment that any argument on the stack needs to have.
2808 Align MinStackArgumentAlignment;
2809
2810 /// The minimum function alignment (used when optimizing for size, and to
2811 /// prevent explicitly provided alignment from leading to incorrect code).
2812 Align MinFunctionAlignment;
2813
2814 /// The preferred function alignment (used when alignment unspecified and
2815 /// optimizing for speed).
2816 Align PrefFunctionAlignment;
2817
2818  /// The preferred loop alignment (in log2, not in bytes).
2819 Align PrefLoopAlignment;
2820
2821 /// Size in bits of the maximum atomics size the backend supports.
2822 /// Accesses larger than this will be expanded by AtomicExpandPass.
2823 unsigned MaxAtomicSizeInBitsSupported;
2824
2825 /// Size in bits of the minimum cmpxchg or ll/sc operation the
2826 /// backend supports.
2827 unsigned MinCmpXchgSizeInBits;
2828
2829 /// This indicates if the target supports unaligned atomic operations.
2830 bool SupportsUnalignedAtomics;
2831
2832 /// If set to a physical register, this specifies the register that
2833  /// llvm.stacksave/llvm.stackrestore should save and restore.
2834 unsigned StackPointerRegisterToSaveRestore;
2835
2836 /// This indicates the default register class to use for each ValueType the
2837 /// target supports natively.
2838 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
2839 uint16_t NumRegistersForVT[MVT::LAST_VALUETYPE];
2840 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
2841
2842 /// This indicates the "representative" register class to use for each
2843 /// ValueType the target supports natively. This information is used by the
2844 /// scheduler to track register pressure. By default, the representative
2845 /// register class is the largest legal super-reg register class