X86FastISel.cpp
1//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the X86-specific support for the FastISel class. Much
10// of the target-specific code is generated by tablegen in the file
11// X86GenFastISel.inc, which is #included here.
12//
13//===----------------------------------------------------------------------===//
14
15#include "X86.h"
16#include "X86CallingConv.h"
17#include "X86InstrBuilder.h"
18#include "X86InstrInfo.h"
20#include "X86RegisterInfo.h"
21#include "X86Subtarget.h"
22#include "X86TargetMachine.h"
30#include "llvm/IR/CallingConv.h"
31#include "llvm/IR/DebugInfo.h"
37#include "llvm/IR/IntrinsicsX86.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/MC/MCAsmInfo.h"
41#include "llvm/MC/MCSymbol.h"
44using namespace llvm;
45
46namespace {
47
48class X86FastISel final : public FastISel {
49 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
50 /// make the right decision when generating code for different targets.
51 const X86Subtarget *Subtarget;
52
53public:
54 explicit X86FastISel(FunctionLoweringInfo &funcInfo,
55 const TargetLibraryInfo *libInfo,
56 const LibcallLoweringInfo *libcallLowering)
57 : FastISel(funcInfo, libInfo, libcallLowering) {
58 Subtarget = &funcInfo.MF->getSubtarget<X86Subtarget>();
59 }
60
61 bool fastSelectInstruction(const Instruction *I) override;
62
63 /// The specified machine instr operand is a vreg, and that
64 /// vreg is being provided by the specified load instruction. If possible,
65 /// try to fold the load as an operand to the instruction, returning true if
66 /// possible.
67 bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
68 const LoadInst *LI) override;
69
70 bool fastLowerArguments() override;
71 bool fastLowerCall(CallLoweringInfo &CLI) override;
72 bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
73
74#include "X86GenFastISel.inc"
75
76private:
77 bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT,
78 const DebugLoc &DL);
79
80 bool X86FastEmitLoad(MVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
81 Register &ResultReg, unsigned Alignment = 1);
82
83 bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM,
84 MachineMemOperand *MMO = nullptr, bool Aligned = false);
85 bool X86FastEmitStore(EVT VT, Register ValReg, X86AddressMode &AM,
86 MachineMemOperand *MMO = nullptr, bool Aligned = false);
87
88 bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, Register Src, EVT SrcVT,
89 Register &ResultReg);
90
91 bool X86SelectAddress(const Value *V, X86AddressMode &AM);
92 bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
93
94 bool X86SelectLoad(const Instruction *I);
95
96 bool X86SelectStore(const Instruction *I);
97
98 bool X86SelectRet(const Instruction *I);
99
100 bool X86SelectCmp(const Instruction *I);
101
102 bool X86SelectZExt(const Instruction *I);
103
104 bool X86SelectSExt(const Instruction *I);
105
106 bool X86SelectBranch(const Instruction *I);
107
108 bool X86SelectShift(const Instruction *I);
109
110 bool X86SelectDivRem(const Instruction *I);
111
112 bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);
113
114 bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);
115
116 bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);
117
118 bool X86SelectSelect(const Instruction *I);
119
120 bool X86SelectTrunc(const Instruction *I);
121
122 bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,
123 const TargetRegisterClass *RC);
124
125 bool X86SelectFPExt(const Instruction *I);
126 bool X86SelectFPTrunc(const Instruction *I);
127 bool X86SelectSIToFP(const Instruction *I);
128 bool X86SelectUIToFP(const Instruction *I);
129 bool X86SelectIntToFP(const Instruction *I, bool IsSigned);
130 bool X86SelectBitCast(const Instruction *I);
131
132 const X86InstrInfo *getInstrInfo() const {
133 return Subtarget->getInstrInfo();
134 }
135 const X86TargetMachine *getTargetMachine() const {
136 return static_cast<const X86TargetMachine *>(&TM);
137 }
138
139 bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
140
141 Register X86MaterializeInt(const ConstantInt *CI, MVT VT);
142 Register X86MaterializeFP(const ConstantFP *CFP, MVT VT);
143 Register X86MaterializeGV(const GlobalValue *GV, MVT VT);
144 Register fastMaterializeConstant(const Constant *C) override;
145
146 Register fastMaterializeAlloca(const AllocaInst *C) override;
147
148 Register fastMaterializeFloatZero(const ConstantFP *CF) override;
149
150 /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
151 /// computed in an SSE register, not on the X87 floating point stack.
152 bool isScalarFPTypeInSSEReg(EVT VT) const {
153 return (VT == MVT::f64 && Subtarget->hasSSE2()) ||
154 (VT == MVT::f32 && Subtarget->hasSSE1()) || VT == MVT::f16;
155 }
156
157 bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);
158
159 bool IsMemcpySmall(uint64_t Len);
160
161 bool TryEmitSmallMemcpy(X86AddressMode DestAM,
162 X86AddressMode SrcAM, uint64_t Len);
163
164 bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
165 const Value *Cond);
166
167 const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB,
168 X86AddressMode &AM);
169
170 Register fastEmitInst_rrrr(unsigned MachineInstOpcode,
171 const TargetRegisterClass *RC, Register Op0,
172 Register Op1, Register Op2, Register Op3);
173};
174
175} // end anonymous namespace.
176
177static std::pair<unsigned, bool>
178getX86SSEConditionCode(CmpInst::Predicate Predicate) {
179 unsigned CC;
180 bool NeedSwap = false;
181
182 // SSE Condition code mapping:
183 // 0 - EQ
184 // 1 - LT
185 // 2 - LE
186 // 3 - UNORD
187 // 4 - NEQ
188 // 5 - NLT
189 // 6 - NLE
190 // 7 - ORD
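 // The 8 (EQ_UQ, unordered-or-equal) and 12 (NEQ_OQ, ordered-and-unequal)
 // values below come from the AVX extended predicate encodings; the legacy
 // SSE compare encodings only cover immediates 0-7.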
191 switch (Predicate) {
192 default: llvm_unreachable("Unexpected predicate");
193 case CmpInst::FCMP_OEQ: CC = 0; break;
194 case CmpInst::FCMP_OGT: NeedSwap = true; [[fallthrough]];
195 case CmpInst::FCMP_OLT: CC = 1; break;
196 case CmpInst::FCMP_OGE: NeedSwap = true; [[fallthrough]];
197 case CmpInst::FCMP_OLE: CC = 2; break;
198 case CmpInst::FCMP_UNO: CC = 3; break;
199 case CmpInst::FCMP_UNE: CC = 4; break;
200 case CmpInst::FCMP_ULE: NeedSwap = true; [[fallthrough]];
201 case CmpInst::FCMP_UGE: CC = 5; break;
202 case CmpInst::FCMP_ULT: NeedSwap = true; [[fallthrough]];
203 case CmpInst::FCMP_UGT: CC = 6; break;
204 case CmpInst::FCMP_ORD: CC = 7; break;
205 case CmpInst::FCMP_UEQ: CC = 8; break;
206 case CmpInst::FCMP_ONE: CC = 12; break;
207 }
208
209 return std::make_pair(CC, NeedSwap);
210}
211
212/// Adds a complex addressing mode to the given machine instr builder.
213/// Note, this will constrain the index register. If it's not possible to
214/// constrain the given index register, then a new one will be created. The
215/// IndexReg field of the addressing mode will be updated to match in this case.
216const MachineInstrBuilder &
217X86FastISel::addFullAddress(const MachineInstrBuilder &MIB,
218 X86AddressMode &AM) {
219 // First constrain the index register. It needs to be a GR64_NOSP.
220 AM.IndexReg = constrainOperandRegClass(MIB->getDesc(), AM.IndexReg,
221 MIB->getNumOperands() +
222 X86::AddrIndexReg);
223 return ::addFullAddress(MIB, AM);
224}
225
226/// Check if it is possible to fold the condition from the XALU intrinsic
227/// into the user. The condition code will only be updated on success.
228bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
229 const Value *Cond) {
230 if (!isa<ExtractValueInst>(Cond))
231 return false;
232
233 const auto *EV = cast<ExtractValueInst>(Cond);
234 if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
235 return false;
236
237 const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
238 MVT RetVT;
239 const Function *Callee = II->getCalledFunction();
240 Type *RetTy =
241 cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
242 if (!isTypeLegal(RetTy, RetVT))
243 return false;
244
245 if (RetVT != MVT::i32 && RetVT != MVT::i64)
246 return false;
247
248 X86::CondCode TmpCC;
249 switch (II->getIntrinsicID()) {
250 default: return false;
251 case Intrinsic::sadd_with_overflow:
252 case Intrinsic::ssub_with_overflow:
253 case Intrinsic::smul_with_overflow:
254 case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;
255 case Intrinsic::uadd_with_overflow:
256 case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;
257 }
258
259 // Check if both instructions are in the same basic block.
260 if (II->getParent() != I->getParent())
261 return false;
262
263 // Make sure nothing is in the way.
264 BasicBlock::const_iterator Start(I);
265 BasicBlock::const_iterator End(II);
266 for (auto Itr = std::prev(Start); Itr != End; --Itr) {
267 // We only expect extractvalue instructions between the intrinsic and the
268 // instruction to be selected.
269 if (!isa<ExtractValueInst>(Itr))
270 return false;
271
272 // Check that the extractvalue operand comes from the intrinsic.
273 const auto *EVI = cast<ExtractValueInst>(Itr);
274 if (EVI->getAggregateOperand() != II)
275 return false;
276 }
277
278 // Make sure no potentially eflags clobbering phi moves can be inserted in
279 // between.
280 auto HasPhis = [](const BasicBlock *Succ) { return !Succ->phis().empty(); };
281 if (I->isTerminator() && llvm::any_of(successors(I), HasPhis))
282 return false;
283
284 // Make sure there are no potentially eflags clobbering constant
285 // materializations in between.
286 if (llvm::any_of(I->operands(), [](Value *V) { return isa<Constant>(V); }))
287 return false;
288
289 CC = TmpCC;
290 return true;
291}
292
293bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
294 EVT evt = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
295 if (evt == MVT::Other || !evt.isSimple())
296 // Unhandled type. Halt "fast" selection and bail.
297 return false;
298
299 VT = evt.getSimpleVT();
300 // For now, require SSE/SSE2 for performing floating-point operations,
301 // since x87 requires additional work.
302 if (VT == MVT::f64 && !Subtarget->hasSSE2())
303 return false;
304 if (VT == MVT::f32 && !Subtarget->hasSSE1())
305 return false;
306 // Similarly, no f80 support yet.
307 if (VT == MVT::f80)
308 return false;
309 // We only handle legal types. For example, on x86-32 the instruction
310 // selector contains all of the 64-bit instructions from x86-64,
311 // under the assumption that i64 won't be used if the target doesn't
312 // support it.
313 return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
314}
315
316/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
317/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
318/// Return true and the result register by reference if it is possible.
319bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM,
320 MachineMemOperand *MMO, Register &ResultReg,
321 unsigned Alignment) {
322 bool HasSSE1 = Subtarget->hasSSE1();
323 bool HasSSE2 = Subtarget->hasSSE2();
324 bool HasSSE41 = Subtarget->hasSSE41();
325 bool HasAVX = Subtarget->hasAVX();
326 bool HasAVX2 = Subtarget->hasAVX2();
327 bool HasAVX512 = Subtarget->hasAVX512();
328 bool HasVLX = Subtarget->hasVLX();
329 bool IsNonTemporal = MMO && MMO->isNonTemporal();
330
331 // Treat i1 loads the same as i8 loads. Masking will be done when storing.
332 if (VT == MVT::i1)
333 VT = MVT::i8;
334
335 // Get opcode and regclass of the output for the given load instruction.
336 unsigned Opc = 0;
337 switch (VT.SimpleTy) {
338 default: return false;
339 case MVT::i8:
340 Opc = X86::MOV8rm;
341 break;
342 case MVT::i16:
343 Opc = X86::MOV16rm;
344 break;
345 case MVT::i32:
346 Opc = X86::MOV32rm;
347 break;
348 case MVT::i64:
349 // Must be in x86-64 mode.
350 Opc = X86::MOV64rm;
351 break;
352 case MVT::f32:
353 Opc = HasAVX512 ? X86::VMOVSSZrm_alt
354 : HasAVX ? X86::VMOVSSrm_alt
355 : HasSSE1 ? X86::MOVSSrm_alt
356 : X86::LD_Fp32m;
357 break;
358 case MVT::f64:
359 Opc = HasAVX512 ? X86::VMOVSDZrm_alt
360 : HasAVX ? X86::VMOVSDrm_alt
361 : HasSSE2 ? X86::MOVSDrm_alt
362 : X86::LD_Fp64m;
363 break;
364 case MVT::f80:
365 // No f80 support yet.
366 return false;
367 case MVT::v4f32:
368 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
369 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
370 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
371 else if (Alignment >= 16)
372 Opc = HasVLX ? X86::VMOVAPSZ128rm :
373 HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
374 else
375 Opc = HasVLX ? X86::VMOVUPSZ128rm :
376 HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;
377 break;
378 case MVT::v2f64:
379 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
380 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
381 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
382 else if (Alignment >= 16)
383 Opc = HasVLX ? X86::VMOVAPDZ128rm :
384 HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
385 else
386 Opc = HasVLX ? X86::VMOVUPDZ128rm :
387 HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;
388 break;
389 case MVT::v4i32:
390 case MVT::v2i64:
391 case MVT::v8i16:
392 case MVT::v16i8:
393 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
394 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
395 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
396 else if (Alignment >= 16)
397 Opc = HasVLX ? X86::VMOVDQA64Z128rm :
398 HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
399 else
400 Opc = HasVLX ? X86::VMOVDQU64Z128rm :
401 HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;
402 break;
403 case MVT::v8f32:
404 assert(HasAVX);
405 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
406 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
407 else if (IsNonTemporal && Alignment >= 16)
408 return false; // Force split for X86::VMOVNTDQArm
409 else if (Alignment >= 32)
410 Opc = HasVLX ? X86::VMOVAPSZ256rm : X86::VMOVAPSYrm;
411 else
412 Opc = HasVLX ? X86::VMOVUPSZ256rm : X86::VMOVUPSYrm;
413 break;
414 case MVT::v4f64:
415 assert(HasAVX);
416 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
417 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
418 else if (IsNonTemporal && Alignment >= 16)
419 return false; // Force split for X86::VMOVNTDQArm
420 else if (Alignment >= 32)
421 Opc = HasVLX ? X86::VMOVAPDZ256rm : X86::VMOVAPDYrm;
422 else
423 Opc = HasVLX ? X86::VMOVUPDZ256rm : X86::VMOVUPDYrm;
424 break;
425 case MVT::v8i32:
426 case MVT::v4i64:
427 case MVT::v16i16:
428 case MVT::v32i8:
429 assert(HasAVX);
430 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
431 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
432 else if (IsNonTemporal && Alignment >= 16)
433 return false; // Force split for X86::VMOVNTDQArm
434 else if (Alignment >= 32)
435 Opc = HasVLX ? X86::VMOVDQA64Z256rm : X86::VMOVDQAYrm;
436 else
437 Opc = HasVLX ? X86::VMOVDQU64Z256rm : X86::VMOVDQUYrm;
438 break;
439 case MVT::v16f32:
440 assert(HasAVX512);
441 if (IsNonTemporal && Alignment >= 64)
442 Opc = X86::VMOVNTDQAZrm;
443 else
444 Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;
445 break;
446 case MVT::v8f64:
447 assert(HasAVX512);
448 if (IsNonTemporal && Alignment >= 64)
449 Opc = X86::VMOVNTDQAZrm;
450 else
451 Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;
452 break;
453 case MVT::v8i64:
454 case MVT::v16i32:
455 case MVT::v32i16:
456 case MVT::v64i8:
457 assert(HasAVX512);
458 // Note: There are a lot more choices based on type with AVX-512, but
459 // there's really no advantage when the load isn't masked.
460 if (IsNonTemporal && Alignment >= 64)
461 Opc = X86::VMOVNTDQAZrm;
462 else
463 Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;
464 break;
465 }
466
467 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
468
469 ResultReg = createResultReg(RC);
470 MachineInstrBuilder MIB =
471 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg);
472 addFullAddress(MIB, AM);
473 if (MMO)
474 MIB->addMemOperand(*FuncInfo.MF, MMO);
475 return true;
476}
477
478/// X86FastEmitStore - Emit a machine instruction to store a value Val of
479/// type VT. The address is either pre-computed, consisting of a base ptr,
480/// Ptr, and a displacement offset, or a GlobalAddress, i.e. V.
481/// Return true if it is possible.
482bool X86FastISel::X86FastEmitStore(EVT VT, Register ValReg, X86AddressMode &AM,
483 MachineMemOperand *MMO, bool Aligned) {
484 bool HasSSE1 = Subtarget->hasSSE1();
485 bool HasSSE2 = Subtarget->hasSSE2();
486 bool HasSSE4A = Subtarget->hasSSE4A();
487 bool HasAVX = Subtarget->hasAVX();
488 bool HasAVX512 = Subtarget->hasAVX512();
489 bool HasVLX = Subtarget->hasVLX();
490 bool IsNonTemporal = MMO && MMO->isNonTemporal();
491
492 // Get opcode and regclass of the output for the given store instruction.
493 unsigned Opc = 0;
494 switch (VT.getSimpleVT().SimpleTy) {
495 case MVT::f80: // No f80 support yet.
496 default: return false;
497 case MVT::i1: {
498 // Mask out all but lowest bit.
499 Register AndResult = createResultReg(&X86::GR8RegClass);
500 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
501 TII.get(X86::AND8ri), AndResult)
502 .addReg(ValReg).addImm(1);
503 ValReg = AndResult;
504 [[fallthrough]]; // handle i1 as i8.
505 }
506 case MVT::i8: Opc = X86::MOV8mr; break;
507 case MVT::i16: Opc = X86::MOV16mr; break;
508 case MVT::i32:
509 Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr;
510 break;
511 case MVT::i64:
512 // Must be in x86-64 mode.
513 Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;
514 break;
515 case MVT::f32:
516 if (HasSSE1) {
517 if (IsNonTemporal && HasSSE4A)
518 Opc = X86::MOVNTSS;
519 else
520 Opc = HasAVX512 ? X86::VMOVSSZmr :
521 HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
522 } else
523 Opc = X86::ST_Fp32m;
524 break;
525 case MVT::f64:
526 if (HasSSE2) {
527 if (IsNonTemporal && HasSSE4A)
528 Opc = X86::MOVNTSD;
529 else
530 Opc = HasAVX512 ? X86::VMOVSDZmr :
531 HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;
532 } else
533 Opc = X86::ST_Fp64m;
534 break;
535 case MVT::x86mmx:
536 Opc = (IsNonTemporal && HasSSE1) ? X86::MMX_MOVNTQmr : X86::MMX_MOVQ64mr;
537 break;
538 case MVT::v4f32:
539 if (Aligned) {
540 if (IsNonTemporal)
541 Opc = HasVLX ? X86::VMOVNTPSZ128mr :
542 HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr;
543 else
544 Opc = HasVLX ? X86::VMOVAPSZ128mr :
545 HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr;
546 } else
547 Opc = HasVLX ? X86::VMOVUPSZ128mr :
548 HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr;
549 break;
550 case MVT::v2f64:
551 if (Aligned) {
552 if (IsNonTemporal)
553 Opc = HasVLX ? X86::VMOVNTPDZ128mr :
554 HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr;
555 else
556 Opc = HasVLX ? X86::VMOVAPDZ128mr :
557 HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr;
558 } else
559 Opc = HasVLX ? X86::VMOVUPDZ128mr :
560 HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr;
561 break;
562 case MVT::v4i32:
563 case MVT::v2i64:
564 case MVT::v8i16:
565 case MVT::v16i8:
566 if (Aligned) {
567 if (IsNonTemporal)
568 Opc = HasVLX ? X86::VMOVNTDQZ128mr :
569 HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr;
570 else
571 Opc = HasVLX ? X86::VMOVDQA64Z128mr :
572 HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
573 } else
574 Opc = HasVLX ? X86::VMOVDQU64Z128mr :
575 HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;
576 break;
577 case MVT::v8f32:
578 assert(HasAVX);
579 if (Aligned) {
580 if (IsNonTemporal)
581 Opc = HasVLX ? X86::VMOVNTPSZ256mr : X86::VMOVNTPSYmr;
582 else
583 Opc = HasVLX ? X86::VMOVAPSZ256mr : X86::VMOVAPSYmr;
584 } else
585 Opc = HasVLX ? X86::VMOVUPSZ256mr : X86::VMOVUPSYmr;
586 break;
587 case MVT::v4f64:
588 assert(HasAVX);
589 if (Aligned) {
590 if (IsNonTemporal)
591 Opc = HasVLX ? X86::VMOVNTPDZ256mr : X86::VMOVNTPDYmr;
592 else
593 Opc = HasVLX ? X86::VMOVAPDZ256mr : X86::VMOVAPDYmr;
594 } else
595 Opc = HasVLX ? X86::VMOVUPDZ256mr : X86::VMOVUPDYmr;
596 break;
597 case MVT::v8i32:
598 case MVT::v4i64:
599 case MVT::v16i16:
600 case MVT::v32i8:
601 assert(HasAVX);
602 if (Aligned) {
603 if (IsNonTemporal)
604 Opc = HasVLX ? X86::VMOVNTDQZ256mr : X86::VMOVNTDQYmr;
605 else
606 Opc = HasVLX ? X86::VMOVDQA64Z256mr : X86::VMOVDQAYmr;
607 } else
608 Opc = HasVLX ? X86::VMOVDQU64Z256mr : X86::VMOVDQUYmr;
609 break;
610 case MVT::v16f32:
611 assert(HasAVX512);
612 if (Aligned)
613 Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
614 else
615 Opc = X86::VMOVUPSZmr;
616 break;
617 case MVT::v8f64:
618 assert(HasAVX512);
619 if (Aligned) {
620 Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
621 } else
622 Opc = X86::VMOVUPDZmr;
623 break;
624 case MVT::v8i64:
625 case MVT::v16i32:
626 case MVT::v32i16:
627 case MVT::v64i8:
628 assert(HasAVX512);
629 // Note: There are a lot more choices based on type with AVX-512, but
630 // there's really no advantage when the store isn't masked.
631 if (Aligned)
632 Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
633 else
634 Opc = X86::VMOVDQU64Zmr;
635 break;
636 }
637
638 const MCInstrDesc &Desc = TII.get(Opc);
639 // Some of the instructions in the previous switch use FR128 instead
640 // of FR32 for ValReg. Make sure the register we feed the instruction
641 // matches its register class constraints.
642 // Note: It is fine to do a copy from FR32 to FR128; these are the
643 // same registers behind the scenes, which is why this did not trigger
644 // any bugs before.
645 ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1);
646 MachineInstrBuilder MIB =
647 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, Desc);
648 addFullAddress(MIB, AM).addReg(ValReg);
649 if (MMO)
650 MIB->addMemOperand(*FuncInfo.MF, MMO);
651
652 return true;
653}
654
655bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
656 X86AddressMode &AM,
657 MachineMemOperand *MMO, bool Aligned) {
658 // Handle 'null' like i32/i64 0.
659 if (isa<ConstantPointerNull>(Val))
660 Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));
661
662 // If this is a store of a simple constant, fold the constant into the store.
663 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
664 unsigned Opc = 0;
665 bool Signed = true;
666 switch (VT.getSimpleVT().SimpleTy) {
667 default: break;
668 case MVT::i1:
669 Signed = false;
670 [[fallthrough]]; // Handle as i8.
671 case MVT::i8: Opc = X86::MOV8mi; break;
672 case MVT::i16: Opc = X86::MOV16mi; break;
673 case MVT::i32: Opc = X86::MOV32mi; break;
674 case MVT::i64:
675 // Must be a 32-bit sign extended value.
676 if (isInt<32>(CI->getSExtValue()))
677 Opc = X86::MOV64mi32;
678 break;
679 }
680
681 if (Opc) {
682 MachineInstrBuilder MIB =
683 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
684 addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()
685 : CI->getZExtValue());
686 if (MMO)
687 MIB->addMemOperand(*FuncInfo.MF, MMO);
688 return true;
689 }
690 }
691
692 Register ValReg = getRegForValue(Val);
693 if (!ValReg)
694 return false;
695
696 return X86FastEmitStore(VT, ValReg, AM, MMO, Aligned);
697}
698
699/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
700/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
701/// ISD::SIGN_EXTEND).
702bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, Register Src,
703 EVT SrcVT, Register &ResultReg) {
704 Register RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
705 if (!RR)
706 return false;
707
708 ResultReg = RR;
709 return true;
710}
711
712bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
713 // Handle constant address.
714 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
715 // Can't handle alternate code models yet.
716 if (TM.getCodeModel() != CodeModel::Small &&
717 TM.getCodeModel() != CodeModel::Medium)
718 return false;
719
720 // Can't handle large objects yet.
721 if (TM.isLargeGlobalValue(GV))
722 return false;
723
724 // Can't handle TLS yet.
725 if (GV->isThreadLocal())
726 return false;
727
728 // Can't handle !absolute_symbol references yet.
729 if (GV->isAbsoluteSymbolRef())
730 return false;
731
732 // RIP-relative addresses can't have additional register operands, so if
733 // we've already folded stuff into the addressing mode, just force the
734 // global value into its own register, which we can use as the basereg.
735 if (!Subtarget->isPICStyleRIPRel() ||
736 (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
737 // Okay, we've committed to selecting this global. Set up the address.
738 AM.GV = GV;
739
740 // Allow the subtarget to classify the global.
741 unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
742
743 // If this reference is relative to the pic base, set it now.
744 if (isGlobalRelativeToPICBase(GVFlags)) {
745 // FIXME: How do we know Base.Reg is free??
746 AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
747 }
748
749 // Unless the ABI requires an extra load, return a direct reference to
750 // the global.
751 if (!isGlobalStubReference(GVFlags)) {
752 if (Subtarget->isPICStyleRIPRel()) {
753 // Use rip-relative addressing if we can. Above we verified that the
754 // base and index registers are unused.
755 assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
756 AM.Base.Reg = X86::RIP;
757 }
758 AM.GVOpFlags = GVFlags;
759 return true;
760 }
761
762 // Ok, we need to do a load from a stub. If we've already loaded from
763 // this stub, reuse the loaded pointer, otherwise emit the load now.
764 DenseMap<const Value *, Register>::iterator I = LocalValueMap.find(V);
765 Register LoadReg;
766 if (I != LocalValueMap.end() && I->second) {
767 LoadReg = I->second;
768 } else {
769 // Issue load from stub.
770 unsigned Opc = 0;
771 const TargetRegisterClass *RC = nullptr;
772 X86AddressMode StubAM;
773 StubAM.Base.Reg = AM.Base.Reg;
774 StubAM.GV = GV;
775 StubAM.GVOpFlags = GVFlags;
776
777 // Prepare for inserting code in the local-value area.
778 SavePoint SaveInsertPt = enterLocalValueArea();
779
780 if (TLI.getPointerTy(DL) == MVT::i64) {
781 Opc = X86::MOV64rm;
782 RC = &X86::GR64RegClass;
783 } else {
784 Opc = X86::MOV32rm;
785 RC = &X86::GR32RegClass;
786 }
787
788 if (Subtarget->isPICStyleRIPRel() || GVFlags == X86II::MO_GOTPCREL ||
789 GVFlags == X86II::MO_GOTPCREL_NORELAX)
790 StubAM.Base.Reg = X86::RIP;
791
792 LoadReg = createResultReg(RC);
793 MachineInstrBuilder LoadMI =
794 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), LoadReg);
795 addFullAddress(LoadMI, StubAM);
796
797 // Ok, back to normal mode.
798 leaveLocalValueArea(SaveInsertPt);
799
800 // Prevent loading GV stub multiple times in same MBB.
801 LocalValueMap[V] = LoadReg;
802 }
803
804 // Now construct the final address. Note that the Disp, Scale,
805 // and Index values may already be set here.
806 AM.Base.Reg = LoadReg;
807 AM.GV = nullptr;
808 return true;
809 }
810 }
811
812 // If all else fails, try to materialize the value in a register.
813 if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
814 if (AM.Base.Reg == 0) {
815 AM.Base.Reg = getRegForValue(V);
816 return AM.Base.Reg != 0;
817 }
818 if (AM.IndexReg == 0) {
819 assert(AM.Scale == 1 && "Scale with no index!");
820 AM.IndexReg = getRegForValue(V);
821 return AM.IndexReg != 0;
822 }
823 }
824
825 return false;
826}
827
828/// X86SelectAddress - Attempt to fill in an address from the given value.
829///
830bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
831 SmallVector<const Value *, 32> GEPs;
832redo_gep:
833 const User *U = nullptr;
834 unsigned Opcode = Instruction::UserOp1;
835 if (const Instruction *I = dyn_cast<Instruction>(V)) {
836 // Don't walk into other basic blocks; it's possible we haven't
837 // visited them yet, so the instructions may not yet be assigned
838 // virtual registers.
839 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
840 FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
841 Opcode = I->getOpcode();
842 U = I;
843 }
844 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
845 Opcode = C->getOpcode();
846 U = C;
847 }
848
849 if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
850 if (Ty->getAddressSpace() > 255)
851 // Fast instruction selection doesn't support the special
852 // address spaces.
853 return false;
854
855 switch (Opcode) {
856 default: break;
857 case Instruction::BitCast:
858 // Look past bitcasts.
859 return X86SelectAddress(U->getOperand(0), AM);
860
861 case Instruction::IntToPtr:
862 // Look past no-op inttoptrs.
863 if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
864 TLI.getPointerTy(DL))
865 return X86SelectAddress(U->getOperand(0), AM);
866 break;
867
868 case Instruction::PtrToInt:
869 // Look past no-op ptrtoints.
870 if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
871 return X86SelectAddress(U->getOperand(0), AM);
872 break;
873
874 case Instruction::Alloca: {
875 // Do static allocas.
876 const AllocaInst *A = cast<AllocaInst>(V);
877 DenseMap<const AllocaInst *, int>::iterator SI =
878 FuncInfo.StaticAllocaMap.find(A);
879 if (SI != FuncInfo.StaticAllocaMap.end()) {
880 AM.BaseType = X86AddressMode::FrameIndexBase;
881 AM.Base.FrameIndex = SI->second;
882 return true;
883 }
884 break;
885 }
886
887 case Instruction::Add: {
888 // Adds of constants are common and easy enough.
889 if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
890 uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
891 // They have to fit in the 32-bit signed displacement field though.
892 if (isInt<32>(Disp)) {
893 AM.Disp = (uint32_t)Disp;
894 return X86SelectAddress(U->getOperand(0), AM);
895 }
896 }
897 break;
898 }
899
900 case Instruction::GetElementPtr: {
901 X86AddressMode SavedAM = AM;
902
903 // Pattern-match simple GEPs.
904 uint64_t Disp = (int32_t)AM.Disp;
905 Register IndexReg = AM.IndexReg;
906 unsigned Scale = AM.Scale;
907 MVT PtrVT = TLI.getValueType(DL, U->getType()).getSimpleVT();
908
909 gep_type_iterator GTI = gep_type_begin(U);
910 // Iterate through the indices, folding what we can. Constants can be
911 // folded, and one dynamic index can be handled, if the scale is supported.
912 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
913 i != e; ++i, ++GTI) {
914 const Value *Op = *i;
915 if (StructType *STy = GTI.getStructTypeOrNull()) {
916 const StructLayout *SL = DL.getStructLayout(STy);
917 Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
918 continue;
919 }
920
921 // An array/variable index is always of the form i*S where S is the
922 // constant scale size. See if we can push the scale into immediates.
923 uint64_t S = GTI.getSequentialElementStride(DL);
924 for (;;) {
925 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
926 // Constant-offset addressing.
927 Disp += CI->getSExtValue() * S;
928 break;
929 }
930 if (canFoldAddIntoGEP(U, Op)) {
931 // A compatible add with a constant operand. Fold the constant.
932 ConstantInt *CI =
933 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
934 Disp += CI->getSExtValue() * S;
935 // Iterate on the other operand.
936 Op = cast<AddOperator>(Op)->getOperand(0);
937 continue;
938 }
939 if (!IndexReg && (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
940 (S == 1 || S == 2 || S == 4 || S == 8)) {
941 // Scaled-index addressing.
942 Scale = S;
943 IndexReg = getRegForGEPIndex(PtrVT, Op);
944 if (!IndexReg)
945 return false;
946 break;
947 }
948 // Unsupported.
949 goto unsupported_gep;
950 }
951 }
952
953 // Check for displacement overflow.
954 if (!isInt<32>(Disp))
955 break;
956
957 AM.IndexReg = IndexReg;
958 AM.Scale = Scale;
959 AM.Disp = (uint32_t)Disp;
960 GEPs.push_back(V);
961
962 if (const GetElementPtrInst *GEP =
963 dyn_cast<GetElementPtrInst>(U->getOperand(0))) {
964 // Ok, the GEP indices were covered by constant-offset and scaled-index
965 // addressing. Update the address state and move on to examining the base.
966 V = GEP;
967 goto redo_gep;
968 } else if (X86SelectAddress(U->getOperand(0), AM)) {
969 return true;
970 }
971
972 // If we couldn't merge the gep value into this addr mode, revert back to
973 // our address and just match the value instead of completely failing.
974 AM = SavedAM;
975
976 for (const Value *I : reverse(GEPs))
977 if (handleConstantAddresses(I, AM))
978 return true;
979
980 return false;
981 unsupported_gep:
982 // Ok, the GEP indices weren't all covered.
983 break;
984 }
985 }
986
987 return handleConstantAddresses(V, AM);
988}
989
990/// X86SelectCallAddress - Attempt to fill in an address from the given value.
991///
992bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
993 const User *U = nullptr;
994 unsigned Opcode = Instruction::UserOp1;
995 const Instruction *I = dyn_cast<Instruction>(V);
996 // Record if the value is defined in the same basic block.
997 //
998 // This information is crucial to know whether or not folding an
999 // operand is valid.
1000 // Indeed, FastISel generates or reuses a virtual register for all
1001 // operands of all instructions it selects. Obviously, the definition and
1002 // its uses must use the same virtual register otherwise the produced
1003 // code is incorrect.
1004 // Before instruction selection, FunctionLoweringInfo::set sets the virtual
1005 // registers for values that are alive across basic blocks. This ensures
1006 // that the values are consistently set between basic blocks, even
1007 // if different instruction selection mechanisms are used (e.g., a mix of
1008 // SDISel and FastISel).
1009 // For values local to a basic block, the instruction selection process
1010 // generates these virtual registers with whatever method is appropriate
1011 // for its needs. In particular, FastISel and SDISel do not share the way
1012 // local virtual registers are set.
1013 // Therefore, it is impossible (or at least unsafe) to share values
1014 // between basic blocks unless they use the same instruction selection
1015 // method, which is not guaranteed for X86.
1016 // Moreover, things like hasOneUse could not be used accurately if we
1017 // allowed referencing values across basic blocks when they are not
1018 // alive across basic blocks to begin with.
1019 bool InMBB = true;
1020 if (I) {
1021 Opcode = I->getOpcode();
1022 U = I;
1023 InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
1024 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
1025 Opcode = C->getOpcode();
1026 U = C;
1027 }
1028
1029 switch (Opcode) {
1030 default: break;
1031 case Instruction::BitCast:
1032 // Look past bitcasts if its operand is in the same BB.
1033 if (InMBB)
1034 return X86SelectCallAddress(U->getOperand(0), AM);
1035 break;
1036
1037 case Instruction::IntToPtr:
1038 // Look past no-op inttoptrs if its operand is in the same BB.
1039 if (InMBB &&
1040 TLI.getValueType(DL, U->getOperand(0)->getType()) ==
1041 TLI.getPointerTy(DL))
1042 return X86SelectCallAddress(U->getOperand(0), AM);
1043 break;
1044
1045 case Instruction::PtrToInt:
1046 // Look past no-op ptrtoints if its operand is in the same BB.
1047 if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
1048 return X86SelectCallAddress(U->getOperand(0), AM);
1049 break;
1050 }
1051
1052 // Handle constant address.
1053 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1054 // Can't handle alternate code models yet.
1055 if (TM.getCodeModel() != CodeModel::Small &&
1056 TM.getCodeModel() != CodeModel::Medium)
1057 return false;
1058
1059 // RIP-relative addresses can't have additional register operands.
1060 if (Subtarget->isPICStyleRIPRel() &&
1061 (AM.Base.Reg != 0 || AM.IndexReg != 0))
1062 return false;
1063
1064 // Can't handle TLS.
1065 if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
1066 if (GVar->isThreadLocal())
1067 return false;
1068
1069 // Okay, we've committed to selecting this global. Set up the basic address.
1070 AM.GV = GV;
1071
1072 // Return a direct reference to the global. Fastisel can handle calls to
1073 // functions that require loads, such as dllimport and nonlazybind
1074 // functions.
1075 if (Subtarget->isPICStyleRIPRel()) {
1076 // Use rip-relative addressing if we can. Above we verified that the
1077 // base and index registers are unused.
1078 assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
1079 AM.Base.Reg = X86::RIP;
1080 } else {
1081 AM.GVOpFlags = Subtarget->classifyLocalReference(nullptr);
1082 }
1083
1084 return true;
1085 }
1086
1087 // If all else fails, try to materialize the value in a register.
1088 if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
1089 auto GetCallRegForValue = [this](const Value *V) {
1090 Register Reg = getRegForValue(V);
1091
1092 // In 64-bit mode, we need a 64-bit register even if pointers are 32 bits.
1093 if (Reg && Subtarget->isTarget64BitILP32()) {
1094 Register CopyReg = createResultReg(&X86::GR32RegClass);
1095 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32rr),
1096 CopyReg)
1097 .addReg(Reg);
1098
1099 Register ExtReg = createResultReg(&X86::GR64RegClass);
1100 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1101 TII.get(TargetOpcode::SUBREG_TO_REG), ExtReg)
1102 .addReg(CopyReg)
1103 .addImm(X86::sub_32bit);
1104 Reg = ExtReg;
1105 }
1106
1107 return Reg;
1108 };
1109
1110 if (AM.Base.Reg == 0) {
1111 AM.Base.Reg = GetCallRegForValue(V);
1112 return AM.Base.Reg != 0;
1113 }
1114 if (AM.IndexReg == 0) {
1115 assert(AM.Scale == 1 && "Scale with no index!");
1116 AM.IndexReg = GetCallRegForValue(V);
1117 return AM.IndexReg != 0;
1118 }
1119 }
1120
1121 return false;
1122}
1123
1124
1125/// X86SelectStore - Select and emit code to implement store instructions.
1126bool X86FastISel::X86SelectStore(const Instruction *I) {
1127 // Atomic stores need special handling.
1128 const StoreInst *S = cast<StoreInst>(I);
1129
1130 if (S->isAtomic())
1131 return false;
1132
1133 const Value *PtrV = I->getOperand(1);
1134 if (TLI.supportSwiftError()) {
1135 // Swifterror values can come from either a function parameter with
1136 // swifterror attribute or an alloca with swifterror attribute.
1137 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
1138 if (Arg->hasSwiftErrorAttr())
1139 return false;
1140 }
1141
1142 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
1143 if (Alloca->isSwiftError())
1144 return false;
1145 }
1146 }
1147
1148 const Value *Val = S->getValueOperand();
1149 const Value *Ptr = S->getPointerOperand();
1150
1151 MVT VT;
1152 if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
1153 return false;
1154
1155 Align Alignment = S->getAlign();
1156 Align ABIAlignment = DL.getABITypeAlign(Val->getType());
1157 bool Aligned = Alignment >= ABIAlignment;
1158
1159 X86AddressMode AM;
1160 if (!X86SelectAddress(Ptr, AM))
1161 return false;
1162
1163 return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);
1164}
1165
1166/// X86SelectRet - Select and emit code to implement ret instructions.
1167bool X86FastISel::X86SelectRet(const Instruction *I) {
1168 const ReturnInst *Ret = cast<ReturnInst>(I);
1169 const Function &F = *I->getParent()->getParent();
1170 const X86MachineFunctionInfo *X86MFInfo =
1171 FuncInfo.MF->getInfo<X86MachineFunctionInfo>();
1172
1173 if (!FuncInfo.CanLowerReturn)
1174 return false;
1175
1176 if (TLI.supportSwiftError() &&
1177 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
1178 return false;
1179
1180 if (TLI.supportSplitCSR(FuncInfo.MF))
1181 return false;
1182
1183 CallingConv::ID CC = F.getCallingConv();
1184 if (CC != CallingConv::C &&
1185 CC != CallingConv::Fast &&
1186 CC != CallingConv::Tail &&
1187 CC != CallingConv::SwiftTail &&
1188 CC != CallingConv::X86_FastCall &&
1189 CC != CallingConv::X86_StdCall &&
1190 CC != CallingConv::X86_ThisCall &&
1191 CC != CallingConv::X86_64_SysV &&
1192 CC != CallingConv::Win64)
1193 return false;
1194
1195 // Don't handle popping bytes if they don't fit the ret's immediate.
1196 if (!isUInt<16>(X86MFInfo->getBytesToPopOnReturn()))
1197 return false;
1198
1199 // fastcc with -tailcallopt is intended to provide a guaranteed
1200 // tail call optimization. Fastisel doesn't know how to do that.
1201 if ((CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) ||
1202 CC == CallingConv::Tail || CC == CallingConv::SwiftTail)
1203 return false;
1204
1205 // Let SDISel handle vararg functions.
1206 if (F.isVarArg())
1207 return false;
1208
1209 // Build a list of return value registers.
1210 SmallVector<Register, 4> RetRegs;
1211
1212 if (Ret->getNumOperands() > 0) {
1213 SmallVector<ISD::OutputArg, 4> Outs;
1214 GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
1215
1216 // Analyze operands of the call, assigning locations to each operand.
1217 SmallVector<CCValAssign, 16> ValLocs;
1218 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
1219 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
1220
1221 const Value *RV = Ret->getOperand(0);
1222 Register Reg = getRegForValue(RV);
1223 if (!Reg)
1224 return false;
1225
1226 // Only handle a single return value for now.
1227 if (ValLocs.size() != 1)
1228 return false;
1229
1230 CCValAssign &VA = ValLocs[0];
1231
1232 // Don't bother handling odd stuff for now.
1233 if (VA.getLocInfo() != CCValAssign::Full)
1234 return false;
1235 // Only handle register returns for now.
1236 if (!VA.isRegLoc())
1237 return false;
1238
1239 // The calling-convention tables for x87 returns don't tell
1240 // the whole story.
1241 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
1242 return false;
1243
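 // getRegForValue hands back the first of the consecutive virtual registers
 // created for a multi-register value, so adding the value number selects
 // the piece corresponding to this return location.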
1244 Register SrcReg = Reg + VA.getValNo();
1245 EVT SrcVT = TLI.getValueType(DL, RV->getType());
1246 EVT DstVT = VA.getValVT();
1247 // Special handling for extended integers.
1248 if (SrcVT != DstVT) {
1249 if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)
1250 return false;
1251
1252 if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
1253 return false;
1254
1255 if (SrcVT == MVT::i1) {
1256 if (Outs[0].Flags.isSExt())
1257 return false;
1258 SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg);
1259 SrcVT = MVT::i8;
1260 }
1261 if (SrcVT != DstVT) {
1262 unsigned Op =
1263 Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
1264 SrcReg =
1265 fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg);
1266 }
1267 }
1268
1269 // Make the copy.
1270 Register DstReg = VA.getLocReg();
1271 const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
1272 // Avoid a cross-class copy. This is very unlikely.
1273 if (!SrcRC->contains(DstReg))
1274 return false;
1275 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1276 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
1277
1278 // Add register to return instruction.
1279 RetRegs.push_back(VA.getLocReg());
1280 }
1281
1282 // Swift calling convention does not require we copy the sret argument
1283 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
1284
1285 // All x86 ABIs require that for returning structs by value we copy
1286 // the sret argument into %rax/%eax (depending on ABI) for the return.
1287 // We saved the argument into a virtual register in the entry block,
1288 // so now we copy the value out and into %rax/%eax.
1289 if (F.hasStructRetAttr() && CC != CallingConv::Swift &&
1290 CC != CallingConv::SwiftTail) {
1291 Register Reg = X86MFInfo->getSRetReturnReg();
1292 assert(Reg &&
1293 "SRetReturnReg should have been set in LowerFormalArguments()!");
1294 Register RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
1295 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1296 TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
1297 RetRegs.push_back(RetReg);
1298 }
1299
1300 // Now emit the RET.
1301 MachineInstrBuilder MIB;
1302 if (X86MFInfo->getBytesToPopOnReturn()) {
1303 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1304 TII.get(Subtarget->is64Bit() ? X86::RETI64 : X86::RETI32))
1305 .addImm(X86MFInfo->getBytesToPopOnReturn());
1306 } else {
1307 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1308 TII.get(Subtarget->is64Bit() ? X86::RET64 : X86::RET32));
1309 }
1310 for (Register Reg : RetRegs)
1311 MIB.addReg(Reg, RegState::Implicit);
1312 return true;
1313}
1314
1315/// X86SelectLoad - Select and emit code to implement load instructions.
1316///
1317bool X86FastISel::X86SelectLoad(const Instruction *I) {
1318 const LoadInst *LI = cast<LoadInst>(I);
1319
1320 // Atomic loads need special handling.
1321 if (LI->isAtomic())
1322 return false;
1323
1324 const Value *SV = I->getOperand(0);
1325 if (TLI.supportSwiftError()) {
1326 // Swifterror values can come from either a function parameter with
1327 // swifterror attribute or an alloca with swifterror attribute.
1328 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
1329 if (Arg->hasSwiftErrorAttr())
1330 return false;
1331 }
1332
1333 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
1334 if (Alloca->isSwiftError())
1335 return false;
1336 }
1337 }
1338
1339 MVT VT;
1340 if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
1341 return false;
1342
1343 const Value *Ptr = LI->getPointerOperand();
1344
1345 X86AddressMode AM;
1346 if (!X86SelectAddress(Ptr, AM))
1347 return false;
1348
1349 Register ResultReg;
1350 if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
1351 LI->getAlign().value()))
1352 return false;
1353
1354 updateValueMap(I, ResultReg);
1355 return true;
1356}
1357
1358static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
1359 bool HasAVX512 = Subtarget->hasAVX512();
1360 bool HasAVX = Subtarget->hasAVX();
1361 bool HasSSE1 = Subtarget->hasSSE1();
1362 bool HasSSE2 = Subtarget->hasSSE2();
1363
1364 switch (VT.getSimpleVT().SimpleTy) {
1365 default: return 0;
1366 case MVT::i8: return X86::CMP8rr;
1367 case MVT::i16: return X86::CMP16rr;
1368 case MVT::i32: return X86::CMP32rr;
1369 case MVT::i64: return X86::CMP64rr;
1370 case MVT::f32:
1371 return HasAVX512 ? X86::VUCOMISSZrr
1372 : HasAVX ? X86::VUCOMISSrr
1373 : HasSSE1 ? X86::UCOMISSrr
1374 : 0;
1375 case MVT::f64:
1376 return HasAVX512 ? X86::VUCOMISDZrr
1377 : HasAVX ? X86::VUCOMISDrr
1378 : HasSSE2 ? X86::UCOMISDrr
1379 : 0;
1380 }
1381}
1382
1383/// If the RHS of a comparison is the constant RHSC, return an opcode that can
1384/// fold it into the compare as an immediate (e.g. CMP32ri); otherwise return 0.
1385static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
1386 switch (VT.getSimpleVT().SimpleTy) {
1387 // Otherwise, we can't fold the immediate into this comparison.
1388 default:
1389 return 0;
1390 case MVT::i8:
1391 return X86::CMP8ri;
1392 case MVT::i16:
1393 return X86::CMP16ri;
1394 case MVT::i32:
1395 return X86::CMP32ri;
1396 case MVT::i64:
1397 // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
1398 // field.
1399 return isInt<32>(RHSC->getSExtValue()) ? X86::CMP64ri32 : 0;
1400 }
1401}
1402
1403bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
1404 const DebugLoc &CurMIMD) {
1405 Register Op0Reg = getRegForValue(Op0);
1406 if (!Op0Reg)
1407 return false;
1408
1409 // Handle 'null' like i32/i64 0.
1410 if (isa<ConstantPointerNull>(Op1))
1411 Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));
1412
1413 // We have two options: compare with register or immediate. If the RHS of
1414 // the compare is an immediate that we can fold into this compare, use
1415 // CMPri, otherwise use CMPrr.
1416 if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
1417 if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
1418 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareImmOpc))
1419 .addReg(Op0Reg)
1420 .addImm(Op1C->getSExtValue());
1421 return true;
1422 }
1423 }
1424
1425 unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
1426 if (CompareOpc == 0) return false;
1427
1428 Register Op1Reg = getRegForValue(Op1);
1429 if (!Op1Reg)
1430 return false;
1431 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareOpc))
1432 .addReg(Op0Reg)
1433 .addReg(Op1Reg);
1434
1435 return true;
1436}
1437
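// Pick the SETcc flavor: use the zero-upper form (SETZUCC) when the subtarget
// provides it and does not prefer the legacy encoding, otherwise the classic
// SETCC, which only writes the low 8 bits of the destination register.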
1438#define GET_SETCC \
1439 ((!Subtarget->hasZU() || Subtarget->preferLegacySetCC()) ? X86::SETCCr \
1440 : X86::SETZUCCr)
1441
1442bool X86FastISel::X86SelectCmp(const Instruction *I) {
1443 const CmpInst *CI = cast<CmpInst>(I);
1444
1445 MVT VT;
1446 if (!isTypeLegal(I->getOperand(0)->getType(), VT))
1447 return false;
1448
1449 // Below code only works for scalars.
1450 if (VT.isVector())
1451 return false;
1452
1453 // Try to optimize or fold the cmp.
1454 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1455 Register ResultReg;
1456 switch (Predicate) {
1457 default: break;
1458 case CmpInst::FCMP_FALSE: {
1459 ResultReg = createResultReg(&X86::GR32RegClass);
1460 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32r0),
1461 ResultReg);
1462 ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, X86::sub_8bit);
1463 if (!ResultReg)
1464 return false;
1465 break;
1466 }
1467 case CmpInst::FCMP_TRUE: {
1468 ResultReg = createResultReg(&X86::GR8RegClass);
1469 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri),
1470 ResultReg).addImm(1);
1471 break;
1472 }
1473 }
1474
1475 if (ResultReg) {
1476 updateValueMap(I, ResultReg);
1477 return true;
1478 }
1479
1480 const Value *LHS = CI->getOperand(0);
1481 const Value *RHS = CI->getOperand(1);
1482
1483 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
1484 // We don't have to materialize a zero constant for this case and can just use
1485 // %x again on the RHS.
1486 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
1487 const auto *RHSC = dyn_cast<ConstantFP>(RHS);
1488 if (RHSC && RHSC->isNullValue())
1489 RHS = LHS;
1490 }
1491
1492 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
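 // After a UCOMIS*, "ordered and equal" is ZF=1 and PF=0 (SETE AND SETNP),
 // while "unordered or not equal" is ZF=0 or PF=1 (SETNE OR SETP), hence the
 // two-setcc-plus-logic sequences in the table below.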
1493 static const uint16_t SETFOpcTable[2][3] = {
1494 { X86::COND_E, X86::COND_NP, X86::AND8rr },
1495 { X86::COND_NE, X86::COND_P, X86::OR8rr }
1496 };
1497 const uint16_t *SETFOpc = nullptr;
1498 switch (Predicate) {
1499 default: break;
1500 case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break;
1501 case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break;
1502 }
1503
1504 ResultReg = createResultReg(&X86::GR8RegClass);
1505 if (SETFOpc) {
1506 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
1507 return false;
1508
1509 Register FlagReg1 = createResultReg(&X86::GR8RegClass);
1510 Register FlagReg2 = createResultReg(&X86::GR8RegClass);
1511 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(GET_SETCC),
1512 FlagReg1)
1513 .addImm(SETFOpc[0]);
1514 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(GET_SETCC),
1515 FlagReg2)
1516 .addImm(SETFOpc[1]);
1517 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(SETFOpc[2]),
1518 ResultReg).addReg(FlagReg1).addReg(FlagReg2);
1519 updateValueMap(I, ResultReg);
1520 return true;
1521 }
1522
1523 X86::CondCode CC;
1524 bool SwapArgs;
1525 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
1526 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1527
1528 if (SwapArgs)
1529 std::swap(LHS, RHS);
1530
1531 // Emit a compare of LHS/RHS.
1532 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
1533 return false;
1534
1535 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(GET_SETCC), ResultReg)
1536 .addImm(CC);
1537 updateValueMap(I, ResultReg);
1538 return true;
1539}
1540
1541bool X86FastISel::X86SelectZExt(const Instruction *I) {
1542 EVT DstVT = TLI.getValueType(DL, I->getType());
1543 if (!TLI.isTypeLegal(DstVT))
1544 return false;
1545
1546 Register ResultReg = getRegForValue(I->getOperand(0));
1547 if (!ResultReg)
1548 return false;
1549
1550 // Handle zero-extension from i1 to i8, which is common.
1551 MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
1552 if (SrcVT == MVT::i1) {
1553 // Set the high bits to zero.
1554 ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
1555 SrcVT = MVT::i8;
1556
1557 if (!ResultReg)
1558 return false;
1559 }
1560
1561 if (DstVT == MVT::i64) {
1562 // Handle extension to 64-bits via sub-register shenanigans.
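 // A 32-bit operation implicitly zeroes bits 63:32 of the destination on
 // x86-64, so a MOVZX/MOV into a 32-bit register followed by SUBREG_TO_REG
 // yields the zero-extended 64-bit value without an extra instruction.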
1563 unsigned MovInst;
1564
1565 switch (SrcVT.SimpleTy) {
1566 case MVT::i8: MovInst = X86::MOVZX32rr8; break;
1567 case MVT::i16: MovInst = X86::MOVZX32rr16; break;
1568 case MVT::i32: MovInst = X86::MOV32rr; break;
1569 default: llvm_unreachable("Unexpected zext to i64 source type");
1570 }
1571
1572 Register Result32 = createResultReg(&X86::GR32RegClass);
1573 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovInst), Result32)
1574 .addReg(ResultReg);
1575
1576 ResultReg = createResultReg(&X86::GR64RegClass);
1577 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1578 TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
1579 .addReg(Result32)
1580 .addImm(X86::sub_32bit);
1581 } else if (DstVT == MVT::i16) {
1582 // i8->i16 doesn't exist in the autogenerated isel table. Need to zero
1583 // extend to 32-bits and then extract down to 16-bits.
1584 Register Result32 = createResultReg(&X86::GR32RegClass);
1585 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVZX32rr8),
1586 Result32).addReg(ResultReg);
1587
1588 ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
1589 } else if (DstVT != MVT::i8) {
1590 ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
1591 ResultReg);
1592 if (!ResultReg)
1593 return false;
1594 }
1595
1596 updateValueMap(I, ResultReg);
1597 return true;
1598}
1599
1600bool X86FastISel::X86SelectSExt(const Instruction *I) {
1601 EVT DstVT = TLI.getValueType(DL, I->getType());
1602 if (!TLI.isTypeLegal(DstVT))
1603 return false;
1604
1605 Register ResultReg = getRegForValue(I->getOperand(0));
1606 if (!ResultReg)
1607 return false;
1608
1609 // Handle sign-extension from i1 to i8.
1610 MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
1611 if (SrcVT == MVT::i1) {
1612 // Set the high bits to zero.
1613 Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
1614 if (!ZExtReg)
1615 return false;
1616
1617 // Negate the result to make an 8-bit sign extended value.
1618 ResultReg = createResultReg(&X86::GR8RegClass);
1619 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::NEG8r),
1620 ResultReg).addReg(ZExtReg);
1621
1622 SrcVT = MVT::i8;
1623 }
1624
1625 if (DstVT == MVT::i16) {
1626 // i8->i16 doesn't exist in the autogenerated isel table. Need to sign
1627 // extend to 32-bits and then extract down to 16-bits.
1628 Register Result32 = createResultReg(&X86::GR32RegClass);
1629 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVSX32rr8),
1630 Result32).addReg(ResultReg);
1631
1632 ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
1633 } else if (DstVT != MVT::i8) {
1634 ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND,
1635 ResultReg);
1636 if (!ResultReg)
1637 return false;
1638 }
1639
1640 updateValueMap(I, ResultReg);
1641 return true;
1642}
1643
1644bool X86FastISel::X86SelectBranch(const Instruction *I) {
1645 // Unconditional branches are selected by tablegen-generated code.
1646 // Handle a conditional branch.
1647 const BranchInst *BI = cast<BranchInst>(I);
1648 MachineBasicBlock *TrueMBB = FuncInfo.getMBB(BI->getSuccessor(0));
1649 MachineBasicBlock *FalseMBB = FuncInfo.getMBB(BI->getSuccessor(1));
1650
1651 // Fold the common case of a conditional branch with a comparison
1652 // in the same block (values defined on other blocks may not have
1653 // initialized registers).
1654 X86::CondCode CC;
1655 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1656 if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
1657 EVT VT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1658
1659 // Try to optimize or fold the cmp.
1660 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1661 switch (Predicate) {
1662 default: break;
1663 case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, MIMD.getDL()); return true;
1664 case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, MIMD.getDL()); return true;
1665 }
1666
1667 const Value *CmpLHS = CI->getOperand(0);
1668 const Value *CmpRHS = CI->getOperand(1);
1669
1670 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x,
1671 // 0.0.
1672 // We don't have to materialize a zero constant for this case and can just
1673 // use %x again on the RHS.
1674 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
1675 const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
1676 if (CmpRHSC && CmpRHSC->isNullValue())
1677 CmpRHS = CmpLHS;
1678 }
1679
1680 // Try to take advantage of fallthrough opportunities.
1681 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1682 std::swap(TrueMBB, FalseMBB);
1683 Predicate = CmpInst::getInversePredicate(Predicate);
1684 }
1685
1686 // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
1687 // code check. Instead two branch instructions are required to check all
1688 // the flags. First we change the predicate to a supported condition code,
1689 // which will be the first branch. Later on we will emit the second
1690 // branch.
1691 bool NeedExtraBranch = false;
1692 switch (Predicate) {
1693 default: break;
1694 case CmpInst::FCMP_OEQ:
1695 std::swap(TrueMBB, FalseMBB);
1696 [[fallthrough]];
1697 case CmpInst::FCMP_UNE:
1698 NeedExtraBranch = true;
1699 Predicate = CmpInst::FCMP_UNE;
1700 break;
1701 }
1702
1703 bool SwapArgs;
1704 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
1705 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1706
1707 if (SwapArgs)
1708 std::swap(CmpLHS, CmpRHS);
1709
1710 // Emit a compare of the LHS and RHS, setting the flags.
1711 if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))
1712 return false;
1713
1714 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
1715 .addMBB(TrueMBB).addImm(CC);
1716
1717 // X86 requires a second branch to handle UNE (and OEQ, which is mapped
1718 // to UNE above).
1719 if (NeedExtraBranch) {
1720 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
1721 .addMBB(TrueMBB).addImm(X86::COND_P);
1722 }
1723
1724 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1725 return true;
1726 }
1727 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1728 // Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which
1729 // typically happen for _Bool and C++ bools.
1730 MVT SourceVT;
1731 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1732 isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
1733 unsigned TestOpc = 0;
1734 switch (SourceVT.SimpleTy) {
1735 default: break;
1736 case MVT::i8: TestOpc = X86::TEST8ri; break;
1737 case MVT::i16: TestOpc = X86::TEST16ri; break;
1738 case MVT::i32: TestOpc = X86::TEST32ri; break;
1739 case MVT::i64: TestOpc = X86::TEST64ri32; break;
1740 }
1741 if (TestOpc) {
1742 Register OpReg = getRegForValue(TI->getOperand(0));
1743 if (!OpReg)
1744 return false;
1745
1746 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TestOpc))
1747 .addReg(OpReg).addImm(1);
1748
1749 unsigned JmpCond = X86::COND_NE;
1750 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1751 std::swap(TrueMBB, FalseMBB);
1752 JmpCond = X86::COND_E;
1753 }
1754
1755 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
1756 .addMBB(TrueMBB).addImm(JmpCond);
1757
1758 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1759 return true;
1760 }
1761 }
1762 } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {
1763 // Fake request the condition, otherwise the intrinsic might be completely
1764 // optimized away.
1765 Register TmpReg = getRegForValue(BI->getCondition());
1766 if (!TmpReg)
1767 return false;
1768
1769 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
1770 .addMBB(TrueMBB).addImm(CC);
1771 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1772 return true;
1773 }
1774
1775 // Otherwise do a clumsy setcc and re-test it.
1776 // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
1777 // in an explicit cast, so make sure to handle that correctly.
1778 Register OpReg = getRegForValue(BI->getCondition());
1779 if (!OpReg)
1780 return false;
1781
1782 // In case OpReg is a K register, COPY to a GPR
1783 if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) {
1784 Register KOpReg = OpReg;
1785 OpReg = createResultReg(&X86::GR32RegClass);
1786 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1787 TII.get(TargetOpcode::COPY), OpReg)
1788 .addReg(KOpReg);
1789 OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, X86::sub_8bit);
1790 }
1791 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
1792 .addReg(OpReg)
1793 .addImm(1);
1794 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
1795 .addMBB(TrueMBB).addImm(X86::COND_NE);
1796 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1797 return true;
1798}
1799
1800bool X86FastISel::X86SelectShift(const Instruction *I) {
1801 Register CReg;
1802 unsigned OpReg;
1803 const TargetRegisterClass *RC = nullptr;
1804 if (I->getType()->isIntegerTy(8)) {
1805 CReg = X86::CL;
1806 RC = &X86::GR8RegClass;
1807 switch (I->getOpcode()) {
1808 case Instruction::LShr: OpReg = X86::SHR8rCL; break;
1809 case Instruction::AShr: OpReg = X86::SAR8rCL; break;
1810 case Instruction::Shl: OpReg = X86::SHL8rCL; break;
1811 default: return false;
1812 }
1813 } else if (I->getType()->isIntegerTy(16)) {
1814 CReg = X86::CX;
1815 RC = &X86::GR16RegClass;
1816 switch (I->getOpcode()) {
1817 default: llvm_unreachable("Unexpected shift opcode");
1818 case Instruction::LShr: OpReg = X86::SHR16rCL; break;
1819 case Instruction::AShr: OpReg = X86::SAR16rCL; break;
1820 case Instruction::Shl: OpReg = X86::SHL16rCL; break;
1821 }
1822 } else if (I->getType()->isIntegerTy(32)) {
1823 CReg = X86::ECX;
1824 RC = &X86::GR32RegClass;
1825 switch (I->getOpcode()) {
1826 default: llvm_unreachable("Unexpected shift opcode");
1827 case Instruction::LShr: OpReg = X86::SHR32rCL; break;
1828 case Instruction::AShr: OpReg = X86::SAR32rCL; break;
1829 case Instruction::Shl: OpReg = X86::SHL32rCL; break;
1830 }
1831 } else if (I->getType()->isIntegerTy(64)) {
1832 CReg = X86::RCX;
1833 RC = &X86::GR64RegClass;
1834 switch (I->getOpcode()) {
1835 default: llvm_unreachable("Unexpected shift opcode");
1836 case Instruction::LShr: OpReg = X86::SHR64rCL; break;
1837 case Instruction::AShr: OpReg = X86::SAR64rCL; break;
1838 case Instruction::Shl: OpReg = X86::SHL64rCL; break;
1839 }
1840 } else {
1841 return false;
1842 }
1843
1844 MVT VT;
1845 if (!isTypeLegal(I->getType(), VT))
1846 return false;
1847
1848 Register Op0Reg = getRegForValue(I->getOperand(0));
1849 if (!Op0Reg)
1850 return false;
1851
1852 Register Op1Reg = getRegForValue(I->getOperand(1));
1853 if (!Op1Reg)
1854 return false;
1855 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
1856 CReg).addReg(Op1Reg);
1857
1858 // The shift instruction uses X86::CL. If we defined a super-register
1859 // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
1860 if (CReg != X86::CL)
1861 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1862 TII.get(TargetOpcode::KILL), X86::CL)
1863 .addReg(CReg, RegState::Kill);
1864
1865 Register ResultReg = createResultReg(RC);
1866 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(OpReg), ResultReg)
1867 .addReg(Op0Reg);
1868 updateValueMap(I, ResultReg);
1869 return true;
1870}
1871
1872bool X86FastISel::X86SelectDivRem(const Instruction *I) {
1873 const static unsigned NumTypes = 4; // i8, i16, i32, i64
1874 const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem
1875 const static bool S = true; // IsSigned
1876 const static bool U = false; // !IsSigned
1877 const static unsigned Copy = TargetOpcode::COPY;
1878 // For the X86 DIV/IDIV instruction, in most cases the dividend
1879 // (numerator) must be in a specific register pair highreg:lowreg,
1880 // producing the quotient in lowreg and the remainder in highreg.
1881 // For most data types, to set up the instruction, the dividend is
1882 // copied into lowreg, and lowreg is sign-extended or zero-extended
1883 // into highreg. The exception is i8, where the dividend is defined
1884 // as a single register rather than a register pair, and we
1885 // therefore directly sign-extend or zero-extend the dividend into
1886 // lowreg, instead of copying, and ignore the highreg.
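// For example, a 32-bit "sdiv i32 %a, %b" is lowered by the code below roughly
// as: COPY %a into EAX (OpCopy), CDQ to sign-extend EAX into EDX
// (OpSignExtend), then IDIV32r %b (OpDivRem); the quotient is read from EAX
// (DivRemResultReg), while an "srem" would instead read the remainder from EDX.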
1887 const static struct DivRemEntry {
1888 // The following portion depends only on the data type.
1889 const TargetRegisterClass *RC;
1890 unsigned LowInReg; // low part of the register pair
1891 unsigned HighInReg; // high part of the register pair
1892 // The following portion depends on both the data type and the operation.
1893 struct DivRemResult {
1894 unsigned OpDivRem; // The specific DIV/IDIV opcode to use.
1895 unsigned OpSignExtend; // Opcode for sign-extending lowreg into
1896 // highreg, or copying a zero into highreg.
1897 unsigned OpCopy; // Opcode for copying dividend into lowreg, or
1898 // zero/sign-extending into lowreg for i8.
1899 unsigned DivRemResultReg; // Register containing the desired result.
1900 bool IsOpSigned; // Whether to use signed or unsigned form.
1901 } ResultTable[NumOps];
1902 } OpTable[NumTypes] = {
1903 { &X86::GR8RegClass, X86::AX, 0, {
1904 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv
1905 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem
1906 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv
1907 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem
1908 }
1909 }, // i8
1910 { &X86::GR16RegClass, X86::AX, X86::DX, {
1911 { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv
1912 { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem
1913 { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv
1914 { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem
1915 }
1916 }, // i16
1917 { &X86::GR32RegClass, X86::EAX, X86::EDX, {
1918 { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv
1919 { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem
1920 { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv
1921 { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem
1922 }
1923 }, // i32
1924 { &X86::GR64RegClass, X86::RAX, X86::RDX, {
1925 { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv
1926 { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem
1927 { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv
1928 { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem
1929 }
1930 }, // i64
1931 };
1932
1933 MVT VT;
1934 if (!isTypeLegal(I->getType(), VT))
1935 return false;
1936
1937 unsigned TypeIndex, OpIndex;
1938 switch (VT.SimpleTy) {
1939 default: return false;
1940 case MVT::i8: TypeIndex = 0; break;
1941 case MVT::i16: TypeIndex = 1; break;
1942 case MVT::i32: TypeIndex = 2; break;
1943 case MVT::i64: TypeIndex = 3;
1944 if (!Subtarget->is64Bit())
1945 return false;
1946 break;
1947 }
1948
1949 switch (I->getOpcode()) {
1950 default: llvm_unreachable("Unexpected div/rem opcode");
1951 case Instruction::SDiv: OpIndex = 0; break;
1952 case Instruction::SRem: OpIndex = 1; break;
1953 case Instruction::UDiv: OpIndex = 2; break;
1954 case Instruction::URem: OpIndex = 3; break;
1955 }
1956
1957 const DivRemEntry &TypeEntry = OpTable[TypeIndex];
1958 const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
1959 Register Op0Reg = getRegForValue(I->getOperand(0));
1960 if (!Op0Reg)
1961 return false;
1962 Register Op1Reg = getRegForValue(I->getOperand(1));
1963 if (!Op1Reg)
1964 return false;
1965
1966 // Move op0 into low-order input register.
1967 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1968 TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
1969 // Zero-extend or sign-extend into high-order input register.
1970 if (OpEntry.OpSignExtend) {
1971 if (OpEntry.IsOpSigned)
1972 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1973 TII.get(OpEntry.OpSignExtend));
1974 else {
1975 Register Zero32 = createResultReg(&X86::GR32RegClass);
1976 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1977 TII.get(X86::MOV32r0), Zero32);
1978
1979 // Copy the zero into the appropriate sub/super/identical physical
1980 // register. Unfortunately the operations needed are not uniform enough
1981 // to fit neatly into the table above.
1982 if (VT == MVT::i16) {
1983 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Copy),
1984 TypeEntry.HighInReg)
1985 .addReg(Zero32, {}, X86::sub_16bit);
1986 } else if (VT == MVT::i32) {
1987 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1988 TII.get(Copy), TypeEntry.HighInReg)
1989 .addReg(Zero32);
1990 } else if (VT == MVT::i64) {
1991 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1992 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1993 .addReg(Zero32)
1994 .addImm(X86::sub_32bit);
1995 }
1996 }
1997 }
1998 // Generate the DIV/IDIV instruction.
1999 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2000 TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
2001 // For i8 remainder, we can't reference ah directly, as we'll end
2002 // up with bogus copies like %r9b = COPY %ah. Reference ax
2003 // instead to prevent ah references in a rex instruction.
2004 //
2005 // The current assumption of the fast register allocator is that isel
2006 // won't generate explicit references to the GR8_NOREX registers. If
2007 // the allocator and/or the backend get enhanced to be more robust in
2008 // that regard, this can be, and should be, removed.
2009 Register ResultReg;
2010 if ((I->getOpcode() == Instruction::SRem ||
2011 I->getOpcode() == Instruction::URem) &&
2012 OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
2013 Register SourceSuperReg = createResultReg(&X86::GR16RegClass);
2014 Register ResultSuperReg = createResultReg(&X86::GR16RegClass);
2015 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2016 TII.get(Copy), SourceSuperReg).addReg(X86::AX);
2017
2018 // Shift AX right by 8 bits instead of using AH.
2019 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SHR16ri),
2020 ResultSuperReg).addReg(SourceSuperReg).addImm(8);
2021
2022 // Now reference the 8-bit subreg of the result.
2023 ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
2024 X86::sub_8bit);
2025 }
2026 // Copy the result out of the physreg if we haven't already.
2027 if (!ResultReg) {
2028 ResultReg = createResultReg(TypeEntry.RC);
2029 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Copy), ResultReg)
2030 .addReg(OpEntry.DivRemResultReg);
2031 }
2032 updateValueMap(I, ResultReg);
2033
2034 return true;
2035}
2036
2037/// Emit a conditional move instruction (if they are supported) to lower
2038/// the select.
2039bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
2040 // Check if the subtarget supports these instructions.
2041 if (!Subtarget->canUseCMOV())
2042 return false;
2043
2044 // FIXME: Add support for i8.
2045 if (RetVT < MVT::i16 || RetVT > MVT::i64)
2046 return false;
2047
2048 const Value *Cond = I->getOperand(0);
2049 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2050 bool NeedTest = true;
2051 X86::CondCode CC = X86::COND_NE;
2052
2053 // Optimize conditions coming from a compare if both instructions are in the
2054 // same basic block (values defined in other basic blocks may not have
2055 // initialized registers).
2056 const auto *CI = dyn_cast<CmpInst>(Cond);
2057 if (CI && (CI->getParent() == I->getParent())) {
2058 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2059
2060 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
2061 static const uint16_t SETFOpcTable[2][3] = {
2062 { X86::COND_NP, X86::COND_E, X86::TEST8rr },
2063 { X86::COND_P, X86::COND_NE, X86::OR8rr }
2064 };
2065 const uint16_t *SETFOpc = nullptr;
2066 switch (Predicate) {
2067 default: break;
2068 case CmpInst::FCMP_OEQ:
2069 SETFOpc = &SETFOpcTable[0][0];
2070 Predicate = CmpInst::ICMP_NE;
2071 break;
2072 case CmpInst::FCMP_UNE:
2073 SETFOpc = &SETFOpcTable[1][0];
2074 Predicate = CmpInst::ICMP_NE;
2075 break;
2076 }
2077
2078 bool NeedSwap;
2079 std::tie(CC, NeedSwap) = X86::getX86ConditionCode(Predicate);
2080 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
2081
2082 const Value *CmpLHS = CI->getOperand(0);
2083 const Value *CmpRHS = CI->getOperand(1);
2084 if (NeedSwap)
2085 std::swap(CmpLHS, CmpRHS);
2086
2087 EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
2088 // Emit a compare of the LHS and RHS, setting the flags.
2089 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
2090 return false;
2091
2092 if (SETFOpc) {
2093 Register FlagReg1 = createResultReg(&X86::GR8RegClass);
2094 Register FlagReg2 = createResultReg(&X86::GR8RegClass);
2095 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(GET_SETCC),
2096 FlagReg1)
2097 .addImm(SETFOpc[0]);
2098 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(GET_SETCC),
2099 FlagReg2)
2100 .addImm(SETFOpc[1]);
2101 auto const &II = TII.get(SETFOpc[2]);
2102 if (II.getNumDefs()) {
2103 Register TmpReg = createResultReg(&X86::GR8RegClass);
2104 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, TmpReg)
2105 .addReg(FlagReg2).addReg(FlagReg1);
2106 } else {
2107 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2108 .addReg(FlagReg2).addReg(FlagReg1);
2109 }
2110 }
2111 NeedTest = false;
2112 } else if (foldX86XALUIntrinsic(CC, I, Cond)) {
2113 // Fake request the condition, otherwise the intrinsic might be completely
2114 // optimized away.
2115 Register TmpReg = getRegForValue(Cond);
2116 if (!TmpReg)
2117 return false;
2118
2119 NeedTest = false;
2120 }
2121
2122 if (NeedTest) {
2123 // Selects operate on i1; however, CondReg is 8 bits wide and may contain
2124 // garbage. Only the least significant bit is guaranteed to be accurate,
2125 // so if we read more than the lsb we may see non-zero values even though
2126 // the lsb is zero. Therefore, truncate CondReg to i1 for the select by
2127 // performing a TEST against 1.
2128 Register CondReg = getRegForValue(Cond);
2129 if (!CondReg)
2130 return false;
2131
2132 // In case OpReg is a K register, COPY to a GPR
2133 if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
2134 Register KCondReg = CondReg;
2135 CondReg = createResultReg(&X86::GR32RegClass);
2136 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2137 TII.get(TargetOpcode::COPY), CondReg)
2138 .addReg(KCondReg);
2139 CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
2140 }
2141 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
2142 .addReg(CondReg)
2143 .addImm(1);
2144 }
2145
2146 const Value *LHS = I->getOperand(1);
2147 const Value *RHS = I->getOperand(2);
2148
2149 Register RHSReg = getRegForValue(RHS);
2150 Register LHSReg = getRegForValue(LHS);
2151 if (!LHSReg || !RHSReg)
2152 return false;
2153
2154 const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
2155 unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC) / 8, false,
2156 Subtarget->hasNDD());
2157 Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
2158 updateValueMap(I, ResultReg);
2159 return true;
2160}
2161
2162/// Emit SSE or AVX instructions to lower the select.
2163///
2164/// Try to use SSE1/SSE2 instructions to simulate a select without branches.
2165/// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
2166/// SSE instructions are available. If AVX is available, try to use a VBLENDV.
2167bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
2168 // Optimize conditions coming from a compare if both instructions are in the
2169 // same basic block (values defined in other basic blocks may not have
2170 // initialized registers).
2171 const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
2172 if (!CI || (CI->getParent() != I->getParent()))
2173 return false;
2174
2175 if (I->getType() != CI->getOperand(0)->getType() ||
2176 !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
2177 (Subtarget->hasSSE2() && RetVT == MVT::f64)))
2178 return false;
2179
2180 const Value *CmpLHS = CI->getOperand(0);
2181 const Value *CmpRHS = CI->getOperand(1);
2182 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2183
2184 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
2185 // We don't have to materialize a zero constant for this case and can just use
2186 // %x again on the RHS.
2187 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
2188 const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
2189 if (CmpRHSC && CmpRHSC->isNullValue())
2190 CmpRHS = CmpLHS;
2191 }
2192
2193 unsigned CC;
2194 bool NeedSwap;
2195 std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
2196 if (CC > 7 && !Subtarget->hasAVX())
2197 return false;
2198
2199 if (NeedSwap)
2200 std::swap(CmpLHS, CmpRHS);
2201
2202 const Value *LHS = I->getOperand(1);
2203 const Value *RHS = I->getOperand(2);
2204
2205 Register LHSReg = getRegForValue(LHS);
2206 Register RHSReg = getRegForValue(RHS);
2207 Register CmpLHSReg = getRegForValue(CmpLHS);
2208 Register CmpRHSReg = getRegForValue(CmpRHS);
2209 if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
2210 return false;
2211
2212 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2213 Register ResultReg;
2214
2215 if (Subtarget->hasAVX512()) {
2216 // If we have AVX512 we can use a mask compare and masked movss/sd.
2217 const TargetRegisterClass *VR128X = &X86::VR128XRegClass;
2218 const TargetRegisterClass *VK1 = &X86::VK1RegClass;
2219
2220 unsigned CmpOpcode =
2221 (RetVT == MVT::f32) ? X86::VCMPSSZrri : X86::VCMPSDZrri;
2222 Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpRHSReg,
2223 CC);
2224
2225 // Need an IMPLICIT_DEF for the input that is used to generate the upper
2226 // bits of the result register since it's not based on any of the inputs.
2227 Register ImplicitDefReg = createResultReg(VR128X);
2228 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2229 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2230
2231 // Place RHSReg in the passthru operand of the masked movss/sd operation and
2232 // put LHSReg in the input. The mask input comes from the compare.
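// In other words, for the lowest lane the masked move below computes
// Result = Mask[0] ? LHS : RHS, since lanes whose mask bit is zero take the
// passthru value (RHS).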
2233 unsigned MovOpcode =
2234 (RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk;
2235 Register MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, CmpReg,
2236 ImplicitDefReg, LHSReg);
2237
2238 ResultReg = createResultReg(RC);
2239 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2240 TII.get(TargetOpcode::COPY), ResultReg).addReg(MovReg);
2241
2242 } else if (Subtarget->hasAVX()) {
2243 const TargetRegisterClass *VR128 = &X86::VR128RegClass;
2244
2245 // If we have AVX, create 1 blendv instead of 3 logic instructions.
2246 // Blendv was introduced with SSE 4.1, but the 2 register form implicitly
2247 // uses XMM0 as the selection register. That may need just as many
2248 // instructions as the AND/ANDN/OR sequence due to register moves, so
2249 // don't bother.
2250 unsigned CmpOpcode =
2251 (RetVT == MVT::f32) ? X86::VCMPSSrri : X86::VCMPSDrri;
2252 unsigned BlendOpcode =
2253 (RetVT == MVT::f32) ? X86::VBLENDVPSrrr : X86::VBLENDVPDrrr;
2254
2255 Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpRHSReg,
2256 CC);
2257 Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, LHSReg,
2258 CmpReg);
2259 ResultReg = createResultReg(RC);
2260 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2261 TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
2262 } else {
2263 // Choose the SSE instruction sequence based on data type (float or double).
2264 static const uint16_t OpcTable[2][4] = {
2265 { X86::CMPSSrri, X86::ANDPSrr, X86::ANDNPSrr, X86::ORPSrr },
2266 { X86::CMPSDrri, X86::ANDPDrr, X86::ANDNPDrr, X86::ORPDrr }
2267 };
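// The sequence built from this table computes, in vector registers,
//   Mask   = (CmpLHS CC CmpRHS) ? all-ones : all-zeros   (CMPSS/CMPSD)
//   Result = (Mask & LHS) | (~Mask & RHS)                (ANDPS/ANDNPS/ORPS)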
2268
2269 const uint16_t *Opc = nullptr;
2270 switch (RetVT.SimpleTy) {
2271 default: return false;
2272 case MVT::f32: Opc = &OpcTable[0][0]; break;
2273 case MVT::f64: Opc = &OpcTable[1][0]; break;
2274 }
2275
2276 const TargetRegisterClass *VR128 = &X86::VR128RegClass;
2277 Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpRHSReg, CC);
2278 Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, LHSReg);
2279 Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, RHSReg);
2280 Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, AndReg);
2281 ResultReg = createResultReg(RC);
2282 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2283 TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);
2284 }
2285 updateValueMap(I, ResultReg);
2286 return true;
2287}
2288
2289bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
2290 // These are pseudo CMOV instructions and will be later expanded into control-
2291 // flow.
2292 unsigned Opc;
2293 switch (RetVT.SimpleTy) {
2294 default: return false;
2295 case MVT::i8: Opc = X86::CMOV_GR8; break;
2296 case MVT::i16: Opc = X86::CMOV_GR16; break;
2297 case MVT::i32: Opc = X86::CMOV_GR32; break;
2298 case MVT::f16:
2299 Opc = Subtarget->hasAVX512() ? X86::CMOV_FR16X : X86::CMOV_FR16; break;
2300 case MVT::f32:
2301 Opc = Subtarget->hasAVX512() ? X86::CMOV_FR32X : X86::CMOV_FR32; break;
2302 case MVT::f64:
2303 Opc = Subtarget->hasAVX512() ? X86::CMOV_FR64X : X86::CMOV_FR64; break;
2304 }
2305
2306 const Value *Cond = I->getOperand(0);
2307 X86::CondCode CC = X86::COND_NE;
2308
2309 // Optimize conditions coming from a compare if both instructions are in the
2310 // same basic block (values defined in other basic blocks may not have
2311 // initialized registers).
2312 const auto *CI = dyn_cast<CmpInst>(Cond);
2313 if (CI && (CI->getParent() == I->getParent())) {
2314 bool NeedSwap;
2315 std::tie(CC, NeedSwap) = X86::getX86ConditionCode(CI->getPredicate());
2316 if (CC > X86::LAST_VALID_COND)
2317 return false;
2318
2319 const Value *CmpLHS = CI->getOperand(0);
2320 const Value *CmpRHS = CI->getOperand(1);
2321
2322 if (NeedSwap)
2323 std::swap(CmpLHS, CmpRHS);
2324
2325 EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
2326 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
2327 return false;
2328 } else {
2329 Register CondReg = getRegForValue(Cond);
2330 if (!CondReg)
2331 return false;
2332
2333 // In case OpReg is a K register, COPY to a GPR
2334 if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
2335 Register KCondReg = CondReg;
2336 CondReg = createResultReg(&X86::GR32RegClass);
2337 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2338 TII.get(TargetOpcode::COPY), CondReg)
2339 .addReg(KCondReg);
2340 CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
2341 }
2342 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
2343 .addReg(CondReg)
2344 .addImm(1);
2345 }
2346
2347 const Value *LHS = I->getOperand(1);
2348 const Value *RHS = I->getOperand(2);
2349
2350 Register LHSReg = getRegForValue(LHS);
2351 Register RHSReg = getRegForValue(RHS);
2352 if (!LHSReg || !RHSReg)
2353 return false;
2354
2355 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2356
2357 Register ResultReg =
2358 fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
2359 updateValueMap(I, ResultReg);
2360 return true;
2361}
2362
2363bool X86FastISel::X86SelectSelect(const Instruction *I) {
2364 MVT RetVT;
2365 if (!isTypeLegal(I->getType(), RetVT))
2366 return false;
2367
2368 // Check if we can fold the select.
2369 if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {
2370 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2371 const Value *Opnd = nullptr;
2372 switch (Predicate) {
2373 default: break;
2374 case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;
2375 case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break;
2376 }
2377 // No need for a select anymore - this is an unconditional move.
2378 if (Opnd) {
2379 Register OpReg = getRegForValue(Opnd);
2380 if (!OpReg)
2381 return false;
2382 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2383 Register ResultReg = createResultReg(RC);
2384 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2385 TII.get(TargetOpcode::COPY), ResultReg)
2386 .addReg(OpReg);
2387 updateValueMap(I, ResultReg);
2388 return true;
2389 }
2390 }
2391
2392 // First try to use real conditional move instructions.
2393 if (X86FastEmitCMoveSelect(RetVT, I))
2394 return true;
2395
2396 // Try to use a sequence of SSE instructions to simulate a conditional move.
2397 if (X86FastEmitSSESelect(RetVT, I))
2398 return true;
2399
2400 // Fall-back to pseudo conditional move instructions, which will be later
2401 // converted to control-flow.
2402 if (X86FastEmitPseudoSelect(RetVT, I))
2403 return true;
2404
2405 return false;
2406}
2407
2408// Common code for X86SelectSIToFP and X86SelectUIToFP.
2409bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {
2410 // The target-independent selection algorithm in FastISel already knows how
2411 // to select a SINT_TO_FP if the target is SSE but not AVX.
2412 // Early exit if the subtarget doesn't have AVX.
2413 // Unsigned conversion requires avx512.
2414 bool HasAVX512 = Subtarget->hasAVX512();
2415 if (!Subtarget->hasAVX() || (!IsSigned && !HasAVX512))
2416 return false;
2417
2418 // TODO: We could sign extend narrower types.
2419 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
2420 if (SrcVT != MVT::i32 && SrcVT != MVT::i64)
2421 return false;
2422
2423 // Select integer to float/double conversion.
2424 Register OpReg = getRegForValue(I->getOperand(0));
2425 if (!OpReg)
2426 return false;
2427
2428 unsigned Opcode;
2429
2430 static const uint16_t SCvtOpc[2][2][2] = {
2431 { { X86::VCVTSI2SSrr, X86::VCVTSI642SSrr },
2432 { X86::VCVTSI2SDrr, X86::VCVTSI642SDrr } },
2433 { { X86::VCVTSI2SSZrr, X86::VCVTSI642SSZrr },
2434 { X86::VCVTSI2SDZrr, X86::VCVTSI642SDZrr } },
2435 };
2436 static const uint16_t UCvtOpc[2][2] = {
2437 { X86::VCVTUSI2SSZrr, X86::VCVTUSI642SSZrr },
2438 { X86::VCVTUSI2SDZrr, X86::VCVTUSI642SDZrr },
2439 };
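// For example, with plain AVX a "sitofp i64 %x to double" selects
// VCVTSI642SDrr from SCvtOpc, while with AVX-512 a "uitofp i32 %x to float"
// selects VCVTUSI2SSZrr from UCvtOpc.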
2440 bool Is64Bit = SrcVT == MVT::i64;
2441
2442 if (I->getType()->isDoubleTy()) {
2443 // s/uitofp int -> double
2444 Opcode = IsSigned ? SCvtOpc[HasAVX512][1][Is64Bit] : UCvtOpc[1][Is64Bit];
2445 } else if (I->getType()->isFloatTy()) {
2446 // s/uitofp int -> float
2447 Opcode = IsSigned ? SCvtOpc[HasAVX512][0][Is64Bit] : UCvtOpc[0][Is64Bit];
2448 } else
2449 return false;
2450
2451 MVT DstVT = TLI.getValueType(DL, I->getType()).getSimpleVT();
2452 const TargetRegisterClass *RC = TLI.getRegClassFor(DstVT);
2453 Register ImplicitDefReg = createResultReg(RC);
2454 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2455 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2456 Register ResultReg = fastEmitInst_rr(Opcode, RC, ImplicitDefReg, OpReg);
2457 updateValueMap(I, ResultReg);
2458 return true;
2459}
2460
2461bool X86FastISel::X86SelectSIToFP(const Instruction *I) {
2462 return X86SelectIntToFP(I, /*IsSigned*/true);
2463}
2464
2465bool X86FastISel::X86SelectUIToFP(const Instruction *I) {
2466 return X86SelectIntToFP(I, /*IsSigned*/false);
2467}
2468
2469// Helper method used by X86SelectFPExt and X86SelectFPTrunc.
2470bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
2471 unsigned TargetOpc,
2472 const TargetRegisterClass *RC) {
2473 assert((I->getOpcode() == Instruction::FPExt ||
2474 I->getOpcode() == Instruction::FPTrunc) &&
2475 "Instruction must be an FPExt or FPTrunc!");
2476 bool HasAVX = Subtarget->hasAVX();
2477
2478 Register OpReg = getRegForValue(I->getOperand(0));
2479 if (!OpReg)
2480 return false;
2481
2482 Register ImplicitDefReg;
2483 if (HasAVX) {
2484 ImplicitDefReg = createResultReg(RC);
2485 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2486 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2487
2488 }
2489
2490 Register ResultReg = createResultReg(RC);
2491 MachineInstrBuilder MIB;
2492 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpc),
2493 ResultReg);
2494
2495 if (HasAVX)
2496 MIB.addReg(ImplicitDefReg);
2497
2498 MIB.addReg(OpReg);
2499 updateValueMap(I, ResultReg);
2500 return true;
2501}
2502
2503bool X86FastISel::X86SelectFPExt(const Instruction *I) {
2504 if (Subtarget->hasSSE2() && I->getType()->isDoubleTy() &&
2505 I->getOperand(0)->getType()->isFloatTy()) {
2506 bool HasAVX512 = Subtarget->hasAVX512();
2507 // fpext from float to double.
2508 unsigned Opc =
2509 HasAVX512 ? X86::VCVTSS2SDZrr
2510 : Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
2511 return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f64));
2512 }
2513
2514 return false;
2515}
2516
2517bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
2518 if (Subtarget->hasSSE2() && I->getType()->isFloatTy() &&
2519 I->getOperand(0)->getType()->isDoubleTy()) {
2520 bool HasAVX512 = Subtarget->hasAVX512();
2521 // fptrunc from double to float.
2522 unsigned Opc =
2523 HasAVX512 ? X86::VCVTSD2SSZrr
2524 : Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
2525 return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f32));
2526 }
2527
2528 return false;
2529}
2530
2531bool X86FastISel::X86SelectTrunc(const Instruction *I) {
2532 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
2533 EVT DstVT = TLI.getValueType(DL, I->getType());
2534
2535 // This code only handles truncation to byte.
2536 if (DstVT != MVT::i8 && DstVT != MVT::i1)
2537 return false;
2538 if (!TLI.isTypeLegal(SrcVT))
2539 return false;
2540
2541 Register InputReg = getRegForValue(I->getOperand(0));
2542 if (!InputReg)
2543 // Unhandled operand. Halt "fast" selection and bail.
2544 return false;
2545
2546 if (SrcVT == MVT::i8) {
2547 // Truncate from i8 to i1; no code needed.
2548 updateValueMap(I, InputReg);
2549 return true;
2550 }
2551
2552 // Issue an extract_subreg.
2553 Register ResultReg = fastEmitInst_extractsubreg(MVT::i8, InputReg,
2554 X86::sub_8bit);
2555 if (!ResultReg)
2556 return false;
2557
2558 updateValueMap(I, ResultReg);
2559 return true;
2560}
2561
2562bool X86FastISel::X86SelectBitCast(const Instruction *I) {
2563 // Select SSE2/AVX bitcasts between 128/256/512 bit vector types.
2564 MVT SrcVT, DstVT;
2565 if (!Subtarget->hasSSE2() ||
2566 !isTypeLegal(I->getOperand(0)->getType(), SrcVT) ||
2567 !isTypeLegal(I->getType(), DstVT))
2568 return false;
2569
2570 // Only allow vectors that use xmm/ymm/zmm.
2571 if (!SrcVT.isVector() || !DstVT.isVector() ||
2572 SrcVT.getVectorElementType() == MVT::i1 ||
2573 DstVT.getVectorElementType() == MVT::i1)
2574 return false;
2575
2576 Register Reg = getRegForValue(I->getOperand(0));
2577 if (!Reg)
2578 return false;
2579
2580 // Emit a reg-reg copy so we don't propagate cached known bits information
2581 // with the wrong VT if we fall out of fast isel after selecting this.
2582 const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
2583 Register ResultReg = createResultReg(DstClass);
2584 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2585 ResultReg)
2586 .addReg(Reg);
2587
2588 updateValueMap(I, ResultReg);
2589 return true;
2590}
2591
2592bool X86FastISel::IsMemcpySmall(uint64_t Len) {
2593 return Len <= (Subtarget->is64Bit() ? 32 : 16);
2594}
2595
2596bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
2597 X86AddressMode SrcAM, uint64_t Len) {
2598
2599 // Make sure we don't bloat code by inlining very large memcpy's.
2600 if (!IsMemcpySmall(Len))
2601 return false;
2602
2603 bool i64Legal = Subtarget->is64Bit();
2604
2605 // We don't care about alignment here since we just emit integer accesses.
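// For example, a 10-byte copy on x86-64 is emitted as one i64 load/store pair
// followed by one i16 load/store pair (10 = 8 + 2).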
2606 while (Len) {
2607 MVT VT;
2608 if (Len >= 8 && i64Legal)
2609 VT = MVT::i64;
2610 else if (Len >= 4)
2611 VT = MVT::i32;
2612 else if (Len >= 2)
2613 VT = MVT::i16;
2614 else
2615 VT = MVT::i8;
2616
2617 Register Reg;
2618 bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
2619 RV &= X86FastEmitStore(VT, Reg, DestAM);
2620 assert(RV && "Failed to emit load or store??");
2621 (void)RV;
2622
2623 unsigned Size = VT.getSizeInBits()/8;
2624 Len -= Size;
2625 DestAM.Disp += Size;
2626 SrcAM.Disp += Size;
2627 }
2628
2629 return true;
2630}
2631
2632bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
2633 // FIXME: Handle more intrinsics.
2634 switch (II->getIntrinsicID()) {
2635 default:
2636 return false;
2637 case Intrinsic::frameaddress: {
2638 MachineFunction *MF = FuncInfo.MF;
2639 if (MF->getTarget().getMCAsmInfo()->usesWindowsCFI())
2640 return false;
2641
2642 Type *RetTy = II->getCalledFunction()->getReturnType();
2643
2644 MVT VT;
2645 if (!isTypeLegal(RetTy, VT))
2646 return false;
2647
2648 unsigned Opc;
2649 const TargetRegisterClass *RC = nullptr;
2650
2651 switch (VT.SimpleTy) {
2652 default: llvm_unreachable("Invalid result type for frameaddress.");
2653 case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
2654 case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
2655 }
2656
2657 // This needs to be set before we call getPtrSizedFrameRegister, otherwise
2658 // we get the wrong frame register.
2659 MachineFrameInfo &MFI = MF->getFrameInfo();
2660 MFI.setFrameAddressIsTaken(true);
2661
2662 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2663 Register FrameReg = RegInfo->getPtrSizedFrameRegister(*MF);
2664 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
2665 (FrameReg == X86::EBP && VT == MVT::i32)) &&
2666 "Invalid Frame Register!");
2667
2668 // Always make a copy of the frame register to a vreg first, so that we
2669 // never directly reference the frame register (the TwoAddressInstruction-
2670 // Pass doesn't like that).
2671 Register SrcReg = createResultReg(RC);
2672 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2673 TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
2674
2675 // Now recursively load from the frame address.
2676 // movq (%rbp), %rax
2677 // movq (%rax), %rax
2678 // movq (%rax), %rax
2679 // ...
2680 unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
2681 while (Depth--) {
2682 Register DestReg = createResultReg(RC);
2683 addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2684 TII.get(Opc), DestReg), SrcReg);
2685 SrcReg = DestReg;
2686 }
2687
2688 updateValueMap(II, SrcReg);
2689 return true;
2690 }
2691 case Intrinsic::memcpy: {
2692 const MemCpyInst *MCI = cast<MemCpyInst>(II);
2693 // Don't handle volatile or variable length memcpys.
2694 if (MCI->isVolatile())
2695 return false;
2696
2697 if (isa<ConstantInt>(MCI->getLength())) {
2698 // Small memcpy's are common enough that we want to do them
2699 // without a call if possible.
2700 uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
2701 if (IsMemcpySmall(Len)) {
2702 X86AddressMode DestAM, SrcAM;
2703 if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
2704 !X86SelectAddress(MCI->getRawSource(), SrcAM))
2705 return false;
2706 TryEmitSmallMemcpy(DestAM, SrcAM, Len);
2707 return true;
2708 }
2709 }
2710
2711 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2712 if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
2713 return false;
2714
2715 if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
2716 return false;
2717
2718 return lowerCallTo(II, "memcpy", II->arg_size() - 1);
2719 }
2720 case Intrinsic::memset: {
2721 const MemSetInst *MSI = cast<MemSetInst>(II);
2722
2723 if (MSI->isVolatile())
2724 return false;
2725
2726 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2727 if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
2728 return false;
2729
2730 if (MSI->getDestAddressSpace() > 255)
2731 return false;
2732
2733 return lowerCallTo(II, "memset", II->arg_size() - 1);
2734 }
2735 case Intrinsic::stackprotector: {
2736 // Emit code to store the stack guard onto the stack.
2737 EVT PtrTy = TLI.getPointerTy(DL);
2738
2739 const Value *Op1 = II->getArgOperand(0); // The guard's value.
2740 const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));
2741
2742 MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);
2743
2744 // Grab the frame index.
2745 X86AddressMode AM;
2746 if (!X86SelectAddress(Slot, AM)) return false;
2747 if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
2748 return true;
2749 }
2750 case Intrinsic::dbg_declare: {
2751 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
2752 X86AddressMode AM;
2753 assert(DI->getAddress() && "Null address should be checked earlier!");
2754 if (!X86SelectAddress(DI->getAddress(), AM))
2755 return false;
2756 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
2757 assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
2758 "Expected inlined-at fields to agree");
2759 addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II), AM)
2760 .addImm(0)
2761 .addMetadata(DI->getVariable())
2762 .addMetadata(DI->getExpression());
2763 return true;
2764 }
2765 case Intrinsic::trap: {
2766 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TRAP));
2767 return true;
2768 }
2769 case Intrinsic::sqrt: {
2770 if (!Subtarget->hasSSE1())
2771 return false;
2772
2773 Type *RetTy = II->getCalledFunction()->getReturnType();
2774
2775 MVT VT;
2776 if (!isTypeLegal(RetTy, VT))
2777 return false;
2778
2779 // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
2780 // is not generated by FastISel yet.
2781 // FIXME: Update this code once tablegen can handle it.
2782 static const uint16_t SqrtOpc[3][2] = {
2783 { X86::SQRTSSr, X86::SQRTSDr },
2784 { X86::VSQRTSSr, X86::VSQRTSDr },
2785 { X86::VSQRTSSZr, X86::VSQRTSDZr },
2786 };
2787 unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
2788 Subtarget->hasAVX() ? 1 :
2789 0;
2790 unsigned Opc;
2791 switch (VT.SimpleTy) {
2792 default: return false;
2793 case MVT::f32: Opc = SqrtOpc[AVXLevel][0]; break;
2794 case MVT::f64: Opc = SqrtOpc[AVXLevel][1]; break;
2795 }
2796
2797 const Value *SrcVal = II->getArgOperand(0);
2798 Register SrcReg = getRegForValue(SrcVal);
2799
2800 if (!SrcReg)
2801 return false;
2802
2803 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
2804 Register ImplicitDefReg;
2805 if (AVXLevel > 0) {
2806 ImplicitDefReg = createResultReg(RC);
2807 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2808 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2809 }
2810
2811 Register ResultReg = createResultReg(RC);
2812 MachineInstrBuilder MIB;
2813 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
2814 ResultReg);
2815
2816 if (ImplicitDefReg)
2817 MIB.addReg(ImplicitDefReg);
2818
2819 MIB.addReg(SrcReg);
2820
2821 updateValueMap(II, ResultReg);
2822 return true;
2823 }
2824 case Intrinsic::sadd_with_overflow:
2825 case Intrinsic::uadd_with_overflow:
2826 case Intrinsic::ssub_with_overflow:
2827 case Intrinsic::usub_with_overflow:
2828 case Intrinsic::smul_with_overflow:
2829 case Intrinsic::umul_with_overflow: {
2830 // This implements the basic lowering of the xalu with overflow intrinsics
2831 // into add/sub/mul followed by either seto or setb.
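// Roughly, and with illustrative registers, a call to
// llvm.sadd.with.overflow.i32(%a, %b) becomes an ADD32rr producing the first
// result, followed by a SETcc on COND_O (i.e. "seto") producing the i1
// overflow flag in an 8-bit register.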
2832 const Function *Callee = II->getCalledFunction();
2833 auto *Ty = cast<StructType>(Callee->getReturnType());
2834 Type *RetTy = Ty->getTypeAtIndex(0U);
2835 assert(Ty->getTypeAtIndex(1)->isIntegerTy() &&
2836 Ty->getTypeAtIndex(1)->getScalarSizeInBits() == 1 &&
2837 "Overflow value expected to be an i1");
2838
2839 MVT VT;
2840 if (!isTypeLegal(RetTy, VT))
2841 return false;
2842
2843 if (VT < MVT::i8 || VT > MVT::i64)
2844 return false;
2845
2846 const Value *LHS = II->getArgOperand(0);
2847 const Value *RHS = II->getArgOperand(1);
2848
2849 // Canonicalize immediate to the RHS.
2850 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) && II->isCommutative())
2851 std::swap(LHS, RHS);
2852
2853 unsigned BaseOpc, CondCode;
2854 switch (II->getIntrinsicID()) {
2855 default: llvm_unreachable("Unexpected intrinsic!");
2856 case Intrinsic::sadd_with_overflow:
2857 BaseOpc = ISD::ADD; CondCode = X86::COND_O; break;
2858 case Intrinsic::uadd_with_overflow:
2859 BaseOpc = ISD::ADD; CondCode = X86::COND_B; break;
2860 case Intrinsic::ssub_with_overflow:
2861 BaseOpc = ISD::SUB; CondCode = X86::COND_O; break;
2862 case Intrinsic::usub_with_overflow:
2863 BaseOpc = ISD::SUB; CondCode = X86::COND_B; break;
2864 case Intrinsic::smul_with_overflow:
2865 BaseOpc = X86ISD::SMUL; CondCode = X86::COND_O; break;
2866 case Intrinsic::umul_with_overflow:
2867 BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break;
2868 }
2869
2870 Register LHSReg = getRegForValue(LHS);
2871 if (!LHSReg)
2872 return false;
2873
2874 Register ResultReg;
2875 // Check if we have an immediate version.
2876 if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
2877 static const uint16_t Opc[2][4] = {
2878 { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
2879 { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
2880 };
2881
2882 if (CI->isOne() && (BaseOpc == ISD::ADD || BaseOpc == ISD::SUB) &&
2883 CondCode == X86::COND_O) {
2884 // We can use INC/DEC.
2885 ResultReg = createResultReg(TLI.getRegClassFor(VT));
2886 bool IsDec = BaseOpc == ISD::SUB;
2887 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2888 TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
2889 .addReg(LHSReg);
2890 } else
2891 ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, CI->getZExtValue());
2892 }
2893
2894 Register RHSReg;
2895 if (!ResultReg) {
2896 RHSReg = getRegForValue(RHS);
2897 if (!RHSReg)
2898 return false;
2899 ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, RHSReg);
2900 }
2901
2902 // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
2903 // it manually.
2904 if (BaseOpc == X86ISD::UMUL && !ResultReg) {
2905 static const uint16_t MULOpc[] =
2906 { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
2907 static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
2908 // First copy the first operand into RAX, which is an implicit input to
2909 // the X86::MUL*r instruction.
2910 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2911 TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
2912 .addReg(LHSReg);
2913 ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
2914 TLI.getRegClassFor(VT), RHSReg);
2915 } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
2916 static const uint16_t MULOpc[] =
2917 { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
2918 if (VT == MVT::i8) {
2919 // Copy the first operand into AL, which is an implicit input to the
2920 // X86::IMUL8r instruction.
2921 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2922 TII.get(TargetOpcode::COPY), X86::AL)
2923 .addReg(LHSReg);
2924 ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg);
2925 } else
2926 ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
2927 TLI.getRegClassFor(VT), LHSReg, RHSReg);
2928 }
2929
2930 if (!ResultReg)
2931 return false;
2932
2933 // Assign to a GPR since the overflow return value is lowered to a SETcc.
2934 Register ResultReg2 = createResultReg(&X86::GR8RegClass);
2935 assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
2936 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(GET_SETCC),
2937 ResultReg2)
2938 .addImm(CondCode);
2939
2940 updateValueMap(II, ResultReg, 2);
2941 return true;
2942 }
2943 case Intrinsic::x86_sse_cvttss2si:
2944 case Intrinsic::x86_sse_cvttss2si64:
2945 case Intrinsic::x86_sse2_cvttsd2si:
2946 case Intrinsic::x86_sse2_cvttsd2si64: {
2947 bool IsInputDouble;
2948 switch (II->getIntrinsicID()) {
2949 default: llvm_unreachable("Unexpected intrinsic.");
2950 case Intrinsic::x86_sse_cvttss2si:
2951 case Intrinsic::x86_sse_cvttss2si64:
2952 if (!Subtarget->hasSSE1())
2953 return false;
2954 IsInputDouble = false;
2955 break;
2956 case Intrinsic::x86_sse2_cvttsd2si:
2957 case Intrinsic::x86_sse2_cvttsd2si64:
2958 if (!Subtarget->hasSSE2())
2959 return false;
2960 IsInputDouble = true;
2961 break;
2962 }
2963
2964 Type *RetTy = II->getCalledFunction()->getReturnType();
2965 MVT VT;
2966 if (!isTypeLegal(RetTy, VT))
2967 return false;
2968
2969 static const uint16_t CvtOpc[3][2][2] = {
2970 { { X86::CVTTSS2SIrr, X86::CVTTSS2SI64rr },
2971 { X86::CVTTSD2SIrr, X86::CVTTSD2SI64rr } },
2972 { { X86::VCVTTSS2SIrr, X86::VCVTTSS2SI64rr },
2973 { X86::VCVTTSD2SIrr, X86::VCVTTSD2SI64rr } },
2974 { { X86::VCVTTSS2SIZrr, X86::VCVTTSS2SI64Zrr },
2975 { X86::VCVTTSD2SIZrr, X86::VCVTTSD2SI64Zrr } },
2976 };
2977 unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
2978 Subtarget->hasAVX() ? 1 :
2979 0;
2980 unsigned Opc;
2981 switch (VT.SimpleTy) {
2982 default: llvm_unreachable("Unexpected result type.");
2983 case MVT::i32: Opc = CvtOpc[AVXLevel][IsInputDouble][0]; break;
2984 case MVT::i64: Opc = CvtOpc[AVXLevel][IsInputDouble][1]; break;
2985 }
2986
2987 // Check if we can fold insertelement instructions into the convert.
2988 const Value *Op = II->getArgOperand(0);
2989 while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
2990 const Value *Index = IE->getOperand(2);
2991 if (!isa<ConstantInt>(Index))
2992 break;
2993 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
2994
2995 if (!Idx) {
2996 Op = IE->getOperand(1);
2997 break;
2998 }
2999 Op = IE->getOperand(0);
3000 }
3001
3002 Register Reg = getRegForValue(Op);
3003 if (!Reg)
3004 return false;
3005
3006 Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3007 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
3008 .addReg(Reg);
3009
3010 updateValueMap(II, ResultReg);
3011 return true;
3012 }
3013 case Intrinsic::x86_sse42_crc32_32_8:
3014 case Intrinsic::x86_sse42_crc32_32_16:
3015 case Intrinsic::x86_sse42_crc32_32_32:
3016 case Intrinsic::x86_sse42_crc32_64_64: {
3017 if (!Subtarget->hasCRC32())
3018 return false;
3019
3020 Type *RetTy = II->getCalledFunction()->getReturnType();
3021
3022 MVT VT;
3023 if (!isTypeLegal(RetTy, VT))
3024 return false;
3025
3026 unsigned Opc;
3027 const TargetRegisterClass *RC = nullptr;
3028
3029 switch (II->getIntrinsicID()) {
3030 default:
3031 llvm_unreachable("Unexpected intrinsic.");
3032#define GET_EGPR_IF_ENABLED(OPC) Subtarget->hasEGPR() ? OPC##_EVEX : OPC
3033 case Intrinsic::x86_sse42_crc32_32_8:
3034 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r32r8);
3035 RC = &X86::GR32RegClass;
3036 break;
3037 case Intrinsic::x86_sse42_crc32_32_16:
3038 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r32r16);
3039 RC = &X86::GR32RegClass;
3040 break;
3041 case Intrinsic::x86_sse42_crc32_32_32:
3042 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r32r32);
3043 RC = &X86::GR32RegClass;
3044 break;
3045 case Intrinsic::x86_sse42_crc32_64_64:
3046 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r64r64);
3047 RC = &X86::GR64RegClass;
3048 break;
3049#undef GET_EGPR_IF_ENABLED
3050 }
3051
3052 const Value *LHS = II->getArgOperand(0);
3053 const Value *RHS = II->getArgOperand(1);
3054
3055 Register LHSReg = getRegForValue(LHS);
3056 Register RHSReg = getRegForValue(RHS);
3057 if (!LHSReg || !RHSReg)
3058 return false;
3059
3060 Register ResultReg = fastEmitInst_rr(Opc, RC, LHSReg, RHSReg);
3061 if (!ResultReg)
3062 return false;
3063
3064 updateValueMap(II, ResultReg);
3065 return true;
3066 }
3067 }
3068}
3069
3070bool X86FastISel::fastLowerArguments() {
3071 if (!FuncInfo.CanLowerReturn)
3072 return false;
3073
3074 const Function *F = FuncInfo.Fn;
3075 if (F->isVarArg())
3076 return false;
3077
3078 CallingConv::ID CC = F->getCallingConv();
3079 if (CC != CallingConv::C)
3080 return false;
3081
3082 if (Subtarget->isCallingConvWin64(CC))
3083 return false;
3084
3085 if (!Subtarget->is64Bit())
3086 return false;
3087
3088 if (Subtarget->useSoftFloat())
3089 return false;
3090
3091 // Only handle simple cases, i.e. up to 6 i32/i64 scalar arguments.
3092 unsigned GPRCnt = 0;
3093 unsigned FPRCnt = 0;
3094 for (auto const &Arg : F->args()) {
3095 if (Arg.hasAttribute(Attribute::ByVal) ||
3096 Arg.hasAttribute(Attribute::InReg) ||
3097 Arg.hasAttribute(Attribute::StructRet) ||
3098 Arg.hasAttribute(Attribute::SwiftSelf) ||
3099 Arg.hasAttribute(Attribute::SwiftAsync) ||
3100 Arg.hasAttribute(Attribute::SwiftError) ||
3101 Arg.hasAttribute(Attribute::Nest))
3102 return false;
3103
3104 Type *ArgTy = Arg.getType();
3105 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
3106 return false;
3107
3108 EVT ArgVT = TLI.getValueType(DL, ArgTy);
3109 if (!ArgVT.isSimple()) return false;
3110 switch (ArgVT.getSimpleVT().SimpleTy) {
3111 default: return false;
3112 case MVT::i32:
3113 case MVT::i64:
3114 ++GPRCnt;
3115 break;
3116 case MVT::f32:
3117 case MVT::f64:
3118 if (!Subtarget->hasSSE1())
3119 return false;
3120 ++FPRCnt;
3121 break;
3122 }
3123
3124 if (GPRCnt > 6)
3125 return false;
3126
3127 if (FPRCnt > 8)
3128 return false;
3129 }
3130
3131 static const MCPhysReg GPR32ArgRegs[] = {
3132 X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
3133 };
3134 static const MCPhysReg GPR64ArgRegs[] = {
3135 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
3136 };
3137 static const MCPhysReg XMMArgRegs[] = {
3138 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3139 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3140 };
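// For example, for "define i64 @f(i64 %a, double %b, i32 %c)" the arguments
// are taken from RDI, XMM0 and ESI respectively, since the GPR and XMM
// counters advance independently.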
3141
3142 unsigned GPRIdx = 0;
3143 unsigned FPRIdx = 0;
3144 for (auto const &Arg : F->args()) {
3145 MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
3146 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
3147 MCRegister SrcReg;
3148 switch (VT.SimpleTy) {
3149 default: llvm_unreachable("Unexpected value type.");
3150 case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
3151 case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
3152 case MVT::f32: [[fallthrough]];
3153 case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
3154 }
3155 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3156 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
3157 // Without this, EmitLiveInCopies may eliminate the livein if its only
3158 // use is a bitcast (which isn't turned into an instruction).
3159 Register ResultReg = createResultReg(RC);
3160 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3161 TII.get(TargetOpcode::COPY), ResultReg)
3162 .addReg(DstReg, getKillRegState(true));
3163 updateValueMap(&Arg, ResultReg);
3164 }
3165 return true;
3166}
3167
3168static unsigned computeBytesPoppedByCalleeForSRet(const X86Subtarget *Subtarget,
3169 CallingConv::ID CC,
3170 const CallBase *CB) {
3171 if (Subtarget->is64Bit())
3172 return 0;
3173 if (Subtarget->getTargetTriple().isOSMSVCRT())
3174 return 0;
3175 if (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3176 CC == CallingConv::HiPE || CC == CallingConv::Tail ||
3177 CC == CallingConv::SwiftTail)
3178 return 0;
3179
3180 if (CB)
3181 if (CB->arg_empty() || !CB->paramHasAttr(0, Attribute::StructRet) ||
3182 CB->paramHasAttr(0, Attribute::InReg) || Subtarget->isTargetMCU())
3183 return 0;
3184
3185 return 4;
3186}
3187
3188bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
3189 auto &OutVals = CLI.OutVals;
3190 auto &OutFlags = CLI.OutFlags;
3191 auto &OutRegs = CLI.OutRegs;
3192 auto &Ins = CLI.Ins;
3193 auto &InRegs = CLI.InRegs;
3194 CallingConv::ID CC = CLI.CallConv;
3195 bool &IsTailCall = CLI.IsTailCall;
3196 bool IsVarArg = CLI.IsVarArg;
3197 const Value *Callee = CLI.Callee;
3198 MCSymbol *Symbol = CLI.Symbol;
3199 const auto *CB = CLI.CB;
3200
3201 bool Is64Bit = Subtarget->is64Bit();
3202 bool IsWin64 = Subtarget->isCallingConvWin64(CC);
3203
3204 // If the return type is illegal, check whether the ABI requires a type
3205 // conversion that FastISel cannot handle, and fall back to DAG ISel in such
3206 // cases. For example, bfloat is returned as f16 in XMM0, but FastISel would
3207 // assign it an f32 register type and store that in FuncInfo.ValueMap. This
3208 // would cause the DAG to incorrectly convert from f32 to bfloat after
3209 // reading the value back from FuncInfo.ValueMap.
3210 // In contrast, i1 is promoted to i8 and returned as i8 per the ABI, so
3211 // FastISel can lower it without switching to DAG ISel.
3212 SmallVector<Type *> RetTys;
3213 ComputeValueTypes(DL, CLI.RetTy, RetTys);
3214 for (Type *RetTy : RetTys) {
3215 MVT RetVT = MVT::Other;
3216 if (!isTypeLegal(RetTy, RetVT)) {
3217 if (RetVT == MVT::Other)
3218 return false; // Unknown type, let DAG ISel handle it.
3219
3220 // RetVT is not MVT::Other, so it must be a simple type at this point;
3221 // this relies on the logic of isTypeLegal().
3222 MVT ABIVT = TLI.getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
3223 CLI.CallConv, RetVT);
3224 MVT RegVT = TLI.getRegisterType(CLI.RetTy->getContext(), RetVT);
3225 if (ABIVT != RegVT)
3226 return false;
3227 }
3228 }
3229
3230 // Call / invoke instructions with NoCfCheck attribute require special
3231 // handling.
3232 if (CB && CB->doesNoCfCheck())
3233 return false;
3234
3235 // Functions with no_caller_saved_registers that need special handling.
3236 if ((CB && isa<CallInst>(CB) && CB->hasFnAttr("no_caller_saved_registers")))
3237 return false;
3238
3239 // Functions with no_callee_saved_registers that need special handling.
3240 if ((CB && CB->hasFnAttr("no_callee_saved_registers")))
3241 return false;
3242
3243 // Indirect calls with CFI checks need special handling.
3244 if (CB && CB->isIndirectCall() && CB->getOperandBundle(LLVMContext::OB_kcfi))
3245 return false;
3246
3247 // Functions using thunks for indirect calls need to use SDISel.
3248 if (Subtarget->useIndirectThunkCalls())
3249 return false;
3250
3251 // Handle only C and fastcc calling conventions for now.
3252 switch (CC) {
3253 default: return false;
3254 case CallingConv::C:
3255 case CallingConv::Fast:
3256 case CallingConv::Tail:
3257 case CallingConv::Swift:
3258 case CallingConv::SwiftTail:
3259 case CallingConv::X86_FastCall:
3260 case CallingConv::X86_StdCall:
3261 case CallingConv::X86_ThisCall:
3262 case CallingConv::Win64:
3263 case CallingConv::X86_64_SysV:
3264 case CallingConv::CFGuard_Check:
3265 break;
3266 }
3267
3268 // Allow SelectionDAG isel to handle tail calls.
3269 if (IsTailCall)
3270 return false;
3271
3272 // fastcc with -tailcallopt is intended to provide a guaranteed
3273 // tail call optimization. Fastisel doesn't know how to do that.
3274 if ((CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) ||
3275 CC == CallingConv::Tail || CC == CallingConv::SwiftTail)
3276 return false;
3277
3278 // Don't know how to handle Win64 varargs yet. Nothing special needed for
3279 // x86-32. Special handling for x86-64 is implemented.
3280 if (IsVarArg && IsWin64)
3281 return false;
3282
3283 // Don't know about inalloca yet.
3284 if (CLI.CB && CLI.CB->hasInAllocaArgument())
3285 return false;
3286
3287 for (auto Flag : CLI.OutFlags)
3288 if (Flag.isSwiftError() || Flag.isPreallocated())
3289 return false;
3290
3291 // Can't handle import call optimization.
3292 if (Is64Bit &&
3293 MF->getFunction().getParent()->getModuleFlag("import-call-optimization"))
3294 return false;
3295
3296 SmallVector<MVT, 16> OutVTs;
3297 SmallVector<Type *, 16> ArgTys;
3298 SmallVector<Register, 16> ArgRegs;
3299
3300 // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
3301 // instruction. This is safe because it is common to all FastISel supported
3302 // calling conventions on x86.
3303 for (int i = 0, e = OutVals.size(); i != e; ++i) {
3304 Value *&Val = OutVals[i];
3305 ISD::ArgFlagsTy Flags = OutFlags[i];
3306 if (auto *CI = dyn_cast<ConstantInt>(Val)) {
3307 if (CI->getBitWidth() < 32) {
3308 if (Flags.isSExt())
3309 Val = ConstantInt::get(CI->getContext(), CI->getValue().sext(32));
3310 else
3311 Val = ConstantInt::get(CI->getContext(), CI->getValue().zext(32));
3312 }
3313 }
3314
3315 // Passing bools around ends up doing a trunc to i1 and passing it.
3316 // Codegen this as an argument + "and 1".
3317 MVT VT;
3318 auto *TI = dyn_cast<TruncInst>(Val);
3319 Register ResultReg;
3320 if (TI && TI->getType()->isIntegerTy(1) && CLI.CB &&
3321 (TI->getParent() == CLI.CB->getParent()) && TI->hasOneUse()) {
3322 Value *PrevVal = TI->getOperand(0);
3323 ResultReg = getRegForValue(PrevVal);
3324
3325 if (!ResultReg)
3326 return false;
3327
3328 if (!isTypeLegal(PrevVal->getType(), VT))
3329 return false;
3330
3331 ResultReg = fastEmit_ri(VT, VT, ISD::AND, ResultReg, 1);
3332 } else {
3333 if (!isTypeLegal(Val->getType(), VT) ||
3334 (VT.isVector() && VT.getVectorElementType() == MVT::i1))
3335 return false;
3336 ResultReg = getRegForValue(Val);
3337 }
3338
3339 if (!ResultReg)
3340 return false;
3341
3342 ArgRegs.push_back(ResultReg);
3343 OutVTs.push_back(VT);
3344 ArgTys.push_back(Val->getType());
3345 }
3346
3347 // Analyze operands of the call, assigning locations to each operand.
3348 SmallVector<CCValAssign, 16> ArgLocs;
3349 CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());
3350
3351 // Allocate shadow area for Win64
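 // (The Win64 ABI requires the caller to reserve 32 bytes of "home" space for
 // the four register parameters, regardless of how many are actually used.)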
3352 if (IsWin64)
3353 CCInfo.AllocateStack(32, Align(8));
3354
3355 CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, ArgTys, CC_X86);
3356
3357 // Get a count of how many bytes are to be pushed on the stack.
3358 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3359
3360 // Issue CALLSEQ_START
3361 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
3362 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown))
3363 .addImm(NumBytes).addImm(0).addImm(0);
3364
3365 // Walk the register/memloc assignments, inserting copies/loads.
3366 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3367 for (const CCValAssign &VA : ArgLocs) {
3368 const Value *ArgVal = OutVals[VA.getValNo()];
3369 MVT ArgVT = OutVTs[VA.getValNo()];
3370
3371 if (ArgVT == MVT::x86mmx)
3372 return false;
3373
3374 Register ArgReg = ArgRegs[VA.getValNo()];
3375
3376 // Promote the value if needed.
3377 switch (VA.getLocInfo()) {
3378 case CCValAssign::Full: break;
3379 case CCValAssign::SExt: {
3380 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
3381 "Unexpected extend");
3382
3383 if (ArgVT == MVT::i1)
3384 return false;
3385
3386 bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
3387 ArgVT, ArgReg);
3388 assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
3389 ArgVT = VA.getLocVT();
3390 break;
3391 }
3392 case CCValAssign::ZExt: {
3393 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
3394 "Unexpected extend");
3395
3396 // Handle zero-extension from i1 to i8, which is common.
3397 if (ArgVT == MVT::i1) {
3398 // Set the high bits to zero.
3399 ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg);
3400 ArgVT = MVT::i8;
3401
3402 if (!ArgReg)
3403 return false;
3404 }
3405
3406 bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
3407 ArgVT, ArgReg);
3408 assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
3409 ArgVT = VA.getLocVT();
3410 break;
3411 }
3412 case CCValAssign::AExt: {
3413 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
3414 "Unexpected extend");
3415 bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg,
3416 ArgVT, ArgReg);
3417 if (!Emitted)
3418 Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
3419 ArgVT, ArgReg);
3420 if (!Emitted)
3421 Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
3422 ArgVT, ArgReg);
3423
3424 assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
3425 ArgVT = VA.getLocVT();
3426 break;
3427 }
3428 case CCValAssign::BCvt: {
3429 ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg);
3430 assert(ArgReg && "Failed to emit a bitcast!");
3431 ArgVT = VA.getLocVT();
3432 break;
3433 }
3434 case CCValAssign::VExt:
3435 // VExt has not been implemented, so this should be impossible to reach
3436 // for now. However, fall back to SelectionDAG isel once it is implemented.
3437 return false;
3438 case CCValAssign::AExtUpper:
3439 case CCValAssign::SExtUpper:
3440 case CCValAssign::ZExtUpper:
3441 case CCValAssign::FPExt:
3442 case CCValAssign::Trunc:
3443 llvm_unreachable("Unexpected loc info!");
3444 case CCValAssign::Indirect:
3445 // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
3446 // support this.
3447 return false;
3448 }
3449
3450 if (VA.isRegLoc()) {
3451 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3452 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
3453 OutRegs.push_back(VA.getLocReg());
3454 } else {
3455 assert(VA.isMemLoc() && "Unknown value location!");
3456
3457 // Don't emit stores for undef values.
3458 if (isa<UndefValue>(ArgVal))
3459 continue;
3460
3461 unsigned LocMemOffset = VA.getLocMemOffset();
3462 X86AddressMode AM;
3463 AM.Base.Reg = RegInfo->getStackRegister();
3464 AM.Disp = LocMemOffset;
3465 ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];
3466 Align Alignment = DL.getABITypeAlign(ArgVal->getType());
3467 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
3468 MachinePointerInfo::getStack(*FuncInfo.MF, LocMemOffset),
3469 MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
3470 if (Flags.isByVal()) {
3471 X86AddressMode SrcAM;
3472 SrcAM.Base.Reg = ArgReg;
3473 if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))
3474 return false;
3475 } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
3476 // If this is a really simple value, emit this with the Value* version
3477 // of X86FastEmitStore. If it isn't simple, we don't want to do this,
3478 // as it can cause us to reevaluate the argument.
3479 if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
3480 return false;
3481 } else {
3482 if (!X86FastEmitStore(ArgVT, ArgReg, AM, MMO))
3483 return false;
3484 }
3485 }
3486 }
3487
3488 // ELF / PIC requires the GOT pointer to be in the EBX register before
3489 // function calls made via the PLT.
3490 if (Subtarget->isPICStyleGOT()) {
3491 Register Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3492 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3493 TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
3494 }
3495
3496 if (Is64Bit && IsVarArg && !IsWin64) {
3497 // From AMD64 ABI document:
3498 // For calls that may call functions that use varargs or stdargs
3499 // (prototype-less calls or calls to functions containing ellipsis (...) in
3500 // the declaration) %al is used as a hidden argument to specify the number
3501 // of SSE registers used. The contents of %al do not need to match exactly
3502 // the number of registers, but must be an upper bound on the number of SSE
3503 // registers used and is in the range 0 - 8 inclusive.
3504
3505 // Count the number of XMM registers allocated.
3506 static const MCPhysReg XMMArgRegs[] = {
3507 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3508 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3509 };
3510 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
3511 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3512 && "SSE registers cannot be used when SSE is disabled");
3513 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri),
3514 X86::AL).addImm(NumXMMRegs);
3515 }
3516
3517 // Materialize callee address in a register. FIXME: GV address can be
3518 // handled with a CALLpcrel32 instead.
3519 X86AddressMode CalleeAM;
3520 if (!X86SelectCallAddress(Callee, CalleeAM))
3521 return false;
3522
3523 Register CalleeOp;
3524 const GlobalValue *GV = nullptr;
3525 if (CalleeAM.GV != nullptr) {
3526 GV = CalleeAM.GV;
3527 } else if (CalleeAM.Base.Reg) {
3528 CalleeOp = CalleeAM.Base.Reg;
3529 } else
3530 return false;
3531
3532 // Issue the call.
3533 MachineInstrBuilder MIB;
3534 if (CalleeOp) {
3535 // Register-indirect call.
3536 unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
3537 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc))
3538 .addReg(CalleeOp);
3539 } else {
3540 // Direct call.
3541 assert(GV && "Not a direct call");
3542 // See if we need any target-specific flags on the GV operand.
3543 unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV);
3544 if (OpFlags == X86II::MO_PLT && !Is64Bit &&
3545 TM.getRelocationModel() == Reloc::Static && isa<Function>(GV) &&
3546 cast<Function>(GV)->isIntrinsic())
3547 OpFlags = X86II::MO_NO_FLAG;
3548
3549 // This will be a direct call, or an indirect call through memory for
3550 // NonLazyBind calls or dllimport calls.
3551 bool NeedLoad = OpFlags == X86II::MO_DLLIMPORT ||
3552 OpFlags == X86II::MO_GOTPCREL ||
3553 OpFlags == X86II::MO_GOTPCREL_NORELAX ||
3554 OpFlags == X86II::MO_COFFSTUB;
3555 unsigned CallOpc = NeedLoad
3556 ? (Is64Bit ? X86::CALL64m : X86::CALL32m)
3557 : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
3558
3559 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc));
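 // For the memory form (CALL*m) the operands below spell out the usual x86
 // memory reference: base (RIP in 64-bit mode), scale, index, displacement
 // (the symbol or global address), and finally the segment register.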
3560 if (NeedLoad)
3561 MIB.addReg(Is64Bit ? X86::RIP : X86::NoRegister).addImm(1).addReg(0);
3562 if (Symbol)
3563 MIB.addSym(Symbol, OpFlags);
3564 else
3565 MIB.addGlobalAddress(GV, 0, OpFlags);
3566 if (NeedLoad)
3567 MIB.addReg(0);
3568 }
3569
3570 // Add a register mask operand representing the call-preserved registers.
3571 // Proper defs for return values will be added by setPhysRegsDeadExcept().
3572 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
3573
3574 // Add an implicit use of the GOT pointer in EBX.
3575 if (Subtarget->isPICStyleGOT())
3576 MIB.addReg(X86::EBX, RegState::Implicit);
3577
3578 if (Is64Bit && IsVarArg && !IsWin64)
3579 MIB.addReg(X86::AL, RegState::Implicit);
3580
3581 // Add implicit physical register uses to the call.
3582 for (auto Reg : OutRegs)
3583 MIB.addReg(Reg, RegState::Implicit);
3584
3585 // Issue CALLSEQ_END
3586 unsigned NumBytesForCalleeToPop =
3587 X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
3588 TM.Options.GuaranteedTailCallOpt)
3589 ? NumBytes // Callee pops everything.
3590 : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CB);
3591 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
3592 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
3593 .addImm(NumBytes).addImm(NumBytesForCalleeToPop);
3594
3595 // Now handle call return values.
3596 SmallVector<CCValAssign, 16> RVLocs;
3597 CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
3598 CLI.RetTy->getContext());
3599 CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
3600
3601 // Copy all of the result registers out of their specified physreg.
3602 Register ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
3603 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3604 CCValAssign &VA = RVLocs[i];
3605 EVT CopyVT = VA.getValVT();
3606 Register CopyReg = ResultReg + i;
3607 Register SrcReg = VA.getLocReg();
3608
3609 // If this is x86-64, and we disabled SSE, we can't return FP values
3610 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
3611 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
3612 report_fatal_error("SSE register return with SSE disabled");
3613 }
3614
3615 // If we prefer to use the value in xmm registers, copy it out as f80 and
3616 // use a truncate to move it from fp stack reg to xmm reg.
3617 if ((SrcReg == X86::FP0 || SrcReg == X86::FP1) &&
3618 isScalarFPTypeInSSEReg(VA.getValVT())) {
3619 CopyVT = MVT::f80;
3620 CopyReg = createResultReg(&X86::RFP80RegClass);
3621 }
3622
3623 // Copy out the result.
3624 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3625 TII.get(TargetOpcode::COPY), CopyReg).addReg(SrcReg);
3626 InRegs.push_back(VA.getLocReg());
3627
3628 // Round the f80 to the right size, which also moves it to the appropriate
3629 // xmm register. This is accomplished by storing the f80 value in memory
3630 // and then loading it back.
3631 if (CopyVT != VA.getValVT()) {
3632 EVT ResVT = VA.getValVT();
3633 unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
3634 unsigned MemSize = ResVT.getSizeInBits()/8;
3635 int FI = MFI.CreateStackObject(MemSize, Align(MemSize), false);
3636 addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3637 TII.get(Opc)), FI)
3638 .addReg(CopyReg);
3639 Opc = ResVT == MVT::f32 ? X86::MOVSSrm_alt : X86::MOVSDrm_alt;
3640 addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3641 TII.get(Opc), ResultReg + i), FI);
3642 }
3643 }
3644
3645 CLI.ResultReg = ResultReg;
3646 CLI.NumResultRegs = RVLocs.size();
3647 CLI.Call = MIB;
3648
3649 // Add call site info for call graph section.
3650 if (TM.Options.EmitCallGraphSection && CB && CB->isIndirectCall()) {
3651 MachineFunction::CallSiteInfo CSInfo(*CB);
3652 MF->addCallSiteInfo(CLI.Call, std::move(CSInfo));
3653 }
3654
3655 return true;
3656}
3657
3658bool
3659X86FastISel::fastSelectInstruction(const Instruction *I) {
3660 switch (I->getOpcode()) {
3661 default: break;
3662 case Instruction::Load:
3663 return X86SelectLoad(I);
3664 case Instruction::Store:
3665 return X86SelectStore(I);
3666 case Instruction::Ret:
3667 return X86SelectRet(I);
3668 case Instruction::ICmp:
3669 case Instruction::FCmp:
3670 return X86SelectCmp(I);
3671 case Instruction::ZExt:
3672 return X86SelectZExt(I);
3673 case Instruction::SExt:
3674 return X86SelectSExt(I);
3675 case Instruction::Br:
3676 return X86SelectBranch(I);
3677 case Instruction::LShr:
3678 case Instruction::AShr:
3679 case Instruction::Shl:
3680 return X86SelectShift(I);
3681 case Instruction::SDiv:
3682 case Instruction::UDiv:
3683 case Instruction::SRem:
3684 case Instruction::URem:
3685 return X86SelectDivRem(I);
3686 case Instruction::Select:
3687 return X86SelectSelect(I);
3688 case Instruction::Trunc:
3689 return X86SelectTrunc(I);
3690 case Instruction::FPExt:
3691 return X86SelectFPExt(I);
3692 case Instruction::FPTrunc:
3693 return X86SelectFPTrunc(I);
3694 case Instruction::SIToFP:
3695 return X86SelectSIToFP(I);
3696 case Instruction::UIToFP:
3697 return X86SelectUIToFP(I);
3698 case Instruction::IntToPtr: // Deliberate fall-through.
3699 case Instruction::PtrToInt: {
3700 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
3701 EVT DstVT = TLI.getValueType(DL, I->getType());
3702 if (DstVT.bitsGT(SrcVT))
3703 return X86SelectZExt(I);
3704 if (DstVT.bitsLT(SrcVT))
3705 return X86SelectTrunc(I);
3706 Register Reg = getRegForValue(I->getOperand(0));
3707 if (!Reg)
3708 return false;
3709 updateValueMap(I, Reg);
3710 return true;
3711 }
3712 case Instruction::BitCast:
3713 return X86SelectBitCast(I);
3714 }
3715
3716 return false;
3717}
3718
3719Register X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
3720 if (VT > MVT::i64)
3721 return Register();
3722
3723 uint64_t Imm = CI->getZExtValue();
3724 if (Imm == 0) {
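 // Materialize zero with MOV32r0 (an xor of a 32-bit register); narrower
 // types take a subregister of it, and i64 wraps it in SUBREG_TO_REG since a
 // 32-bit write already zeroes the upper 32 bits on x86-64.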
3725 Register SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
3726 switch (VT.SimpleTy) {
3727 default: llvm_unreachable("Unexpected value type");
3728 case MVT::i1:
3729 case MVT::i8:
3730 return fastEmitInst_extractsubreg(MVT::i8, SrcReg, X86::sub_8bit);
3731 case MVT::i16:
3732 return fastEmitInst_extractsubreg(MVT::i16, SrcReg, X86::sub_16bit);
3733 case MVT::i32:
3734 return SrcReg;
3735 case MVT::i64: {
3736 Register ResultReg = createResultReg(&X86::GR64RegClass);
3737 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3738 TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
3739 .addReg(SrcReg)
3740 .addImm(X86::sub_32bit);
3741 return ResultReg;
3742 }
3743 }
3744 }
3745
3746 unsigned Opc = 0;
3747 switch (VT.SimpleTy) {
3748 default: llvm_unreachable("Unexpected value type");
3749 case MVT::i1:
3750 VT = MVT::i8;
3751 [[fallthrough]];
3752 case MVT::i8: Opc = X86::MOV8ri; break;
3753 case MVT::i16: Opc = X86::MOV16ri; break;
3754 case MVT::i32: Opc = X86::MOV32ri; break;
3755 case MVT::i64: {
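 // Prefer the shortest encoding: MOV32ri64 for immediates that fit in an
 // unsigned 32-bit value (implicitly zero-extended), MOV64ri32 for ones that
 // fit when sign-extended, and the full 64-bit MOV64ri otherwise.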
3756 if (isUInt<32>(Imm))
3757 Opc = X86::MOV32ri64;
3758 else if (isInt<32>(Imm))
3759 Opc = X86::MOV64ri32;
3760 else
3761 Opc = X86::MOV64ri;
3762 break;
3763 }
3764 }
3765 return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
3766}
3767
3768Register X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
3769 if (CFP->isNullValue())
3770 return fastMaterializeFloatZero(CFP);
3771
3772 // Can't handle alternate code models yet.
3773 CodeModel::Model CM = TM.getCodeModel();
3774 if (CM != CodeModel::Small && CM != CodeModel::Medium &&
3775 CM != CodeModel::Large)
3776 return Register();
3777
3778 // Get opcode and regclass of the output for the given load instruction.
3779 unsigned Opc = 0;
3780 bool HasSSE1 = Subtarget->hasSSE1();
3781 bool HasSSE2 = Subtarget->hasSSE2();
3782 bool HasAVX = Subtarget->hasAVX();
3783 bool HasAVX512 = Subtarget->hasAVX512();
3784 switch (VT.SimpleTy) {
3785 default:
3786 return Register();
3787 case MVT::f32:
3788 Opc = HasAVX512 ? X86::VMOVSSZrm_alt
3789 : HasAVX ? X86::VMOVSSrm_alt
3790 : HasSSE1 ? X86::MOVSSrm_alt
3791 : X86::LD_Fp32m;
3792 break;
3793 case MVT::f64:
3794 Opc = HasAVX512 ? X86::VMOVSDZrm_alt
3795 : HasAVX ? X86::VMOVSDrm_alt
3796 : HasSSE2 ? X86::MOVSDrm_alt
3797 : X86::LD_Fp64m;
3798 break;
3799 case MVT::f80:
3800 // No f80 support yet.
3801 return Register();
3802 }
3803
3804 // MachineConstantPool wants an explicit alignment.
3805 Align Alignment = DL.getPrefTypeAlign(CFP->getType());
3806
3807 // x86-32 PIC requires a PIC base register for constant pools.
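 // On x86-64 (small/medium code model) the pool entry is instead addressed
 // RIP-relative, as selected below.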
3808 Register PICBase;
3809 unsigned char OpFlag = Subtarget->classifyLocalReference(nullptr);
3810 if (OpFlag == X86II::MO_PIC_BASE_OFFSET)
3811 PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3812 else if (OpFlag == X86II::MO_GOTOFF)
3813 PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3814 else if (Subtarget->is64Bit() && TM.getCodeModel() != CodeModel::Large)
3815 PICBase = X86::RIP;
3816
3817 // Create the load from the constant pool.
3818 unsigned CPI = MCP.getConstantPoolIndex(CFP, Alignment);
3819 Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
3820
3821 // Large code model only applies to 64-bit mode.
3822 if (Subtarget->is64Bit() && CM == CodeModel::Large) {
3823 Register AddrReg = createResultReg(&X86::GR64RegClass);
3824 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri),
3825 AddrReg)
3826 .addConstantPoolIndex(CPI, 0, OpFlag);
3827 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3828 TII.get(Opc), ResultReg);
3829 addRegReg(MIB, AddrReg, false, X86::NoSubRegister, PICBase, false,
3830 X86::NoSubRegister);
3831 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
3832 MachinePointerInfo::getConstantPool(*FuncInfo.MF),
3833 MachineMemOperand::MOLoad, DL.getPointerSize(), Alignment);
3834 MIB->addMemOperand(*FuncInfo.MF, MMO);
3835 return ResultReg;
3836 }
3837
3838 addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3839 TII.get(Opc), ResultReg),
3840 CPI, PICBase, OpFlag);
3841 return ResultReg;
3842}
3843
3844Register X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
3845 // Can't handle large GlobalValues yet.
3846 if (TM.getCodeModel() != CodeModel::Small &&
3847 TM.getCodeModel() != CodeModel::Medium)
3848 return Register();
3849 if (TM.isLargeGlobalValue(GV))
3850 return Register();
3851
3852 // Materialize addresses with LEA/MOV instructions.
3853 X86AddressMode AM;
3854 if (X86SelectAddress(GV, AM)) {
3855 // If the expression is just a basereg, then we're done, otherwise we need
3856 // to emit an LEA.
3857 if (AM.BaseType == X86AddressMode::RegBase &&
3858 AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
3859 return AM.Base.Reg;
3860
3861 Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3862 if (TM.getRelocationModel() == Reloc::Static &&
3863 TLI.getPointerTy(DL) == MVT::i64) {
3864 // The displacement could be more than 32 bits away, so we need to use
3865 // an instruction with a 64-bit immediate.
3866 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri),
3867 ResultReg)
3868 .addGlobalAddress(GV);
3869 } else {
3870 unsigned Opc =
3871 TLI.getPointerTy(DL) == MVT::i32
3872 ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
3873 : X86::LEA64r;
3874 addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3875 TII.get(Opc), ResultReg), AM);
3876 }
3877 return ResultReg;
3878 }
3879 return Register();
3880}
3881
3882Register X86FastISel::fastMaterializeConstant(const Constant *C) {
3883 EVT CEVT = TLI.getValueType(DL, C->getType(), true);
3884
3885 // Only handle simple types.
3886 if (!CEVT.isSimple())
3887 return Register();
3888 MVT VT = CEVT.getSimpleVT();
3889
3890 if (const auto *CI = dyn_cast<ConstantInt>(C))
3891 return X86MaterializeInt(CI, VT);
3892 if (const auto *CFP = dyn_cast<ConstantFP>(C))
3893 return X86MaterializeFP(CFP, VT);
3894 if (const auto *GV = dyn_cast<GlobalValue>(C))
3895 return X86MaterializeGV(GV, VT);
3896 if (isa<UndefValue>(C)) {
3897 unsigned Opc = 0;
3898 switch (VT.SimpleTy) {
3899 default:
3900 break;
3901 case MVT::f32:
3902 if (!Subtarget->hasSSE1())
3903 Opc = X86::LD_Fp032;
3904 break;
3905 case MVT::f64:
3906 if (!Subtarget->hasSSE2())
3907 Opc = X86::LD_Fp064;
3908 break;
3909 case MVT::f80:
3910 Opc = X86::LD_Fp080;
3911 break;
3912 }
3913
3914 if (Opc) {
3915 Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3916 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
3917 ResultReg);
3918 return ResultReg;
3919 }
3920 }
3921
3922 return Register();
3923}
3924
3925Register X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
3926 // Fail on dynamic allocas. At this point, getRegForValue has already
3927 // checked its CSE maps, so if we're here trying to handle a dynamic
3928 // alloca, we're not going to succeed. X86SelectAddress has a
3929 // check for dynamic allocas, because it's called directly from
3930 // various places, but targetMaterializeAlloca also needs a check
3931 // in order to avoid recursion between getRegForValue,
3932 // X86SelectAddress, and targetMaterializeAlloca.
3933 if (!FuncInfo.StaticAllocaMap.count(C))
3934 return Register();
3935 assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");
3936
3937 X86AddressMode AM;
3938 if (!X86SelectAddress(C, AM))
3939 return Register();
3940 unsigned Opc =
3941 TLI.getPointerTy(DL) == MVT::i32
3942 ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
3943 : X86::LEA64r;
3944 const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
3945 Register ResultReg = createResultReg(RC);
3946 addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3947 TII.get(Opc), ResultReg), AM);
3948 return ResultReg;
3949}
3950
3951Register X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
3952 MVT VT;
3953 if (!isTypeLegal(CF->getType(), VT))
3954 return Register();
3955
3956 // Get opcode and regclass for the given zero.
3957 bool HasSSE1 = Subtarget->hasSSE1();
3958 bool HasSSE2 = Subtarget->hasSSE2();
3959 bool HasAVX512 = Subtarget->hasAVX512();
3960 unsigned Opc = 0;
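 // Prefer the SSE/AVX-512 "load FP zero" pseudos when available; without the
 // corresponding SSE level, fall back to the x87 fldz-style LD_Fp0* pseudos.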
3961 switch (VT.SimpleTy) {
3962 default: return Register();
3963 case MVT::f16:
3964 Opc = HasAVX512 ? X86::AVX512_FsFLD0SH : X86::FsFLD0SH;
3965 break;
3966 case MVT::f32:
3967 Opc = HasAVX512 ? X86::AVX512_FsFLD0SS
3968 : HasSSE1 ? X86::FsFLD0SS
3969 : X86::LD_Fp032;
3970 break;
3971 case MVT::f64:
3972 Opc = HasAVX512 ? X86::AVX512_FsFLD0SD
3973 : HasSSE2 ? X86::FsFLD0SD
3974 : X86::LD_Fp064;
3975 break;
3976 case MVT::f80:
3977 // No f80 support yet.
3978 return Register();
3979 }
3980
3981 Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3982 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg);
3983 return ResultReg;
3984}
3985
3986bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
3987 const LoadInst *LI) {
3988 const Value *Ptr = LI->getPointerOperand();
3989 X86AddressMode AM;
3990 if (!X86SelectAddress(Ptr, AM))
3991 return false;
3992
3993 const X86InstrInfo &XII = (const X86InstrInfo &)TII;
3994
3995 unsigned Size = DL.getTypeAllocSize(LI->getType());
3996
3998 AM.getFullAddress(AddrOps);
3999
4000 MachineInstr *CopyMI = nullptr;
4001 MachineInstr *Result = XII.foldMemoryOperandImpl(
4002 *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, LI->getAlign(),
4003 /*AllowCommute=*/true, CopyMI);
4004 if (!Result)
4005 return false;
4006
4007 // The index register could be in the wrong register class. Unfortunately,
4008 // foldMemoryOperandImpl could have commuted the instruction, so it's not
4009 // enough to just look at OpNo + the offset to the index reg. We actually
4010 // need to scan the instruction to find the index reg and see if it's in
4011 // the correct reg class.
4012 unsigned OperandNo = 0;
4013 for (MachineInstr::mop_iterator I = Result->operands_begin(),
4014 E = Result->operands_end(); I != E; ++I, ++OperandNo) {
4015 MachineOperand &MO = *I;
4016 if (!MO.isReg() || MO.isDef() || MO.getReg() != AM.IndexReg)
4017 continue;
4018 // Found the index reg, now try to rewrite it.
4019 Register IndexReg = constrainOperandRegClass(Result->getDesc(),
4020 MO.getReg(), OperandNo);
4021 if (IndexReg == MO.getReg())
4022 continue;
4023 MO.setReg(IndexReg);
4024 }
4025
4026 if (MI->isCall())
4027 FuncInfo.MF->moveAdditionalCallInfo(MI, Result);
4028 Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
4029 Result->cloneInstrSymbols(*FuncInfo.MF, *MI);
4030 MachineBasicBlock::iterator I(MI);
4031 removeDeadCode(I, std::next(I));
4032 return true;
4033}
4034
4035Register X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
4036 const TargetRegisterClass *RC,
4037 Register Op0, Register Op1,
4038 Register Op2, Register Op3) {
4039 const MCInstrDesc &II = TII.get(MachineInstOpcode);
4040
4041 Register ResultReg = createResultReg(RC);
4042 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
4043 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
4044 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
4045 Op3 = constrainOperandRegClass(II, Op3, II.getNumDefs() + 3);
4046
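 // If the instruction has an explicit def, emit the result straight into
 // ResultReg; otherwise the result lands in the instruction's first implicit
 // def, and we copy it out afterwards.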
4047 if (II.getNumDefs() >= 1)
4048 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
4049 .addReg(Op0)
4050 .addReg(Op1)
4051 .addReg(Op2)
4052 .addReg(Op3);
4053 else {
4054 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
4055 .addReg(Op0)
4056 .addReg(Op1)
4057 .addReg(Op2)
4058 .addReg(Op3);
4059 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
4060 ResultReg)
4061 .addReg(II.implicit_defs()[0]);
4062 }
4063 return ResultReg;
4064}
4065
4066 namespace llvm {
4067 FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
4068 const TargetLibraryInfo *libInfo,
4069 const LibcallLoweringInfo *libcallLowering) {
4070 return new X86FastISel(funcInfo, libInfo, libcallLowering);
4071}
4072}