LLVM 23.0.0git
X86InstructionSelector.cpp
Go to the documentation of this file.
1//===- X86InstructionSelector.cpp -----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// X86.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
15#include "X86.h"
16#include "X86InstrBuilder.h"
17#include "X86InstrInfo.h"
18#include "X86RegisterBankInfo.h"
19#include "X86RegisterInfo.h"
20#include "X86Subtarget.h"
21#include "X86TargetMachine.h"
38#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/InstrTypes.h"
40#include "llvm/IR/IntrinsicsX86.h"
42#include "llvm/Support/Debug.h"
46#include <cassert>
47#include <cstdint>
48#include <tuple>
49
50#define DEBUG_TYPE "X86-isel"
51
52using namespace llvm;
53
54namespace {
55
56#define GET_GLOBALISEL_PREDICATE_BITSET
57#include "X86GenGlobalISel.inc"
58#undef GET_GLOBALISEL_PREDICATE_BITSET
59
/// GlobalISel instruction selector for X86. Dispatches first to the
/// tblgen-erated selectImpl(), then to the hand-written select*() helpers
/// below for operations TableGen cannot yet handle.
class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove after supported by Tablegen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;
  // TODO: remove once p0<->i32/i64 matching is available
  unsigned getPtrLoadStoreOp(const LLT &Ty, const RegisterBank &RB,
                             unsigned Opc) const;

  // NOTE(review): the leading line of this declaration (the selector name and
  // first parameters — presumably selectLoadStoreOp) is missing from this
  // copy of the file; confirm against upstream.
                          MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUAddSub(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  // NOTE(review): the name lines of the next two declarations (presumably
  // selectUnmergeValues and selectMergeValues) are missing from this copy;
  // confirm against upstream.
                  MachineFunction &MF);
                  MachineFunction &MF);
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  // Rewrite I into a plain COPY after constraining both registers to the
  // given classes.
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const Register DstReg,
                          const TargetRegisterClass *DstRC,
                          const Register SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMulDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
                       MachineFunction &MF) const;
  bool selectSelect(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;

  // Complex operand renderer for addressing modes (used by tblgen patterns).
  ComplexRendererFns selectAddr(MachineOperand &Root) const;

  // emit insert subreg instruction and insert it before MachineInstr &I
  bool emitInsertSubreg(Register DstReg, Register SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // emit extract subreg instruction and insert it before MachineInstr &I
  bool emitExtractSubreg(Register DstReg, Register SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  // Map an LLT + register bank (or a register's assigned bank) to a concrete
  // target register class.
  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  // NOTE(review): the first line of this overload (presumably
  // `const TargetRegisterClass *getRegClass(LLT Ty, Register Reg,`) is
  // missing from this copy; confirm against upstream.
                        MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
152
153} // end anonymous namespace
154
155#define GET_GLOBALISEL_IMPL
156#include "X86GenGlobalISel.inc"
157#undef GET_GLOBALISEL_IMPL
158
X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),
// NOTE(review): the GET_GLOBALISEL_PREDICATES_INIT /
// GET_GLOBALISEL_TEMPORARIES_INIT #define/#undef lines that normally bracket
// these two includes are missing from this copy; confirm against upstream.
#include "X86GenGlobalISel.inc"
#include "X86GenGlobalISel.inc"
{
}
172
173// FIXME: This should be target-independent, inferred from the types declared
174// for each class in the bank.
176X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
177 if (RB.getID() == X86::GPRRegBankID) {
178 if (Ty.getSizeInBits() <= 8)
179 return &X86::GR8RegClass;
180 if (Ty.getSizeInBits() == 16)
181 return &X86::GR16RegClass;
182 if (Ty.getSizeInBits() == 32)
183 return &X86::GR32RegClass;
184 if (Ty.getSizeInBits() == 64)
185 return &X86::GR64RegClass;
186 }
187 if (RB.getID() == X86::VECRRegBankID) {
188 if (Ty.getSizeInBits() == 16)
189 return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
190 if (Ty.getSizeInBits() == 32)
191 return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
192 if (Ty.getSizeInBits() == 64)
193 return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
194 if (Ty.getSizeInBits() == 128)
195 return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
196 if (Ty.getSizeInBits() == 256)
197 return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
198 if (Ty.getSizeInBits() == 512)
199 return &X86::VR512RegClass;
200 }
201
202 if (RB.getID() == X86::PSRRegBankID) {
203 if (Ty.getSizeInBits() == 80)
204 return &X86::RFP80RegClass;
205 if (Ty.getSizeInBits() == 64)
206 return &X86::RFP64RegClass;
207 if (Ty.getSizeInBits() == 32)
208 return &X86::RFP32RegClass;
209 }
210
211 llvm_unreachable("Unknown RegBank!");
212}
213
214const TargetRegisterClass *
215X86InstructionSelector::getRegClass(LLT Ty, Register Reg,
216 MachineRegisterInfo &MRI) const {
217 const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
218 return getRegClass(Ty, RegBank);
219}
220
221static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
222 unsigned SubIdx = X86::NoSubRegister;
223 if (RC == &X86::GR32RegClass) {
224 SubIdx = X86::sub_32bit;
225 } else if (RC == &X86::GR16RegClass) {
226 SubIdx = X86::sub_16bit;
227 } else if (RC == &X86::GR8RegClass) {
228 SubIdx = X86::sub_8bit;
229 }
230
231 return SubIdx;
232}
233
// NOTE(review): the signature line of this helper is missing from this copy
// of the file (the body maps a physical GPR to its register class —
// presumably `getRegClassFromGRPhysReg`; confirm against upstream).
  assert(Reg.isPhysical());
  // Probe the GPR classes from widest to narrowest; return the first class
  // that contains this physical register.
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}
247
// FIXME: We need some sort of API in RBI/TRI to allow generic code to
// constrain operands of simple instructions given a TargetRegisterClass
// and LLT
bool X86InstructionSelector::selectDebugInstr(MachineInstr &I,
                                              MachineRegisterInfo &MRI) const {
  // Walk every register operand of the debug instruction and pin down a
  // register class for each virtual register that does not have one yet.
  for (MachineOperand &MO : I.operands()) {
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg)
      continue; // $noreg needs no constraining.
    if (Reg.isPhysical())
      continue; // Physical registers are already concrete.
    LLT Ty = MRI.getType(Reg);
    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
    const TargetRegisterClass *RC =
    // NOTE(review): the initializer line for RC is missing from this copy
    // (presumably a cast of RegClassOrBank to TargetRegisterClass*; confirm
    // against upstream).
    if (!RC) {
      // No class assigned yet: derive one from the register bank.
      const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
      RC = getRegClass(Ty, RB);
      if (!RC) {
        // NOTE(review): an `LLVM_DEBUG(` opener line appears to be missing
        // here in this copy; confirm against upstream.
        dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
        break;
      }
    }
    RBI.constrainGenericRegister(Reg, *RC, MRI);
  }

  return true;
}
279
// Set X86 Opcode and constrain DestReg.
//
// Lowers a (possibly generic) COPY: handles copies into/out of physical
// registers created by ABI lowering, including width mismatches between
// GPRs and cross-bank GPR<->XMM moves of 16-bit values, then rewrites the
// instruction into a target COPY with constrained register classes.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    // Narrow virtual GPR copied into a wider physical GPR: widen the source
    // first so the final COPY is same-width.
    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering, perform anyext
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    // Special case GPR16 -> XMM
    if (SrcSize == 16 && SrcRegBank.getID() == X86::GPRRegBankID &&
        (DstRegBank.getID() == X86::VECRRegBankID)) {

      const DebugLoc &DL = I.getDebugLoc();

      // Any extend GPR16 -> GPR32
      Register ExtReg = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::SUBREG_TO_REG),
              ExtReg)
          .addReg(SrcReg)
          .addImm(X86::sub_16bit);

      // Copy GR32 -> XMM
      BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), DstReg)
          .addReg(ExtReg);

      I.eraseFromParent();
    }

    // Special case XMM -> GR16
    if (DstSize == 16 && DstRegBank.getID() == X86::GPRRegBankID &&
        (SrcRegBank.getID() == X86::VECRRegBankID)) {

      const DebugLoc &DL = I.getDebugLoc();

      // Move XMM to GR32 register.
      Register Temp32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), Temp32)
          .addReg(SrcReg);

      // Extract the lower 16 bits
      if (Register Dst32 = TRI.getMatchingSuperReg(DstReg, X86::sub_16bit,
                                                   &X86::GR32RegClass)) {
        // Optimization for Physical Dst (e.g. AX): Copy to EAX directly.
        BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), Dst32)
            .addReg(Temp32);
      } else {
        // Handle if there is no super.
        BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), DstReg)
            .addReg(Temp32, {}, X86::sub_16bit);
      }

      I.eraseFromParent();
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a mean to setup initial types, the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  // Wide physical GPR copied into a narrower virtual GPR: rewrite the source
  // operand to read the matching subregister of the physical register.
  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Change the physical register to perform truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its use or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
402
// Top-level selection entry point: handles a few non-generic instructions
// specially, then tries the tblgen-erated selector, and finally falls back to
// the manual C++ selectors for opcodes TableGen does not yet cover.
bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode) && !I.isPreISelOpcode()) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    if (I.isDebugInstr())
      return selectDebugInstr(I, MRI);

    // Already target-specific: nothing to do.
    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  // Tblgen-erated patterns first; only fall through on no match.
  if (selectImpl(I, *CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_FREEZE:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_USUBO:
    return selectUAddSub(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectMulDivRem(I, MRI, MF);
  case TargetOpcode::G_SELECT:
    return selectSelect(I, MRI, MF);
  }

  return false;
}
498
499unsigned X86InstructionSelector::getPtrLoadStoreOp(const LLT &Ty,
500 const RegisterBank &RB,
501 unsigned Opc) const {
502 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
503 "Only G_STORE and G_LOAD are expected for selection");
504 if (Ty.isPointer() && X86::GPRRegBankID == RB.getID()) {
505 bool IsLoad = (Opc == TargetOpcode::G_LOAD);
506 switch (Ty.getSizeInBits()) {
507 default:
508 break;
509 case 32:
510 return IsLoad ? X86::MOV32rm : X86::MOV32mr;
511 case 64:
512 return IsLoad ? X86::MOV64rm : X86::MOV64mr;
513 }
514 }
515 return Opc;
516}
517
/// Map a G_LOAD/G_STORE opcode to the concrete X86 move opcode for the given
/// type, register bank and access alignment. Returns \p Opc unchanged when no
/// mapping applies.
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  // Scalars: pick the bank-appropriate move (GPR MOV, scalar SSE/AVX/AVX-512
  // move, or x87 load/store).
  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :
                       HasAVX ? X86::VMOVSSrm_alt :
                       X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr :
                       HasAVX ? X86::VMOVSSmr :
                       X86::MOVSSmr);
    if (X86::PSRRegBankID == RB.getID())
      return Isload ? X86::LD_Fp32m : X86::ST_Fp32m;
  } else if (Ty == LLT::scalar(64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :
                       HasAVX ? X86::VMOVSDrm_alt :
                       X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr :
                       HasAVX ? X86::VMOVSDmr :
                       X86::MOVSDmr);
    if (X86::PSRRegBankID == RB.getID())
      return Isload ? X86::LD_Fp64m : X86::ST_Fp64m;
  } else if (Ty == LLT::scalar(80)) {
    return Isload ? X86::LD_Fp80m : X86::ST_FpP80m;
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    // 128-bit vectors: aligned vs unaligned moves, best available ISA.
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    // 256-bit vectors (requires at least AVX).
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    // 512-bit vectors (AVX-512 only).
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}
601
// Fill in an address from the given instruction.
// NOTE(review): the first signature line of this static helper (presumably
// `static bool X86SelectAddress(MachineInstr &I, const X86TargetMachine &TM,`)
// is missing from this copy; confirm against upstream.
                             const MachineRegisterInfo &MRI,
                             const X86Subtarget &STI, X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  switch (I.getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FRAME_INDEX:
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    // NOTE(review): a statement is missing here in this copy (presumably
    // setting AM.BaseType to frame-index addressing); confirm upstream.
    return true;
  case TargetOpcode::G_PTR_ADD: {
    // Fold a constant offset into the displacement when it fits in 32 bits.
    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return true;
      }
    }
    break;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      return false; // TODO: we don't support TLS yet.
    }
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;
    AM.GV = GV;
    // NOTE(review): a statement classifying the global reference (presumably
    // setting AM.GVOpFlags) is missing here in this copy; confirm upstream.

    // TODO: The ABI requires an extra load. not supported yet.
    // NOTE(review): the guarding `if` line for this early return is missing
    // from this copy; confirm upstream.
      return false;

    // TODO: This reference is relative to the pic base. not supported yet.
    // NOTE(review): the guarding `if` line for this early return is missing
    // from this copy; confirm upstream.
      return false;

    if (STI.isPICStyleRIPRel()) {
      // Use rip-relative addressing.
      assert(AM.Base.Reg == 0 && AM.IndexReg == 0 &&
             "RIP-relative addresses can't have additional register operands");
      AM.Base.Reg = X86::RIP;
    }
    return true;
  }
  case TargetOpcode::G_CONSTANT_POOL: {
    // TODO: Need a separate move for Large model
    if (TM.getCodeModel() == CodeModel::Large)
      return false;

    AM.GVOpFlags = STI.classifyLocalReference(nullptr);
    if (AM.GVOpFlags == X86II::MO_GOTOFF)
      AM.Base.Reg = STI.getInstrInfo()->getGlobalBaseReg(I.getMF());
    else if (STI.is64Bit())
      AM.Base.Reg = X86::RIP;
    AM.CP = true;
    AM.Disp = I.getOperand(1).getIndex();
    return true;
  }
  }
  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
  return true;
}
674
// Select G_LOAD/G_STORE into a concrete X86 memory instruction: reject
// unsupported atomics, pick the opcode via getPtrLoadStoreOp, and rewrite the
// pointer operand into a full X86 address-mode operand list.
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "Only G_STORE and G_LOAD are expected for selection");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Note: for unordered operations, we rely on the fact the appropriate MMO
    // is already on the instruction we're mutating, and thus we don't need to
    // make any changes. So long as we select an opcode which is capable of
    // loading or storing the appropriate size atomically, the rest of the
    // backend is required to respect the MMO state.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
    if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
      LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getPtrLoadStoreOp(Ty, RB, Opc);
  if (NewOpc == Opc)
    return false;

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  MachineInstr *Ptr = MRI.getVRegDef(I.getOperand(1).getReg());

  X86AddressMode AM;
  if (!X86SelectAddress(*Ptr, TM, MRI, STI, AM))
    return false;

  if (Opc == TargetOpcode::G_LOAD) {
    I.removeOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.removeOperand(1);
    I.removeOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  // NOTE(review): a statement is missing here in this copy (possibly a
  // constrainSelectedInstRegOperands call); confirm against upstream.
  I.addImplicitDefUseOperands(MF);
  return true;
}
730
731static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
732 if (Ty == LLT::pointer(0, 64))
733 return X86::LEA64r;
734 else if (Ty == LLT::pointer(0, 32))
735 return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
736 else
737 llvm_unreachable("Can't get LEA opcode. Unsupported type.");
738}
739
// Lower G_FRAME_INDEX and G_PTR_ADD into an LEA address computation.
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    // Frame index is already operand 1; append disp/index/segment.
    addOffset(MIB, 0);
  } else {
    // G_PTR_ADD: reuse the offset operand as the index register with scale 1,
    // then append a zero displacement and no segment register.
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp); // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  // NOTE(review): a statement is missing here in this copy (likely a
  // constrainSelectedInstRegOperands call); confirm against upstream.
  return true;
}
768
// Lower G_GLOBAL_VALUE into an LEA of the global's address (small code model
// only; TLS and PIC-indirect references are rejected by X86SelectAddress).
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  X86AddressMode AM;
  if (!X86SelectAddress(I, TM, MRI, STI, AM))
    return false;

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  // Replace the global-value operand with the full address-mode operands.
  I.removeOperand(1);
  addFullAddress(MIB, AM);

  // NOTE(review): a statement is missing here in this copy (likely a
  // constrainSelectedInstRegOperands call); confirm against upstream.
  return true;
}
792
// Lower G_CONSTANT on the GPR bank into the appropriate MOVri form, picking
// the most compact 64-bit encoding that can represent the value.
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // Prefer the shorter encodings: zero-extending 32-bit move, then
    // sign-extended 32-bit immediate, then the full 64-bit immediate.
    if (isUInt<32>(Val))
      NewOpc = X86::MOV32ri64;
    else if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  // NOTE(review): a statement is missing here in this copy (likely a
  // constrainSelectedInstRegOperands call); confirm against upstream.
  return true;
}
841
842// Helper function for selectTruncOrPtrToInt and selectAnyext.
843// Returns true if DstRC lives on a floating register class and
844// SrcRC lives on a 128-bit vector class.
845static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
846 const TargetRegisterClass *SrcRC) {
847 return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
848 DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
849 (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
850}
851
852bool X86InstructionSelector::selectTurnIntoCOPY(
853 MachineInstr &I, MachineRegisterInfo &MRI, const Register DstReg,
854 const TargetRegisterClass *DstRC, const Register SrcReg,
855 const TargetRegisterClass *SrcRC) const {
856
857 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
858 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
859 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
860 << " operand\n");
861 return false;
862 }
863 I.setDesc(TII.get(X86::COPY));
864 return true;
865}
866
867bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
868 MachineRegisterInfo &MRI,
869 MachineFunction &MF) const {
870 assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
871 I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
872 "unexpected instruction");
873
874 const Register DstReg = I.getOperand(0).getReg();
875 const Register SrcReg = I.getOperand(1).getReg();
876
877 const LLT DstTy = MRI.getType(DstReg);
878 const LLT SrcTy = MRI.getType(SrcReg);
879
880 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
881 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
882
883 if (DstRB.getID() != SrcRB.getID()) {
884 LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
885 << " input/output on different banks\n");
886 return false;
887 }
888
889 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
890 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
891
892 if (!DstRC || !SrcRC)
893 return false;
894
895 // If that's truncation of the value that lives on the vector class and goes
896 // into the floating class, just replace it with copy, as we are able to
897 // select it as a regular move.
898 if (canTurnIntoCOPY(DstRC, SrcRC))
899 return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);
900
901 if (DstRB.getID() != X86::GPRRegBankID)
902 return false;
903
904 unsigned SubIdx;
905 if (DstRC == SrcRC) {
906 // Nothing to be done
907 SubIdx = X86::NoSubRegister;
908 } else if (DstRC == &X86::GR32RegClass) {
909 SubIdx = X86::sub_32bit;
910 } else if (DstRC == &X86::GR16RegClass) {
911 SubIdx = X86::sub_16bit;
912 } else if (DstRC == &X86::GR8RegClass) {
913 SubIdx = X86::sub_8bit;
914 } else {
915 return false;
916 }
917
918 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);
919
920 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
921 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
922 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
923 << "\n");
924 return false;
925 }
926
927 I.getOperand(1).setSubReg(SubIdx);
928
929 I.setDesc(TII.get(X86::COPY));
930 return true;
931}
932
// Lower G_ZEXT from s1: wider zexts are handled by tablegen. An s1 source is
// widened (via IMPLICIT_DEF + INSERT_SUBREG when needed) and masked with
// AND reg, 1 to materialize the zero-extended value.
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  // Pick the AND opcode matching the destination width.
  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri32;
  else
    return false;

  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    // Widen the s1 (held in an 8-bit register) to the destination width by
    // inserting it into an undefined register of the wider class.
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  // Mask to the low bit to realize the zero extension.
  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);
  // NOTE(review): a statement is missing here in this copy (likely
  // `constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);`); confirm
  // against upstream.

  I.eraseFromParent();
  return true;
}
997
// Select G_ANYEXT: widen a scalar where the upper bits are "don't care".
// Resolved as either a plain COPY (same class, or an FP->vector bank move)
// or a SUBREG_TO_REG placing the source in the low subreg of the wide class.
998bool X86InstructionSelector::selectAnyext(MachineInstr &I,
 999 MachineRegisterInfo &MRI,
 1000 MachineFunction &MF) const {
 1001 assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");
 1002
 1003 const Register DstReg = I.getOperand(0).getReg();
 1004 const Register SrcReg = I.getOperand(1).getReg();
 1005
 1006 const LLT DstTy = MRI.getType(DstReg);
 1007 const LLT SrcTy = MRI.getType(SrcReg);
 1008
 1009 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
 1010 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
 1011
 1012 assert(DstRB.getID() == SrcRB.getID() &&
 1013 "G_ANYEXT input/output on different banks\n");
 1014
 1015 assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
 1016 "G_ANYEXT incorrect operand size");
 1017
 1018 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
 1019 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
 1020
 1021 // If that's ANY_EXT of the value that lives on the floating class and goes
 1022 // into the vector class, just replace it with copy, as we are able to select
 1023 // it as a regular move.
 1024 if (canTurnIntoCOPY(SrcRC, DstRC))
 1025 return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);
 1026
 // Past the COPY special case, only the GPR bank is handled below.
 1027 if (DstRB.getID() != X86::GPRRegBankID)
 1028 return false;
 1029
 1030 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
 1031 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
 1032 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
 1033 << " operand\n");
 1034 return false;
 1035 }
 1036
 // Same register class on both sides: mutate in place into a plain COPY.
 1037 if (SrcRC == DstRC) {
 1038 I.setDesc(TII.get(X86::COPY));
 1039 return true;
 1040 }
 1041
 // Otherwise insert the narrow value into the low subregister of the wide
 // class; the remaining bits stay undefined, which is exactly anyext.
 // NOTE(review): SUBREG_TO_REG conventionally takes an immediate 0 operand
 // before the source register; this rendering shows none — confirm against
 // the checked-in source.
 1042 BuildMI(*I.getParent(), I, I.getDebugLoc(),
 1043 TII.get(TargetOpcode::SUBREG_TO_REG))
 1044 .addDef(DstReg)
 1045 .addReg(SrcReg)
 1046 .addImm(getSubRegIndex(SrcRC));
 1047
 1048 I.eraseFromParent();
 1049 return true;
 1050}
1051
// Select G_ICMP: translate the IR predicate into an X86 condition code
// (possibly swapping operands), emit a width-matched CMPrr to set EFLAGS,
// then SETCC the flag into the 8-bit result register.
1052bool X86InstructionSelector::selectCmp(MachineInstr &I,
 1053 MachineRegisterInfo &MRI,
 1054 MachineFunction &MF) const {
 1055 assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");
 1056
 1057 X86::CondCode CC;
 1058 bool SwapArgs;
 // getX86ConditionCode also reports whether LHS/RHS must be exchanged to
 // express this predicate with the returned condition code.
 1059 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
 1060 (CmpInst::Predicate)I.getOperand(1).getPredicate());
 1061
 1062 Register LHS = I.getOperand(2).getReg();
 1063 Register RHS = I.getOperand(3).getReg();
 1064
 1065 if (SwapArgs)
 1066 std::swap(LHS, RHS);
 1067
 1068 unsigned OpCmp;
 1069 LLT Ty = MRI.getType(LHS);
 1070
 // Only 8/16/32/64-bit register-register compares are handled here.
 1071 switch (Ty.getSizeInBits()) {
 1072 default:
 1073 return false;
 1074 case 8:
 1075 OpCmp = X86::CMP8rr;
 1076 break;
 1077 case 16:
 1078 OpCmp = X86::CMP16rr;
 1079 break;
 1080 case 32:
 1081 OpCmp = X86::CMP32rr;
 1082 break;
 1083 case 64:
 1084 OpCmp = X86::CMP64rr;
 1085 break;
 1086 }
 1087
 1088 MachineInstr &CmpInst =
 1089 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
 1090 .addReg(LHS)
 1091 .addReg(RHS);
 1092
 1093 MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
 1094 TII.get(X86::SETCCr), I.getOperand(0).getReg()).addImm(CC);
 // NOTE(review): embedded numbering jumps 1094 -> 1098; the
 // constrainSelectedInstRegOperands() calls for CmpInst/SetInst appear to
 // have been dropped from this rendering — verify against the repository.
 1098
 1099 I.eraseFromParent();
 1100 return true;
 1101}
1102
// Select G_FCMP.  Most predicates lower to one UCOMIS*/UCOM_FpIr* compare
// plus one SETCC; FCMP_OEQ and FCMP_UNE need two flag reads (E&NP, NE|P)
// combined with AND8rr/OR8rr, per the table below.
1103bool X86InstructionSelector::selectFCmp(MachineInstr &I,
 1104 MachineRegisterInfo &MRI,
 1105 MachineFunction &MF) const {
 1106 assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");
 1107
 1108 Register LhsReg = I.getOperand(2).getReg();
 1109 Register RhsReg = I.getOperand(3).getReg();
 // NOTE(review): line 1110 is missing from this rendering — it should be
 // the declaration "CmpInst::Predicate Predicate =" for the cast below.
 1111 (CmpInst::Predicate)I.getOperand(1).getPredicate();
 1112
 1113 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
 1114 static const uint16_t SETFOpcTable[2][3] = {
 1115 {X86::COND_E, X86::COND_NP, X86::AND8rr},
 1116 {X86::COND_NE, X86::COND_P, X86::OR8rr}};
 1117 const uint16_t *SETFOpc = nullptr;
 1118 switch (Predicate) {
 1119 default:
 1120 break;
 1121 case CmpInst::FCMP_OEQ:
 1122 SETFOpc = &SETFOpcTable[0][0];
 1123 break;
 1124 case CmpInst::FCMP_UNE:
 1125 SETFOpc = &SETFOpcTable[1][0];
 1126 break;
 1127 }
 1128
 1129 assert((LhsReg.isVirtual() && RhsReg.isVirtual()) &&
 1130 "Both arguments of FCMP need to be virtual!");
 1131 auto *LhsBank = RBI.getRegBank(LhsReg, MRI, TRI);
 1132 [[maybe_unused]] auto *RhsBank = RBI.getRegBank(RhsReg, MRI, TRI);
 1133 assert((LhsBank == RhsBank) &&
 1134 "Both banks assigned to FCMP arguments need to be same!");
 1135
 1136 // Compute the opcode for the CMP instruction.
 // PSR bank operands use the x87 UCOM_FpIr* forms; otherwise SSE UCOMIS*.
 1137 unsigned OpCmp;
 1138 LLT Ty = MRI.getType(LhsReg);
 1139 switch (Ty.getSizeInBits()) {
 1140 default:
 1141 return false;
 1142 case 32:
 1143 OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr32
 1144 : X86::UCOMISSrr;
 1145 break;
 1146 case 64:
 1147 OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr64
 1148 : X86::UCOMISDrr;
 1149 break;
 1150 case 80:
 1151 OpCmp = X86::UCOM_FpIr80;
 1152 break;
 1153 }
 1154
 1155 Register ResultReg = I.getOperand(0).getReg();
 // NOTE(review): line 1156 is missing — presumably the start of a
 // "RBI.constrainGenericRegister(" call wrapping the next two lines.
 1157 ResultReg,
 1158 *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
 // Two-flag path: compare once, SETCC both flags, then AND/OR them.
 1159 if (SETFOpc) {
 1160 MachineInstr &CmpInst =
 1161 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
 1162 .addReg(LhsReg)
 1163 .addReg(RhsReg);
 1164
 1165 Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
 1166 Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
 1167 MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
 1168 TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]);
 1169 MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
 1170 TII.get(X86::SETCCr), FlagReg2).addImm(SETFOpc[1]);
 1171 MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
 1172 TII.get(SETFOpc[2]), ResultReg)
 1173 .addReg(FlagReg1)
 1174 .addReg(FlagReg2);
 // NOTE(review): lines 1175-1178 are missing — presumably the
 // constrainSelectedInstRegOperands() calls for CmpInst/Set1/Set2/Set3.
 1179
 1180 I.eraseFromParent();
 1181 return true;
 1182 }
 1183
 // Single-flag path: map the predicate to one condition code.
 1184 X86::CondCode CC;
 1185 bool SwapArgs;
 1186 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
 1187 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
 1188
 1189 if (SwapArgs)
 1190 std::swap(LhsReg, RhsReg);
 1191
 1192 // Emit a compare of LHS/RHS.
 1193 MachineInstr &CmpInst =
 1194 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
 1195 .addReg(LhsReg)
 1196 .addReg(RhsReg);
 1197
 1198 MachineInstr &Set =
 1199 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), ResultReg).addImm(CC);
 // NOTE(review): lines 1200-1201 are missing — presumably the
 // constrainSelectedInstRegOperands() calls for CmpInst/Set.
 1202 I.eraseFromParent();
 1203 return true;
 1204}
1205
// Select G_UADDO/G_UADDE/G_USUBO/G_USUBE: width-matched ADD/ADC/SUB/SBB
// plus a SETCC capturing the carry/borrow out.  For the carry-in variants,
// the incoming carry must come from a previous UADD*/USUB* (re-materialized
// into CF with a CMP8ri) or be a constant zero.
1206bool X86InstructionSelector::selectUAddSub(MachineInstr &I,
 1207 MachineRegisterInfo &MRI,
 1208 MachineFunction &MF) const {
 1209 assert((I.getOpcode() == TargetOpcode::G_UADDE ||
 1210 I.getOpcode() == TargetOpcode::G_UADDO ||
 1211 I.getOpcode() == TargetOpcode::G_USUBE ||
 1212 I.getOpcode() == TargetOpcode::G_USUBO) &&
 1213 "unexpected instruction");
 1214
 1215 auto &CarryMI = cast<GAddSubCarryOut>(I);
 1216
 1217 const Register DstReg = CarryMI.getDstReg();
 1218 const Register CarryOutReg = CarryMI.getCarryOutReg();
 1219 const Register Op0Reg = CarryMI.getLHSReg();
 1220 const Register Op1Reg = CarryMI.getRHSReg();
 1221 bool IsSub = CarryMI.isSub();
 1222
 1223 const LLT DstTy = MRI.getType(DstReg);
 1224 assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");
 1225
 1226 // TODO: Handle immediate argument variants?
 // Opcode quadruple (with-carry and plain forms) for each scalar width.
 1227 unsigned OpADC, OpADD, OpSBB, OpSUB;
 1228 switch (DstTy.getSizeInBits()) {
 1229 case 8:
 1230 OpADC = X86::ADC8rr;
 1231 OpADD = X86::ADD8rr;
 1232 OpSBB = X86::SBB8rr;
 1233 OpSUB = X86::SUB8rr;
 1234 break;
 1235 case 16:
 1236 OpADC = X86::ADC16rr;
 1237 OpADD = X86::ADD16rr;
 1238 OpSBB = X86::SBB16rr;
 1239 OpSUB = X86::SUB16rr;
 1240 break;
 1241 case 32:
 1242 OpADC = X86::ADC32rr;
 1243 OpADD = X86::ADD32rr;
 1244 OpSBB = X86::SBB32rr;
 1245 OpSUB = X86::SUB32rr;
 1246 break;
 1247 case 64:
 1248 OpADC = X86::ADC64rr;
 1249 OpADD = X86::ADD64rr;
 1250 OpSBB = X86::SBB64rr;
 1251 OpSUB = X86::SUB64rr;
 1252 break;
 1253 default:
 1254 llvm_unreachable("selectUAddSub unsupported type.");
 1255 }
 1256
 1257 const RegisterBank &CarryRB = *RBI.getRegBank(CarryOutReg, MRI, TRI);
 1258 const TargetRegisterClass *CarryRC =
 1259 getRegClass(MRI.getType(CarryOutReg), CarryRB);
 1260
 1261 unsigned Opcode = IsSub ? OpSUB : OpADD;
 1262
 1263 // G_UADDE/G_USUBE - find CarryIn def instruction.
 // Walk through any G_TRUNCs to reach the instruction producing the carry.
 1264 if (auto CarryInMI = dyn_cast<GAddSubCarryInOut>(&I)) {
 1265 Register CarryInReg = CarryInMI->getCarryInReg();
 1266 MachineInstr *Def = MRI.getVRegDef(CarryInReg);
 1267 while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
 1268 CarryInReg = Def->getOperand(1).getReg();
 1269 Def = MRI.getVRegDef(CarryInReg);
 1270 }
 1271
 1272 // TODO - handle more CF generating instructions
 1273 if (Def->getOpcode() == TargetOpcode::G_UADDE ||
 1274 Def->getOpcode() == TargetOpcode::G_UADDO ||
 1275 Def->getOpcode() == TargetOpcode::G_USUBE ||
 1276 Def->getOpcode() == TargetOpcode::G_USUBO) {
 1277 // carry set by prev ADD/SUB.
 1278
 // Compare the carry byte against 1 to reload CF before the ADC/SBB.
 1279 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::CMP8ri))
 1280 .addReg(CarryInReg)
 1281 .addImm(1);
 1282
 1283 if (!RBI.constrainGenericRegister(CarryInReg, *CarryRC, MRI))
 1284 return false;
 1285
 1286 Opcode = IsSub ? OpSBB : OpADC;
 1287 } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
 1288 // carry is constant, support only 0.
 1289 if (*val != 0)
 1290 return false;
 1291
 1292 Opcode = IsSub ? OpSUB : OpADD;
 1293 } else
 1294 return false;
 1295 }
 1296
 1297 MachineInstr &Inst =
 1298 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
 1299 .addReg(Op0Reg)
 1300 .addReg(Op1Reg);
 1301
 // Capture CF into the carry-out register.
 1302 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), CarryOutReg)
 // NOTE(review): lines 1303 and 1305 are missing — presumably the
 // ".addImm(X86::COND_B);" for the SETCC above and the
 // constrainSelectedInstRegOperands() call for Inst.
 1304
 1306 if (!RBI.constrainGenericRegister(CarryOutReg, *CarryRC, MRI))
 1307 return false;
 1308
 1309 I.eraseFromParent();
 1310 return true;
 1311}
1312
// Select G_EXTRACT of a subvector.  Index 0 becomes a subregister copy;
// non-zero aligned indices become VEXTRACTF* with the index rescaled from
// bits to subvector units.
1313bool X86InstructionSelector::selectExtract(MachineInstr &I,
 1314 MachineRegisterInfo &MRI,
 1315 MachineFunction &MF) const {
 1316 assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
 1317 "unexpected instruction");
 1318
 1319 const Register DstReg = I.getOperand(0).getReg();
 1320 const Register SrcReg = I.getOperand(1).getReg();
 1321 int64_t Index = I.getOperand(2).getImm();
 1322
 1323 const LLT DstTy = MRI.getType(DstReg);
 1324 const LLT SrcTy = MRI.getType(SrcReg);
 1325
 1326 // Meanwile handle vector type only.
 1327 if (!DstTy.isVector())
 1328 return false;
 1329
 // The bit offset must be a whole multiple of the result width.
 1330 if (Index % DstTy.getSizeInBits() != 0)
 1331 return false; // Not extract subvector.
 1332
 1333 if (Index == 0) {
 1334 // Replace by extract subreg copy.
 1335 if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
 1336 return false;
 1337
 1338 I.eraseFromParent();
 1339 return true;
 1340 }
 1341
 1342 bool HasAVX = STI.hasAVX();
 1343 bool HasAVX512 = STI.hasAVX512();
 1344 bool HasVLX = STI.hasVLX();
 1345
 // Pick the extract opcode from the (source width, result width) pair and
 // the available subtarget features.
 1346 if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
 1347 if (HasVLX)
 1348 I.setDesc(TII.get(X86::VEXTRACTF32X4Z256rri));
 1349 else if (HasAVX)
 1350 I.setDesc(TII.get(X86::VEXTRACTF128rri));
 1351 else
 1352 return false;
 1353 } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
 1354 if (DstTy.getSizeInBits() == 128)
 1355 I.setDesc(TII.get(X86::VEXTRACTF32X4Zrri));
 1356 else if (DstTy.getSizeInBits() == 256)
 1357 I.setDesc(TII.get(X86::VEXTRACTF64X4Zrri));
 1358 else
 1359 return false;
 1360 } else
 1361 return false;
 1362
 1363 // Convert to X86 VEXTRACT immediate.
 1364 Index = Index / DstTy.getSizeInBits();
 1365 I.getOperand(2).setImm(Index);
 1366
 // NOTE(review): line 1367 is missing — presumably
 // "return constrainSelectedInstRegOperands(I, TII, TRI, RBI);" or a
 // standalone constrain call; verify against the repository.
 1368 return true;
 1369}
1370
// Emit a COPY from the sub_xmm/sub_ymm subregister of SrcReg into DstReg.
// Used for vector G_EXTRACT at offset 0.  Returns false when the type pair
// is unsupported or the registers cannot be constrained.
1371bool X86InstructionSelector::emitExtractSubreg(Register DstReg, Register SrcReg,
 1372 MachineInstr &I,
 1373 MachineRegisterInfo &MRI,
 1374 MachineFunction &MF) const {
 1375 const LLT DstTy = MRI.getType(DstReg);
 1376 const LLT SrcTy = MRI.getType(SrcReg);
 1377 unsigned SubIdx = X86::NoSubRegister;
 1378
 1379 if (!DstTy.isVector() || !SrcTy.isVector())
 1380 return false;
 1381
 1382 assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
 1383 "Incorrect Src/Dst register size");
 1384
 // The destination width selects which subregister index to read.
 1385 if (DstTy.getSizeInBits() == 128)
 1386 SubIdx = X86::sub_xmm;
 1387 else if (DstTy.getSizeInBits() == 256)
 1388 SubIdx = X86::sub_ymm;
 1389 else
 1390 return false;
 1391
 1392 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
 1393 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
 1394
 // Narrow the source class to one that actually has this subregister.
 1395 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);
 1396
 1397 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
 1398 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
 1399 LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
 1400 return false;
 1401 }
 1402
 // COPY reading SrcReg:SubIdx (flags default-initialized to none).
 1403 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
 1404 .addReg(SrcReg, {}, SubIdx);
 1405
 1406 return true;
 1407}
1408
// Emit a COPY defining the sub_xmm/sub_ymm subregister of DstReg from
// SrcReg (upper lanes undefined).  Used for vector G_INSERT at offset 0
// into an implicit-def destination, and by selectMergeValues.
1409bool X86InstructionSelector::emitInsertSubreg(Register DstReg, Register SrcReg,
 1410 MachineInstr &I,
 1411 MachineRegisterInfo &MRI,
 1412 MachineFunction &MF) const {
 1413 const LLT DstTy = MRI.getType(DstReg);
 1414 const LLT SrcTy = MRI.getType(SrcReg);
 1415 unsigned SubIdx = X86::NoSubRegister;
 1416
 1417 // TODO: support scalar types
 1418 if (!DstTy.isVector() || !SrcTy.isVector())
 1419 return false;
 1420
 1421 assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
 1422 "Incorrect Src/Dst register size");
 1423
 // The source width selects which subregister index to define.
 1424 if (SrcTy.getSizeInBits() == 128)
 1425 SubIdx = X86::sub_xmm;
 1426 else if (SrcTy.getSizeInBits() == 256)
 1427 SubIdx = X86::sub_ymm;
 1428 else
 1429 return false;
 1430
 1431 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
 1432 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
 1433
 1434 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
 1435 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
 1436 LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
 1437 return false;
 1438 }
 1439
 // DefineNoRead: only the subregister is written; the rest of DstReg is
 // not read, so its other lanes are undefined after this copy.
 1440 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
 1441 .addReg(DstReg, RegState::DefineNoRead, SubIdx)
 1442 .addReg(SrcReg)
 1443
 1444 return true;
 1445}
1446
// Select G_INSERT of a subvector.  Index 0 into an IMPLICIT_DEF becomes a
// subregister copy; otherwise a VINSERTF* with the index rescaled from bits
// to subvector units.
1447bool X86InstructionSelector::selectInsert(MachineInstr &I,
 1448 MachineRegisterInfo &MRI,
 1449 MachineFunction &MF) const {
 1450 assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");
 1451
 1452 const Register DstReg = I.getOperand(0).getReg();
 1453 const Register SrcReg = I.getOperand(1).getReg();
 1454 const Register InsertReg = I.getOperand(2).getReg();
 1455 int64_t Index = I.getOperand(3).getImm();
 1456
 1457 const LLT DstTy = MRI.getType(DstReg);
 1458 const LLT InsertRegTy = MRI.getType(InsertReg);
 1459
 1460 // Meanwile handle vector type only.
 1461 if (!DstTy.isVector())
 1462 return false;
 1463
 // The bit offset must be a whole multiple of the inserted value's width.
 1464 if (Index % InsertRegTy.getSizeInBits() != 0)
 1465 return false; // Not insert subvector.
 1466
 // Inserting at offset 0 into an undefined value is just a subreg write.
 1467 if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
 1468 // Replace by subreg copy.
 1469 if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
 1470 return false;
 1471
 1472 I.eraseFromParent();
 1473 return true;
 1474 }
 1475
 1476 bool HasAVX = STI.hasAVX();
 1477 bool HasAVX512 = STI.hasAVX512();
 1478 bool HasVLX = STI.hasVLX();
 1479
 // Pick the insert opcode from the (dest width, inserted width) pair and
 // the available subtarget features.
 1480 if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
 1481 if (HasVLX)
 1482 I.setDesc(TII.get(X86::VINSERTF32X4Z256rri));
 1483 else if (HasAVX)
 1484 I.setDesc(TII.get(X86::VINSERTF128rri));
 1485 else
 1486 return false;
 1487 } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
 1488 if (InsertRegTy.getSizeInBits() == 128)
 1489 I.setDesc(TII.get(X86::VINSERTF32X4Zrri));
 1490 else if (InsertRegTy.getSizeInBits() == 256)
 1491 I.setDesc(TII.get(X86::VINSERTF64X4Zrri));
 1492 else
 1493 return false;
 1494 } else
 1495 return false;
 1496
 1497 // Convert to X86 VINSERT immediate.
 1498 Index = Index / InsertRegTy.getSizeInBits();
 1499
 1500 I.getOperand(3).setImm(Index);
 1501
 // NOTE(review): line 1502 is missing — presumably
 // "return constrainSelectedInstRegOperands(I, TII, TRI, RBI);" or a
 // standalone constrain call; verify against the repository.
 1503 return true;
 1504}
1505
1506bool X86InstructionSelector::selectUnmergeValues(
1507 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
1508 assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
1509 "unexpected instruction");
1510
1511 // Split to extracts.
1512 unsigned NumDefs = I.getNumOperands() - 1;
1513 Register SrcReg = I.getOperand(NumDefs).getReg();
1514 unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
1515
1516 for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
1517 MachineInstr &ExtrInst =
1518 *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1519 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
1520 .addReg(SrcReg)
1521 .addImm(Idx * DefSize);
1522
1523 if (!select(ExtrInst))
1524 return false;
1525 }
1526
1527 I.eraseFromParent();
1528 return true;
1529}
1530
// Lower G_MERGE_VALUES / G_CONCAT_VECTORS as a chain: first source goes in
// via a subregister write, each following source via a G_INSERT at its bit
// offset, and the final accumulated value is COPY'd into the destination.
// Every generated generic instruction is selected recursively.
1531bool X86InstructionSelector::selectMergeValues(
 1532 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
 1533 assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
 1534 I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
 1535 "unexpected instruction");
 1536
 1537 // Split to inserts.
 1538 Register DstReg = I.getOperand(0).getReg();
 1539 Register SrcReg0 = I.getOperand(1).getReg();
 1540
 1541 const LLT DstTy = MRI.getType(DstReg);
 1542 const LLT SrcTy = MRI.getType(SrcReg0);
 1543 unsigned SrcSize = SrcTy.getSizeInBits();
 1544
 1545 const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
 1546
 1547 // For the first src use insertSubReg.
 1548 Register DefReg = MRI.createGenericVirtualRegister(DstTy);
 1549 MRI.setRegBank(DefReg, RegBank);
 1550 if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
 1551 return false;
 1552
 // Remaining sources: chain G_INSERTs, each building on the previous
 // accumulated value (DefReg) at offset (Idx - 1) * SrcSize.
 1553 for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
 1554 Register Tmp = MRI.createGenericVirtualRegister(DstTy);
 1555 MRI.setRegBank(Tmp, RegBank);
 1556
 1557 MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
 1558 TII.get(TargetOpcode::G_INSERT), Tmp)
 1559 .addReg(DefReg)
 1560 .addReg(I.getOperand(Idx).getReg())
 1561 .addImm((Idx - 1) * SrcSize);
 1562
 1563 DefReg = Tmp;
 1564
 1565 if (!select(InsertInst))
 1566 return false;
 1567 }
 1568
 // Final copy into the original destination register.
 1569 MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
 1570 TII.get(TargetOpcode::COPY), DstReg)
 1571 .addReg(DefReg);
 1572
 1573 if (!select(CopyInst))
 1574 return false;
 1575
 1576 I.eraseFromParent();
 1577 return true;
 1578}
1579
1580bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
1581 MachineRegisterInfo &MRI,
1582 MachineFunction &MF) const {
1583 assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");
1584
1585 const Register CondReg = I.getOperand(0).getReg();
1586 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1587
1588 MachineInstr &TestInst =
1589 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
1590 .addReg(CondReg)
1591 .addImm(1);
1592 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
1593 .addMBB(DestMBB).addImm(X86::COND_NE);
1594
1595 constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);
1596
1597 I.eraseFromParent();
1598 return true;
1599}
1600
// Select G_FCONSTANT by spilling the FP immediate to the constant pool and
// loading it back: either through a MOV64ri of the pool address (large code
// model on x86-64) or via a RIP/absolute constant-pool reference (small
// model or 32-bit).
1601bool X86InstructionSelector::materializeFP(MachineInstr &I,
 1602 MachineRegisterInfo &MRI,
 1603 MachineFunction &MF) const {
 1604 assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
 1605 "unexpected instruction");
 1606
 1607 // Can't handle alternate code models yet.
 // NOTE(review): line 1608 is missing — presumably
 // "CodeModel::Model CM = TM.getCodeModel();", which the checks below use.
 1609 if (CM != CodeModel::Small && CM != CodeModel::Large)
 1610 return false;
 1611
 1612 const Register DstReg = I.getOperand(0).getReg();
 1613 const LLT DstTy = MRI.getType(DstReg);
 1614 const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
 1615 // Create the load from the constant pool.
 1616 const ConstantFP *CFP = I.getOperand(1).getFPImm();
 1617 const auto &DL = MF.getDataLayout();
 1618 Align Alignment = DL.getPrefTypeAlign(CFP->getType());
 1619 const DebugLoc &DbgLoc = I.getDebugLoc();
 1620
 1621 unsigned Opc =
 1622 getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);
 1623
 1624 unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
 1625 MachineInstr *LoadInst = nullptr;
 1626 unsigned char OpFlag = STI.classifyLocalReference(nullptr);
 1627
 1628 if (CM == CodeModel::Large && STI.is64Bit()) {
 1629 // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
 1630 // they cannot be folded into immediate fields.
 1631
 1632 Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
 1633 BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
 1634 .addConstantPoolIndex(CPI, 0, OpFlag);
 1635
 1636 MachineMemOperand *MMO = MF.getMachineMemOperand(
 // NOTE(review): line 1637 is missing — presumably the
 // "MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,"
 // arguments of getMachineMemOperand.
 1638 LLT::pointer(0, DL.getPointerSizeInBits()), Alignment);
 1639
 1640 LoadInst =
 1641 addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
 1642 AddrReg)
 1643 .addMemOperand(MMO);
 1644
 1645 } else if (CM == CodeModel::Small || !STI.is64Bit()) {
 1646 // Handle the case when globals fit in our immediate field.
 1647 // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.
 1648
 1649 // x86-32 PIC requires a PIC base register for constant pools.
 1650 unsigned PICBase = 0;
 1651 if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
 1652 // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
 1653 // In DAGISEL the code that initialize it generated by the CGBR pass.
 1654 return false; // TODO support the mode.
 1655 } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
 1656 PICBase = X86::RIP;
 1657
 1658 LoadInst = addConstantPoolReference(
 1659 BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
 1660 OpFlag);
 1661 } else
 1662 return false;
 1663
 1664 constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
 1665 I.eraseFromParent();
 1666 return true;
 1667}
1668
1669bool X86InstructionSelector::selectImplicitDefOrPHI(
1670 MachineInstr &I, MachineRegisterInfo &MRI) const {
1671 assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
1672 I.getOpcode() == TargetOpcode::G_PHI) &&
1673 "unexpected instruction");
1674
1675 Register DstReg = I.getOperand(0).getReg();
1676
1677 if (!MRI.getRegClassOrNull(DstReg)) {
1678 const LLT DstTy = MRI.getType(DstReg);
1679 const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);
1680
1681 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
1682 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
1683 << " operand\n");
1684 return false;
1685 }
1686 }
1687
1688 if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1689 I.setDesc(TII.get(X86::IMPLICIT_DEF));
1690 else
1691 I.setDesc(TII.get(X86::PHI));
1692
1693 return true;
1694}
1695
// Select G_MUL/G_SMULH/G_UMULH/G_SDIV/G_SREM/G_UDIV/G_UREM via the X86
// one-operand MUL/IMUL/DIV/IDIV forms: the dividend is placed in the fixed
// AX/DX-style register pair (sign- or zero-extended into the high half),
// the instruction is issued with the second operand, and the result is
// copied out of the fixed result register named in the table below.
1696bool X86InstructionSelector::selectMulDivRem(MachineInstr &I,
 1697 MachineRegisterInfo &MRI,
 1698 MachineFunction &MF) const {
 1699 // The implementation of this function is adapted from X86FastISel.
 1700 assert((I.getOpcode() == TargetOpcode::G_MUL ||
 1701 I.getOpcode() == TargetOpcode::G_SMULH ||
 1702 I.getOpcode() == TargetOpcode::G_UMULH ||
 1703 I.getOpcode() == TargetOpcode::G_SDIV ||
 1704 I.getOpcode() == TargetOpcode::G_SREM ||
 1705 I.getOpcode() == TargetOpcode::G_UDIV ||
 1706 I.getOpcode() == TargetOpcode::G_UREM) &&
 1707 "unexpected instruction");
 1708
 1709 const Register DstReg = I.getOperand(0).getReg();
 1710 const Register Op1Reg = I.getOperand(1).getReg();
 1711 const Register Op2Reg = I.getOperand(2).getReg();
 1712
 1713 const LLT RegTy = MRI.getType(DstReg);
 1714 assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
 1715 "Arguments and return value types must match");
 1716
 // Only GPR-bank operands are supported.
 1717 const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
 1718 if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
 1719 return false;
 1720
 1721 const static unsigned NumTypes = 4; // i8, i16, i32, i64
 1722 const static unsigned NumOps = 7; // SDiv/SRem/UDiv/URem/Mul/SMulH/UMulh
 1723 const static bool S = true; // IsSigned
 1724 const static bool U = false; // !IsSigned
 1725 const static unsigned Copy = TargetOpcode::COPY;
 1726
 1727 // For the X86 IDIV instruction, in most cases the dividend
 1728 // (numerator) must be in a specific register pair highreg:lowreg,
 1729 // producing the quotient in lowreg and the remainder in highreg.
 1730 // For most data types, to set up the instruction, the dividend is
 1731 // copied into lowreg, and lowreg is sign-extended into highreg. The
 1732 // exception is i8, where the dividend is defined as a single register rather
 1733 // than a register pair, and we therefore directly sign-extend the dividend
 1734 // into lowreg, instead of copying, and ignore the highreg.
 1735 const static struct MulDivRemEntry {
 1736 // The following portion depends only on the data type.
 1737 unsigned SizeInBits;
 1738 unsigned LowInReg; // low part of the register pair
 1739 unsigned HighInReg; // high part of the register pair
 1740 // The following portion depends on both the data type and the operation.
 1741 struct MulDivRemResult {
 1742 unsigned OpMulDivRem; // The specific MUL/DIV opcode to use.
 1743 unsigned OpSignExtend; // Opcode for sign-extending lowreg into
 1744 // highreg, or copying a zero into highreg.
 1745 unsigned OpCopy; // Opcode for copying dividend into lowreg, or
 1746 // zero/sign-extending into lowreg for i8.
 1747 unsigned ResultReg; // Register containing the desired result.
 1748 bool IsOpSigned; // Whether to use signed or unsigned form.
 1749 } ResultTable[NumOps];
 1750 } OpTable[NumTypes] = {
 1751 {8,
 1752 X86::AX,
 1753 0,
 1754 {
 1755 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
 1756 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
 1757 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U}, // UDiv
 1758 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U}, // URem
 1759 {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S}, // Mul
 1760 {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SMulH
 1761 {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH, U}, // UMulH
 1762 }}, // i8
 1763 {16,
 1764 X86::AX,
 1765 X86::DX,
 1766 {
 1767 {X86::IDIV16r, X86::CWD, Copy, X86::AX, S}, // SDiv
 1768 {X86::IDIV16r, X86::CWD, Copy, X86::DX, S}, // SRem
 1769 {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U}, // UDiv
 1770 {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U}, // URem
 1771 {X86::IMUL16r, X86::MOV32r0, Copy, X86::AX, S}, // Mul
 1772 {X86::IMUL16r, X86::MOV32r0, Copy, X86::DX, S}, // SMulH
 1773 {X86::MUL16r, X86::MOV32r0, Copy, X86::DX, U}, // UMulH
 1774 }}, // i16
 1775 {32,
 1776 X86::EAX,
 1777 X86::EDX,
 1778 {
 1779 {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S}, // SDiv
 1780 {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S}, // SRem
 1781 {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U}, // UDiv
 1782 {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U}, // URem
 1783 {X86::IMUL32r, X86::MOV32r0, Copy, X86::EAX, S}, // Mul
 1784 {X86::IMUL32r, X86::MOV32r0, Copy, X86::EDX, S}, // SMulH
 1785 {X86::MUL32r, X86::MOV32r0, Copy, X86::EDX, U}, // UMulH
 1786 }}, // i32
 1787 {64,
 1788 X86::RAX,
 1789 X86::RDX,
 1790 {
 1791 {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S}, // SDiv
 1792 {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S}, // SRem
 1793 {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U}, // UDiv
 1794 {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U}, // URem
 1795 {X86::IMUL64r, X86::MOV32r0, Copy, X86::RAX, S}, // Mul
 1796 {X86::IMUL64r, X86::MOV32r0, Copy, X86::RDX, S}, // SMulH
 1797 {X86::MUL64r, X86::MOV32r0, Copy, X86::RDX, U}, // UMulH
 1798 }}, // i64
 1799 };
 1800
 // Find the table row matching the operand width.
 1801 auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const MulDivRemEntry &El) {
 1802 return El.SizeInBits == RegTy.getSizeInBits();
 1803 });
 1804 if (OpEntryIt == std::end(OpTable))
 1805 return false;
 1806
 // Map the generic opcode onto the column index of ResultTable.
 1807 unsigned OpIndex;
 1808 switch (I.getOpcode()) {
 1809 default:
 1810 llvm_unreachable("Unexpected mul/div/rem opcode");
 1811 case TargetOpcode::G_SDIV:
 1812 OpIndex = 0;
 1813 break;
 1814 case TargetOpcode::G_SREM:
 1815 OpIndex = 1;
 1816 break;
 1817 case TargetOpcode::G_UDIV:
 1818 OpIndex = 2;
 1819 break;
 1820 case TargetOpcode::G_UREM:
 1821 OpIndex = 3;
 1822 break;
 1823 case TargetOpcode::G_MUL:
 1824 OpIndex = 4;
 1825 break;
 1826 case TargetOpcode::G_SMULH:
 1827 OpIndex = 5;
 1828 break;
 1829 case TargetOpcode::G_UMULH:
 1830 OpIndex = 6;
 1831 break;
 1832 }
 1833
 1834 const MulDivRemEntry &TypeEntry = *OpEntryIt;
 1835 const MulDivRemEntry::MulDivRemResult &OpEntry =
 1836 TypeEntry.ResultTable[OpIndex];
 1837
 1838 const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
 1839 if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
 1840 !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
 1841 !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
 1842 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
 1843 << " operand\n");
 1844 return false;
 1845 }
 1846
 1847 // Move op1 into low-order input register.
 1848 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
 1849 TypeEntry.LowInReg)
 1850 .addReg(Op1Reg);
 1851
 1852 // Zero-extend or sign-extend into high-order input register.
 1853 if (OpEntry.OpSignExtend) {
 1854 if (OpEntry.IsOpSigned)
 1855 BuildMI(*I.getParent(), I, I.getDebugLoc(),
 1856 TII.get(OpEntry.OpSignExtend));
 1857 else {
 // Unsigned case: materialize a 32-bit zero and move it into the high
 // register via the width-appropriate copy / SUBREG_TO_REG.
 1858 Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
 1859 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
 1860 Zero32);
 1861
 1862 // Copy the zero into the appropriate sub/super/identical physical
 1863 // register. Unfortunately the operations needed are not uniform enough
 1864 // to fit neatly into the table above.
 1865 if (RegTy.getSizeInBits() == 16) {
 1866 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
 1867 TypeEntry.HighInReg)
 1868 .addReg(Zero32, {}, X86::sub_16bit);
 1869 } else if (RegTy.getSizeInBits() == 32) {
 1870 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
 1871 TypeEntry.HighInReg)
 1872 .addReg(Zero32);
 1873 } else if (RegTy.getSizeInBits() == 64) {
 1874 BuildMI(*I.getParent(), I, I.getDebugLoc(),
 1875 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
 1876 .addReg(Zero32)
 1877 .addImm(X86::sub_32bit);
 1878 }
 1879 }
 1880 }
 1881
 1882 // Generate the DIV/IDIV/MUL/IMUL instruction.
 1883 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpMulDivRem))
 1884 .addReg(Op2Reg);
 1885
 1886 // For i8 remainder, we can't reference ah directly, as we'll end
 1887 // up with bogus copies like %r9b = COPY %ah. Reference ax
 1888 // instead to prevent ah references in a rex instruction.
 1889 //
 1890 // The current assumption of the fast register allocator is that isel
 1891 // won't generate explicit references to the GR8_NOREX registers. If
 1892 // the allocator and/or the backend get enhanced to be more robust in
 1893 // that regard, this can be, and should be, removed.
 1894 if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
 1895 Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
 1896 Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
 1897 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
 1898 .addReg(X86::AX);
 1899
 1900 // Shift AX right by 8 bits instead of using AH.
 1901 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
 1902 ResultSuperReg)
 1903 .addReg(SourceSuperReg)
 1904 .addImm(8);
 1905
 1906 // Now reference the 8-bit subreg of the result.
 1907 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
 1908 DstReg)
 1909 .addReg(ResultSuperReg, {}, X86::sub_8bit);
 1910 } else {
 1911 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
 1912 DstReg)
 1913 .addReg(OpEntry.ResultReg);
 1914 }
 1915 I.eraseFromParent();
 1916
 1917 return true;
 1918}
1919
// Select G_SELECT: TEST32rr of the condition against itself (sets ZF when
// the condition is zero), then a CMOV — real CMOVxx where the subtarget has
// it, pseudo CMOV_GRxx / CMOVE_Fp80 otherwise.
1920bool X86InstructionSelector::selectSelect(MachineInstr &I,
 1921 MachineRegisterInfo &MRI,
 1922 MachineFunction &MF) const {
 1923 GSelect &Sel = cast<GSelect>(I);
 1924 Register DstReg = Sel.getReg(0);
 1925 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::TEST32rr))
 1926 .addReg(Sel.getCondReg())
 1927 .addReg(Sel.getCondReg());
 1928
 1929 unsigned OpCmp;
 1930 LLT Ty = MRI.getType(DstReg);
 // 80-bit values always go through the x87 CMOVE_Fp80 pseudo.
 1931 if (Ty.getSizeInBits() == 80) {
 1932 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::CMOVE_Fp80),
 1933 DstReg)
 1934 .addReg(Sel.getTrueReg())
 1935 .addReg(Sel.getFalseReg());
 1936 } else {
 1937 switch (Ty.getSizeInBits()) {
 1938 default:
 1939 return false;
 1940 case 8:
 // No 8-bit CMOVrr exists; always use the pseudo.
 1941 OpCmp = X86::CMOV_GR8;
 1942 break;
 1943 case 16:
 1944 OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;
 1945 break;
 1946 case 32:
 1947 OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;
 1948 break;
 1949 case 64:
 1950 assert(STI.is64Bit() && STI.canUseCMOV());
 1951 OpCmp = X86::CMOV64rr;
 1952 break;
 1953 }
 1954 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(OpCmp), DstReg)
 1955 .addReg(Sel.getTrueReg())
 1956 .addReg(Sel.getFalseReg())
 // NOTE(review): line 1957 is missing — presumably the condition-code
 // immediate for the CMOV, ".addImm(X86::COND_E);".
 1958 }
 1959 const TargetRegisterClass *DstRC = getRegClass(Ty, DstReg, MRI);
 1960 if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
 1961 LLVM_DEBUG(dbgs() << "Failed to constrain CMOV\n");
 1962 return false;
 1963 }
 1964
 1965 Sel.eraseFromParent();
 1966 return true;
 1967}
1968
// ComplexPattern renderer for addressing-mode operands: folds the pointer
// definition into an X86AddressMode and emits the five-operand
// base/scale/index/disp/segment sequence.  Bails out (no fold) when the
// computed mode needs an index register.
1969InstructionSelector::ComplexRendererFns
 1970X86InstructionSelector::selectAddr(MachineOperand &Root) const {
 1971 MachineInstr *MI = Root.getParent();
 1972 MachineIRBuilder MIRBuilder(*MI);
 1973
 1974 MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
 1975 MachineInstr *Ptr = MRI.getVRegDef(Root.getReg());
 1976 X86AddressMode AM;
 1977 X86SelectAddress(*Ptr, TM, MRI, STI, AM);
 1978
 1979 if (AM.IndexReg)
 1980 return std::nullopt;
 1981
 1982 return {// Base
 1983 {[=](MachineInstrBuilder &MIB) {
 // NOTE(review): lines 1984 and 1987 are missing from this rendering —
 // presumably "if (AM.BaseType == X86AddressMode::RegBase)" and the
 // matching "assert(AM.BaseType == X86AddressMode::FrameIndexBase &&".
 1985 MIB.addUse(AM.Base.Reg);
 1986 else {
 1988 "Unknown type of address base");
 1989 MIB.addFrameIndex(AM.Base.FrameIndex);
 1990 }
 1991 },
 1992 // Scale
 1993 [=](MachineInstrBuilder &MIB) { MIB.addImm(AM.Scale); },
 1994 // Index
 // No index register (checked above), so render a null register use.
 1995 [=](MachineInstrBuilder &MIB) { MIB.addUse(0); },
 1996 // Disp
 1997 [=](MachineInstrBuilder &MIB) {
 1998 if (AM.GV)
 1999 MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
 2000 else if (AM.CP)
 2001 MIB.addConstantPoolIndex(AM.Disp, 0, AM.GVOpFlags);
 2002 else
 2003 MIB.addImm(AM.Disp);
 2004 },
 2005 // Segment
 2006 [=](MachineInstrBuilder &MIB) { MIB.addUse(0); }}};
 2007}
2008
// Factory entry point: builds the X86 GlobalISel instruction selector for
// the given target machine / subtarget / register-bank info.
// NOTE(review): line 2010 is missing from this rendering — presumably the
// function name line "llvm::createX86InstructionSelector(const
// X86TargetMachine &TM," between the return type and the parameter list.
2009InstructionSelector *
 2011 const X86Subtarget &Subtarget,
 2012 const X86RegisterBankInfo &RBI) {
 2013 return new X86InstructionSelector(TM, Subtarget, RBI);
 2014}
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectMergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define DEBUG_TYPE
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Implement a low-level type suitable for MachineInstr level instruction selection.
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned selectLoadStoreOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
static StringRef getName(Value *V)
unsigned OpIndex
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
#define LLVM_DEBUG(...)
Definition Debug.h:114
static bool X86SelectAddress(MachineInstr &I, const X86TargetMachine &TM, const MachineRegisterInfo &MRI, const X86Subtarget &STI, X86AddressMode &AM)
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC, const TargetRegisterClass *SrcRC)
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI)
static const TargetRegisterClass * getRegClassFromGRPhysReg(Register Reg)
Value * RHS
Value * LHS
This file declares the targeting of the RegisterBankInfo class for X86.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
Register getCondReg() const
Register getFalseReg() const
Register getTrueReg() const
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
@ MOLoad
The memory access reads data.
MachineOperand class - Representation of each machine instruction operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
const RegClassOrRegBank & getRegClassOrRegBank(Register Reg) const
Return the register bank or register class of Reg.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setRegBank(Register Reg, const RegisterBank &RegBank)
Set the register bank to RegBank for Reg.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
TypeSize getSizeInBits(Register Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
CodeModel::Model getCodeModel() const
Returns the code model.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
Register getGlobalBaseReg(MachineFunction *MF) const
getGlobalBaseReg - Return a virtual register initialized with the the global base register value.
This class provides the information for the target register banks.
bool canUseCMOV() const
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
const X86InstrInfo * getInstrInfo() const override
bool hasAVX512() const
unsigned char classifyGlobalReference(const GlobalValue *GV, const Module &M) const
bool isPICStyleRIPRel() const
unsigned char classifyLocalReference(const GlobalValue *GV) const
Classify a global variable reference for the current subtarget according to how we should reference i...
bool hasAVX() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
@ X86
Windows x64, Windows Itanium (IA-64)
Definition MCAsmInfo.h:50
@ MO_GOTOFF
MO_GOTOFF - On a symbol operand this indicates that the immediate is the offset to the location of th...
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
@ LAST_VALID_COND
Definition X86BaseInfo.h:94
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
StringMapEntry< std::atomic< TypeEntryBody * > > TypeEntry
Definition TypePool.h:27
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
static bool isGlobalStubReference(unsigned char TargetFlag)
isGlobalStubReference - Return true if the specified TargetFlag operand is a reference to a stub for ...
static bool isGlobalRelativeToPICBase(unsigned char TargetFlag)
isGlobalRelativeToPICBase - Return true if the specified global value reference is relative to a 32-b...
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:293
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:155
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
static const MachineInstrBuilder & addConstantPoolReference(const MachineInstrBuilder &MIB, unsigned CPI, Register GlobalBaseReg, unsigned char OpFlags)
addConstantPoolReference - This function is used to add a reference to the base of a constant value s...
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
static const MachineInstrBuilder & addFullAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:313
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
static const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, int Offset)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
static const MachineInstrBuilder & addDirectMem(const MachineInstrBuilder &MIB, Register Reg)
addDirectMem - This function is used to add a direct memory reference to the current instruction – th...
InstructionSelector * createX86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &, const X86RegisterBankInfo &)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
X86AddressMode - This struct holds a generalized full x86 address mode.
const GlobalValue * GV
union llvm::X86AddressMode::BaseUnion Base
enum llvm::X86AddressMode::@202116273335065351270200035056227005202106004277 BaseType