//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove after supported by Tablegen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;
  // TODO: remove once p0<->i32/i64 matching is available
  unsigned getPtrLoadStoreOp(const LLT &Ty, const RegisterBank &RB,
                             unsigned Opc) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUAddSub(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF);
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF);
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const Register DstReg,
                          const TargetRegisterClass *DstRC,
                          const Register SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMulDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
                       MachineFunction &MF) const;
  bool selectSelect(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;

  ComplexRendererFns selectAddr(MachineOperand &Root) const;

  // Emit an insert-subreg instruction and insert it before MachineInstr &I.
  bool emitInsertSubreg(Register DstReg, Register SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // Emit an extract-subreg instruction and insert it before MachineInstr &I.
  bool emitExtractSubreg(Register DstReg, Register SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, Register Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  if (RB.getID() == X86::PSRRegBankID) {
    if (Ty.getSizeInBits() == 80)
      return &X86::RFP80RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::RFP64RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::RFP32RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, Register Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
  assert(Reg.isPhysical());
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// FIXME: We need some sort of API in RBI/TRI to allow generic code to
// constrain operands of simple instructions given a TargetRegisterClass
// and LLT
bool X86InstructionSelector::selectDebugInstr(MachineInstr &I,
                                              MachineRegisterInfo &MRI) const {
  for (MachineOperand &MO : I.operands()) {
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg)
      continue;
    if (Reg.isPhysical())
      continue;
    LLT Ty = MRI.getType(Reg);
    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
    const TargetRegisterClass *RC =
        dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
    if (!RC) {
      const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
      RC = getRegClass(Ty, RB);
      if (!RC) {
        LLVM_DEBUG(
            dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
        break;
      }
    }
    RBI.constrainGenericRegister(Reg, *RC, MRI);
  }

  return true;
}

// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    // Special case GPR16 -> XMM
    if (SrcSize == 16 && SrcRegBank.getID() == X86::GPRRegBankID &&
        (DstRegBank.getID() == X86::VECRRegBankID)) {

      const DebugLoc &DL = I.getDebugLoc();

      // Any-extend GPR16 -> GPR32.
      Register ExtReg = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::SUBREG_TO_REG),
              ExtReg)
          .addReg(SrcReg)
          .addImm(X86::sub_16bit);

      // Copy GR32 -> XMM.
      BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), DstReg)
          .addReg(ExtReg);

      I.eraseFromParent();
    }

    // Special case XMM -> GR16
    if (DstSize == 16 && DstRegBank.getID() == X86::GPRRegBankID &&
        (SrcRegBank.getID() == X86::VECRRegBankID)) {

      const DebugLoc &DL = I.getDebugLoc();

      // Move XMM to a GR32 register.
      Register Temp32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), Temp32)
          .addReg(SrcReg);

      // Extract the lower 16 bits.
      if (Register Dst32 = TRI.getMatchingSuperReg(DstReg, X86::sub_16bit,
                                                   &X86::GR32RegClass)) {
        // Optimization for a physical Dst (e.g. AX): copy to EAX directly.
        BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), Dst32)
            .addReg(Temp32);
      } else {
        // Handle the case where there is no matching super-register.
        BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), DstReg)
            .addReg(Temp32, {}, X86::sub_16bit);
      }

      I.eraseFromParent();
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Change the physical register to perform the truncation.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
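
// Illustrative sketch (not part of the original file): the physical-def path
// above widens an ABI-lowering copy whose destination register is wider than
// its virtual source. Schematically, with hypothetical registers:
//
//   $rax = COPY %0:gpr(s32)
//     -->
//   %1:gr64 = SUBREG_TO_REG %0, %subreg.sub_32bit
//   $rax = COPY %1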

bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode) && !I.isPreISelOpcode()) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    if (I.isDebugInstr())
      return selectDebugInstr(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, *CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_FREEZE:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_USUBO:
    return selectUAddSub(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectMulDivRem(I, MRI, MF);
  case TargetOpcode::G_SELECT:
    return selectSelect(I, MRI, MF);
  }

  return false;
}

unsigned X86InstructionSelector::getPtrLoadStoreOp(const LLT &Ty,
                                                   const RegisterBank &RB,
                                                   unsigned Opc) const {
  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "Only G_STORE and G_LOAD are expected for selection");
  if (Ty.isPointer() && X86::GPRRegBankID == RB.getID()) {
    bool IsLoad = (Opc == TargetOpcode::G_LOAD);
    switch (Ty.getSizeInBits()) {
    default:
      break;
    case 32:
      return IsLoad ? X86::MOV32rm : X86::MOV32mr;
    case 64:
      return IsLoad ? X86::MOV64rm : X86::MOV64mr;
    }
  }
  return Opc;
}
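
// Illustrative sketch (not part of the original file): until p0 <-> i32/i64
// matching is available in the imported patterns, a pointer-typed load such
// as
//
//   %val:gpr(p0) = G_LOAD %addr(p0) :: (load (p0))
//
// is selected with the same-width integer opcode, i.e. MOV64rm on 64-bit
// targets and MOV32rm on 32-bit ones.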

unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt
                       : HasAVX  ? X86::VMOVSSrm_alt
                                 : X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr
                       : HasAVX  ? X86::VMOVSSmr
                                 : X86::MOVSSmr);
    if (X86::PSRRegBankID == RB.getID())
      return Isload ? X86::LD_Fp32m : X86::ST_Fp32m;
  } else if (Ty == LLT::scalar(64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt
                       : HasAVX  ? X86::VMOVSDrm_alt
                                 : X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr
                       : HasAVX  ? X86::VMOVSDmr
                                 : X86::MOVSDmr);
    if (X86::PSRRegBankID == RB.getID())
      return Isload ? X86::LD_Fp64m : X86::ST_Fp64m;
  } else if (Ty == LLT::scalar(80)) {
    return Isload ? X86::LD_Fp80m : X86::ST_FpP80m;
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                       : HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX
                       : HasAVX    ? X86::VMOVAPSrm
                                   : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                       : HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX
                       : HasAVX    ? X86::VMOVAPSmr
                                   : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                       : HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX
                       : HasAVX    ? X86::VMOVUPSrm
                                   : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                       : HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX
                       : HasAVX    ? X86::VMOVUPSmr
                                   : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                       : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                   : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                       : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                   : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                       : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                   : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                       : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                   : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}
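
// Illustrative sketch (not part of the original file): on a plain SSE target
// (no AVX/AVX512), a 128-bit vector G_LOAD maps to MOVAPSrm when the memory
// operand is known to be at least 16-byte aligned and to MOVUPSrm otherwise;
// the AVX/AVX512/VLX ladders above pick the V/Z-prefixed equivalents.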

// Fill in an address from the given instruction.
static bool X86SelectAddress(MachineInstr &I, const X86TargetMachine &TM,
                             MachineRegisterInfo &MRI,
                             const X86Subtarget &STI, X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  switch (I.getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FRAME_INDEX:
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return true;
  case TargetOpcode::G_PTR_ADD: {
    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return true;
      }
    }
    break;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      return false; // TODO: we don't support TLS yet.
    }
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;
    AM.GV = GV;
    AM.GVOpFlags = STI.classifyGlobalReference(GV);

    // TODO: The ABI requires an extra load. Not supported yet.
    if (isGlobalStubReference(AM.GVOpFlags))
      return false;

    // TODO: This reference is relative to the PIC base. Not supported yet.
    if (isGlobalRelativeToPICBase(AM.GVOpFlags))
      return false;

    if (STI.isPICStyleRIPRel()) {
      // Use rip-relative addressing.
      assert(AM.Base.Reg == 0 && AM.IndexReg == 0 &&
             "RIP-relative addresses can't have additional register operands");
      AM.Base.Reg = X86::RIP;
    }
    return true;
  }
  case TargetOpcode::G_CONSTANT_POOL: {
    // TODO: Need a separate move for the Large code model.
    if (TM.getCodeModel() == CodeModel::Large)
      return false;

    AM.GVOpFlags = STI.classifyLocalReference(nullptr);
    if (AM.GVOpFlags == X86II::MO_GOTOFF)
      AM.Base.Reg = STI.getInstrInfo()->getGlobalBaseReg(I.getMF());
    else if (STI.is64Bit())
      AM.Base.Reg = X86::RIP;
    AM.CP = true;
    AM.Disp = I.getOperand(1).getIndex();
    return true;
  }
  }
  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
  return true;
}
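
// Illustrative sketch (not part of the original file): a small constant
// offset folds into the displacement of the X86AddressMode, so
//
//   %ptr:gpr(p0)  = G_PTR_ADD %base, 8
//   %val:gpr(s32) = G_LOAD %ptr(p0)
//
// selects to a MOV32rm with Base = %base and Disp = 8; the constant must fit
// in a signed 32-bit displacement, otherwise the fold is rejected above.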

bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "Only G_STORE and G_LOAD are expected for selection");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Note: for unordered operations, we rely on the fact that the
    // appropriate MMO is already on the instruction we're mutating, and thus
    // we don't need to make any changes. So long as we select an opcode which
    // is capable of loading or storing the appropriate size atomically, the
    // rest of the backend is required to respect the MMO state.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
    if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
      LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getPtrLoadStoreOp(Ty, RB, Opc);
  if (NewOpc == Opc)
    return false;

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  MachineInstr *Ptr = MRI.getVRegDef(I.getOperand(1).getReg());

  X86AddressMode AM;
  if (!X86SelectAddress(*Ptr, TM, MRI, STI, AM))
    return false;

  if (Opc == TargetOpcode::G_LOAD) {
    I.removeOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.removeOperand(1);
    I.removeOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  I.addImplicitDefUseOperands(MF);
  return true;
}
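
// Illustrative sketch (not part of the original file): G_STORE lists the
// value first and the address second, while the X86 memory form takes the
// five address operands (base, scale, index, disp, segment) first and the
// value last, hence the operand reshuffling above:
//
//   G_STORE %val(s32), %ptr(p0)
//     -->
//   MOV32mr %base, 1, $noreg, 0, $noreg, %val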

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX ||
          Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to compute both frame-index and GEP addresses.
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  return true;
}
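
// Illustrative sketch (not part of the original file): both opcodes become an
// effective-address computation, e.g. on x86-64:
//
//   %p:gpr(p0) = G_FRAME_INDEX %stack.0
//     -->   %p:gr64 = LEA64r %stack.0, 1, $noreg, 0, $noreg
//   %q:gpr(p0) = G_PTR_ADD %p, %off
//     -->   %q:gr64 = LEA64r %p, 1, %off, 0, $noreg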

bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  X86AddressMode AM;
  if (!X86SelectAddress(I, TM, MRI, STI, AM))
    return false;

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.removeOperand(1);
  addFullAddress(MIB, AM);

  constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  return true;
}

bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  return true;
}

// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC lives in a floating-point register class and
// SrcRC lives in a 128-bit vector class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const Register DstReg,
    const TargetRegisterClass *DstRC, const Register SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this truncates a value that lives in a vector register class into a
  // floating-point register class, just replace it with a copy, as we can
  // select that as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
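
// Illustrative sketch (not part of the original file): a GPR-to-GPR
// truncation needs no instruction at all; it becomes a sub-register copy:
//
//   %lo:gpr(s32) = G_TRUNC %x:gpr(s64)
//     -->
//   %lo:gr32 = COPY %x.sub_32bit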

bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri32;
  else
    return false;

  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
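
// Illustrative sketch (not part of the original file): widening an s1 to s32
// first places the source byte into a 32-bit register via IMPLICIT_DEF +
// INSERT_SUBREG, then masks everything above bit 0:
//
//   %t0:gr32 = IMPLICIT_DEF
//   %t1:gr32 = INSERT_SUBREG %t0, %src:gr8, %subreg.sub_8bit
//   %dst:gr32 = AND32ri %t1, 1, implicit-def $eflags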

bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this any-extends a value that lives in a floating-point register class
  // into a vector register class, just replace it with a copy, as we can
  // select that as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  Register LHS = I.getOperand(2).getReg();
  Register RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               I.getOperand(0).getReg())
           .addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
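
// Illustrative sketch (not part of the original file): an integer compare
// splits into a flag-producing CMP and a flag-consuming SETcc, e.g.
//
//   %c:gpr(s8) = G_ICMP intpred(ult), %a:gpr(s32), %b:gpr(s32)
//     -->
//   CMP32rr %a, %b, implicit-def $eflags
//   %c:gr8 = SETCCr 2, implicit $eflags   ; 2 == X86::COND_B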

bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  Register LhsReg = I.getOperand(2).getReg();
  Register RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::COND_E, X86::COND_NP, X86::AND8rr},
      {X86::COND_NE, X86::COND_P, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  assert((LhsReg.isVirtual() && RhsReg.isVirtual()) &&
         "Both arguments of FCMP need to be virtual!");
  auto *LhsBank = RBI.getRegBank(LhsReg, MRI, TRI);
  [[maybe_unused]] auto *RhsBank = RBI.getRegBank(RhsReg, MRI, TRI);
  assert((LhsBank == RhsBank) &&
         "Both banks assigned to FCMP arguments need to be same!");

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr32
                                                  : X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr64
                                                  : X86::UCOMISDrr;
    break;
  case 80:
    OpCmp = X86::UCOM_FpIr80;
    break;
  }

  Register ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg1)
                              .addImm(SETFOpc[0]);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg2)
                              .addImm(SETFOpc[1]);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                               TII.get(X86::SETCCr), ResultReg)
                           .addImm(CC);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
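
// Illustrative sketch (not part of the original file): FCMP_OEQ is true only
// if the operands compare equal *and* are ordered, so two SETCCs are
// combined:
//
//   UCOMISSrr %lhs, %rhs, implicit-def $eflags
//   %f1:gr8 = SETCCr 4, implicit $eflags    ; X86::COND_E
//   %f2:gr8 = SETCCr 11, implicit $eflags   ; X86::COND_NP
//   %res:gr8 = AND8rr %f1, %f2, implicit-def $eflags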

bool X86InstructionSelector::selectUAddSub(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE ||
          I.getOpcode() == TargetOpcode::G_UADDO ||
          I.getOpcode() == TargetOpcode::G_USUBE ||
          I.getOpcode() == TargetOpcode::G_USUBO) &&
         "unexpected instruction");

  auto &CarryMI = cast<GAddSubCarryOut>(I);

  const Register DstReg = CarryMI.getDstReg();
  const Register CarryOutReg = CarryMI.getCarryOutReg();
  const Register Op0Reg = CarryMI.getLHSReg();
  const Register Op1Reg = CarryMI.getRHSReg();
  bool IsSub = CarryMI.isSub();

  const LLT DstTy = MRI.getType(DstReg);
  assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");

  // TODO: Handle immediate argument variants?
  unsigned OpADC, OpADD, OpSBB, OpSUB;
  switch (DstTy.getSizeInBits()) {
  case 8:
    OpADC = X86::ADC8rr;
    OpADD = X86::ADD8rr;
    OpSBB = X86::SBB8rr;
    OpSUB = X86::SUB8rr;
    break;
  case 16:
    OpADC = X86::ADC16rr;
    OpADD = X86::ADD16rr;
    OpSBB = X86::SBB16rr;
    OpSUB = X86::SUB16rr;
    break;
  case 32:
    OpADC = X86::ADC32rr;
    OpADD = X86::ADD32rr;
    OpSBB = X86::SBB32rr;
    OpSUB = X86::SUB32rr;
    break;
  case 64:
    OpADC = X86::ADC64rr;
    OpADD = X86::ADD64rr;
    OpSBB = X86::SBB64rr;
    OpSUB = X86::SUB64rr;
    break;
  default:
    llvm_unreachable("selectUAddSub unsupported type.");
  }

  const RegisterBank &CarryRB = *RBI.getRegBank(CarryOutReg, MRI, TRI);
  const TargetRegisterClass *CarryRC =
      getRegClass(MRI.getType(CarryOutReg), CarryRB);

  unsigned Opcode = IsSub ? OpSUB : OpADD;

  // G_UADDE/G_USUBE - find the CarryIn def instruction.
  if (auto CarryInMI = dyn_cast<GAddSubCarryInOut>(&I)) {
    Register CarryInReg = CarryInMI->getCarryInReg();
    MachineInstr *Def = MRI.getVRegDef(CarryInReg);
    while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
      CarryInReg = Def->getOperand(1).getReg();
      Def = MRI.getVRegDef(CarryInReg);
    }

    // TODO: handle more CF-generating instructions.
    if (Def->getOpcode() == TargetOpcode::G_UADDE ||
        Def->getOpcode() == TargetOpcode::G_UADDO ||
        Def->getOpcode() == TargetOpcode::G_USUBE ||
        Def->getOpcode() == TargetOpcode::G_USUBO) {
      // The carry was set by a previous ADD/SUB.

      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::CMP8ri))
          .addReg(CarryInReg)
          .addImm(1);

      if (!RBI.constrainGenericRegister(CarryInReg, *CarryRC, MRI))
        return false;

      Opcode = IsSub ? OpSBB : OpADC;
    } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
      // The carry is a constant; only 0 is supported.
      if (*val != 0)
        return false;

      Opcode = IsSub ? OpSUB : OpADD;
    } else
      return false;
  }

  MachineInstr &Inst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), CarryOutReg)
      .addImm(X86::COND_B);

  constrainSelectedInstRegOperands(Inst, TII, TRI, RBI);
  if (!RBI.constrainGenericRegister(CarryOutReg, *CarryRC, MRI))
    return false;

  I.eraseFromParent();
  return true;
}
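
// Illustrative sketch (not part of the original file): a wide add legalized
// into a G_UADDO / G_UADDE pair chains through EFLAGS; the carry byte
// produced by SETCCr is compared back into flags before the ADC consumes it:
//
//   %lo:gr64 = ADD64rr %a0, %b0, implicit-def $eflags
//   %c:gr8   = SETCCr 2, implicit $eflags    ; X86::COND_B, carry out
//   CMP8ri %c, 1, implicit-def $eflags
//   %hi:gr64 = ADC64rr %a1, %b1, implicit-def $eflags, implicit $eflags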

bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not a subvector extract.

  if (Index == 0) {
    // Replace with a sub-register extract copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32X4Z256rri));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rri));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32X4Zrri));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64X4Zrri));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  return true;
}

bool X86InstructionSelector::emitExtractSubreg(Register DstReg, Register SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, {}, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(Register DstReg, Register SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const Register InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not a subvector insert.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace with a sub-register copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32X4Z256rri));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rri));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32X4Zrri));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64X4Zrri));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  return true;
}

bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  Register SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst))
      return false;
  }

  I.eraseFromParent();
  return true;
}
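
// Illustrative sketch (not part of the original file): an unmerge is split
// into one G_EXTRACT per result, each of which is then selected recursively:
//
//   %lo(<4 x s32>), %hi(<4 x s32>) = G_UNMERGE_VALUES %v(<8 x s32>)
//     -->
//   %lo = G_EXTRACT %v(<8 x s32>), 0
//   %hi = G_EXTRACT %v(<8 x s32>), 128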

bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split to inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first source, use emitInsertSubreg.
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                               .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB)
      .addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
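
// Illustrative sketch (not part of the original file): the boolean condition
// is tested against bit 0 and a jump-if-not-zero is emitted:
//
//   G_BRCOND %cond(s1), %bb.2
//     -->
//   TEST8ri %cond, 1, implicit-def $eflags
//   JCC_1 %bb.2, 5, implicit $eflags   ; 5 == X86::COND_NE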

bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const Register DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  const auto &DL = MF.getDataLayout();
  Align Alignment = DL.getPrefTypeAlign(CFP->getType());
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc =
      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under the X86-64 non-small code model, GV (and friends) are 64 bits, so
    // they cannot be folded into immediate fields.

    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        LLT::pointer(0, DL.getPointerSizeInBits()), Alignment);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAGISel, the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
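
// Illustrative sketch (not part of the original file): under the small code
// model on x86-64, an FP constant becomes a RIP-relative constant-pool load
// (shown here for plain SSE):
//
//   %d:vecr(s64) = G_FCONSTANT double 1.0
//     -->
//   %d:fr64 = MOVSDrm_alt $rip, 1, $noreg, %const.0, $noreg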

bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  Register DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

bool X86InstructionSelector::selectMulDivRem(MachineInstr &I,
                                             MachineRegisterInfo &MRI,
                                             MachineFunction &MF) const {
  // The implementation of this function is adapted from X86FastISel.
  assert((I.getOpcode() == TargetOpcode::G_MUL ||
          I.getOpcode() == TargetOpcode::G_SMULH ||
          I.getOpcode() == TargetOpcode::G_UMULH ||
          I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 7;   // SDiv/SRem/UDiv/URem/Mul/SMulH/UMulH
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;

  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg. (A schematic
  // example follows the table below.)
  const static struct MulDivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct MulDivRemResult {
      unsigned OpMulDivRem;  // The specific MUL/DIV opcode to use.
      unsigned OpSignExtend; // Opcode for sign-extending lowreg into
                             // highreg, or copying a zero into highreg.
      unsigned OpCopy;       // Opcode for copying dividend into lowreg, or
                             // zero/sign-extending into lowreg for i8.
      unsigned ResultReg;    // Register containing the desired result.
      bool IsOpSigned;       // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S}, // Mul
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SMulH
           {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH, U},  // UMulH
       }},                                                // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},      // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},      // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U},   // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U},   // URem
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::AX, S},  // Mul
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::DX, S},  // SMulH
           {X86::MUL16r, X86::MOV32r0, Copy, X86::DX, U},   // UMulH
       }},                                                  // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},     // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},     // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U},  // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U},  // URem
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EAX, S}, // Mul
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EDX, S}, // SMulH
           {X86::MUL32r, X86::MOV32r0, Copy, X86::EDX, U},  // UMulH
       }},                                                  // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},     // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},     // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U},  // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U},  // URem
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RAX, S}, // Mul
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RDX, S}, // SMulH
           {X86::MUL64r, X86::MOV32r0, Copy, X86::RDX, U},  // UMulH
       }},                                                  // i64
  };

1800 auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const MulDivRemEntry &El) {
1801 return El.SizeInBits == RegTy.getSizeInBits();
1802 });
1803 if (OpEntryIt == std::end(OpTable))
1804 return false;
1805
1806 unsigned OpIndex;
1807 switch (I.getOpcode()) {
1808 default:
1809 llvm_unreachable("Unexpected mul/div/rem opcode");
1810 case TargetOpcode::G_SDIV:
1811 OpIndex = 0;
1812 break;
1813 case TargetOpcode::G_SREM:
1814 OpIndex = 1;
1815 break;
1816 case TargetOpcode::G_UDIV:
1817 OpIndex = 2;
1818 break;
1819 case TargetOpcode::G_UREM:
1820 OpIndex = 3;
1821 break;
1822 case TargetOpcode::G_MUL:
1823 OpIndex = 4;
1824 break;
1825 case TargetOpcode::G_SMULH:
1826 OpIndex = 5;
1827 break;
1828 case TargetOpcode::G_UMULH:
1829 OpIndex = 6;
1830 break;
1831 }
1832
1833 const MulDivRemEntry &TypeEntry = *OpEntryIt;
1834 const MulDivRemEntry::MulDivRemResult &OpEntry =
1835 TypeEntry.ResultTable[OpIndex];
1836
1837 const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
1838 if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
1839 !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
1840 !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
1841 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
1842 << " operand\n");
1843 return false;
1844 }
1845
1846 // Move op1 into low-order input register.
1847 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
1848 TypeEntry.LowInReg)
1849 .addReg(Op1Reg);
1850
1851 // Zero-extend or sign-extend into high-order input register.
1852 if (OpEntry.OpSignExtend) {
1853 if (OpEntry.IsOpSigned)
1854 BuildMI(*I.getParent(), I, I.getDebugLoc(),
1855 TII.get(OpEntry.OpSignExtend));
1856 else {
1857 Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
1858 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
1859 Zero32);
1860
1861 // Copy the zero into the appropriate sub/super/identical physical
1862 // register. Unfortunately the operations needed are not uniform enough
1863 // to fit neatly into the table above.
1864 if (RegTy.getSizeInBits() == 16) {
1865 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
1866 TypeEntry.HighInReg)
1867 .addReg(Zero32, {}, X86::sub_16bit);
1868 } else if (RegTy.getSizeInBits() == 32) {
1869 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
1870 TypeEntry.HighInReg)
1871 .addReg(Zero32);
1872 } else if (RegTy.getSizeInBits() == 64) {
1873 BuildMI(*I.getParent(), I, I.getDebugLoc(),
1874 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1875 .addReg(Zero32)
1876 .addImm(X86::sub_32bit);
1877 }
1878 }
1879 }
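// For illustration: in the unsigned 64-bit case the high half is cleared by
// materializing a 32-bit zero and widening it (on x86-64, writes to a 32-bit
// register implicitly zero the upper 32 bits), roughly:
//   %zero32:gr32 = MOV32r0
//   $rdx = SUBREG_TO_REG %zero32, sub_32bit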
1880
1881 // Generate the DIV/IDIV/MUL/IMUL instruction.
1882 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpMulDivRem))
1883 .addReg(Op2Reg);
1884
1885 // For i8 remainder, we can't reference AH directly, as we'll end
1886 // up with bogus copies like %r9b = COPY %ah. Reference AX
1887 // instead to avoid AH references in an instruction that needs a REX prefix.
1888 //
1889 // The current assumption of the fast register allocator is that isel
1890 // won't generate explicit references to the GR8_NOREX registers. If
1891 // the allocator and/or the backend get enhanced to be more robust in
1892 // that regard, this can be, and should be, removed.
1893 if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
1894 Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
1895 Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
1896 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
1897 .addReg(X86::AX);
1898
1899 // Shift AX right by 8 bits instead of using AH.
1900 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
1901 ResultSuperReg)
1902 .addReg(SourceSuperReg)
1903 .addImm(8);
1904
1905 // Now reference the 8-bit subreg of the result.
1906 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
1907 DstReg)
1908 .addReg(ResultSuperReg, {}, X86::sub_8bit);
1909 } else {
1910 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
1911 DstReg)
1912 .addReg(OpEntry.ResultReg);
1913 }
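// For illustration (names made up): an i8 G_UREM on x86-64 therefore ends as:
//   %src16:gr16 = COPY $ax            (the remainder lives in AH after DIV8r)
//   %res16:gr16 = SHR16ri %src16, 8   (shift AH's bits into the low byte)
//   %dst:gr8 = COPY %res16.sub_8bit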
1914 I.eraseFromParent();
1915
1916 return true;
1917}
1918
1919bool X86InstructionSelector::selectSelect(MachineInstr &I,
1920 MachineRegisterInfo &MRI,
1921 MachineFunction &MF) const {
1922 GSelect &Sel = cast<GSelect>(I);
1923 Register DstReg = Sel.getReg(0);
1924 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::TEST32rr))
1925 .addReg(Sel.getCondReg())
1926 .addReg(Sel.getCondReg());
1927
1928 unsigned OpCmp;
1929 LLT Ty = MRI.getType(DstReg);
1930 if (Ty.getSizeInBits() == 80) {
1931 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::CMOVE_Fp80),
1932 DstReg)
1933 .addReg(Sel.getTrueReg())
1934 .addReg(Sel.getFalseReg());
1935 } else {
1936 switch (Ty.getSizeInBits()) {
1937 default:
1938 return false;
1939 case 8:
1940 OpCmp = X86::CMOV_GR8;
1941 break;
1942 case 16:
1943 OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;
1944 break;
1945 case 32:
1946 OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;
1947 break;
1948 case 64:
1949 assert(STI.is64Bit() && STI.canUseCMOV());
1950 OpCmp = X86::CMOV64rr;
1951 break;
1952 }
1953 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(OpCmp), DstReg)
1954 .addReg(Sel.getTrueReg())
1955 .addReg(Sel.getFalseReg())
1956 .addImm(X86::COND_E);
1957 }
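// For illustration (names made up): a scalar s32 %d = G_SELECT %c, %t, %f
// with CMOV available becomes roughly:
//   TEST32rr %c, %c                    (sets ZF when the condition is 0)
//   %d = CMOV32rr %t, %f, X86::COND_E  (picks %f if ZF, i.e. %c == 0)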
1958 const TargetRegisterClass *DstRC = getRegClass(Ty, DstReg, MRI);
1959 if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1960 LLVM_DEBUG(dbgs() << "Failed to constrain CMOV\n");
1961 return false;
1962 }
1963
1964 Sel.eraseFromParent();
1965 return true;
1966}
1967
1968InstructionSelector::ComplexRendererFns
1969X86InstructionSelector::selectAddr(MachineOperand &Root) const {
1970 MachineInstr *MI = Root.getParent();
1971 MachineIRBuilder MIRBuilder(*MI);
1972
1973 MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
1974 MachineInstr *Ptr = MRI.getVRegDef(Root.getReg());
1975 X86AddressMode AM;
1976 X86SelectAddress(*Ptr, TM, MRI, STI, AM);
1977
1978 if (AM.IndexReg)
1979 return std::nullopt;
1980
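// For illustration: the five renderers below fill the standard x86 memory
// operand quintuple (base, scale, index, displacement, segment), so a
// selected load could look like (names made up):
//   %v:gr32 = MOV32rm %base, 1, $noreg, 0, $noreg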
1981 return {// Base
1982 {[=](MachineInstrBuilder &MIB) {
1983 if (AM.BaseType == X86AddressMode::RegBase)
1984 MIB.addUse(AM.Base.Reg);
1985 else {
1986 assert(AM.BaseType == X86AddressMode::FrameIndexBase &&
1987 "Unknown type of address base");
1988 MIB.addFrameIndex(AM.Base.FrameIndex);
1989 }
1990 },
1991 // Scale
1992 [=](MachineInstrBuilder &MIB) { MIB.addImm(AM.Scale); },
1993 // Index
1994 [=](MachineInstrBuilder &MIB) { MIB.addUse(0); },
1995 // Disp
1996 [=](MachineInstrBuilder &MIB) {
1997 if (AM.GV)
1998 MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
1999 else if (AM.CP)
2000 MIB.addConstantPoolIndex(AM.Disp, 0, AM.GVOpFlags);
2001 else
2002 MIB.addImm(AM.Disp);
2003 },
2004 // Segment
2005 [=](MachineInstrBuilder &MIB) { MIB.addUse(0); }}};
2006}
2007
2008 InstructionSelector *
2009 llvm::createX86InstructionSelector(const X86TargetMachine &TM,
2010 const X86Subtarget &Subtarget,
2011 const X86RegisterBankInfo &RBI) {
2012 return new X86InstructionSelector(TM, Subtarget, RBI);
2013}