// Extracted from the LLVM project (LLVM 22.0.0git):
// llvm/lib/Target/X86/X86InstructionSelector.cpp
1//===- X86InstructionSelector.cpp -----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// X86.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>
49
50#define DEBUG_TYPE "X86-isel"
51
52using namespace llvm;
53
54namespace {
55
56#define GET_GLOBALISEL_PREDICATE_BITSET
57#include "X86GenGlobalISel.inc"
58#undef GET_GLOBALISEL_PREDICATE_BITSET
59
60class X86InstructionSelector : public InstructionSelector {
61public:
62 X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
63 const X86RegisterBankInfo &RBI);
64
65 bool select(MachineInstr &I) override;
66 static const char *getName() { return DEBUG_TYPE; }
67
68private:
69 /// tblgen-erated 'select' implementation, used as the initial selector for
70 /// the patterns that don't require complex C++.
71 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
72
73 // TODO: remove after supported by Tablegen-erated instruction selection.
74 unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
75 Align Alignment) const;
76 // TODO: remove once p0<->i32/i64 matching is available
77 unsigned getPtrLoadStoreOp(const LLT &Ty, const RegisterBank &RB,
78 unsigned Opc) const;
79
81 MachineFunction &MF) const;
82 bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
83 MachineFunction &MF) const;
84 bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
85 MachineFunction &MF) const;
86 bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
87 MachineFunction &MF) const;
88 bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
89 MachineFunction &MF) const;
90 bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
91 MachineFunction &MF) const;
92 bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
93 MachineFunction &MF) const;
94 bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
95 MachineFunction &MF) const;
96 bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
97 MachineFunction &MF) const;
98 bool selectUAddSub(MachineInstr &I, MachineRegisterInfo &MRI,
99 MachineFunction &MF) const;
103 MachineFunction &MF);
105 MachineFunction &MF);
106 bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
107 MachineFunction &MF) const;
108 bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
109 MachineFunction &MF) const;
110 bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
111 MachineFunction &MF) const;
112 bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
113 const Register DstReg,
114 const TargetRegisterClass *DstRC,
115 const Register SrcReg,
116 const TargetRegisterClass *SrcRC) const;
117 bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
118 MachineFunction &MF) const;
119 bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
120 bool selectMulDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
121 MachineFunction &MF) const;
122 bool selectSelect(MachineInstr &I, MachineRegisterInfo &MRI,
123 MachineFunction &MF) const;
124
125 ComplexRendererFns selectAddr(MachineOperand &Root) const;
126
127 // emit insert subreg instruction and insert it before MachineInstr &I
128 bool emitInsertSubreg(Register DstReg, Register SrcReg, MachineInstr &I,
130 // emit extract subreg instruction and insert it before MachineInstr &I
131 bool emitExtractSubreg(Register DstReg, Register SrcReg, MachineInstr &I,
133
134 const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
136 MachineRegisterInfo &MRI) const;
137
138 const X86TargetMachine &TM;
139 const X86Subtarget &STI;
140 const X86InstrInfo &TII;
141 const X86RegisterInfo &TRI;
142 const X86RegisterBankInfo &RBI;
143
144#define GET_GLOBALISEL_PREDICATES_DECL
145#include "X86GenGlobalISel.inc"
146#undef GET_GLOBALISEL_PREDICATES_DECL
147
148#define GET_GLOBALISEL_TEMPORARIES_DECL
149#include "X86GenGlobalISel.inc"
150#undef GET_GLOBALISEL_TEMPORARIES_DECL
151};
152
153} // end anonymous namespace
154
155#define GET_GLOBALISEL_IMPL
156#include "X86GenGlobalISel.inc"
157#undef GET_GLOBALISEL_IMPL
158
159X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
160 const X86Subtarget &STI,
161 const X86RegisterBankInfo &RBI)
162 : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
163 RBI(RBI),
165#include "X86GenGlobalISel.inc"
168#include "X86GenGlobalISel.inc"
170{
171}
172
173// FIXME: This should be target-independent, inferred from the types declared
174// for each class in the bank.
176X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
177 if (RB.getID() == X86::GPRRegBankID) {
178 if (Ty.getSizeInBits() <= 8)
179 return &X86::GR8RegClass;
180 if (Ty.getSizeInBits() == 16)
181 return &X86::GR16RegClass;
182 if (Ty.getSizeInBits() == 32)
183 return &X86::GR32RegClass;
184 if (Ty.getSizeInBits() == 64)
185 return &X86::GR64RegClass;
186 }
187 if (RB.getID() == X86::VECRRegBankID) {
188 if (Ty.getSizeInBits() == 16)
189 return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
190 if (Ty.getSizeInBits() == 32)
191 return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
192 if (Ty.getSizeInBits() == 64)
193 return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
194 if (Ty.getSizeInBits() == 128)
195 return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
196 if (Ty.getSizeInBits() == 256)
197 return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
198 if (Ty.getSizeInBits() == 512)
199 return &X86::VR512RegClass;
200 }
201
202 if (RB.getID() == X86::PSRRegBankID) {
203 if (Ty.getSizeInBits() == 80)
204 return &X86::RFP80RegClass;
205 if (Ty.getSizeInBits() == 64)
206 return &X86::RFP64RegClass;
207 if (Ty.getSizeInBits() == 32)
208 return &X86::RFP32RegClass;
209 }
210
211 llvm_unreachable("Unknown RegBank!");
212}
213
214const TargetRegisterClass *
215X86InstructionSelector::getRegClass(LLT Ty, Register Reg,
216 MachineRegisterInfo &MRI) const {
217 const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
218 return getRegClass(Ty, RegBank);
219}
220
221static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
222 unsigned SubIdx = X86::NoSubRegister;
223 if (RC == &X86::GR32RegClass) {
224 SubIdx = X86::sub_32bit;
225 } else if (RC == &X86::GR16RegClass) {
226 SubIdx = X86::sub_16bit;
227 } else if (RC == &X86::GR8RegClass) {
228 SubIdx = X86::sub_8bit;
229 }
230
231 return SubIdx;
232}
233
235 assert(Reg.isPhysical());
236 if (X86::GR64RegClass.contains(Reg))
237 return &X86::GR64RegClass;
238 if (X86::GR32RegClass.contains(Reg))
239 return &X86::GR32RegClass;
240 if (X86::GR16RegClass.contains(Reg))
241 return &X86::GR16RegClass;
242 if (X86::GR8RegClass.contains(Reg))
243 return &X86::GR8RegClass;
244
245 llvm_unreachable("Unknown RegClass for PhysReg!");
246}
247
248// FIXME: We need some sort of API in RBI/TRI to allow generic code to
249// constrain operands of simple instructions given a TargetRegisterClass
250// and LLT
251bool X86InstructionSelector::selectDebugInstr(MachineInstr &I,
252 MachineRegisterInfo &MRI) const {
253 for (MachineOperand &MO : I.operands()) {
254 if (!MO.isReg())
255 continue;
256 Register Reg = MO.getReg();
257 if (!Reg)
258 continue;
259 if (Reg.isPhysical())
260 continue;
261 LLT Ty = MRI.getType(Reg);
262 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
263 const TargetRegisterClass *RC =
265 if (!RC) {
266 const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
267 RC = getRegClass(Ty, RB);
268 if (!RC) {
270 dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
271 break;
272 }
273 }
275 }
276
277 return true;
278}
279
280// Set X86 Opcode and constrain DestReg.
281bool X86InstructionSelector::selectCopy(MachineInstr &I,
282 MachineRegisterInfo &MRI) const {
283 Register DstReg = I.getOperand(0).getReg();
284 const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
285 const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
286
287 Register SrcReg = I.getOperand(1).getReg();
288 const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
289 const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
290
291 if (DstReg.isPhysical()) {
292 assert(I.isCopy() && "Generic operators do not allow physical registers");
293
294 if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
295 DstRegBank.getID() == X86::GPRRegBankID) {
296
297 const TargetRegisterClass *SrcRC =
298 getRegClass(MRI.getType(SrcReg), SrcRegBank);
299 const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);
300
301 if (SrcRC != DstRC) {
302 // This case can be generated by ABI lowering, performe anyext
303 Register ExtSrc = MRI.createVirtualRegister(DstRC);
304 BuildMI(*I.getParent(), I, I.getDebugLoc(),
305 TII.get(TargetOpcode::SUBREG_TO_REG))
306 .addDef(ExtSrc)
307 .addImm(0)
308 .addReg(SrcReg)
309 .addImm(getSubRegIndex(SrcRC));
310
311 I.getOperand(1).setReg(ExtSrc);
312 }
313 }
314
315 // Special case GPR16 -> XMM
316 if (SrcSize == 16 && SrcRegBank.getID() == X86::GPRRegBankID &&
317 (DstRegBank.getID() == X86::VECRRegBankID)) {
318
319 const DebugLoc &DL = I.getDebugLoc();
320
321 // Any extend GPR16 -> GPR32
322 Register ExtReg = MRI.createVirtualRegister(&X86::GR32RegClass);
323 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::SUBREG_TO_REG),
324 ExtReg)
325 .addImm(0)
326 .addReg(SrcReg)
327 .addImm(X86::sub_16bit);
328
329 // Copy GR32 -> XMM
330 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), DstReg)
331 .addReg(ExtReg);
332
333 I.eraseFromParent();
334 }
335
336 // Special case XMM -> GR16
337 if (DstSize == 16 && DstRegBank.getID() == X86::GPRRegBankID &&
338 (SrcRegBank.getID() == X86::VECRRegBankID)) {
339
340 const DebugLoc &DL = I.getDebugLoc();
341
342 // Move XMM to GR32 register.
343 Register Temp32 = MRI.createVirtualRegister(&X86::GR32RegClass);
344 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), Temp32)
345 .addReg(SrcReg);
346
347 // Extract the lower 16 bits
348 if (Register Dst32 = TRI.getMatchingSuperReg(DstReg, X86::sub_16bit,
349 &X86::GR32RegClass)) {
350 // Optimization for Physical Dst (e.g. AX): Copy to EAX directly.
351 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), Dst32)
352 .addReg(Temp32);
353 } else {
354 // Handle if there is no super.
355 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), DstReg)
356 .addReg(Temp32, 0, X86::sub_16bit);
357 }
358
359 I.eraseFromParent();
360 }
361
362 return true;
363 }
364
365 assert((!SrcReg.isPhysical() || I.isCopy()) &&
366 "No phys reg on generic operators");
367 assert((DstSize == SrcSize ||
368 // Copies are a mean to setup initial types, the number of
369 // bits may not exactly match.
370 (SrcReg.isPhysical() &&
371 DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
372 "Copy with different width?!");
373
374 const TargetRegisterClass *DstRC =
375 getRegClass(MRI.getType(DstReg), DstRegBank);
376
377 if (SrcRegBank.getID() == X86::GPRRegBankID &&
378 DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
379 SrcReg.isPhysical()) {
380 // Change the physical register to performe truncate.
381
382 const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);
383
384 if (DstRC != SrcRC) {
385 I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
386 I.getOperand(1).substPhysReg(SrcReg, TRI);
387 }
388 }
389
390 // No need to constrain SrcReg. It will get constrained when
391 // we hit another of its use or its defs.
392 // Copies do not have constraints.
393 const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
394 if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
395 if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
396 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
397 << " operand\n");
398 return false;
399 }
400 }
401 I.setDesc(TII.get(X86::COPY));
402 return true;
403}
404
405bool X86InstructionSelector::select(MachineInstr &I) {
406 assert(I.getParent() && "Instruction should be in a basic block!");
407 assert(I.getParent()->getParent() && "Instruction should be in a function!");
408
409 MachineBasicBlock &MBB = *I.getParent();
410 MachineFunction &MF = *MBB.getParent();
411 MachineRegisterInfo &MRI = MF.getRegInfo();
412
413 unsigned Opcode = I.getOpcode();
414 if (!isPreISelGenericOpcode(Opcode) && !I.isPreISelOpcode()) {
415 // Certain non-generic instructions also need some special handling.
416
417 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
418 return false;
419
420 if (I.isCopy())
421 return selectCopy(I, MRI);
422
423 if (I.isDebugInstr())
424 return selectDebugInstr(I, MRI);
425
426 return true;
427 }
428
429 assert(I.getNumOperands() == I.getNumExplicitOperands() &&
430 "Generic instruction has unexpected implicit operands\n");
431
432 if (selectImpl(I, *CoverageInfo))
433 return true;
434
435 LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));
436
437 // TODO: This should be implemented by tblgen.
438 switch (I.getOpcode()) {
439 default:
440 return false;
441 case TargetOpcode::G_STORE:
442 case TargetOpcode::G_LOAD:
443 return selectLoadStoreOp(I, MRI, MF);
444 case TargetOpcode::G_PTR_ADD:
445 case TargetOpcode::G_FRAME_INDEX:
446 return selectFrameIndexOrGep(I, MRI, MF);
447 case TargetOpcode::G_GLOBAL_VALUE:
448 return selectGlobalValue(I, MRI, MF);
449 case TargetOpcode::G_CONSTANT:
450 return selectConstant(I, MRI, MF);
451 case TargetOpcode::G_FCONSTANT:
452 return materializeFP(I, MRI, MF);
453 case TargetOpcode::G_PTRTOINT:
454 case TargetOpcode::G_TRUNC:
455 return selectTruncOrPtrToInt(I, MRI, MF);
456 case TargetOpcode::G_INTTOPTR:
457 case TargetOpcode::G_FREEZE:
458 return selectCopy(I, MRI);
459 case TargetOpcode::G_ZEXT:
460 return selectZext(I, MRI, MF);
461 case TargetOpcode::G_ANYEXT:
462 return selectAnyext(I, MRI, MF);
463 case TargetOpcode::G_ICMP:
464 return selectCmp(I, MRI, MF);
465 case TargetOpcode::G_FCMP:
466 return selectFCmp(I, MRI, MF);
467 case TargetOpcode::G_UADDE:
468 case TargetOpcode::G_UADDO:
469 case TargetOpcode::G_USUBE:
470 case TargetOpcode::G_USUBO:
471 return selectUAddSub(I, MRI, MF);
472 case TargetOpcode::G_UNMERGE_VALUES:
473 return selectUnmergeValues(I, MRI, MF);
474 case TargetOpcode::G_MERGE_VALUES:
475 case TargetOpcode::G_CONCAT_VECTORS:
476 return selectMergeValues(I, MRI, MF);
477 case TargetOpcode::G_EXTRACT:
478 return selectExtract(I, MRI, MF);
479 case TargetOpcode::G_INSERT:
480 return selectInsert(I, MRI, MF);
481 case TargetOpcode::G_BRCOND:
482 return selectCondBranch(I, MRI, MF);
483 case TargetOpcode::G_IMPLICIT_DEF:
484 case TargetOpcode::G_PHI:
485 return selectImplicitDefOrPHI(I, MRI);
486 case TargetOpcode::G_MUL:
487 case TargetOpcode::G_SMULH:
488 case TargetOpcode::G_UMULH:
489 case TargetOpcode::G_SDIV:
490 case TargetOpcode::G_UDIV:
491 case TargetOpcode::G_SREM:
492 case TargetOpcode::G_UREM:
493 return selectMulDivRem(I, MRI, MF);
494 case TargetOpcode::G_SELECT:
495 return selectSelect(I, MRI, MF);
496 }
497
498 return false;
499}
500
501unsigned X86InstructionSelector::getPtrLoadStoreOp(const LLT &Ty,
502 const RegisterBank &RB,
503 unsigned Opc) const {
504 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
505 "Only G_STORE and G_LOAD are expected for selection");
506 if (Ty.isPointer() && X86::GPRRegBankID == RB.getID()) {
507 bool IsLoad = (Opc == TargetOpcode::G_LOAD);
508 switch (Ty.getSizeInBits()) {
509 default:
510 break;
511 case 32:
512 return IsLoad ? X86::MOV32rm : X86::MOV32mr;
513 case 64:
514 return IsLoad ? X86::MOV64rm : X86::MOV64mr;
515 }
516 }
517 return Opc;
518}
519
520unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
521 const RegisterBank &RB,
522 unsigned Opc,
523 Align Alignment) const {
524 bool Isload = (Opc == TargetOpcode::G_LOAD);
525 bool HasAVX = STI.hasAVX();
526 bool HasAVX512 = STI.hasAVX512();
527 bool HasVLX = STI.hasVLX();
528
529 if (Ty == LLT::scalar(8)) {
530 if (X86::GPRRegBankID == RB.getID())
531 return Isload ? X86::MOV8rm : X86::MOV8mr;
532 } else if (Ty == LLT::scalar(16)) {
533 if (X86::GPRRegBankID == RB.getID())
534 return Isload ? X86::MOV16rm : X86::MOV16mr;
535 } else if (Ty == LLT::scalar(32)) {
536 if (X86::GPRRegBankID == RB.getID())
537 return Isload ? X86::MOV32rm : X86::MOV32mr;
538 if (X86::VECRRegBankID == RB.getID())
539 return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :
540 HasAVX ? X86::VMOVSSrm_alt :
541 X86::MOVSSrm_alt)
542 : (HasAVX512 ? X86::VMOVSSZmr :
543 HasAVX ? X86::VMOVSSmr :
544 X86::MOVSSmr);
545 if (X86::PSRRegBankID == RB.getID())
546 return Isload ? X86::LD_Fp32m : X86::ST_Fp32m;
547 } else if (Ty == LLT::scalar(64)) {
548 if (X86::GPRRegBankID == RB.getID())
549 return Isload ? X86::MOV64rm : X86::MOV64mr;
550 if (X86::VECRRegBankID == RB.getID())
551 return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :
552 HasAVX ? X86::VMOVSDrm_alt :
553 X86::MOVSDrm_alt)
554 : (HasAVX512 ? X86::VMOVSDZmr :
555 HasAVX ? X86::VMOVSDmr :
556 X86::MOVSDmr);
557 if (X86::PSRRegBankID == RB.getID())
558 return Isload ? X86::LD_Fp64m : X86::ST_Fp64m;
559 } else if (Ty == LLT::scalar(80)) {
560 return Isload ? X86::LD_Fp80m : X86::ST_FpP80m;
561 } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
562 if (Alignment >= Align(16))
563 return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
564 : HasAVX512
565 ? X86::VMOVAPSZ128rm_NOVLX
566 : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
567 : (HasVLX ? X86::VMOVAPSZ128mr
568 : HasAVX512
569 ? X86::VMOVAPSZ128mr_NOVLX
570 : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
571 else
572 return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
573 : HasAVX512
574 ? X86::VMOVUPSZ128rm_NOVLX
575 : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
576 : (HasVLX ? X86::VMOVUPSZ128mr
577 : HasAVX512
578 ? X86::VMOVUPSZ128mr_NOVLX
579 : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
580 } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
581 if (Alignment >= Align(32))
582 return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
583 : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
584 : X86::VMOVAPSYrm)
585 : (HasVLX ? X86::VMOVAPSZ256mr
586 : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
587 : X86::VMOVAPSYmr);
588 else
589 return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
590 : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
591 : X86::VMOVUPSYrm)
592 : (HasVLX ? X86::VMOVUPSZ256mr
593 : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
594 : X86::VMOVUPSYmr);
595 } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
596 if (Alignment >= Align(64))
597 return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
598 else
599 return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
600 }
601 return Opc;
602}
603
604// Fill in an address from the given instruction.
607 const X86Subtarget &STI, X86AddressMode &AM) {
608 assert(I.getOperand(0).isReg() && "unsupported operand.");
609 assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
610 "unsupported type.");
611
612 switch (I.getOpcode()) {
613 default:
614 break;
615 case TargetOpcode::G_FRAME_INDEX:
616 AM.Base.FrameIndex = I.getOperand(1).getIndex();
618 return true;
619 case TargetOpcode::G_PTR_ADD: {
620 if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
621 int64_t Imm = *COff;
622 if (isInt<32>(Imm)) { // Check for displacement overflow.
623 AM.Disp = static_cast<int32_t>(Imm);
624 AM.Base.Reg = I.getOperand(1).getReg();
625 return true;
626 }
627 }
628 break;
629 }
630 case TargetOpcode::G_GLOBAL_VALUE: {
631 auto GV = I.getOperand(1).getGlobal();
632 if (GV->isThreadLocal()) {
633 return false; // TODO: we don't support TLS yet.
634 }
635 // Can't handle alternate code models yet.
636 if (TM.getCodeModel() != CodeModel::Small)
637 return false;
638 AM.GV = GV;
640
641 // TODO: The ABI requires an extra load. not supported yet.
643 return false;
644
645 // TODO: This reference is relative to the pic base. not supported yet.
647 return false;
648
649 if (STI.isPICStyleRIPRel()) {
650 // Use rip-relative addressing.
651 assert(AM.Base.Reg == 0 && AM.IndexReg == 0 &&
652 "RIP-relative addresses can't have additional register operands");
653 AM.Base.Reg = X86::RIP;
654 }
655 return true;
656 }
657 case TargetOpcode::G_CONSTANT_POOL: {
658 // TODO: Need a separate move for Large model
659 if (TM.getCodeModel() == CodeModel::Large)
660 return false;
661
662 AM.GVOpFlags = STI.classifyLocalReference(nullptr);
663 if (AM.GVOpFlags == X86II::MO_GOTOFF)
664 AM.Base.Reg = STI.getInstrInfo()->getGlobalBaseReg(I.getMF());
665 else if (STI.is64Bit())
666 AM.Base.Reg = X86::RIP;
667 AM.CP = true;
668 AM.Disp = I.getOperand(1).getIndex();
669 return true;
670 }
671 }
672 // Default behavior.
673 AM.Base.Reg = I.getOperand(0).getReg();
674 return true;
675}
676
677bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
678 MachineRegisterInfo &MRI,
679 MachineFunction &MF) const {
680 unsigned Opc = I.getOpcode();
681
682 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
683 "Only G_STORE and G_LOAD are expected for selection");
684
685 const Register DefReg = I.getOperand(0).getReg();
686 LLT Ty = MRI.getType(DefReg);
687 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
688
689 assert(I.hasOneMemOperand());
690 auto &MemOp = **I.memoperands_begin();
691 if (MemOp.isAtomic()) {
692 // Note: for unordered operations, we rely on the fact the appropriate MMO
693 // is already on the instruction we're mutating, and thus we don't need to
694 // make any changes. So long as we select an opcode which is capable of
695 // loading or storing the appropriate size atomically, the rest of the
696 // backend is required to respect the MMO state.
697 if (!MemOp.isUnordered()) {
698 LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
699 return false;
700 }
701 if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
702 LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
703 return false;
704 }
705 }
706
707 unsigned NewOpc = getPtrLoadStoreOp(Ty, RB, Opc);
708 if (NewOpc == Opc)
709 return false;
710
711 I.setDesc(TII.get(NewOpc));
712 MachineInstrBuilder MIB(MF, I);
713 MachineInstr *Ptr = MRI.getVRegDef(I.getOperand(1).getReg());
714
715 X86AddressMode AM;
716 if (!X86SelectAddress(*Ptr, TM, MRI, STI, AM))
717 return false;
718
719 if (Opc == TargetOpcode::G_LOAD) {
720 I.removeOperand(1);
721 addFullAddress(MIB, AM);
722 } else {
723 // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
724 I.removeOperand(1);
725 I.removeOperand(0);
726 addFullAddress(MIB, AM).addUse(DefReg);
727 }
728 bool Constrained = constrainSelectedInstRegOperands(I, TII, TRI, RBI);
729 I.addImplicitDefUseOperands(MF);
730 return Constrained;
731}
732
733static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
734 if (Ty == LLT::pointer(0, 64))
735 return X86::LEA64r;
736 else if (Ty == LLT::pointer(0, 32))
737 return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
738 else
739 llvm_unreachable("Can't get LEA opcode. Unsupported type.");
740}
741
742bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
743 MachineRegisterInfo &MRI,
744 MachineFunction &MF) const {
745 unsigned Opc = I.getOpcode();
746
747 assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
748 "unexpected instruction");
749
750 const Register DefReg = I.getOperand(0).getReg();
751 LLT Ty = MRI.getType(DefReg);
752
753 // Use LEA to calculate frame index and GEP
754 unsigned NewOpc = getLeaOP(Ty, STI);
755 I.setDesc(TII.get(NewOpc));
756 MachineInstrBuilder MIB(MF, I);
757
758 if (Opc == TargetOpcode::G_FRAME_INDEX) {
759 addOffset(MIB, 0);
760 } else {
761 MachineOperand &InxOp = I.getOperand(2);
762 I.addOperand(InxOp); // set IndexReg
763 InxOp.ChangeToImmediate(1); // set Scale
764 MIB.addImm(0).addReg(0);
765 }
766
768}
769
770bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
771 MachineRegisterInfo &MRI,
772 MachineFunction &MF) const {
773 assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
774 "unexpected instruction");
775
776 X86AddressMode AM;
777 if (!X86SelectAddress(I, TM, MRI, STI, AM))
778 return false;
779
780 const Register DefReg = I.getOperand(0).getReg();
781 LLT Ty = MRI.getType(DefReg);
782 unsigned NewOpc = getLeaOP(Ty, STI);
783
784 I.setDesc(TII.get(NewOpc));
785 MachineInstrBuilder MIB(MF, I);
786
787 I.removeOperand(1);
788 addFullAddress(MIB, AM);
789
791}
792
793bool X86InstructionSelector::selectConstant(MachineInstr &I,
794 MachineRegisterInfo &MRI,
795 MachineFunction &MF) const {
796 assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
797 "unexpected instruction");
798
799 const Register DefReg = I.getOperand(0).getReg();
800 LLT Ty = MRI.getType(DefReg);
801
802 if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
803 return false;
804
805 uint64_t Val = 0;
806 if (I.getOperand(1).isCImm()) {
807 Val = I.getOperand(1).getCImm()->getZExtValue();
808 I.getOperand(1).ChangeToImmediate(Val);
809 } else if (I.getOperand(1).isImm()) {
810 Val = I.getOperand(1).getImm();
811 } else
812 llvm_unreachable("Unsupported operand type.");
813
814 unsigned NewOpc;
815 switch (Ty.getSizeInBits()) {
816 case 8:
817 NewOpc = X86::MOV8ri;
818 break;
819 case 16:
820 NewOpc = X86::MOV16ri;
821 break;
822 case 32:
823 NewOpc = X86::MOV32ri;
824 break;
825 case 64:
826 // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
827 if (isInt<32>(Val))
828 NewOpc = X86::MOV64ri32;
829 else
830 NewOpc = X86::MOV64ri;
831 break;
832 default:
833 llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
834 }
835
836 I.setDesc(TII.get(NewOpc));
838}
839
840// Helper function for selectTruncOrPtrToInt and selectAnyext.
841// Returns true if DstRC lives on a floating register class and
842// SrcRC lives on a 128-bit vector class.
843static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
844 const TargetRegisterClass *SrcRC) {
845 return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
846 DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
847 (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
848}
849
850bool X86InstructionSelector::selectTurnIntoCOPY(
851 MachineInstr &I, MachineRegisterInfo &MRI, const Register DstReg,
852 const TargetRegisterClass *DstRC, const Register SrcReg,
853 const TargetRegisterClass *SrcRC) const {
854
855 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
856 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
857 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
858 << " operand\n");
859 return false;
860 }
861 I.setDesc(TII.get(X86::COPY));
862 return true;
863}
864
865bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
866 MachineRegisterInfo &MRI,
867 MachineFunction &MF) const {
868 assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
869 I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
870 "unexpected instruction");
871
872 const Register DstReg = I.getOperand(0).getReg();
873 const Register SrcReg = I.getOperand(1).getReg();
874
875 const LLT DstTy = MRI.getType(DstReg);
876 const LLT SrcTy = MRI.getType(SrcReg);
877
878 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
879 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
880
881 if (DstRB.getID() != SrcRB.getID()) {
882 LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
883 << " input/output on different banks\n");
884 return false;
885 }
886
887 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
888 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
889
890 if (!DstRC || !SrcRC)
891 return false;
892
893 // If that's truncation of the value that lives on the vector class and goes
894 // into the floating class, just replace it with copy, as we are able to
895 // select it as a regular move.
896 if (canTurnIntoCOPY(DstRC, SrcRC))
897 return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);
898
899 if (DstRB.getID() != X86::GPRRegBankID)
900 return false;
901
902 unsigned SubIdx;
903 if (DstRC == SrcRC) {
904 // Nothing to be done
905 SubIdx = X86::NoSubRegister;
906 } else if (DstRC == &X86::GR32RegClass) {
907 SubIdx = X86::sub_32bit;
908 } else if (DstRC == &X86::GR16RegClass) {
909 SubIdx = X86::sub_16bit;
910 } else if (DstRC == &X86::GR8RegClass) {
911 SubIdx = X86::sub_8bit;
912 } else {
913 return false;
914 }
915
916 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);
917
918 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
919 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
920 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
921 << "\n");
922 return false;
923 }
924
925 I.getOperand(1).setSubReg(SubIdx);
926
927 I.setDesc(TII.get(X86::COPY));
928 return true;
929}
930
931bool X86InstructionSelector::selectZext(MachineInstr &I,
932 MachineRegisterInfo &MRI,
933 MachineFunction &MF) const {
934 assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");
935
936 const Register DstReg = I.getOperand(0).getReg();
937 const Register SrcReg = I.getOperand(1).getReg();
938
939 const LLT DstTy = MRI.getType(DstReg);
940 const LLT SrcTy = MRI.getType(SrcReg);
941
942 assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
943 "8=>16 Zext is handled by tablegen");
944 assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
945 "8=>32 Zext is handled by tablegen");
946 assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
947 "16=>32 Zext is handled by tablegen");
948 assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
949 "8=>64 Zext is handled by tablegen");
950 assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
951 "16=>64 Zext is handled by tablegen");
952 assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
953 "32=>64 Zext is handled by tablegen");
954
955 if (SrcTy != LLT::scalar(1))
956 return false;
957
958 unsigned AndOpc;
959 if (DstTy == LLT::scalar(8))
960 AndOpc = X86::AND8ri;
961 else if (DstTy == LLT::scalar(16))
962 AndOpc = X86::AND16ri;
963 else if (DstTy == LLT::scalar(32))
964 AndOpc = X86::AND32ri;
965 else if (DstTy == LLT::scalar(64))
966 AndOpc = X86::AND64ri32;
967 else
968 return false;
969
970 Register DefReg = SrcReg;
971 if (DstTy != LLT::scalar(8)) {
972 Register ImpDefReg =
973 MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
974 BuildMI(*I.getParent(), I, I.getDebugLoc(),
975 TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);
976
977 DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
978 BuildMI(*I.getParent(), I, I.getDebugLoc(),
979 TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
980 .addReg(ImpDefReg)
981 .addReg(SrcReg)
982 .addImm(X86::sub_8bit);
983 }
984
985 MachineInstr &AndInst =
986 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
987 .addReg(DefReg)
988 .addImm(1);
989
991
992 I.eraseFromParent();
993 return true;
994}
995
// Select G_ANYEXT: widen a value whose upper bits are allowed to be
// undefined. Cross-class cases that amount to a register move become a COPY;
// GPR widening is expressed with SUBREG_TO_REG so no real instruction is
// needed for the don't-care high bits.
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If that's ANY_EXT of the value that lives on the floating class and goes
  // into the vector class, just replace it with copy, as we are able to select
  // it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  // Beyond the copy case only GPR extensions are handled here.
  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Same register class on both sides: the extension is a plain copy.
  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  // Otherwise place the source value into the low subregister of the wider
  // destination register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}
1050
1051bool X86InstructionSelector::selectCmp(MachineInstr &I,
1052 MachineRegisterInfo &MRI,
1053 MachineFunction &MF) const {
1054 assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");
1055
1056 X86::CondCode CC;
1057 bool SwapArgs;
1058 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
1059 (CmpInst::Predicate)I.getOperand(1).getPredicate());
1060
1061 Register LHS = I.getOperand(2).getReg();
1062 Register RHS = I.getOperand(3).getReg();
1063
1064 if (SwapArgs)
1065 std::swap(LHS, RHS);
1066
1067 unsigned OpCmp;
1068 LLT Ty = MRI.getType(LHS);
1069
1070 switch (Ty.getSizeInBits()) {
1071 default:
1072 return false;
1073 case 8:
1074 OpCmp = X86::CMP8rr;
1075 break;
1076 case 16:
1077 OpCmp = X86::CMP16rr;
1078 break;
1079 case 32:
1080 OpCmp = X86::CMP32rr;
1081 break;
1082 case 64:
1083 OpCmp = X86::CMP64rr;
1084 break;
1085 }
1086
1087 MachineInstr &CmpInst =
1088 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
1089 .addReg(LHS)
1090 .addReg(RHS);
1091
1092 MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1093 TII.get(X86::SETCCr), I.getOperand(0).getReg()).addImm(CC);
1094
1097
1098 I.eraseFromParent();
1099 return true;
1100}
1101
1102bool X86InstructionSelector::selectFCmp(MachineInstr &I,
1103 MachineRegisterInfo &MRI,
1104 MachineFunction &MF) const {
1105 assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");
1106
1107 Register LhsReg = I.getOperand(2).getReg();
1108 Register RhsReg = I.getOperand(3).getReg();
1110 (CmpInst::Predicate)I.getOperand(1).getPredicate();
1111
1112 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
1113 static const uint16_t SETFOpcTable[2][3] = {
1114 {X86::COND_E, X86::COND_NP, X86::AND8rr},
1115 {X86::COND_NE, X86::COND_P, X86::OR8rr}};
1116 const uint16_t *SETFOpc = nullptr;
1117 switch (Predicate) {
1118 default:
1119 break;
1120 case CmpInst::FCMP_OEQ:
1121 SETFOpc = &SETFOpcTable[0][0];
1122 break;
1123 case CmpInst::FCMP_UNE:
1124 SETFOpc = &SETFOpcTable[1][0];
1125 break;
1126 }
1127
1128 assert((LhsReg.isVirtual() && RhsReg.isVirtual()) &&
1129 "Both arguments of FCMP need to be virtual!");
1130 auto *LhsBank = RBI.getRegBank(LhsReg, MRI, TRI);
1131 [[maybe_unused]] auto *RhsBank = RBI.getRegBank(RhsReg, MRI, TRI);
1132 assert((LhsBank == RhsBank) &&
1133 "Both banks assigned to FCMP arguments need to be same!");
1134
1135 // Compute the opcode for the CMP instruction.
1136 unsigned OpCmp;
1137 LLT Ty = MRI.getType(LhsReg);
1138 switch (Ty.getSizeInBits()) {
1139 default:
1140 return false;
1141 case 32:
1142 OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr32
1143 : X86::UCOMISSrr;
1144 break;
1145 case 64:
1146 OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr64
1147 : X86::UCOMISDrr;
1148 break;
1149 case 80:
1150 OpCmp = X86::UCOM_FpIr80;
1151 break;
1152 }
1153
1154 Register ResultReg = I.getOperand(0).getReg();
1156 ResultReg,
1157 *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
1158 if (SETFOpc) {
1159 MachineInstr &CmpInst =
1160 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
1161 .addReg(LhsReg)
1162 .addReg(RhsReg);
1163
1164 Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
1165 Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
1166 MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1167 TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]);
1168 MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1169 TII.get(X86::SETCCr), FlagReg2).addImm(SETFOpc[1]);
1170 MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1171 TII.get(SETFOpc[2]), ResultReg)
1172 .addReg(FlagReg1)
1173 .addReg(FlagReg2);
1178
1179 I.eraseFromParent();
1180 return true;
1181 }
1182
1183 X86::CondCode CC;
1184 bool SwapArgs;
1185 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
1186 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1187
1188 if (SwapArgs)
1189 std::swap(LhsReg, RhsReg);
1190
1191 // Emit a compare of LHS/RHS.
1192 MachineInstr &CmpInst =
1193 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
1194 .addReg(LhsReg)
1195 .addReg(RhsReg);
1196
1197 MachineInstr &Set =
1198 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), ResultReg).addImm(CC);
1201 I.eraseFromParent();
1202 return true;
1203}
1204
1205bool X86InstructionSelector::selectUAddSub(MachineInstr &I,
1206 MachineRegisterInfo &MRI,
1207 MachineFunction &MF) const {
1208 assert((I.getOpcode() == TargetOpcode::G_UADDE ||
1209 I.getOpcode() == TargetOpcode::G_UADDO ||
1210 I.getOpcode() == TargetOpcode::G_USUBE ||
1211 I.getOpcode() == TargetOpcode::G_USUBO) &&
1212 "unexpected instruction");
1213
1214 auto &CarryMI = cast<GAddSubCarryOut>(I);
1215
1216 const Register DstReg = CarryMI.getDstReg();
1217 const Register CarryOutReg = CarryMI.getCarryOutReg();
1218 const Register Op0Reg = CarryMI.getLHSReg();
1219 const Register Op1Reg = CarryMI.getRHSReg();
1220 bool IsSub = CarryMI.isSub();
1221
1222 const LLT DstTy = MRI.getType(DstReg);
1223 assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");
1224
1225 // TODO: Handle immediate argument variants?
1226 unsigned OpADC, OpADD, OpSBB, OpSUB;
1227 switch (DstTy.getSizeInBits()) {
1228 case 8:
1229 OpADC = X86::ADC8rr;
1230 OpADD = X86::ADD8rr;
1231 OpSBB = X86::SBB8rr;
1232 OpSUB = X86::SUB8rr;
1233 break;
1234 case 16:
1235 OpADC = X86::ADC16rr;
1236 OpADD = X86::ADD16rr;
1237 OpSBB = X86::SBB16rr;
1238 OpSUB = X86::SUB16rr;
1239 break;
1240 case 32:
1241 OpADC = X86::ADC32rr;
1242 OpADD = X86::ADD32rr;
1243 OpSBB = X86::SBB32rr;
1244 OpSUB = X86::SUB32rr;
1245 break;
1246 case 64:
1247 OpADC = X86::ADC64rr;
1248 OpADD = X86::ADD64rr;
1249 OpSBB = X86::SBB64rr;
1250 OpSUB = X86::SUB64rr;
1251 break;
1252 default:
1253 llvm_unreachable("selectUAddSub unsupported type.");
1254 }
1255
1256 const RegisterBank &CarryRB = *RBI.getRegBank(CarryOutReg, MRI, TRI);
1257 const TargetRegisterClass *CarryRC =
1258 getRegClass(MRI.getType(CarryOutReg), CarryRB);
1259
1260 unsigned Opcode = IsSub ? OpSUB : OpADD;
1261
1262 // G_UADDE/G_USUBE - find CarryIn def instruction.
1263 if (auto CarryInMI = dyn_cast<GAddSubCarryInOut>(&I)) {
1264 Register CarryInReg = CarryInMI->getCarryInReg();
1265 MachineInstr *Def = MRI.getVRegDef(CarryInReg);
1266 while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
1267 CarryInReg = Def->getOperand(1).getReg();
1268 Def = MRI.getVRegDef(CarryInReg);
1269 }
1270
1271 // TODO - handle more CF generating instructions
1272 if (Def->getOpcode() == TargetOpcode::G_UADDE ||
1273 Def->getOpcode() == TargetOpcode::G_UADDO ||
1274 Def->getOpcode() == TargetOpcode::G_USUBE ||
1275 Def->getOpcode() == TargetOpcode::G_USUBO) {
1276 // carry set by prev ADD/SUB.
1277
1278 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::CMP8ri))
1279 .addReg(CarryInReg)
1280 .addImm(1);
1281
1282 if (!RBI.constrainGenericRegister(CarryInReg, *CarryRC, MRI))
1283 return false;
1284
1285 Opcode = IsSub ? OpSBB : OpADC;
1286 } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
1287 // carry is constant, support only 0.
1288 if (*val != 0)
1289 return false;
1290
1291 Opcode = IsSub ? OpSUB : OpADD;
1292 } else
1293 return false;
1294 }
1295
1296 MachineInstr &Inst =
1297 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
1298 .addReg(Op0Reg)
1299 .addReg(Op1Reg);
1300
1301 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), CarryOutReg)
1303
1304 if (!constrainSelectedInstRegOperands(Inst, TII, TRI, RBI) ||
1305 !RBI.constrainGenericRegister(CarryOutReg, *CarryRC, MRI))
1306 return false;
1307
1308 I.eraseFromParent();
1309 return true;
1310}
1311
1312bool X86InstructionSelector::selectExtract(MachineInstr &I,
1313 MachineRegisterInfo &MRI,
1314 MachineFunction &MF) const {
1315 assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
1316 "unexpected instruction");
1317
1318 const Register DstReg = I.getOperand(0).getReg();
1319 const Register SrcReg = I.getOperand(1).getReg();
1320 int64_t Index = I.getOperand(2).getImm();
1321
1322 const LLT DstTy = MRI.getType(DstReg);
1323 const LLT SrcTy = MRI.getType(SrcReg);
1324
1325 // Meanwile handle vector type only.
1326 if (!DstTy.isVector())
1327 return false;
1328
1329 if (Index % DstTy.getSizeInBits() != 0)
1330 return false; // Not extract subvector.
1331
1332 if (Index == 0) {
1333 // Replace by extract subreg copy.
1334 if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
1335 return false;
1336
1337 I.eraseFromParent();
1338 return true;
1339 }
1340
1341 bool HasAVX = STI.hasAVX();
1342 bool HasAVX512 = STI.hasAVX512();
1343 bool HasVLX = STI.hasVLX();
1344
1345 if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
1346 if (HasVLX)
1347 I.setDesc(TII.get(X86::VEXTRACTF32X4Z256rri));
1348 else if (HasAVX)
1349 I.setDesc(TII.get(X86::VEXTRACTF128rri));
1350 else
1351 return false;
1352 } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
1353 if (DstTy.getSizeInBits() == 128)
1354 I.setDesc(TII.get(X86::VEXTRACTF32X4Zrri));
1355 else if (DstTy.getSizeInBits() == 256)
1356 I.setDesc(TII.get(X86::VEXTRACTF64X4Zrri));
1357 else
1358 return false;
1359 } else
1360 return false;
1361
1362 // Convert to X86 VEXTRACT immediate.
1363 Index = Index / DstTy.getSizeInBits();
1364 I.getOperand(2).setImm(Index);
1365
1367}
1368
1369bool X86InstructionSelector::emitExtractSubreg(Register DstReg, Register SrcReg,
1370 MachineInstr &I,
1371 MachineRegisterInfo &MRI,
1372 MachineFunction &MF) const {
1373 const LLT DstTy = MRI.getType(DstReg);
1374 const LLT SrcTy = MRI.getType(SrcReg);
1375 unsigned SubIdx = X86::NoSubRegister;
1376
1377 if (!DstTy.isVector() || !SrcTy.isVector())
1378 return false;
1379
1380 assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
1381 "Incorrect Src/Dst register size");
1382
1383 if (DstTy.getSizeInBits() == 128)
1384 SubIdx = X86::sub_xmm;
1385 else if (DstTy.getSizeInBits() == 256)
1386 SubIdx = X86::sub_ymm;
1387 else
1388 return false;
1389
1390 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
1391 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
1392
1393 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);
1394
1395 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1396 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1397 LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
1398 return false;
1399 }
1400
1401 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
1402 .addReg(SrcReg, 0, SubIdx);
1403
1404 return true;
1405}
1406
1407bool X86InstructionSelector::emitInsertSubreg(Register DstReg, Register SrcReg,
1408 MachineInstr &I,
1409 MachineRegisterInfo &MRI,
1410 MachineFunction &MF) const {
1411 const LLT DstTy = MRI.getType(DstReg);
1412 const LLT SrcTy = MRI.getType(SrcReg);
1413 unsigned SubIdx = X86::NoSubRegister;
1414
1415 // TODO: support scalar types
1416 if (!DstTy.isVector() || !SrcTy.isVector())
1417 return false;
1418
1419 assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
1420 "Incorrect Src/Dst register size");
1421
1422 if (SrcTy.getSizeInBits() == 128)
1423 SubIdx = X86::sub_xmm;
1424 else if (SrcTy.getSizeInBits() == 256)
1425 SubIdx = X86::sub_ymm;
1426 else
1427 return false;
1428
1429 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
1430 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
1431
1432 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1433 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1434 LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
1435 return false;
1436 }
1437
1438 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
1439 .addReg(DstReg, RegState::DefineNoRead, SubIdx)
1440 .addReg(SrcReg);
1441
1442 return true;
1443}
1444
1445bool X86InstructionSelector::selectInsert(MachineInstr &I,
1446 MachineRegisterInfo &MRI,
1447 MachineFunction &MF) const {
1448 assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");
1449
1450 const Register DstReg = I.getOperand(0).getReg();
1451 const Register SrcReg = I.getOperand(1).getReg();
1452 const Register InsertReg = I.getOperand(2).getReg();
1453 int64_t Index = I.getOperand(3).getImm();
1454
1455 const LLT DstTy = MRI.getType(DstReg);
1456 const LLT InsertRegTy = MRI.getType(InsertReg);
1457
1458 // Meanwile handle vector type only.
1459 if (!DstTy.isVector())
1460 return false;
1461
1462 if (Index % InsertRegTy.getSizeInBits() != 0)
1463 return false; // Not insert subvector.
1464
1465 if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
1466 // Replace by subreg copy.
1467 if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
1468 return false;
1469
1470 I.eraseFromParent();
1471 return true;
1472 }
1473
1474 bool HasAVX = STI.hasAVX();
1475 bool HasAVX512 = STI.hasAVX512();
1476 bool HasVLX = STI.hasVLX();
1477
1478 if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
1479 if (HasVLX)
1480 I.setDesc(TII.get(X86::VINSERTF32X4Z256rri));
1481 else if (HasAVX)
1482 I.setDesc(TII.get(X86::VINSERTF128rri));
1483 else
1484 return false;
1485 } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
1486 if (InsertRegTy.getSizeInBits() == 128)
1487 I.setDesc(TII.get(X86::VINSERTF32X4Zrri));
1488 else if (InsertRegTy.getSizeInBits() == 256)
1489 I.setDesc(TII.get(X86::VINSERTF64X4Zrri));
1490 else
1491 return false;
1492 } else
1493 return false;
1494
1495 // Convert to X86 VINSERT immediate.
1496 Index = Index / InsertRegTy.getSizeInBits();
1497
1498 I.getOperand(3).setImm(Index);
1499
1501}
1502
1503bool X86InstructionSelector::selectUnmergeValues(
1504 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
1505 assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
1506 "unexpected instruction");
1507
1508 // Split to extracts.
1509 unsigned NumDefs = I.getNumOperands() - 1;
1510 Register SrcReg = I.getOperand(NumDefs).getReg();
1511 unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
1512
1513 for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
1514 MachineInstr &ExtrInst =
1515 *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1516 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
1517 .addReg(SrcReg)
1518 .addImm(Idx * DefSize);
1519
1520 if (!select(ExtrInst))
1521 return false;
1522 }
1523
1524 I.eraseFromParent();
1525 return true;
1526}
1527
// Lower G_MERGE_VALUES / G_CONCAT_VECTORS into a chain of G_INSERTs: the
// first source is placed with a subregister write, each following source is
// inserted at its bit offset into a fresh temporary, and the final temporary
// is copied into the destination. Each generated instruction is selected
// immediately.
bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split to inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first src use insertSubReg.
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  // Chain the remaining sources: each step inserts one source into the
  // accumulated value at offset (Idx - 1) * SrcSize.
  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  // Forward the fully assembled value into the original destination.
  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                               .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}
1576
1577bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
1578 MachineRegisterInfo &MRI,
1579 MachineFunction &MF) const {
1580 assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");
1581
1582 const Register CondReg = I.getOperand(0).getReg();
1583 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1584
1585 MachineInstr &TestInst =
1586 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
1587 .addReg(CondReg)
1588 .addImm(1);
1589 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
1590 .addMBB(DestMBB).addImm(X86::COND_NE);
1591
1592 constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);
1593
1594 I.eraseFromParent();
1595 return true;
1596}
1597
1598bool X86InstructionSelector::materializeFP(MachineInstr &I,
1599 MachineRegisterInfo &MRI,
1600 MachineFunction &MF) const {
1601 assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
1602 "unexpected instruction");
1603
1604 // Can't handle alternate code models yet.
1606 if (CM != CodeModel::Small && CM != CodeModel::Large)
1607 return false;
1608
1609 const Register DstReg = I.getOperand(0).getReg();
1610 const LLT DstTy = MRI.getType(DstReg);
1611 const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
1612 // Create the load from the constant pool.
1613 const ConstantFP *CFP = I.getOperand(1).getFPImm();
1614 const auto &DL = MF.getDataLayout();
1615 Align Alignment = DL.getPrefTypeAlign(CFP->getType());
1616 const DebugLoc &DbgLoc = I.getDebugLoc();
1617
1618 unsigned Opc =
1619 getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);
1620
1621 unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
1622 MachineInstr *LoadInst = nullptr;
1623 unsigned char OpFlag = STI.classifyLocalReference(nullptr);
1624
1625 if (CM == CodeModel::Large && STI.is64Bit()) {
1626 // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
1627 // they cannot be folded into immediate fields.
1628
1629 Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
1630 BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
1631 .addConstantPoolIndex(CPI, 0, OpFlag);
1632
1633 MachineMemOperand *MMO = MF.getMachineMemOperand(
1635 LLT::pointer(0, DL.getPointerSizeInBits()), Alignment);
1636
1637 LoadInst =
1638 addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
1639 AddrReg)
1640 .addMemOperand(MMO);
1641
1642 } else if (CM == CodeModel::Small || !STI.is64Bit()) {
1643 // Handle the case when globals fit in our immediate field.
1644 // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.
1645
1646 // x86-32 PIC requires a PIC base register for constant pools.
1647 unsigned PICBase = 0;
1648 if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
1649 // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
1650 // In DAGISEL the code that initialize it generated by the CGBR pass.
1651 return false; // TODO support the mode.
1652 } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
1653 PICBase = X86::RIP;
1654
1655 LoadInst = addConstantPoolReference(
1656 BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
1657 OpFlag);
1658 } else
1659 return false;
1660
1661 constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
1662 I.eraseFromParent();
1663 return true;
1664}
1665
1666bool X86InstructionSelector::selectImplicitDefOrPHI(
1667 MachineInstr &I, MachineRegisterInfo &MRI) const {
1668 assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
1669 I.getOpcode() == TargetOpcode::G_PHI) &&
1670 "unexpected instruction");
1671
1672 Register DstReg = I.getOperand(0).getReg();
1673
1674 if (!MRI.getRegClassOrNull(DstReg)) {
1675 const LLT DstTy = MRI.getType(DstReg);
1676 const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);
1677
1678 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
1679 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
1680 << " operand\n");
1681 return false;
1682 }
1683 }
1684
1685 if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1686 I.setDesc(TII.get(X86::IMPLICIT_DEF));
1687 else
1688 I.setDesc(TII.get(X86::PHI));
1689
1690 return true;
1691}
1692
// Select integer G_MUL/G_SMULH/G_UMULH/G_SDIV/G_SREM/G_UDIV/G_UREM via the
// x86 one-operand MUL/IMUL/DIV/IDIV forms, which implicitly use the
// rAX/rDX register pair: the first operand is moved into the low register,
// the high register is sign-extended or zeroed as required, the second
// operand is the explicit instruction operand, and the result is copied out
// of the register named in the table.
bool X86InstructionSelector::selectMulDivRem(MachineInstr &I,
                                             MachineRegisterInfo &MRI,
                                             MachineFunction &MF) const {
  // The implementation of this function is adapted from X86FastISel.
  assert((I.getOpcode() == TargetOpcode::G_MUL ||
          I.getOpcode() == TargetOpcode::G_SMULH ||
          I.getOpcode() == TargetOpcode::G_UMULH ||
          I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  // Only general-purpose registers are handled here.
  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 7;   // SDiv/SRem/UDiv/URem/Mul/SMulH/UMulh
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;

  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
  const static struct MulDivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct MulDivRemResult {
      unsigned OpMulDivRem;  // The specific MUL/DIV opcode to use.
      unsigned OpSignExtend; // Opcode for sign-extending lowreg into
                             // highreg, or copying a zero into highreg.
      unsigned OpCopy;       // Opcode for copying dividend into lowreg, or
                             // zero/sign-extending into lowreg for i8.
      unsigned ResultReg;    // Register containing the desired result.
      bool IsOpSigned;       // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S}, // Mul
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SMulH
           {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH, U},  // UMulH
       }}, // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},    // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},    // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U}, // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U}, // URem
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::AX, S}, // Mul
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::DX, S}, // SMulH
           {X86::MUL16r, X86::MOV32r0, Copy, X86::DX, U},  // UMulH
       }}, // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},    // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},    // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U}, // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U}, // URem
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EAX, S}, // Mul
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EDX, S}, // SMulH
           {X86::MUL32r, X86::MOV32r0, Copy, X86::EDX, U},  // UMulH
       }}, // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},    // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},    // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U}, // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U}, // URem
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RAX, S}, // Mul
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RDX, S}, // SMulH
           {X86::MUL64r, X86::MOV32r0, Copy, X86::RDX, U},  // UMulH
       }}, // i64
  };

  // Find the table row matching the operand width.
  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const MulDivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  // Column index within the row, fixed by the opcode.
  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected mul/div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  case TargetOpcode::G_MUL:
    OpIndex = 4;
    break;
  case TargetOpcode::G_SMULH:
    OpIndex = 5;
    break;
  case TargetOpcode::G_UMULH:
    OpIndex = 6;
    break;
  }

  const MulDivRemEntry &TypeEntry = *OpEntryIt;
  const MulDivRemEntry::MulDivRemResult &OpEntry =
      TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);

  // Zero-extend or sign-extend into high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }

  // Generate the DIV/IDIV/MUL/IMUL instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpMulDivRem))
      .addReg(Op2Reg);

  // For i8 remainder, we can't reference ah directly, as we'll end
  // up with bogus copies like %r9b = COPY %ah. Reference ax
  // instead to prevent ah references in a rex instruction.
  //
  // The current assumption of the fast register allocator is that isel
  // won't generate explicit references to the GR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
  if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
    Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(ResultSuperReg, 0, X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.ResultReg);
  }
  I.eraseFromParent();

  return true;
}
1917
1918bool X86InstructionSelector::selectSelect(MachineInstr &I,
1919 MachineRegisterInfo &MRI,
1920 MachineFunction &MF) const {
1921 GSelect &Sel = cast<GSelect>(I);
1922 Register DstReg = Sel.getReg(0);
1923 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::TEST32rr))
1924 .addReg(Sel.getCondReg())
1925 .addReg(Sel.getCondReg());
1926
1927 unsigned OpCmp;
1928 LLT Ty = MRI.getType(DstReg);
1929 if (Ty.getSizeInBits() == 80) {
1930 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::CMOVE_Fp80),
1931 DstReg)
1932 .addReg(Sel.getTrueReg())
1933 .addReg(Sel.getFalseReg());
1934 } else {
1935 switch (Ty.getSizeInBits()) {
1936 default:
1937 return false;
1938 case 8:
1939 OpCmp = X86::CMOV_GR8;
1940 break;
1941 case 16:
1942 OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;
1943 break;
1944 case 32:
1945 OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;
1946 break;
1947 case 64:
1948 assert(STI.is64Bit() && STI.canUseCMOV());
1949 OpCmp = X86::CMOV64rr;
1950 break;
1951 }
1952 BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(OpCmp), DstReg)
1953 .addReg(Sel.getTrueReg())
1954 .addReg(Sel.getFalseReg())
1956 }
1957 const TargetRegisterClass *DstRC = getRegClass(Ty, DstReg, MRI);
1958 if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1959 LLVM_DEBUG(dbgs() << "Failed to constrain CMOV\n");
1960 return false;
1961 }
1962
1963 Sel.eraseFromParent();
1964 return true;
1965}
1966
1967InstructionSelector::ComplexRendererFns
1968X86InstructionSelector::selectAddr(MachineOperand &Root) const {
1969 MachineInstr *MI = Root.getParent();
1970 MachineIRBuilder MIRBuilder(*MI);
1971
1972 MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
1973 MachineInstr *Ptr = MRI.getVRegDef(Root.getReg());
1974 X86AddressMode AM;
1975 X86SelectAddress(*Ptr, TM, MRI, STI, AM);
1976
1977 if (AM.IndexReg)
1978 return std::nullopt;
1979
1980 return {// Base
1981 {[=](MachineInstrBuilder &MIB) {
1983 MIB.addUse(AM.Base.Reg);
1984 else {
1986 "Unknown type of address base");
1987 MIB.addFrameIndex(AM.Base.FrameIndex);
1988 }
1989 },
1990 // Scale
1991 [=](MachineInstrBuilder &MIB) { MIB.addImm(AM.Scale); },
1992 // Index
1993 [=](MachineInstrBuilder &MIB) { MIB.addUse(0); },
1994 // Disp
1995 [=](MachineInstrBuilder &MIB) {
1996 if (AM.GV)
1997 MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
1998 else if (AM.CP)
1999 MIB.addConstantPoolIndex(AM.Disp, 0, AM.GVOpFlags);
2000 else
2001 MIB.addImm(AM.Disp);
2002 },
2003 // Segment
2004 [=](MachineInstrBuilder &MIB) { MIB.addUse(0); }}};
2005}
2006
2007InstructionSelector *
2009 const X86Subtarget &Subtarget,
2010 const X86RegisterBankInfo &RBI) {
2011 return new X86InstructionSelector(TM, Subtarget, RBI);
2012}
unsigned const MachineRegisterInfo * MRI
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectMergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define DEBUG_TYPE
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Implement a low-level type suitable for MachineInstr level instruction selection.
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned selectLoadStoreOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
static StringRef getName(Value *V)
unsigned OpIndex
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:480
#define LLVM_DEBUG(...)
Definition Debug.h:114
static bool X86SelectAddress(MachineInstr &I, const X86TargetMachine &TM, const MachineRegisterInfo &MRI, const X86Subtarget &STI, X86AddressMode &AM)
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC, const TargetRegisterClass *SrcRC)
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI)
static const TargetRegisterClass * getRegClassFromGRPhysReg(Register Reg)
Value * RHS
Value * LHS
This file declares the targeting of the RegisterBankInfo class for X86.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
Register getCondReg() const
Register getFalseReg() const
Register getTrueReg() const
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
const MachineBasicBlock * getParent() const
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
@ MOLoad
The memory access reads data.
MachineOperand class - Representation of each machine instruction operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
TypeSize getSizeInBits(Register Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
CodeModel::Model getCodeModel() const
Returns the code model.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
Register getGlobalBaseReg(MachineFunction *MF) const
getGlobalBaseReg - Return a virtual register initialized with the the global base register value.
This class provides the information for the target register banks.
bool canUseCMOV() const
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
const X86InstrInfo * getInstrInfo() const override
bool hasAVX512() const
unsigned char classifyGlobalReference(const GlobalValue *GV, const Module &M) const
bool isPICStyleRIPRel() const
unsigned char classifyLocalReference(const GlobalValue *GV) const
Classify a global variable reference for the current subtarget according to how we should reference i...
bool hasAVX() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
@ X86
Windows x64, Windows Itanium (IA-64)
Definition MCAsmInfo.h:50
@ MO_GOTOFF
MO_GOTOFF - On a symbol operand this indicates that the immediate is the offset to the location of th...
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
@ LAST_VALID_COND
Definition X86BaseInfo.h:94
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
StringMapEntry< std::atomic< TypeEntryBody * > > TypeEntry
Definition TypePool.h:27
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
This is an optimization pass for GlobalISel generic memory operations.
static bool isGlobalStubReference(unsigned char TargetFlag)
isGlobalStubReference - Return true if the specified TargetFlag operand is a reference to a stub for ...
static bool isGlobalRelativeToPICBase(unsigned char TargetFlag)
isGlobalRelativeToPICBase - Return true if the specified global value reference is relative to a 32-b...
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition Utils.cpp:294
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:155
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
static const MachineInstrBuilder & addConstantPoolReference(const MachineInstrBuilder &MIB, unsigned CPI, Register GlobalBaseReg, unsigned char OpFlags)
addConstantPoolReference - This function is used to add a reference to the base of a constant value s...
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
static const MachineInstrBuilder & addFullAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:314
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
static const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, int Offset)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
static const MachineInstrBuilder & addDirectMem(const MachineInstrBuilder &MIB, Register Reg)
addDirectMem - This function is used to add a direct memory reference to the current instruction – th...
InstructionSelector * createX86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &, const X86RegisterBankInfo &)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
X86AddressMode - This struct holds a generalized full x86 address mode.
const GlobalValue * GV
union llvm::X86AddressMode::BaseUnion Base
enum llvm::X86AddressMode::@202116273335065351270200035056227005202106004277 BaseType