#define FIXUPLEA_DESC "X86 LEA Fixup"
#define FIXUPLEA_NAME "x86-fixup-LEAs"

#define DEBUG_TYPE FIXUPLEA_NAME

STATISTIC(NumLEAs, "Number of LEA instructions created");
enum RegUsageState { RU_NotUsed, RU_Write, RU_Read };
// (declaration fragment: tail of a helper's parameter list taking the
// UseLEAForSP flag)
bool UseLEAForSP) const;

// (fragment of getRequiredProperties(): the pass runs post-RA, hence the
// NoVRegs requirement)
MachineFunctionProperties::Property::NoVRegs);
char FixupLEAPass::ID = 0;
switch (MI.getOpcode()) {
TII->get(MI.getOpcode() == X86::MOV32rr ? X86::LEA32r
if (!MI.isConvertibleTo3Addr())
switch (MI.getOpcode()) {
case X86::ADD64ri32_DB:
case X86::ADD32ri_DB:
  if (!MI.getOperand(2).isImm()) {
case X86::ADD64rr_DB:
case X86::ADD32rr_DB:
return TII->convertToThreeAddress(MI, nullptr, nullptr);
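// postRAConvertToLEA(): the fragments above rebuild an ALU instruction as an
// LEA after register allocation. A register-to-register MOV (MOV32rr is shown
// above; its 64-bit counterpart follows the same path) is re-emitted directly
// as an LEA of matching width, while the convertible ADD forms (including the
// *_DB variants) are handed to TII->convertToThreeAddress(); ADDs whose third
// operand is not an immediate are rejected.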
return Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
       Opcode == X86::LEA64_32r;
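// isLEA() as reconstructed above: the pass only considers the three register
// LEA forms -- X86::LEA32r, X86::LEA64r and X86::LEA64_32r (a 64-bit address
// computation with a 32-bit destination, e.g. "leal (%rdi,%rsi), %eax").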
bool IsSlowLEA = ST.slowLEA();
bool IsSlow3OpsLEA = ST.slow3OpsLEA();
bool LEAUsesAG = ST.leaUsesAG();
bool UseLEAForSP = ST.useLeaForSP();
TII = ST.getInstrInfo();
TRI = ST.getRegisterInfo();
auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
auto *MBFI = (PSI && PSI->hasProfileSummary())
                 ? &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI()
bool OptIncDecPerBB =
if (!isLEA(I->getOpcode()))
if (optTwoAddrLEA(I, MBB, OptIncDecPerBB, UseLEAForSP))
  processInstructionForSlowLEA(I, MBB);
else if (IsSlow3OpsLEA)
  processInstrForSlow3OpLEA(I, MBB, OptIncDecPerBB);
processInstruction(I, MBB);
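// Dispatch sketch for runOnMachineFunction(): the subtarget flags above pick
// the strategy. Every LEA is first offered to optTwoAddrLEA(); if that
// declines, slowLEA() targets go through processInstructionForSlowLEA() and
// slow3OpsLEA() targets through processInstrForSlow3OpLEA(). On targets where
// LEA executes in the address-generation unit (leaUsesAG()), the block is
// additionally scanned with processInstruction(). PSI/MBFI only feed the
// per-block size-optimization decision (OptIncDecPerBB), which enables the
// INC/DEC forms.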
FixupLEAPass::RegUsageState
RegUsageState RegUsage = RU_NotUsed;
if (MO.isReg() && MO.getReg() == p.getReg()) {
int InstrDistance = 1;
static const int INSTR_DISTANCE_THRESHOLD = 5;
while (Found && I != CurInst) {
  if (CurInst->isCall() || CurInst->isInlineAsm())
  if (InstrDistance > INSTR_DISTANCE_THRESHOLD)
  if (usesRegister(p, CurInst) == RU_Write) {
  InstrDistance += TSM.computeInstrLatency(&*CurInst);
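// searchBackwards() (fragments above): walk backwards from the use, summing
// scheduling-model latencies via TSM.computeInstrLatency(), and stop at a
// call, inline asm, or once INSTR_DISTANCE_THRESHOLD is exceeded. Finding a
// write to the register inside that window means its producer is close enough
// to be worth converting to an LEA for the address-generation unit.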
return Reg == X86::EBP || Reg == X86::RBP ||
       Reg == X86::R13D || Reg == X86::R13;
Index.getReg() != X86::NoRegister;
return X86::ADD64ri32;
return IsINC ? X86::INC32r : X86::DEC32r;
return IsINC ? X86::INC64r : X86::DEC64r;
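// The returns above belong to the opcode-mapping helpers (getADDriFromLEA /
// getINCDECFromLEA). A minimal sketch of the INC/DEC mapping, assuming the
// case grouping implied by the two returns shown -- the exact switch layout
// of the real helper may differ:
static unsigned sketchINCDECFromLEA(unsigned LEAOpcode, bool IsINC) {
  switch (LEAOpcode) {
  case X86::LEA32r:
  case X86::LEA64_32r:
    return IsINC ? X86::INC32r : X86::DEC32r; // 32-bit destination
  case X86::LEA64r:
    return IsINC ? X86::INC64r : X86::DEC64r; // 64-bit destination
  default:
    llvm_unreachable("unexpected LEA opcode");
  }
}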
const int InstrDistanceThreshold = 5;
int InstrDistance = 1;
unsigned LEAOpcode = I->getOpcode();
Register DestReg = I->getOperand(0).getReg();
while (CurInst != MBB.end()) {
  if (CurInst->isCall() || CurInst->isInlineAsm())
  if (InstrDistance > InstrDistanceThreshold)
  for (unsigned I = 0, E = CurInst->getNumOperands(); I != E; ++I) {
    if (Opnd.getReg() == DestReg) {
      unsigned AluOpcode = CurInst->getOpcode();
      if (AluOpcode != AddOpcode && AluOpcode != SubOpcode)
      if (!CurInst->registerDefIsDead(X86::EFLAGS, TRI))
      if (TRI->regsOverlap(DestReg, Opnd.getReg()))
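// searchALUInst() (fragments above, used by optLEAALU): scan forward from the
// LEA, bounded by InstrDistanceThreshold and stopped at calls and inline asm,
// for an ADD or SUB that consumes the LEA's destination register and whose
// EFLAGS definition is dead; any other instruction touching that register
// aborts the search.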
bool &BaseIndexDef, bool &AluDestRef,
BaseIndexDef = AluDestRef = false;
*KilledBase = *KilledIndex = nullptr;
Register AluDestReg = AluI->getOperand(0).getReg();
if (TRI->regsOverlap(Reg, AluDestReg))
if (TRI->regsOverlap(Reg, BaseReg)) {
if (TRI->regsOverlap(Reg, IndexReg)) {
  *KilledIndex = &Opnd;
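// checkRegUsage(): between the LEA and the ALU instruction found above,
// record whether the base or index register is redefined (BaseIndexDef) or
// the ALU destination is referenced (AluDestRef), and remember any operands
// that kill base/index so the kill flags can be re-applied to the
// replacement instructions.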
bool BaseIndexDef, AluDestRef;
checkRegUsage(I, AluI, BaseIndexDef, AluDestRef, &KilledBase, &KilledIndex);
KilledBase = KilledIndex = nullptr;
Register AluDestReg = AluI->getOperand(0).getReg();
if (I->getOpcode() == X86::LEA64_32r) {
  BaseReg = TRI->getSubReg(BaseReg, X86::sub_32bit);
  IndexReg = TRI->getSubReg(IndexReg, X86::sub_32bit);
if (AluDestReg == IndexReg) {
  if (BaseReg == IndexReg)
if (BaseReg == IndexReg)
  KilledBase = nullptr;
unsigned NewOpcode = AluI->getOpcode();
NewMI1 = BuildMI(MBB, InsertPos, AluI->getDebugLoc(), TII->get(NewOpcode),
NewMI2 = BuildMI(MBB, InsertPos, AluI->getDebugLoc(), TII->get(NewOpcode),
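// The two BuildMI calls above emit the fused form. Illustrative effect
// (register names are examples only, AT&T syntax):
//   leaq (%rdi,%rsi), %rax              addq %rdi, %rcx
//   addq %rax, %rcx            ==>      addq %rsi, %rcx
// The LEA disappears and its base and index are applied directly to the ALU
// instruction's destination.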
bool UseLEAForSP) const {
if (UseLEAForSP && (DestReg == X86::ESP || DestReg == X86::RSP))
if (MI.getOpcode() == X86::LEA64_32r) {
  BaseReg = TRI->getSubReg(BaseReg, X86::sub_32bit);
  IndexReg = TRI->getSubReg(IndexReg, X86::sub_32bit);
if (BaseReg != 0 && IndexReg != 0 && Disp.getImm() == 0 &&
    (DestReg == BaseReg || DestReg == IndexReg)) {
  if (DestReg != BaseReg)
  if (MI.getOpcode() == X86::LEA64_32r) {
} else if (DestReg == BaseReg && IndexReg == 0) {
  if (OptIncDec && (Disp.getImm() == 1 || Disp.getImm() == -1)) {
    bool IsINC = Disp.getImm() == 1;
    if (MI.getOpcode() == X86::LEA64_32r) {
  if (MI.getOpcode() == X86::LEA64_32r) {
} else if (BaseReg != 0 && IndexReg != 0 && Disp.getImm() == 0) {
  return optLEAALU(I, MBB);
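// optTwoAddrLEA() in summary -- the branches above map a two-address-shaped
// LEA back onto ordinary ALU ops (AT&T syntax, registers illustrative):
//   lea (%rbx,%rcx), %rbx   ==>  add %rcx, %rbx     (disp 0, dest == base/index)
//   lea 0x10(%rbx), %rbx    ==>  add $0x10, %rbx    (dest == base, no index)
//   lea 0x1(%rbx), %rbx     ==>  inc %rbx           (OptIncDec)
//   lea -0x1(%rbx), %rbx    ==>  dec %rbx           (OptIncDec)
// When the destination is distinct from both base and index and disp is 0,
// the LEA+ALU fusion is delegated to optLEAALU(). Stack-pointer destinations
// are left alone when the subtarget prefers LEA for SP updates (UseLEAForSP).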
if (AddrOffset >= 0) {
  if (p.isReg() && p.getReg() != X86::ESP) {
    seekLEAFixup(p, I, MBB);
  if (q.isReg() && q.getReg() != X86::ESP) {
    seekLEAFixup(q, I, MBB);
LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MBI->dump(););
processInstruction(J, MBB);
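// processInstruction()/seekLEAFixup() implement the leaUsesAG() fixup: for
// each memory operand, every address register other than ESP is traced
// backwards with searchBackwards(); when the producer is found within the
// latency window, postRAConvertToLEA() turns it into an LEA (the
// "FixLEA: Candidate to replace:" debug output above) so the address feeds
// the AGU without a stall, and the new LEA is itself re-processed via
// processInstruction(J, MBB).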
const unsigned Opcode = MI.getOpcode();
if ((SrcR1 == 0 || SrcR1 != DstR) && (SrcR2 == 0 || SrcR2 != DstR))
if (SrcR1 != 0 && SrcR2 != 0) {
if (Offset.getImm() != 0) {
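// processInstructionForSlowLEA(): on slowLEA() subtargets an LEA whose
// destination is also its base or index is lowered to ADDs (illustrative):
//   lea 0x4(%rbx,%rcx), %rbx   ==>   add %rcx, %rbx
//                                    add $0x4, %rbx
// LEAs whose destination matches neither source are left untouched here.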
const unsigned LEAOpcode = MI.getOpcode();
    Segment.getReg() != X86::NoRegister)
if (MI.getOpcode() == X86::LEA64_32r) {
  BaseReg = TRI->getSubReg(BaseReg, X86::sub_32bit);
  IndexReg = TRI->getSubReg(IndexReg, X86::sub_32bit);
bool IsScale1 = Scale.getImm() == 1;
if (IsInefficientBase && DestReg == BaseReg && !IsScale1)
bool BaseOrIndexIsDst = DestReg == BaseReg || DestReg == IndexReg;
if (IsScale1 && BaseReg == IndexReg &&
} else if (IsScale1 && BaseOrIndexIsDst) {
  if (DestReg != BaseReg)
  if (MI.getOpcode() == X86::LEA64_32r) {
} else if (!IsInefficientBase || (!IsInefficientIndex && IsScale1)) {
    .add(IsInefficientBase ? Index : Base)
    .add(IsInefficientBase ? Base : Index)
if (OptIncDec && Offset.isImm() &&
assert(DestReg != BaseReg && "DestReg == BaseReg should be handled already!");
assert(IsInefficientBase && "efficient base should be handled already!");
if (LEAOpcode == X86::LEA64_32r)
bool BIK = Base.isKill() && BaseReg != IndexReg;
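// processInstrForSlow3OpLEA(): on slow3OpsLEA() subtargets a three-operand
// LEA (base + index + displacement) is split so no single instruction carries
// all three components, for example:
//   lea 0x8(%rbx,%rcx), %rdx   ==>   lea (%rbx,%rcx), %rdx
//                                    add $0x8, %rdx
// The branches above special-case scale == 1, destinations that alias the
// base or index, OptIncDec for +/-1 displacements, and the "inefficient" base
// registers (EBP/RBP/R13D/R13) whose encodings always require a displacement
// byte.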