HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri,
      MachineRegisterInfo &mri, const HexagonInstrInfo &tii, MachineFunction &mf)
    : MachineEvaluator(tri, mri), MF(mf), MFI(mf.getFrameInfo()), TII(tii) {
  unsigned InVirtReg, InPhysReg = 0;
  if (Width == 0 || Width > 64)
  InPhysReg = getNextPhysReg(InPhysReg, Width);
  InVirtReg = getVirtRegFor(InPhysReg);
  VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::SExt, Width)));
  VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::ZExt, Width)));
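  // The constructor scans the function's formal arguments: each argument that
  // arrives in an argument register and carries a sign- or zero-extension
  // attribute has its extension kind and bit width recorded in the VRX map,
  // keyed by the virtual register that receives the value (see getNextPhysReg
  // and getVirtRegFor below); evaluateFormalCopy consults this map later.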
BitTracker::BitMask HexagonEvaluator::mask(unsigned Reg, unsigned Sub) const {
  using namespace Hexagon;
  return MachineEvaluator::mask(Reg, 0);
  unsigned ID = RC->getID();
    case DoubleRegsRegClassID:
    case VecDblRegsRegClassID:
    case VecDblRegs128BRegClassID:
        : BT::BitMask(RW, 2*RW-1);
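  // For the 64-bit register classes above, the mask selects one 32-bit half of
  // the register pair. Only the high-half alternative survives in this excerpt;
  // a sketch of the full expression (IsSubLo is an assumed name for the
  // "low subregister" test, which is not shown here) would be roughly:
  //   return IsSubLo ? BT::BitMask(0, RW-1) : BT::BitMask(RW, 2*RW-1);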
  std::vector<BT::RegisterRef> Vector;

  RegisterRefs(const MachineInstr &MI) : Vector(MI.getNumOperands()) {
    for (unsigned i = 0, n = Vector.size(); i < n; ++i) {

  size_t size() const { return Vector.size(); }

    assert(n < Vector.size());
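  // RegisterRefs is a small local helper: it caches a BT::RegisterRef for every
  // operand of the instruction so the evaluator can refer to operand i simply
  // as Reg[i]; the assert above guards that indexing.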
bool HexagonEvaluator::evaluate(const MachineInstr &MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  using namespace Hexagon;
  unsigned NumDefs = 0;
    return evaluateLoad(MI, Inputs, Outputs);
  if (evaluateFormalCopy(MI, Inputs, Outputs))
  RegisterRefs Reg(MI);
#define op(i) MI.getOperand(i)
#define rc(i) RegisterCell::ref(getCell(Reg[i], Inputs))
#define im(i) MI.getOperand(i).getImm()
  auto cop = [this, &Reg, &MI, &Inputs](unsigned N, uint16_t W) {
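  // Helpers used throughout the opcode switch below: op(i), rc(i) and im(i)
  // are the macros defined above (the raw MachineOperand, its known-bits
  // RegisterCell taken from Inputs, and its immediate value). rr0 and cop are
  // lambdas whose definitions fall outside this excerpt; judging from their
  // uses, rr0(Val, Outputs) appears to store Val as the cell of the def
  // operand Reg[0] via putCell, and cop(N, W) appears to return operand N's
  // cell adjusted to width W.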
    return eXTR(RC, 0, RW);
    uint16_t W = RC.width();
    return eXTR(RC, W-RW, W);
    return eXTR(RC, N*16, N*16+16);
    uint16_t I = Odd, Ws = Rs.width();
      return rr0(eIMM(im(1), W0), Outputs);
      int FI = op(1).getIndex();
      int Off = op(2).getImm();
      return rr0(RC, Outputs);
      return rr0(rc(1), Outputs);
      return rr0(RC, Outputs);
      return rr0(eINS(RC, eXTR(rc(1), 0, W0), 0), Outputs);
      assert(W0 == 64 && W1 == 32);
      return rr0(RC, Outputs);
      return rr0(eADD(rc(1), rc(2)), Outputs);
    case S4_addi_asl_ri: {
      return rr0(RC, Outputs);
    case S4_addi_lsr_ri: {
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
    case M4_mpyri_addi: {
      return rr0(RC, Outputs);
    case M4_mpyrr_addi: {
      return rr0(RC, Outputs);
    case M4_mpyri_addr_u2: {
      return rr0(RC, Outputs);
    case M4_mpyri_addr: {
      return rr0(RC, Outputs);
    case M4_mpyrr_addr: {
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
    case S2_addasl_rrri: {
      return rr0(RC, Outputs);
      return rr0(eADD(RPC, eIMM(im(2), W0)), Outputs);
      return rr0(eSUB(rc(1), rc(2)), Outputs);
    case S4_subi_asl_ri: {
      return rr0(RC, Outputs);
    case S4_subi_lsr_ri: {
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(eSUB(eIMM(0, W0), rc(1)), Outputs);
      return rr0(hi(M, W0), Outputs);
      return rr0(eMLS(rc(1), rc(2)), Outputs);
    case M2_dpmpyss_acc_s0:
    case M2_dpmpyss_nac_s0:
      return rr0(lo(M, W0), Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(lo(M, 32), Outputs);
      return rr0(lo(M, 32), Outputs);
      return rr0(lo(M, 32), Outputs);
      return rr0(hi(M, W0), Outputs);
      return rr0(eMLU(rc(1), rc(2)), Outputs);
    case M2_dpmpyuu_acc_s0:
    case M2_dpmpyuu_nac_s0:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case S4_andi_asl_ri: {
      return rr0(RC, Outputs);
    case S4_andi_lsr_ri: {
      return rr0(RC, Outputs);
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case S4_ori_asl_ri: {
      return rr0(RC, Outputs);
    case S4_ori_lsr_ri: {
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(eXOR(rc(1), rc(2)), Outputs);
      return rr0(eNOT(rc(1)), Outputs);
      return rr0(eASL(rc(1), im(2)), Outputs);
      return rr0(eASL(rc(1), 16), Outputs);
    case S2_asl_i_r_xacc:
    case S2_asl_i_p_xacc:
      return rr0(eASR(rc(1), im(2)), Outputs);
      return rr0(eASR(rc(1), 16), Outputs);
    case S2_asr_i_r_rnd: {
      return rr0(eXTR(RC, 0, W0), Outputs);
    case S2_asr_i_r_rnd_goodsyntax: {
      return rr0(rc(1), Outputs);
      return rr0(eXTR(RC, 0, W0), Outputs);
    case S2_asr_i_svw_trun:
      return rr0(eLSR(rc(1), im(2)), Outputs);
    case S2_lsr_i_r_xacc:
    case S2_lsr_i_p_xacc:
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
    case S2_togglebit_i: {
      return rr0(RC, Outputs);
          .fill(W1+(W1-BX), W0, Zero);
      return rr0(RC, Outputs);
      uint16_t Wd = im(2), Of = im(3);
      return rr0(eIMM(0, W0), Outputs);
      if (Opc == S2_extractu || Opc == S2_extractup)
        return rr0(eZXT(RC, Wd), Outputs);
      return rr0(eSXT(RC, Wd), Outputs);
      uint16_t Wd = im(3), Of = im(4);
      assert(Wd < W0 && Of < W0);
      return rr0(rc(1), Outputs);
      return rr0(eINS(rc(1), eXTR(rc(2), 0, Wd), Of), Outputs);
    case V6_vcombine_128B:
      return rr0(cop(2, W0/2).cat(cop(1, W0/2)), Outputs);
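    // In the extract cases, Wd and Of are the field width and offset
    // immediates; the extracted field RC is zero-extended (eZXT) for the
    // unsigned extract opcodes and sign-extended (eSXT) otherwise. In the
    // insert case, the low Wd bits of operand 2 are placed into operand 1 at
    // offset Of via eINS/eXTR. The combine cases build the wide result by
    // concatenating the two source cells with RegisterCell::cat, which appears
    // to append operand 1's cell above operand 2's.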
    case A2_combine_hh: {
      unsigned LoH = !(Opc == A2_combine_ll || Opc == A2_combine_hl);
      unsigned HiH = !(Opc == A2_combine_ll || Opc == A2_combine_lh);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      return rr0(RC, Outputs);
      assert(WR == 64 && WP == 8);
      for (uint16_t i = 0; i < WP; ++i) {
      return rr0(RC, Outputs);
      if (PC0.is(0) || PC0.is(1))
      R2.meet(R3, Reg[0].Reg);
      return rr0(R2, Outputs);
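    // This looks like the mux-style select case: when the predicate cell PC0
    // is a known 0 or 1, one input is chosen outright (that branch is not
    // shown here); otherwise the two candidate cells are combined with
    // RegisterCell::meet, which, roughly, keeps a bit only where both inputs
    // agree, and the merged cell is written back with rr0.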
      return rr0(eSXT(rc(1), 8), Outputs);
      return rr0(eSXT(rc(1), 16), Outputs);
      assert(W0 == 64 && W1 == 32);
      return rr0(RC, Outputs);
      return rr0(eZXT(rc(1), 8), Outputs);
      return rr0(eZXT(rc(1), 16), Outputs);
      return rr0(eCLB(rc(1), false, 32), Outputs);
      return rr0(eCLB(rc(1), true, 32), Outputs);
      if (TV.is(0) || TV.is(1))
        return rr0(eCLB(R1, TV, 32), Outputs);
      return rr0(eCTB(rc(1), false, 32), Outputs);
      return rr0(eCTB(rc(1), true, 32), Outputs);
      bool Has0 = false, All1 = true;
      for (uint16_t i = 0; i < 8; ++i) {
      return rr0(RC, Outputs);
      bool Has1 = false, All0 = true;
      for (uint16_t i = 0; i < 8; ++i) {
      return rr0(RC, Outputs);
      return rr0(eAND(rc(1), rc(2)), Outputs);
      return rr0(eNOT(rc(1)), Outputs);
      return rr0(eORL(rc(1), rc(2)), Outputs);
      return rr0(eXOR(rc(1), rc(2)), Outputs);
      if (V.is(0) || V.is(1)) {
      bool TV = (Opc == S2_tstbit_i);
  return MachineEvaluator::evaluate(MI, Inputs, Outputs);
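  // eCLB/eCTB evaluate the count-leading-bits and count-trailing-bits opcodes;
  // the boolean argument selects whether zeros or ones are counted, and the
  // result is produced as a 32-bit value. Any opcode not handled in the switch
  // appears to fall back to the target-independent MachineEvaluator::evaluate
  // in the final return above.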
      bool &FallsThru) const {
  bool SimpleBranch = false;
  bool Negated = false;
    case Hexagon::J2_jumpf:
    case Hexagon::J2_jumpfpt:
    case Hexagon::J2_jumpfnew:
    case Hexagon::J2_jumpfnewpt:
    case Hexagon::J2_jumpt:
    case Hexagon::J2_jumptpt:
    case Hexagon::J2_jumptnew:
    case Hexagon::J2_jumptnewpt:
    case Hexagon::J2_jump:
  if (!Test.is(0) && !Test.is(1))
  if (!Test.is(!Negated)) {
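  // The fragment above is from the branch-evaluation routine (its signature is
  // truncated in this excerpt). The J2_jumpf* opcodes mark the condition as
  // negated, the J2_jumpt* opcodes leave it plain, and J2_jump is an
  // unconditional SimpleBranch. If the predicate cell Test is not a known 0
  // or 1 the evaluation gives up; if the (possibly negated) condition is known
  // to be false, the branch is recorded as falling through.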
bool HexagonEvaluator::evaluateLoad(const MachineInstr &MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  using namespace Hexagon;
    case L2_loadalignb_pbr:
    case L2_loadalignb_pcr:
    case L2_loadalignb_pi:
    case L2_loadalignh_pbr:
    case L2_loadalignh_pcr:
    case L2_loadalignh_pi:
    case L2_loadbsw2_pbr:
    case L2_loadbsw2_pci:
    case L2_loadbsw2_pcr:
    case L2_loadbsw2_pi:
    case L2_loadbsw4_pbr:
    case L2_loadbsw4_pci:
    case L2_loadbsw4_pcr:
    case L2_loadbsw4_pi:
    case L2_loadbzw2_pbr:
    case L2_loadbzw2_pci:
    case L2_loadbzw2_pcr:
    case L2_loadbzw2_pi:
    case L2_loadbzw4_pbr:
    case L2_loadbzw4_pci:
    case L2_loadbzw4_pcr:
    case L2_loadbzw4_pi:
    case L2_loadrub_pbr:
    case L2_loadrub_pci:
    case L2_loadrub_pcr:
    case L2_loadruh_pbr:
    case L2_loadruh_pci:
    case L2_loadruh_pcr:
    case L2_loadw_locked:
    case L4_loadd_locked:
  assert(MD.isReg() && MD.isDef());
  assert(W >= BitNum && BitNum > 0);
  for (uint16_t i = 0; i < BitNum; ++i)
  for (uint16_t i = BitNum; i < W; ++i)
  for (uint16_t i = BitNum; i < W; ++i)
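  // After the loaded width BitNum is determined from the opcode, the
  // destination cell appears to be built bit by bit: the first loop makes bits
  // [0, BitNum) refer to the freshly loaded value, and the remaining bits up
  // to the register width W are either copies of the sign bit or constant
  // zeros, depending on whether the load sign- or zero-extends (the second and
  // third loops are those two variants).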
bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr &MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  uint16_t EW = F->second.Width;
  if (F->second.Type == ExtType::SExt)
  else if (F->second.Type == ExtType::ZExt)
unsigned HexagonEvaluator::getNextPhysReg(unsigned PReg, unsigned Width) const {
  using namespace Hexagon;
  bool Is64 = DoubleRegsRegClass.contains(PReg);
  assert(PReg == 0 || Is64 || IntRegsRegClass.contains(PReg));
  static const unsigned Phys32[] = { R0, R1, R2, R3, R4, R5 };
  static const unsigned Phys64[] = { D0, D1, D2 };
  const unsigned Num32 = sizeof(Phys32)/sizeof(unsigned);
  const unsigned Num64 = sizeof(Phys64)/sizeof(unsigned);
  return (Width <= 32) ? Phys32[0] : Phys64[0];
  unsigned Idx32 = 0, Idx64 = 0;
  while (Idx32 < Num32) {
    if (Phys32[Idx32] == PReg)
  while (Idx64 < Num64) {
    if (Phys64[Idx64] == PReg)
  return (Idx32+1 < Num32) ? Phys32[Idx32+1] : 0;
  return (Idx64+1 < Num64) ? Phys64[Idx64+1] : 0;
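  // getNextPhysReg walks the Hexagon argument registers: R0..R5 for 32-bit
  // values and D0..D2 for 64-bit pairs. With PReg == 0 it returns the first
  // register of the requested width; otherwise it returns the register that
  // follows PReg in the matching table, or 0 once the argument registers are
  // exhausted. A minimal sketch of the intended caller (the loop shape and the
  // names Arg/Width/VReg are assumptions, not copied from this file):
  //   unsigned PReg = 0;
  //   for (const Argument &Arg : F.args()) {   // Width = bit width of Arg
  //     PReg = getNextPhysReg(PReg, Width);    // next free argument register
  //     if (!PReg)
  //       break;                               // out of argument registers
  //     unsigned VReg = getVirtRegFor(PReg);   // live-in vreg receiving it
  //   }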
unsigned HexagonEvaluator::getVirtRegFor(unsigned PReg) const {
    if (I->first == PReg)