File: build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/X86/X86InstrInfo.cpp
Warning: line 3892, column 19: Value stored to 'NewMI' during its initialization is never read
1 | //===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the X86 implementation of the TargetInstrInfo class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "X86InstrInfo.h" |
14 | #include "X86.h" |
15 | #include "X86InstrBuilder.h" |
16 | #include "X86InstrFoldTables.h" |
17 | #include "X86MachineFunctionInfo.h" |
18 | #include "X86Subtarget.h" |
19 | #include "X86TargetMachine.h" |
20 | #include "llvm/ADT/STLExtras.h" |
21 | #include "llvm/ADT/Sequence.h" |
22 | #include "llvm/CodeGen/LiveIntervals.h" |
23 | #include "llvm/CodeGen/LivePhysRegs.h" |
24 | #include "llvm/CodeGen/LiveVariables.h" |
25 | #include "llvm/CodeGen/MachineConstantPool.h" |
26 | #include "llvm/CodeGen/MachineDominators.h" |
27 | #include "llvm/CodeGen/MachineFrameInfo.h" |
28 | #include "llvm/CodeGen/MachineInstr.h" |
29 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
30 | #include "llvm/CodeGen/MachineModuleInfo.h" |
31 | #include "llvm/CodeGen/MachineOperand.h" |
32 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
33 | #include "llvm/CodeGen/StackMaps.h" |
34 | #include "llvm/IR/DebugInfoMetadata.h" |
35 | #include "llvm/IR/DerivedTypes.h" |
36 | #include "llvm/IR/Function.h" |
37 | #include "llvm/IR/InstrTypes.h" |
38 | #include "llvm/MC/MCAsmInfo.h" |
39 | #include "llvm/MC/MCExpr.h" |
40 | #include "llvm/MC/MCInst.h" |
41 | #include "llvm/Support/CommandLine.h" |
42 | #include "llvm/Support/Debug.h" |
43 | #include "llvm/Support/ErrorHandling.h" |
44 | #include "llvm/Support/raw_ostream.h" |
45 | #include "llvm/Target/TargetOptions.h" |
46 | |
47 | using namespace llvm; |
48 | |
49 | #define DEBUG_TYPE "x86-instr-info" |
50 | |
51 | #define GET_INSTRINFO_CTOR_DTOR |
52 | #include "X86GenInstrInfo.inc" |
53 | |
54 | static cl::opt<bool> |
55 | NoFusing("disable-spill-fusing", |
56 | cl::desc("Disable fusing of spill code into instructions"), |
57 | cl::Hidden); |
58 | static cl::opt<bool> |
59 | PrintFailedFusing("print-failed-fuse-candidates", |
60 | cl::desc("Print instructions that the allocator wants to" |
61 | " fuse, but the X86 backend currently can't"), |
62 | cl::Hidden); |
63 | static cl::opt<bool> |
64 | ReMatPICStubLoad("remat-pic-stub-load", |
65 | cl::desc("Re-materialize load from stub in PIC mode"), |
66 | cl::init(false), cl::Hidden); |
67 | static cl::opt<unsigned> |
68 | PartialRegUpdateClearance("partial-reg-update-clearance", |
69 | cl::desc("Clearance between two register writes " |
70 | "for inserting XOR to avoid partial " |
71 | "register update"), |
72 | cl::init(64), cl::Hidden); |
73 | static cl::opt<unsigned> |
74 | UndefRegClearance("undef-reg-clearance", |
75 | cl::desc("How many idle instructions we would like before " |
76 | "certain undef register reads"), |
77 | cl::init(128), cl::Hidden); |
78 | |
79 | |
80 | // Pin the vtable to this file. |
81 | void X86InstrInfo::anchor() {} |
82 | |
83 | X86InstrInfo::X86InstrInfo(X86Subtarget &STI) |
84 | : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64 |
85 | : X86::ADJCALLSTACKDOWN32), |
86 | (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64 |
87 | : X86::ADJCALLSTACKUP32), |
88 | X86::CATCHRET, |
89 | (STI.is64Bit() ? X86::RET64 : X86::RET32)), |
90 | Subtarget(STI), RI(STI.getTargetTriple()) { |
91 | } |
92 | |
93 | bool |
94 | X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI, |
95 | Register &SrcReg, Register &DstReg, |
96 | unsigned &SubIdx) const { |
97 | switch (MI.getOpcode()) { |
98 | default: break; |
99 | case X86::MOVSX16rr8: |
100 | case X86::MOVZX16rr8: |
101 | case X86::MOVSX32rr8: |
102 | case X86::MOVZX32rr8: |
103 | case X86::MOVSX64rr8: |
104 | if (!Subtarget.is64Bit()) |
105 | // It's not always legal to reference the low 8 bits of the larger |
106 | // register in 32-bit mode. |
107 | return false; |
108 | [[fallthrough]]; |
109 | case X86::MOVSX32rr16: |
110 | case X86::MOVZX32rr16: |
111 | case X86::MOVSX64rr16: |
112 | case X86::MOVSX64rr32: { |
113 | if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg()) |
114 | // Be conservative. |
115 | return false; |
116 | SrcReg = MI.getOperand(1).getReg(); |
117 | DstReg = MI.getOperand(0).getReg(); |
118 | switch (MI.getOpcode()) { |
119 | default: llvm_unreachable("Unreachable!"); |
120 | case X86::MOVSX16rr8: |
121 | case X86::MOVZX16rr8: |
122 | case X86::MOVSX32rr8: |
123 | case X86::MOVZX32rr8: |
124 | case X86::MOVSX64rr8: |
125 | SubIdx = X86::sub_8bit; |
126 | break; |
127 | case X86::MOVSX32rr16: |
128 | case X86::MOVZX32rr16: |
129 | case X86::MOVSX64rr16: |
130 | SubIdx = X86::sub_16bit; |
131 | break; |
132 | case X86::MOVSX64rr32: |
133 | SubIdx = X86::sub_32bit; |
134 | break; |
135 | } |
136 | return true; |
137 | } |
138 | } |
139 | return false; |
140 | } |
141 | |
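// A note on the two predicates below: they classify instructions whose
// timing is believed not to depend on their operand values, which is the
// property speculative-execution mitigations (such as speculative load
// hardening) rely on. That consumer is an assumption inferred from how
// these predicates are used elsewhere in the backend, not stated here.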
142 | bool X86InstrInfo::isDataInvariant(MachineInstr &MI) { |
143 | if (MI.mayLoad() || MI.mayStore()) |
144 | return false; |
145 | |
146 | // Some target-independent operations that trivially lower to data-invariant |
147 | // instructions. |
148 | if (MI.isCopyLike() || MI.isInsertSubreg()) |
149 | return true; |
150 | |
151 | unsigned Opcode = MI.getOpcode(); |
152 | using namespace X86; |
153 | // On x86 it is believed that imul is constant time w.r.t. its operands. |
154 | // However, these instructions set flags and are perhaps the most |
155 | // surprisingly constant-time operations, so we call them out separately. |
156 | if (isIMUL(Opcode)) |
157 | return true; |
158 | // Bit scanning and counting instructions scan across bits and do other |
159 | // fairly complex operations (like popcnt), yet are somewhat surprisingly |
160 | // believed to be constant time on x86. |
161 | // However, these set flags. |
162 | if (isBSF(Opcode) || isBSR(Opcode) || isLZCNT(Opcode) || isPOPCNT(Opcode) || |
163 | isTZCNT(Opcode)) |
164 | return true; |
165 | // Bit manipulation instructions are effectively combinations of basic |
166 | // arithmetic ops, and should still execute in constant time. These also |
167 | // set flags. |
168 | if (isBLCFILL(Opcode) || isBLCI(Opcode) || isBLCIC(Opcode) || |
169 | isBLCMSK(Opcode) || isBLCS(Opcode) || isBLSFILL(Opcode) || |
170 | isBLSI(Opcode) || isBLSIC(Opcode) || isBLSMSK(Opcode) || isBLSR(Opcode) || |
171 | isTZMSK(Opcode)) |
172 | return true; |
173 | // Bit extracting and clearing instructions should execute in constant time, |
174 | // and set flags. |
175 | if (isBEXTR(Opcode) || isBZHI(Opcode)) |
176 | return true; |
177 | // Shift and rotate. |
178 | if (isROL(Opcode) || isROR(Opcode) || isSAR(Opcode) || isSHL(Opcode) || |
179 | isSHR(Opcode) || isSHLD(Opcode) || isSHRD(Opcode)) |
180 | return true; |
181 | // Basic arithmetic is constant time on the input but does set flags. |
182 | if (isADC(Opcode) || isADD(Opcode) || isAND(Opcode) || isOR(Opcode) || |
183 | isSBB(Opcode) || isSUB(Opcode) || isXOR(Opcode)) |
184 | return true; |
185 | // Arithmetic with just 32-bit and 64-bit variants and no immediates. |
186 | if (isADCX(Opcode) || isADOX(Opcode) || isANDN(Opcode)) |
187 | return true; |
188 | // Unary arithmetic operations. |
189 | if (isDEC(Opcode) || isINC(Opcode) || isNEG(Opcode)) |
190 | return true; |
191 | // Unlike other arithmetic, NOT doesn't set EFLAGS. |
192 | if (isNOT(Opcode)) |
193 | return true; |
194 | // Various move instructions used to zero or sign extend things. Note that we |
195 | // intentionally don't support the _NOREX variants as we can't handle that |
196 | // register constraint anyways. |
197 | if (isMOVSX(Opcode) || isMOVZX(Opcode) || isMOVSXD(Opcode) || isMOV(Opcode)) |
198 | return true; |
199 | // Arithmetic instructions that are both constant time and don't set flags. |
200 | if (isRORX(Opcode) || isSARX(Opcode) || isSHLX(Opcode) || isSHRX(Opcode)) |
201 | return true; |
202 | // LEA doesn't actually access memory, and its arithmetic is constant time. |
203 | if (isLEA(Opcode)) |
204 | return true; |
205 | // By default, assume that the instruction is not data invariant. |
206 | return false; |
207 | } |
208 | |
209 | bool X86InstrInfo::isDataInvariantLoad(MachineInstr &MI) { |
210 | switch (MI.getOpcode()) { |
211 | default: |
212 | // By default, assume that the load will immediately leak. |
213 | return false; |
214 | |
215 | // On x86 it is believed that imul is constant time w.r.t. the loaded data. |
216 | // However, these instructions set flags and are perhaps the most |
217 | // surprisingly constant-time operations, so we call them out separately. |
218 | case X86::IMUL16rm: |
219 | case X86::IMUL16rmi8: |
220 | case X86::IMUL16rmi: |
221 | case X86::IMUL32rm: |
222 | case X86::IMUL32rmi8: |
223 | case X86::IMUL32rmi: |
224 | case X86::IMUL64rm: |
225 | case X86::IMUL64rmi32: |
226 | case X86::IMUL64rmi8: |
227 | |
228 | // Bit scanning and counting instructions scan across bits and do other |
229 | // fairly complex operations (like popcnt), yet are somewhat surprisingly |
230 | // believed to be constant time on x86. |
231 | // However, these set flags. |
232 | case X86::BSF16rm: |
233 | case X86::BSF32rm: |
234 | case X86::BSF64rm: |
235 | case X86::BSR16rm: |
236 | case X86::BSR32rm: |
237 | case X86::BSR64rm: |
238 | case X86::LZCNT16rm: |
239 | case X86::LZCNT32rm: |
240 | case X86::LZCNT64rm: |
241 | case X86::POPCNT16rm: |
242 | case X86::POPCNT32rm: |
243 | case X86::POPCNT64rm: |
244 | case X86::TZCNT16rm: |
245 | case X86::TZCNT32rm: |
246 | case X86::TZCNT64rm: |
247 | |
248 | // Bit manipulation instructions are effectively combinations of basic |
249 | // arithmetic ops, and should still execute in constant time. These also |
250 | // set flags. |
251 | case X86::BLCFILL32rm: |
252 | case X86::BLCFILL64rm: |
253 | case X86::BLCI32rm: |
254 | case X86::BLCI64rm: |
255 | case X86::BLCIC32rm: |
256 | case X86::BLCIC64rm: |
257 | case X86::BLCMSK32rm: |
258 | case X86::BLCMSK64rm: |
259 | case X86::BLCS32rm: |
260 | case X86::BLCS64rm: |
261 | case X86::BLSFILL32rm: |
262 | case X86::BLSFILL64rm: |
263 | case X86::BLSI32rm: |
264 | case X86::BLSI64rm: |
265 | case X86::BLSIC32rm: |
266 | case X86::BLSIC64rm: |
267 | case X86::BLSMSK32rm: |
268 | case X86::BLSMSK64rm: |
269 | case X86::BLSR32rm: |
270 | case X86::BLSR64rm: |
271 | case X86::TZMSK32rm: |
272 | case X86::TZMSK64rm: |
273 | |
274 | // Bit extracting and clearing instructions should execute in constant time, |
275 | // and set flags. |
276 | case X86::BEXTR32rm: |
277 | case X86::BEXTR64rm: |
278 | case X86::BEXTRI32mi: |
279 | case X86::BEXTRI64mi: |
280 | case X86::BZHI32rm: |
281 | case X86::BZHI64rm: |
282 | |
283 | // Basic arithmetic is constant time on the input but does set flags. |
284 | case X86::ADC8rm: |
285 | case X86::ADC16rm: |
286 | case X86::ADC32rm: |
287 | case X86::ADC64rm: |
288 | case X86::ADCX32rm: |
289 | case X86::ADCX64rm: |
290 | case X86::ADD8rm: |
291 | case X86::ADD16rm: |
292 | case X86::ADD32rm: |
293 | case X86::ADD64rm: |
294 | case X86::ADOX32rm: |
295 | case X86::ADOX64rm: |
296 | case X86::AND8rm: |
297 | case X86::AND16rm: |
298 | case X86::AND32rm: |
299 | case X86::AND64rm: |
300 | case X86::ANDN32rm: |
301 | case X86::ANDN64rm: |
302 | case X86::OR8rm: |
303 | case X86::OR16rm: |
304 | case X86::OR32rm: |
305 | case X86::OR64rm: |
306 | case X86::SBB8rm: |
307 | case X86::SBB16rm: |
308 | case X86::SBB32rm: |
309 | case X86::SBB64rm: |
310 | case X86::SUB8rm: |
311 | case X86::SUB16rm: |
312 | case X86::SUB32rm: |
313 | case X86::SUB64rm: |
314 | case X86::XOR8rm: |
315 | case X86::XOR16rm: |
316 | case X86::XOR32rm: |
317 | case X86::XOR64rm: |
318 | |
319 | // Integer multiply w/o affecting flags is still believed to be constant |
320 | // time on x86. Called out separately as this is among the most surprising |
321 | // instructions to exhibit that behavior. |
322 | case X86::MULX32rm: |
323 | case X86::MULX64rm: |
324 | |
325 | // Arithmetic instructions that are both constant time and don't set flags. |
326 | case X86::RORX32mi: |
327 | case X86::RORX64mi: |
328 | case X86::SARX32rm: |
329 | case X86::SARX64rm: |
330 | case X86::SHLX32rm: |
331 | case X86::SHLX64rm: |
332 | case X86::SHRX32rm: |
333 | case X86::SHRX64rm: |
334 | |
335 | // Conversions are believed to be constant time and don't set flags. |
336 | case X86::CVTTSD2SI64rm: |
337 | case X86::VCVTTSD2SI64rm: |
338 | case X86::VCVTTSD2SI64Zrm: |
339 | case X86::CVTTSD2SIrm: |
340 | case X86::VCVTTSD2SIrm: |
341 | case X86::VCVTTSD2SIZrm: |
342 | case X86::CVTTSS2SI64rm: |
343 | case X86::VCVTTSS2SI64rm: |
344 | case X86::VCVTTSS2SI64Zrm: |
345 | case X86::CVTTSS2SIrm: |
346 | case X86::VCVTTSS2SIrm: |
347 | case X86::VCVTTSS2SIZrm: |
348 | case X86::CVTSI2SDrm: |
349 | case X86::VCVTSI2SDrm: |
350 | case X86::VCVTSI2SDZrm: |
351 | case X86::CVTSI2SSrm: |
352 | case X86::VCVTSI2SSrm: |
353 | case X86::VCVTSI2SSZrm: |
354 | case X86::CVTSI642SDrm: |
355 | case X86::VCVTSI642SDrm: |
356 | case X86::VCVTSI642SDZrm: |
357 | case X86::CVTSI642SSrm: |
358 | case X86::VCVTSI642SSrm: |
359 | case X86::VCVTSI642SSZrm: |
360 | case X86::CVTSS2SDrm: |
361 | case X86::VCVTSS2SDrm: |
362 | case X86::VCVTSS2SDZrm: |
363 | case X86::CVTSD2SSrm: |
364 | case X86::VCVTSD2SSrm: |
365 | case X86::VCVTSD2SSZrm: |
366 | // AVX512 added unsigned integer conversions. |
367 | case X86::VCVTTSD2USI64Zrm: |
368 | case X86::VCVTTSD2USIZrm: |
369 | case X86::VCVTTSS2USI64Zrm: |
370 | case X86::VCVTTSS2USIZrm: |
371 | case X86::VCVTUSI2SDZrm: |
372 | case X86::VCVTUSI642SDZrm: |
373 | case X86::VCVTUSI2SSZrm: |
374 | case X86::VCVTUSI642SSZrm: |
375 | |
376 | // Loads to register don't set flags. |
377 | case X86::MOV8rm: |
378 | case X86::MOV8rm_NOREX: |
379 | case X86::MOV16rm: |
380 | case X86::MOV32rm: |
381 | case X86::MOV64rm: |
382 | case X86::MOVSX16rm8: |
383 | case X86::MOVSX32rm16: |
384 | case X86::MOVSX32rm8: |
385 | case X86::MOVSX32rm8_NOREX: |
386 | case X86::MOVSX64rm16: |
387 | case X86::MOVSX64rm32: |
388 | case X86::MOVSX64rm8: |
389 | case X86::MOVZX16rm8: |
390 | case X86::MOVZX32rm16: |
391 | case X86::MOVZX32rm8: |
392 | case X86::MOVZX32rm8_NOREX: |
393 | case X86::MOVZX64rm16: |
394 | case X86::MOVZX64rm8: |
395 | return true; |
396 | } |
397 | } |
398 | |
399 | int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const { |
400 | const MachineFunction *MF = MI.getParent()->getParent(); |
401 | const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); |
402 | |
403 | if (isFrameInstr(MI)) { |
404 | int SPAdj = alignTo(getFrameSize(MI), TFI->getStackAlign()); |
405 | SPAdj -= getFrameAdjustment(MI); |
406 | if (!isFrameSetup(MI)) |
407 | SPAdj = -SPAdj; |
408 | return SPAdj; |
409 | } |
410 | |
411 | // To know whether a call adjusts the stack, we need information |
412 | // that is bound to the following ADJCALLSTACKUP pseudo. |
413 | // Look for the next ADJCALLSTACKUP that follows the call. |
414 | if (MI.isCall()) { |
415 | const MachineBasicBlock *MBB = MI.getParent(); |
416 | auto I = ++MachineBasicBlock::const_iterator(MI); |
417 | for (auto E = MBB->end(); I != E; ++I) { |
418 | if (I->getOpcode() == getCallFrameDestroyOpcode() || |
419 | I->isCall()) |
420 | break; |
421 | } |
422 | |
423 | // If we could not find a frame destroy opcode, then it has already |
424 | // been simplified, so we don't care. |
425 | if (I->getOpcode() != getCallFrameDestroyOpcode()) |
426 | return 0; |
427 | |
428 | return -(I->getOperand(1).getImm()); |
429 | } |
430 | |
431 | // Currently we handle only PUSHes we can reasonably expect to see |
432 | // in call sequences. |
433 | switch (MI.getOpcode()) { |
434 | default: |
435 | return 0; |
436 | case X86::PUSH32i8: |
437 | case X86::PUSH32r: |
438 | case X86::PUSH32rmm: |
439 | case X86::PUSH32rmr: |
440 | case X86::PUSHi32: |
441 | return 4; |
442 | case X86::PUSH64i8: |
443 | case X86::PUSH64r: |
444 | case X86::PUSH64rmm: |
445 | case X86::PUSH64rmr: |
446 | case X86::PUSH64i32: |
447 | return 8; |
448 | } |
449 | } |
450 | |
451 | /// Return true and set FrameIndex if the specified |
452 | /// operand and following operands form a reference to the stack frame. |
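/// The memory reference is the standard five-operand x86 address form
/// (base, scale, index, displacement, segment); it names the stack frame
/// when the base is a frame index, the scale is 1, there is no index
/// register, and the displacement is 0, as checked below.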
453 | bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op, |
454 | int &FrameIndex) const { |
455 | if (MI.getOperand(Op + X86::AddrBaseReg).isFI() && |
456 | MI.getOperand(Op + X86::AddrScaleAmt).isImm() && |
457 | MI.getOperand(Op + X86::AddrIndexReg).isReg() && |
458 | MI.getOperand(Op + X86::AddrDisp).isImm() && |
459 | MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 && |
460 | MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 && |
461 | MI.getOperand(Op + X86::AddrDisp).getImm() == 0) { |
462 | FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex(); |
463 | return true; |
464 | } |
465 | return false; |
466 | } |
467 | |
468 | static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) { |
469 | switch (Opcode) { |
470 | default: |
471 | return false; |
472 | case X86::MOV8rm: |
473 | case X86::KMOVBkm: |
474 | MemBytes = 1; |
475 | return true; |
476 | case X86::MOV16rm: |
477 | case X86::KMOVWkm: |
478 | case X86::VMOVSHZrm: |
479 | case X86::VMOVSHZrm_alt: |
480 | MemBytes = 2; |
481 | return true; |
482 | case X86::MOV32rm: |
483 | case X86::MOVSSrm: |
484 | case X86::MOVSSrm_alt: |
485 | case X86::VMOVSSrm: |
486 | case X86::VMOVSSrm_alt: |
487 | case X86::VMOVSSZrm: |
488 | case X86::VMOVSSZrm_alt: |
489 | case X86::KMOVDkm: |
490 | MemBytes = 4; |
491 | return true; |
492 | case X86::MOV64rm: |
493 | case X86::LD_Fp64m: |
494 | case X86::MOVSDrm: |
495 | case X86::MOVSDrm_alt: |
496 | case X86::VMOVSDrm: |
497 | case X86::VMOVSDrm_alt: |
498 | case X86::VMOVSDZrm: |
499 | case X86::VMOVSDZrm_alt: |
500 | case X86::MMX_MOVD64rm: |
501 | case X86::MMX_MOVQ64rm: |
502 | case X86::KMOVQkm: |
503 | MemBytes = 8; |
504 | return true; |
505 | case X86::MOVAPSrm: |
506 | case X86::MOVUPSrm: |
507 | case X86::MOVAPDrm: |
508 | case X86::MOVUPDrm: |
509 | case X86::MOVDQArm: |
510 | case X86::MOVDQUrm: |
511 | case X86::VMOVAPSrm: |
512 | case X86::VMOVUPSrm: |
513 | case X86::VMOVAPDrm: |
514 | case X86::VMOVUPDrm: |
515 | case X86::VMOVDQArm: |
516 | case X86::VMOVDQUrm: |
517 | case X86::VMOVAPSZ128rm: |
518 | case X86::VMOVUPSZ128rm: |
519 | case X86::VMOVAPSZ128rm_NOVLX: |
520 | case X86::VMOVUPSZ128rm_NOVLX: |
521 | case X86::VMOVAPDZ128rm: |
522 | case X86::VMOVUPDZ128rm: |
523 | case X86::VMOVDQU8Z128rm: |
524 | case X86::VMOVDQU16Z128rm: |
525 | case X86::VMOVDQA32Z128rm: |
526 | case X86::VMOVDQU32Z128rm: |
527 | case X86::VMOVDQA64Z128rm: |
528 | case X86::VMOVDQU64Z128rm: |
529 | MemBytes = 16; |
530 | return true; |
531 | case X86::VMOVAPSYrm: |
532 | case X86::VMOVUPSYrm: |
533 | case X86::VMOVAPDYrm: |
534 | case X86::VMOVUPDYrm: |
535 | case X86::VMOVDQAYrm: |
536 | case X86::VMOVDQUYrm: |
537 | case X86::VMOVAPSZ256rm: |
538 | case X86::VMOVUPSZ256rm: |
539 | case X86::VMOVAPSZ256rm_NOVLX: |
540 | case X86::VMOVUPSZ256rm_NOVLX: |
541 | case X86::VMOVAPDZ256rm: |
542 | case X86::VMOVUPDZ256rm: |
543 | case X86::VMOVDQU8Z256rm: |
544 | case X86::VMOVDQU16Z256rm: |
545 | case X86::VMOVDQA32Z256rm: |
546 | case X86::VMOVDQU32Z256rm: |
547 | case X86::VMOVDQA64Z256rm: |
548 | case X86::VMOVDQU64Z256rm: |
549 | MemBytes = 32; |
550 | return true; |
551 | case X86::VMOVAPSZrm: |
552 | case X86::VMOVUPSZrm: |
553 | case X86::VMOVAPDZrm: |
554 | case X86::VMOVUPDZrm: |
555 | case X86::VMOVDQU8Zrm: |
556 | case X86::VMOVDQU16Zrm: |
557 | case X86::VMOVDQA32Zrm: |
558 | case X86::VMOVDQU32Zrm: |
559 | case X86::VMOVDQA64Zrm: |
560 | case X86::VMOVDQU64Zrm: |
561 | MemBytes = 64; |
562 | return true; |
563 | } |
564 | } |
565 | |
566 | static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) { |
567 | switch (Opcode) { |
568 | default: |
569 | return false; |
570 | case X86::MOV8mr: |
571 | case X86::KMOVBmk: |
572 | MemBytes = 1; |
573 | return true; |
574 | case X86::MOV16mr: |
575 | case X86::KMOVWmk: |
576 | case X86::VMOVSHZmr: |
577 | MemBytes = 2; |
578 | return true; |
579 | case X86::MOV32mr: |
580 | case X86::MOVSSmr: |
581 | case X86::VMOVSSmr: |
582 | case X86::VMOVSSZmr: |
583 | case X86::KMOVDmk: |
584 | MemBytes = 4; |
585 | return true; |
586 | case X86::MOV64mr: |
587 | case X86::ST_FpP64m: |
588 | case X86::MOVSDmr: |
589 | case X86::VMOVSDmr: |
590 | case X86::VMOVSDZmr: |
591 | case X86::MMX_MOVD64mr: |
592 | case X86::MMX_MOVQ64mr: |
593 | case X86::MMX_MOVNTQmr: |
594 | case X86::KMOVQmk: |
595 | MemBytes = 8; |
596 | return true; |
597 | case X86::MOVAPSmr: |
598 | case X86::MOVUPSmr: |
599 | case X86::MOVAPDmr: |
600 | case X86::MOVUPDmr: |
601 | case X86::MOVDQAmr: |
602 | case X86::MOVDQUmr: |
603 | case X86::VMOVAPSmr: |
604 | case X86::VMOVUPSmr: |
605 | case X86::VMOVAPDmr: |
606 | case X86::VMOVUPDmr: |
607 | case X86::VMOVDQAmr: |
608 | case X86::VMOVDQUmr: |
609 | case X86::VMOVUPSZ128mr: |
610 | case X86::VMOVAPSZ128mr: |
611 | case X86::VMOVUPSZ128mr_NOVLX: |
612 | case X86::VMOVAPSZ128mr_NOVLX: |
613 | case X86::VMOVUPDZ128mr: |
614 | case X86::VMOVAPDZ128mr: |
615 | case X86::VMOVDQA32Z128mr: |
616 | case X86::VMOVDQU32Z128mr: |
617 | case X86::VMOVDQA64Z128mr: |
618 | case X86::VMOVDQU64Z128mr: |
619 | case X86::VMOVDQU8Z128mr: |
620 | case X86::VMOVDQU16Z128mr: |
621 | MemBytes = 16; |
622 | return true; |
623 | case X86::VMOVUPSYmr: |
624 | case X86::VMOVAPSYmr: |
625 | case X86::VMOVUPDYmr: |
626 | case X86::VMOVAPDYmr: |
627 | case X86::VMOVDQUYmr: |
628 | case X86::VMOVDQAYmr: |
629 | case X86::VMOVUPSZ256mr: |
630 | case X86::VMOVAPSZ256mr: |
631 | case X86::VMOVUPSZ256mr_NOVLX: |
632 | case X86::VMOVAPSZ256mr_NOVLX: |
633 | case X86::VMOVUPDZ256mr: |
634 | case X86::VMOVAPDZ256mr: |
635 | case X86::VMOVDQU8Z256mr: |
636 | case X86::VMOVDQU16Z256mr: |
637 | case X86::VMOVDQA32Z256mr: |
638 | case X86::VMOVDQU32Z256mr: |
639 | case X86::VMOVDQA64Z256mr: |
640 | case X86::VMOVDQU64Z256mr: |
641 | MemBytes = 32; |
642 | return true; |
643 | case X86::VMOVUPSZmr: |
644 | case X86::VMOVAPSZmr: |
645 | case X86::VMOVUPDZmr: |
646 | case X86::VMOVAPDZmr: |
647 | case X86::VMOVDQU8Zmr: |
648 | case X86::VMOVDQU16Zmr: |
649 | case X86::VMOVDQA32Zmr: |
650 | case X86::VMOVDQU32Zmr: |
651 | case X86::VMOVDQA64Zmr: |
652 | case X86::VMOVDQU64Zmr: |
653 | MemBytes = 64; |
654 | return true; |
655 | } |
656 | return false; |
657 | } |
658 | |
659 | unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI, |
660 | int &FrameIndex) const { |
661 | unsigned Dummy; |
662 | return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy); |
663 | } |
664 | |
665 | unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI, |
666 | int &FrameIndex, |
667 | unsigned &MemBytes) const { |
668 | if (isFrameLoadOpcode(MI.getOpcode(), MemBytes)) |
669 | if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex)) |
670 | return MI.getOperand(0).getReg(); |
671 | return 0; |
672 | } |
673 | |
674 | unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI, |
675 | int &FrameIndex) const { |
676 | unsigned Dummy; |
677 | if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) { |
678 | unsigned Reg; |
679 | if ((Reg = isLoadFromStackSlot(MI, FrameIndex))) |
680 | return Reg; |
681 | // Check for post-frame index elimination operations |
682 | SmallVector<const MachineMemOperand *, 1> Accesses; |
683 | if (hasLoadFromStackSlot(MI, Accesses)) { |
684 | FrameIndex = |
685 | cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue()) |
686 | ->getFrameIndex(); |
687 | return MI.getOperand(0).getReg(); |
688 | } |
689 | } |
690 | return 0; |
691 | } |
692 | |
693 | unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI, |
694 | int &FrameIndex) const { |
695 | unsigned Dummy; |
696 | return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy); |
697 | } |
698 | |
699 | unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI, |
700 | int &FrameIndex, |
701 | unsigned &MemBytes) const { |
702 | if (isFrameStoreOpcode(MI.getOpcode(), MemBytes)) |
703 | if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 && |
704 | isFrameOperand(MI, 0, FrameIndex)) |
705 | return MI.getOperand(X86::AddrNumOperands).getReg(); |
706 | return 0; |
707 | } |
708 | |
709 | unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI, |
710 | int &FrameIndex) const { |
711 | unsigned Dummy; |
712 | if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) { |
713 | unsigned Reg; |
714 | if ((Reg = isStoreToStackSlot(MI, FrameIndex))) |
715 | return Reg; |
716 | // Check for post-frame index elimination operations |
717 | SmallVector<const MachineMemOperand *, 1> Accesses; |
718 | if (hasStoreToStackSlot(MI, Accesses)) { |
719 | FrameIndex = |
720 | cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue()) |
721 | ->getFrameIndex(); |
722 | return MI.getOperand(X86::AddrNumOperands).getReg(); |
723 | } |
724 | } |
725 | return 0; |
726 | } |
727 | |
728 | /// Return true if register is PIC base, i.e., defined by X86::MOVPC32r. |
729 | static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) { |
730 | // Don't waste compile time scanning use-def chains of physregs. |
731 | if (!BaseReg.isVirtual()) |
732 | return false; |
733 | bool isPICBase = false; |
734 | for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg), |
735 | E = MRI.def_instr_end(); I != E; ++I) { |
736 | MachineInstr *DefMI = &*I; |
737 | if (DefMI->getOpcode() != X86::MOVPC32r) |
738 | return false; |
739 | assert(!isPICBase && "More than one PIC base?"); |
740 | isPICBase = true; |
741 | } |
742 | return isPICBase; |
743 | } |
744 | |
745 | bool X86InstrInfo::isReallyTriviallyReMaterializable( |
746 | const MachineInstr &MI) const { |
747 | switch (MI.getOpcode()) { |
748 | default: |
749 | // This function should only be called for opcodes with the ReMaterializable |
750 | // flag set. |
751 | llvm_unreachable("Unknown rematerializable operation!"); |
752 | break; |
753 | |
754 | case X86::LOAD_STACK_GUARD: |
755 | case X86::AVX1_SETALLONES: |
756 | case X86::AVX2_SETALLONES: |
757 | case X86::AVX512_128_SET0: |
758 | case X86::AVX512_256_SET0: |
759 | case X86::AVX512_512_SET0: |
760 | case X86::AVX512_512_SETALLONES: |
761 | case X86::AVX512_FsFLD0SD: |
762 | case X86::AVX512_FsFLD0SH: |
763 | case X86::AVX512_FsFLD0SS: |
764 | case X86::AVX512_FsFLD0F128: |
765 | case X86::AVX_SET0: |
766 | case X86::FsFLD0SD: |
767 | case X86::FsFLD0SS: |
768 | case X86::FsFLD0SH: |
769 | case X86::FsFLD0F128: |
770 | case X86::KSET0D: |
771 | case X86::KSET0Q: |
772 | case X86::KSET0W: |
773 | case X86::KSET1D: |
774 | case X86::KSET1Q: |
775 | case X86::KSET1W: |
776 | case X86::MMX_SET0: |
777 | case X86::MOV32ImmSExti8: |
778 | case X86::MOV32r0: |
779 | case X86::MOV32r1: |
780 | case X86::MOV32r_1: |
781 | case X86::MOV32ri64: |
782 | case X86::MOV64ImmSExti8: |
783 | case X86::V_SET0: |
784 | case X86::V_SETALLONES: |
785 | case X86::MOV16ri: |
786 | case X86::MOV32ri: |
787 | case X86::MOV64ri: |
788 | case X86::MOV64ri32: |
789 | case X86::MOV8ri: |
790 | case X86::PTILEZEROV: |
791 | return true; |
792 | |
793 | case X86::MOV8rm: |
794 | case X86::MOV8rm_NOREX: |
795 | case X86::MOV16rm: |
796 | case X86::MOV32rm: |
797 | case X86::MOV64rm: |
798 | case X86::MOVSSrm: |
799 | case X86::MOVSSrm_alt: |
800 | case X86::MOVSDrm: |
801 | case X86::MOVSDrm_alt: |
802 | case X86::MOVAPSrm: |
803 | case X86::MOVUPSrm: |
804 | case X86::MOVAPDrm: |
805 | case X86::MOVUPDrm: |
806 | case X86::MOVDQArm: |
807 | case X86::MOVDQUrm: |
808 | case X86::VMOVSSrm: |
809 | case X86::VMOVSSrm_alt: |
810 | case X86::VMOVSDrm: |
811 | case X86::VMOVSDrm_alt: |
812 | case X86::VMOVAPSrm: |
813 | case X86::VMOVUPSrm: |
814 | case X86::VMOVAPDrm: |
815 | case X86::VMOVUPDrm: |
816 | case X86::VMOVDQArm: |
817 | case X86::VMOVDQUrm: |
818 | case X86::VMOVAPSYrm: |
819 | case X86::VMOVUPSYrm: |
820 | case X86::VMOVAPDYrm: |
821 | case X86::VMOVUPDYrm: |
822 | case X86::VMOVDQAYrm: |
823 | case X86::VMOVDQUYrm: |
824 | case X86::MMX_MOVD64rm: |
825 | case X86::MMX_MOVQ64rm: |
826 | // AVX-512 |
827 | case X86::VMOVSSZrm: |
828 | case X86::VMOVSSZrm_alt: |
829 | case X86::VMOVSDZrm: |
830 | case X86::VMOVSDZrm_alt: |
831 | case X86::VMOVSHZrm: |
832 | case X86::VMOVSHZrm_alt: |
833 | case X86::VMOVAPDZ128rm: |
834 | case X86::VMOVAPDZ256rm: |
835 | case X86::VMOVAPDZrm: |
836 | case X86::VMOVAPSZ128rm: |
837 | case X86::VMOVAPSZ256rm: |
838 | case X86::VMOVAPSZ128rm_NOVLX: |
839 | case X86::VMOVAPSZ256rm_NOVLX: |
840 | case X86::VMOVAPSZrm: |
841 | case X86::VMOVDQA32Z128rm: |
842 | case X86::VMOVDQA32Z256rm: |
843 | case X86::VMOVDQA32Zrm: |
844 | case X86::VMOVDQA64Z128rm: |
845 | case X86::VMOVDQA64Z256rm: |
846 | case X86::VMOVDQA64Zrm: |
847 | case X86::VMOVDQU16Z128rm: |
848 | case X86::VMOVDQU16Z256rm: |
849 | case X86::VMOVDQU16Zrm: |
850 | case X86::VMOVDQU32Z128rm: |
851 | case X86::VMOVDQU32Z256rm: |
852 | case X86::VMOVDQU32Zrm: |
853 | case X86::VMOVDQU64Z128rm: |
854 | case X86::VMOVDQU64Z256rm: |
855 | case X86::VMOVDQU64Zrm: |
856 | case X86::VMOVDQU8Z128rm: |
857 | case X86::VMOVDQU8Z256rm: |
858 | case X86::VMOVDQU8Zrm: |
859 | case X86::VMOVUPDZ128rm: |
860 | case X86::VMOVUPDZ256rm: |
861 | case X86::VMOVUPDZrm: |
862 | case X86::VMOVUPSZ128rm: |
863 | case X86::VMOVUPSZ256rm: |
864 | case X86::VMOVUPSZ128rm_NOVLX: |
865 | case X86::VMOVUPSZ256rm_NOVLX: |
866 | case X86::VMOVUPSZrm: { |
867 | // Loads from constant pools are trivially rematerializable. |
868 | if (MI.getOperand(1 + X86::AddrBaseReg).isReg() && |
869 | MI.getOperand(1 + X86::AddrScaleAmt).isImm() && |
870 | MI.getOperand(1 + X86::AddrIndexReg).isReg() && |
871 | MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 && |
872 | MI.isDereferenceableInvariantLoad()) { |
873 | Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg(); |
874 | if (BaseReg == 0 || BaseReg == X86::RIP) |
875 | return true; |
876 | // Allow re-materialization of PIC load. |
877 | if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal()) |
878 | return false; |
879 | const MachineFunction &MF = *MI.getParent()->getParent(); |
880 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
881 | return regIsPICBase(BaseReg, MRI); |
882 | } |
883 | return false; |
884 | } |
885 | |
886 | case X86::LEA32r: |
887 | case X86::LEA64r: { |
888 | if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() && |
889 | MI.getOperand(1 + X86::AddrIndexReg).isReg() && |
890 | MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 && |
891 | !MI.getOperand(1 + X86::AddrDisp).isReg()) { |
892 | // lea fi#, lea GV, etc. are all rematerializable. |
893 | if (!MI.getOperand(1 + X86::AddrBaseReg).isReg()) |
894 | return true; |
895 | Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg(); |
896 | if (BaseReg == 0) |
897 | return true; |
898 | // Allow re-materialization of lea PICBase + x. |
899 | const MachineFunction &MF = *MI.getParent()->getParent(); |
900 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
901 | return regIsPICBase(BaseReg, MRI); |
902 | } |
903 | return false; |
904 | } |
905 | } |
906 | } |
907 | |
908 | void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB, |
909 | MachineBasicBlock::iterator I, |
910 | Register DestReg, unsigned SubIdx, |
911 | const MachineInstr &Orig, |
912 | const TargetRegisterInfo &TRI) const { |
913 | bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI); |
914 | if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) != |
915 | MachineBasicBlock::LQR_Dead) { |
916 | // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side |
917 | // effects. |
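// For example (illustrative): MOV32r0 is normally expanded to
// "xor %eax, %eax", which clobbers EFLAGS; when EFLAGS is live at the
// rematerialization point we emit "mov $0, %eax" instead, trading a
// longer encoding for flag preservation.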
918 | int Value; |
919 | switch (Orig.getOpcode()) { |
920 | case X86::MOV32r0: Value = 0; break; |
921 | case X86::MOV32r1: Value = 1; break; |
922 | case X86::MOV32r_1: Value = -1; break; |
923 | default: |
924 | llvm_unreachable("Unexpected instruction!"); |
925 | } |
926 | |
927 | const DebugLoc &DL = Orig.getDebugLoc(); |
928 | BuildMI(MBB, I, DL, get(X86::MOV32ri)) |
929 | .add(Orig.getOperand(0)) |
930 | .addImm(Value); |
931 | } else { |
932 | MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig); |
933 | MBB.insert(I, MI); |
934 | } |
935 | |
936 | MachineInstr &NewMI = *std::prev(I); |
937 | NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI); |
938 | } |
939 | |
940 | /// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead. |
941 | bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const { |
942 | for (const MachineOperand &MO : MI.operands()) { |
943 | if (MO.isReg() && MO.isDef() && |
944 | MO.getReg() == X86::EFLAGS && !MO.isDead()) { |
945 | return true; |
946 | } |
947 | } |
948 | return false; |
949 | } |
950 | |
951 | /// Return the shift count for a machine operand, truncated as the hardware would. |
952 | inline static unsigned getTruncatedShiftCount(const MachineInstr &MI, |
953 | unsigned ShiftAmtOperandIdx) { |
954 | // The shift count is six bits with the REX.W prefix and five bits without. |
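// For example, an encoded count of 0x43 truncates to 0x43 & 63 = 3 for a
// 64-bit (REX.W) shift and to 0x43 & 31 = 3 for a 32-bit one, mirroring
// how the hardware masks the shift count.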
955 | unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31; |
956 | unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm(); |
957 | return Imm & ShiftCountMask; |
958 | } |
959 | |
960 | /// Check whether the given shift count can be |
961 | /// represented by a LEA instruction. |
962 | inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) { |
963 | // Left shift instructions can be transformed into load-effective-address |
964 | // instructions if we can encode them appropriately. |
965 | // A LEA instruction utilizes a SIB byte to encode its scale factor. |
966 | // The SIB.scale field is two bits wide which means that we can encode any |
967 | // shift amount less than 4. |
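// For example, "shl $3, %reg" can become "lea (,%reg,8), %dst" since the
// scale 2^3 = 8 is encodable, while a shift by 4 would need scale 16,
// which the two-bit SIB.scale field cannot express.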
968 | return ShAmt < 4 && ShAmt > 0; |
969 | } |
970 | |
971 | static bool findRedundantFlagInstr(MachineInstr &CmpInstr, |
972 | MachineInstr &CmpValDefInstr, |
973 | const MachineRegisterInfo *MRI, |
974 | MachineInstr **AndInstr, |
975 | const TargetRegisterInfo *TRI, |
976 | bool &NoSignFlag, bool &ClearsOverflowFlag) { |
977 | if (CmpValDefInstr.getOpcode() != X86::SUBREG_TO_REG) |
978 | return false; |
979 | |
980 | if (CmpInstr.getOpcode() != X86::TEST64rr) |
981 | return false; |
982 | |
983 | // CmpInstr is a TEST64rr instruction, and `X86InstrInfo::analyzeCompare` |
984 | // guarantees that it's analyzable only if two registers are identical. |
985 | assert( |
986 | (CmpInstr.getOperand(0).getReg() == CmpInstr.getOperand(1).getReg()) && |
987 | "CmpInstr is an analyzable TEST64rr, and `X86InstrInfo::analyzeCompare` " |
988 | "requires two reg operands are the same."); |
989 | |
990 | // Caller (`X86InstrInfo::optimizeCompareInstr`) guarantees that |
991 | // `CmpValDefInstr` defines the value that's used by `CmpInstr`; in this case |
992 | // if `CmpValDefInstr` sets the EFLAGS, it is likely that `CmpInstr` is |
993 | // redundant. |
994 | assert( |
995 | (MRI->getVRegDef(CmpInstr.getOperand(0).getReg()) == &CmpValDefInstr) && |
996 | "Caller guarantees that TEST64rr is a user of SUBREG_TO_REG."); |
997 | |
998 | // As seen in X86 td files, CmpValDefInstr.getOperand(1).getImm() is typically |
999 | // 0. |
1000 | if (CmpValDefInstr.getOperand(1).getImm() != 0) |
1001 | return false; |
1002 | |
1003 | // As seen in X86 td files, CmpValDefInstr.getOperand(3) is typically |
1004 | // sub_32bit or sub_xmm. |
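// For reference, a SUBREG_TO_REG instance looks like
//   %extended:gr64 = SUBREG_TO_REG 0, %low:gr32, %subreg.sub_32bit
// so operand(1) is the immediate, operand(2) the source vreg, and
// operand(3) the sub-register index being checked here.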
1005 | if (CmpValDefInstr.getOperand(3).getImm() != X86::sub_32bit) |
1006 | return false; |
1007 | |
1008 | MachineInstr *VregDefInstr = |
1009 | MRI->getVRegDef(CmpValDefInstr.getOperand(2).getReg()); |
1010 | |
1011 | assert(VregDefInstr && "Must have a definition (SSA)"); |
1012 | |
1013 | // Requires `CmpValDefInstr` and `VregDefInstr` are from the same MBB |
1014 | // to simplify the subsequent analysis. |
1015 | // |
1016 | // FIXME: If `VregDefInstr->getParent()` is the only predecessor of |
1017 | // `CmpValDefInstr.getParent()`, this could be handled. |
1018 | if (VregDefInstr->getParent() != CmpValDefInstr.getParent()) |
1019 | return false; |
1020 | |
1021 | if (X86::isAND(VregDefInstr->getOpcode())) { |
1022 | // Get a sequence of instructions like |
1023 | // %reg = and* ... // Set EFLAGS |
1024 | // ... // EFLAGS not changed |
1025 | // %extended_reg = subreg_to_reg 0, %reg, %subreg.sub_32bit |
1026 | // test64rr %extended_reg, %extended_reg, implicit-def $eflags |
1027 | // |
1028 | // If subsequent readers use a subset of bits that don't change |
1029 | // after `and*` instructions, it's likely that the test64rr could |
1030 | // be optimized away. |
1031 | for (const MachineInstr &Instr : |
1032 | make_range(std::next(MachineBasicBlock::iterator(VregDefInstr)), |
1033 | MachineBasicBlock::iterator(CmpValDefInstr))) { |
1034 | // Bail out if an instruction between 'VregDefInstr' and |
1035 | // 'CmpValDefInstr' modifies EFLAGS. |
1036 | if (Instr.modifiesRegister(X86::EFLAGS, TRI)) |
1037 | return false; |
1038 | } |
1039 | |
1040 | *AndInstr = VregDefInstr; |
1041 | |
1042 | // AND instruction will essentially update SF and clear OF, so |
1043 | // NoSignFlag should be false in the sense that SF is modified by `AND`. |
1044 | // |
1045 | // However, the implementation artificially sets `NoSignFlag` to true |
1046 | // to poison the SF bit; that is to say, if SF is looked at later, the |
1047 | // optimization (to erase TEST64rr) will be disabled. |
1048 | // |
1049 | // The reason to poison SF bit is that SF bit value could be different |
1050 | // in the `AND` and `TEST` operations; the sign bit is not known for `AND`, |
1051 | // and is known to be 0 as a result of `TEST64rr`. |
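// Concretely (illustrative values): if the 32-bit AND result is
// 0x80000000, the AND sets SF = 1 (bit 31), while TEST64rr of the
// zero-extended value 0x0000000080000000 sets SF = 0 (bit 63).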
1052 | // |
1053 | // FIXME: As opposed to poisoning the SF bit directly, consider peeking into |
1054 | // the AND instruction and using the static information to guide peephole |
1055 | // optimization if possible. For example, it's possible to fold a |
1056 | // conditional move into a copy if the relevant EFLAG bits could be deduced |
1057 | // from an immediate operand of the AND operation. |
1058 | // |
1059 | NoSignFlag = true; |
1060 | // ClearsOverflowFlag is true for AND operation (no surprise). |
1061 | ClearsOverflowFlag = true; |
1062 | return true; |
1063 | } |
1064 | return false; |
1065 | } |
1066 | |
1067 | bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, |
1068 | unsigned Opc, bool AllowSP, Register &NewSrc, |
1069 | bool &isKill, MachineOperand &ImplicitOp, |
1070 | LiveVariables *LV, LiveIntervals *LIS) const { |
1071 | MachineFunction &MF = *MI.getParent()->getParent(); |
1072 | const TargetRegisterClass *RC; |
1073 | if (AllowSP) { |
1074 | RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass; |
1075 | } else { |
1076 | RC = Opc != X86::LEA32r ? |
1077 | &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass; |
1078 | } |
1079 | Register SrcReg = Src.getReg(); |
1080 | isKill = MI.killsRegister(SrcReg); |
1081 | |
1082 | // For both LEA64 and LEA32 the register already has essentially the right |
1083 | // type (32-bit or 64-bit); we may just need to forbid SP. |
1084 | if (Opc != X86::LEA64_32r) { |
1085 | NewSrc = SrcReg; |
1086 | assert(!Src.isUndef() && "Undef op doesn't need optimization"); |
1087 | |
1088 | if (NewSrc.isVirtual() && !MF.getRegInfo().constrainRegClass(NewSrc, RC)) |
1089 | return false; |
1090 | |
1091 | return true; |
1092 | } |
1093 | |
1094 | // This is for an LEA64_32r, whose incoming registers are 32-bit. One way or |
1095 | // another we need to add 64-bit registers to the final MI. |
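// The COPY built below writes the 32-bit source into the low half of a
// fresh 64-bit vreg (leaving the high bits undef), which LEA64_32r can
// then legally consume.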
1096 | if (SrcReg.isPhysical()) { |
1097 | ImplicitOp = Src; |
1098 | ImplicitOp.setImplicit(); |
1099 | |
1100 | NewSrc = getX86SubSuperRegister(SrcReg, 64); |
1101 | assert(!Src.isUndef() && "Undef op doesn't need optimization"); |
1102 | } else { |
1103 | // The source is a virtual register of the wrong class; create a |
1104 | // temporary 64-bit vreg to feed into the LEA. |
1105 | NewSrc = MF.getRegInfo().createVirtualRegister(RC); |
1106 | MachineInstr *Copy = |
1107 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY)) |
1108 | .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit) |
1109 | .addReg(SrcReg, getKillRegState(isKill)); |
1110 | |
1111 | // Which is obviously going to be dead after we're done with it. |
1112 | isKill = true; |
1113 | |
1114 | if (LV) |
1115 | LV->replaceKillInstruction(SrcReg, MI, *Copy); |
1116 | |
1117 | if (LIS) { |
1118 | SlotIndex CopyIdx = LIS->InsertMachineInstrInMaps(*Copy); |
1119 | SlotIndex Idx = LIS->getInstructionIndex(MI); |
1120 | LiveInterval &LI = LIS->getInterval(SrcReg); |
1121 | LiveRange::Segment *S = LI.getSegmentContaining(Idx); |
1122 | if (S->end.getBaseIndex() == Idx) |
1123 | S->end = CopyIdx.getRegSlot(); |
1124 | } |
1125 | } |
1126 | |
1127 | // We've set all the parameters without issue. |
1128 | return true; |
1129 | } |
1130 | |
1131 | MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc, |
1132 | MachineInstr &MI, |
1133 | LiveVariables *LV, |
1134 | LiveIntervals *LIS, |
1135 | bool Is8BitOp) const { |
1136 | // We handle 8-bit adds and various 16-bit opcodes in the switch below. |
1137 | MachineBasicBlock &MBB = *MI.getParent(); |
1138 | MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); |
1139 | assert((Is8BitOp || RegInfo.getTargetRegisterInfo()->getRegSizeInBits( |
1140 | *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) && |
1141 | "Unexpected type for LEA transform"); |
1142 | |
1143 | // TODO: For a 32-bit target, we need to adjust the LEA variables with |
1144 | // something like this: |
1145 | // Opcode = X86::LEA32r; |
1146 | // InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass); |
1147 | // OutRegLEA = |
1148 | // Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass) |
1149 | // : RegInfo.createVirtualRegister(&X86::GR32RegClass); |
1150 | if (!Subtarget.is64Bit()) |
1151 | return nullptr; |
1152 | |
1153 | unsigned Opcode = X86::LEA64_32r; |
1154 | Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); |
1155 | Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass); |
1156 | Register InRegLEA2; |
1157 | |
1158 | // Build and insert into an implicit UNDEF value. This is OK because |
1159 | // we will be shifting and then extracting the lower 8/16-bits. |
1160 | // This has the potential to cause a partial register stall, e.g. |
1161 | // movw (%rbp,%rcx,2), %dx |
1162 | // leal -65(%rdx), %esi |
1163 | // But testing has shown this *does* help performance in 64-bit mode (at |
1164 | // least on modern x86 machines). |
1165 | MachineBasicBlock::iterator MBBI = MI.getIterator(); |
1166 | Register Dest = MI.getOperand(0).getReg(); |
1167 | Register Src = MI.getOperand(1).getReg(); |
1168 | Register Src2; |
1169 | bool IsDead = MI.getOperand(0).isDead(); |
1170 | bool IsKill = MI.getOperand(1).isKill(); |
1171 | unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit; |
1172 | assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization"); |
1173 | MachineInstr *ImpDef = |
1174 | BuildMI(MBB, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA); |
1175 | MachineInstr *InsMI = |
1176 | BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY)) |
1177 | .addReg(InRegLEA, RegState::Define, SubReg) |
1178 | .addReg(Src, getKillRegState(IsKill)); |
1179 | MachineInstr *ImpDef2 = nullptr; |
1180 | MachineInstr *InsMI2 = nullptr; |
1181 | |
1182 | MachineInstrBuilder MIB = |
1183 | BuildMI(MBB, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA); |
1184 | switch (MIOpc) { |
1185 | default: llvm_unreachable("Unreachable!"); |
1186 | case X86::SHL8ri: |
1187 | case X86::SHL16ri: { |
1188 | unsigned ShAmt = MI.getOperand(2).getImm(); |
1189 | MIB.addReg(0) |
1190 | .addImm(1LL << ShAmt) |
1191 | .addReg(InRegLEA, RegState::Kill) |
1192 | .addImm(0) |
1193 | .addReg(0); |
1194 | break; |
1195 | } |
1196 | case X86::INC8r: |
1197 | case X86::INC16r: |
1198 | addRegOffset(MIB, InRegLEA, true, 1); |
1199 | break; |
1200 | case X86::DEC8r: |
1201 | case X86::DEC16r: |
1202 | addRegOffset(MIB, InRegLEA, true, -1); |
1203 | break; |
1204 | case X86::ADD8ri: |
1205 | case X86::ADD8ri_DB: |
1206 | case X86::ADD16ri: |
1207 | case X86::ADD16ri8: |
1208 | case X86::ADD16ri_DB: |
1209 | case X86::ADD16ri8_DB: |
1210 | addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm()); |
1211 | break; |
1212 | case X86::ADD8rr: |
1213 | case X86::ADD8rr_DB: |
1214 | case X86::ADD16rr: |
1215 | case X86::ADD16rr_DB: { |
1216 | Src2 = MI.getOperand(2).getReg(); |
1217 | bool IsKill2 = MI.getOperand(2).isKill(); |
1218 | assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization"); |
1219 | if (Src == Src2) { |
1220 | // ADD8rr/ADD16rr killed %reg1028, %reg1028 |
1221 | // just a single insert_subreg. |
1222 | addRegReg(MIB, InRegLEA, true, InRegLEA, false); |
1223 | } else { |
1224 | if (Subtarget.is64Bit()) |
1225 | InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); |
1226 | else |
1227 | InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass); |
1228 | // Build and insert into an implicit UNDEF value. This is OK because |
1229 | // we will be shifting and then extracting the lower 8/16-bits. |
1230 | ImpDef2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), |
1231 | InRegLEA2); |
1232 | InsMI2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY)) |
1233 | .addReg(InRegLEA2, RegState::Define, SubReg) |
1234 | .addReg(Src2, getKillRegState(IsKill2)); |
1235 | addRegReg(MIB, InRegLEA, true, InRegLEA2, true); |
1236 | } |
1237 | if (LV && IsKill2 && InsMI2) |
1238 | LV->replaceKillInstruction(Src2, MI, *InsMI2); |
1239 | break; |
1240 | } |
1241 | } |
1242 | |
1243 | MachineInstr *NewMI = MIB; |
1244 | MachineInstr *ExtMI = |
1245 | BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY)) |
1246 | .addReg(Dest, RegState::Define | getDeadRegState(IsDead)) |
1247 | .addReg(OutRegLEA, RegState::Kill, SubReg); |
1248 | |
1249 | if (LV) { |
1250 | // Update live variables. |
1251 | LV->getVarInfo(InRegLEA).Kills.push_back(NewMI); |
1252 | if (InRegLEA2) |
1253 | LV->getVarInfo(InRegLEA2).Kills.push_back(NewMI); |
1254 | LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI); |
1255 | if (IsKill) |
1256 | LV->replaceKillInstruction(Src, MI, *InsMI); |
1257 | if (IsDead) |
1258 | LV->replaceKillInstruction(Dest, MI, *ExtMI); |
1259 | } |
1260 | |
1261 | if (LIS) { |
1262 | LIS->InsertMachineInstrInMaps(*ImpDef); |
1263 | SlotIndex InsIdx = LIS->InsertMachineInstrInMaps(*InsMI); |
1264 | if (ImpDef2) |
1265 | LIS->InsertMachineInstrInMaps(*ImpDef2); |
1266 | SlotIndex Ins2Idx; |
1267 | if (InsMI2) |
1268 | Ins2Idx = LIS->InsertMachineInstrInMaps(*InsMI2); |
1269 | SlotIndex NewIdx = LIS->ReplaceMachineInstrInMaps(MI, *NewMI); |
1270 | SlotIndex ExtIdx = LIS->InsertMachineInstrInMaps(*ExtMI); |
1271 | LIS->getInterval(InRegLEA); |
1272 | LIS->getInterval(OutRegLEA); |
1273 | if (InRegLEA2) |
1274 | LIS->getInterval(InRegLEA2); |
1275 | |
1276 | // Move the use of Src up to InsMI. |
1277 | LiveInterval &SrcLI = LIS->getInterval(Src); |
1278 | LiveRange::Segment *SrcSeg = SrcLI.getSegmentContaining(NewIdx); |
1279 | if (SrcSeg->end == NewIdx.getRegSlot()) |
1280 | SrcSeg->end = InsIdx.getRegSlot(); |
1281 | |
1282 | if (InsMI2) { |
1283 | // Move the use of Src2 up to InsMI2. |
1284 | LiveInterval &Src2LI = LIS->getInterval(Src2); |
1285 | LiveRange::Segment *Src2Seg = Src2LI.getSegmentContaining(NewIdx); |
1286 | if (Src2Seg->end == NewIdx.getRegSlot()) |
1287 | Src2Seg->end = Ins2Idx.getRegSlot(); |
1288 | } |
1289 | |
1290 | // Move the definition of Dest down to ExtMI. |
1291 | LiveInterval &DestLI = LIS->getInterval(Dest); |
1292 | LiveRange::Segment *DestSeg = |
1293 | DestLI.getSegmentContaining(NewIdx.getRegSlot()); |
1294 | assert(DestSeg->start == NewIdx.getRegSlot() && |
1295 | DestSeg->valno->def == NewIdx.getRegSlot()); |
1296 | DestSeg->start = ExtIdx.getRegSlot(); |
1297 | DestSeg->valno->def = ExtIdx.getRegSlot(); |
1298 | } |
1299 | |
1300 | return ExtMI; |
1301 | } |
1302 | |
1303 | /// This method must be implemented by targets that |
1304 | /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target |
1305 | /// may be able to convert a two-address instruction into a true |
1306 | /// three-address instruction on demand. This allows the X86 target (for |
1307 | /// example) to convert ADD and SHL instructions into LEA instructions if they |
1308 | /// would require register copies due to two-addressness. |
1309 | /// |
1310 | /// This method returns a null pointer if the transformation cannot be |
1311 | /// performed, otherwise it returns the new instruction. |
1312 | /// |
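/// For example (illustrative), the two-address "%r = ADD32rr %r, %b" ties
/// the destination to the first source; rewriting it as
/// "%d = LEA64_32r %r, 1, %b, 0, $noreg" (on a 64-bit target) breaks the
/// tie and avoids a register copy.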
1313 | MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI, |
1314 | LiveVariables *LV, |
1315 | LiveIntervals *LIS) const { |
1316 | // The following opcodes also set the condition code register(s). Only |
1317 | // convert them to an equivalent LEA if the condition code register defs |
1318 | // are dead! |
1319 | if (hasLiveCondCodeDef(MI)) |
1320 | return nullptr; |
1321 | |
1322 | MachineFunction &MF = *MI.getParent()->getParent(); |
1323 | // All input instructions are two-address instructions. Get the known operands.
1324 | const MachineOperand &Dest = MI.getOperand(0); |
1325 | const MachineOperand &Src = MI.getOperand(1); |
1326 | |
1327 | // Ideally, operations with undef should be folded before we get here, but we |
1328 | // can't guarantee it. Bail out because optimizing undefs is a waste of time. |
1329 | // Without this, we have to forward undef state to new register operands to |
1330 | // avoid machine verifier errors. |
1331 | if (Src.isUndef()) |
1332 | return nullptr; |
1333 | if (MI.getNumOperands() > 2) |
1334 | if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef()) |
1335 | return nullptr; |
1336 | |
1337 | MachineInstr *NewMI = nullptr; |
1338 | Register SrcReg, SrcReg2; |
1339 | bool Is64Bit = Subtarget.is64Bit(); |
1340 | |
1341 | bool Is8BitOp = false; |
1342 | unsigned NumRegOperands = 2; |
1343 | unsigned MIOpc = MI.getOpcode(); |
1344 | switch (MIOpc) { |
1345 | default: llvm_unreachable("Unreachable!");
1346 | case X86::SHL64ri: { |
1347 | assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1348 | unsigned ShAmt = getTruncatedShiftCount(MI, 2); |
1349 | if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; |
1350 | |
1351 | // LEA can't handle RSP. |
1352 | if (Src.getReg().isVirtual() && !MF.getRegInfo().constrainRegClass( |
1353 | Src.getReg(), &X86::GR64_NOSPRegClass)) |
1354 | return nullptr; |
1355 | |
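| // Illustrative only: a left shift by 3 becomes an LEA with base 0,
| // scale 1 << 3 = 8, index Src, and displacement 0, i.e. dst = [8 * src].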
1356 | NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)) |
1357 | .add(Dest) |
1358 | .addReg(0) |
1359 | .addImm(1LL << ShAmt) |
1360 | .add(Src) |
1361 | .addImm(0) |
1362 | .addReg(0); |
1363 | break; |
1364 | } |
1365 | case X86::SHL32ri: { |
1366 | assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1367 | unsigned ShAmt = getTruncatedShiftCount(MI, 2); |
1368 | if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; |
1369 | |
1370 | unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r; |
1371 | |
1372 | // LEA can't handle ESP. |
1373 | bool isKill; |
1374 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1375 | if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, isKill, |
1376 | ImplicitOp, LV, LIS)) |
1377 | return nullptr; |
1378 | |
1379 | MachineInstrBuilder MIB = |
1380 | BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1381 | .add(Dest) |
1382 | .addReg(0) |
1383 | .addImm(1LL << ShAmt) |
1384 | .addReg(SrcReg, getKillRegState(isKill)) |
1385 | .addImm(0) |
1386 | .addReg(0); |
1387 | if (ImplicitOp.getReg() != 0) |
1388 | MIB.add(ImplicitOp); |
1389 | NewMI = MIB; |
1390 | |
1391 | // Add kills if classifyLEAReg created a new register. |
1392 | if (LV && SrcReg != Src.getReg()) |
1393 | LV->getVarInfo(SrcReg).Kills.push_back(NewMI); |
1394 | break; |
1395 | } |
1396 | case X86::SHL8ri: |
1397 | Is8BitOp = true; |
1398 | [[fallthrough]]; |
1399 | case X86::SHL16ri: { |
1400 | assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1401 | unsigned ShAmt = getTruncatedShiftCount(MI, 2); |
1402 | if (!isTruncatedShiftCountForLEA(ShAmt)) |
1403 | return nullptr; |
1404 | return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp); |
1405 | } |
1406 | case X86::INC64r: |
1407 | case X86::INC32r: { |
1408 | assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
1409 | unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r : |
1410 | (Is64Bit ? X86::LEA64_32r : X86::LEA32r); |
1411 | bool isKill; |
1412 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1413 | if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, isKill, |
1414 | ImplicitOp, LV, LIS)) |
1415 | return nullptr; |
1416 | |
1417 | MachineInstrBuilder MIB = |
1418 | BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1419 | .add(Dest) |
1420 | .addReg(SrcReg, getKillRegState(isKill)); |
1421 | if (ImplicitOp.getReg() != 0) |
1422 | MIB.add(ImplicitOp); |
1423 | |
1424 | NewMI = addOffset(MIB, 1); |
1425 | |
1426 | // Add kills if classifyLEAReg created a new register. |
1427 | if (LV && SrcReg != Src.getReg()) |
1428 | LV->getVarInfo(SrcReg).Kills.push_back(NewMI); |
1429 | break; |
1430 | } |
1431 | case X86::DEC64r: |
1432 | case X86::DEC32r: { |
1433 | assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
1434 | unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r |
1435 | : (Is64Bit ? X86::LEA64_32r : X86::LEA32r); |
1436 | |
1437 | bool isKill; |
1438 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1439 | if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, isKill, |
1440 | ImplicitOp, LV, LIS)) |
1441 | return nullptr; |
1442 | |
1443 | MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1444 | .add(Dest) |
1445 | .addReg(SrcReg, getKillRegState(isKill)); |
1446 | if (ImplicitOp.getReg() != 0) |
1447 | MIB.add(ImplicitOp); |
1448 | |
1449 | NewMI = addOffset(MIB, -1); |
1450 | |
1451 | // Add kills if classifyLEAReg created a new register. |
1452 | if (LV && SrcReg != Src.getReg()) |
1453 | LV->getVarInfo(SrcReg).Kills.push_back(NewMI); |
1454 | break; |
1455 | } |
1456 | case X86::DEC8r: |
1457 | case X86::INC8r: |
1458 | Is8BitOp = true; |
1459 | [[fallthrough]]; |
1460 | case X86::DEC16r: |
1461 | case X86::INC16r: |
1462 | return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp); |
1463 | case X86::ADD64rr: |
1464 | case X86::ADD64rr_DB: |
1465 | case X86::ADD32rr: |
1466 | case X86::ADD32rr_DB: { |
1467 | assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1468 | unsigned Opc; |
1469 | if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) |
1470 | Opc = X86::LEA64r; |
1471 | else |
1472 | Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r; |
1473 | |
1474 | const MachineOperand &Src2 = MI.getOperand(2); |
1475 | bool isKill2; |
1476 | MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false); |
1477 | if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/false, SrcReg2, isKill2, |
1478 | ImplicitOp2, LV, LIS)) |
1479 | return nullptr; |
1480 | |
1481 | bool isKill; |
1482 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1483 | if (Src.getReg() == Src2.getReg()) { |
1484 | // Don't call classifyLEAReg a second time on the same register, in case
1485 | // the first call inserted a COPY from Src2 and marked it as killed.
1486 | isKill = isKill2; |
1487 | SrcReg = SrcReg2; |
1488 | } else { |
1489 | if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, isKill, |
1490 | ImplicitOp, LV, LIS)) |
1491 | return nullptr; |
1492 | } |
1493 | |
1494 | MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest); |
1495 | if (ImplicitOp.getReg() != 0) |
1496 | MIB.add(ImplicitOp); |
1497 | if (ImplicitOp2.getReg() != 0) |
1498 | MIB.add(ImplicitOp2); |
1499 | |
1500 | NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2); |
1501 | |
1502 | // Add kills if classifyLEAReg created a new register. |
1503 | if (LV) { |
1504 | if (SrcReg2 != Src2.getReg()) |
1505 | LV->getVarInfo(SrcReg2).Kills.push_back(NewMI); |
1506 | if (SrcReg != SrcReg2 && SrcReg != Src.getReg()) |
1507 | LV->getVarInfo(SrcReg).Kills.push_back(NewMI); |
1508 | } |
1509 | NumRegOperands = 3; |
1510 | break; |
1511 | } |
1512 | case X86::ADD8rr: |
1513 | case X86::ADD8rr_DB: |
1514 | Is8BitOp = true; |
1515 | [[fallthrough]]; |
1516 | case X86::ADD16rr: |
1517 | case X86::ADD16rr_DB: |
1518 | return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp); |
1519 | case X86::ADD64ri32: |
1520 | case X86::ADD64ri8: |
1521 | case X86::ADD64ri32_DB: |
1522 | case X86::ADD64ri8_DB: |
1523 | assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1524 | NewMI = addOffset( |
1525 | BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src), |
1526 | MI.getOperand(2)); |
1527 | break; |
1528 | case X86::ADD32ri: |
1529 | case X86::ADD32ri8: |
1530 | case X86::ADD32ri_DB: |
1531 | case X86::ADD32ri8_DB: { |
1532 | assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1533 | unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r; |
1534 | |
1535 | bool isKill; |
1536 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1537 | if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, isKill, |
1538 | ImplicitOp, LV, LIS)) |
1539 | return nullptr; |
1540 | |
1541 | MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1542 | .add(Dest) |
1543 | .addReg(SrcReg, getKillRegState(isKill)); |
1544 | if (ImplicitOp.getReg() != 0) |
1545 | MIB.add(ImplicitOp); |
1546 | |
1547 | NewMI = addOffset(MIB, MI.getOperand(2)); |
1548 | |
1549 | // Add kills if classifyLEAReg created a new register. |
1550 | if (LV && SrcReg != Src.getReg()) |
1551 | LV->getVarInfo(SrcReg).Kills.push_back(NewMI); |
1552 | break; |
1553 | } |
1554 | case X86::ADD8ri: |
1555 | case X86::ADD8ri_DB: |
1556 | Is8BitOp = true; |
1557 | [[fallthrough]]; |
1558 | case X86::ADD16ri: |
1559 | case X86::ADD16ri8: |
1560 | case X86::ADD16ri_DB: |
1561 | case X86::ADD16ri8_DB: |
1562 | return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp); |
1563 | case X86::SUB8ri: |
1564 | case X86::SUB16ri8: |
1565 | case X86::SUB16ri: |
1566 | // FIXME: Support these similarly to ADD8ri/ADD16ri*.
1567 | return nullptr; |
1568 | case X86::SUB32ri8: |
1569 | case X86::SUB32ri: { |
1570 | if (!MI.getOperand(2).isImm()) |
1571 | return nullptr; |
1572 | int64_t Imm = MI.getOperand(2).getImm(); |
1573 | if (!isInt<32>(-Imm)) |
1574 | return nullptr; |
1575 | |
1576 | assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1577 | unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r; |
1578 | |
1579 | bool isKill; |
1580 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1581 | if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, isKill, |
1582 | ImplicitOp, LV, LIS)) |
1583 | return nullptr; |
1584 | |
1585 | MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1586 | .add(Dest) |
1587 | .addReg(SrcReg, getKillRegState(isKill)); |
1588 | if (ImplicitOp.getReg() != 0) |
1589 | MIB.add(ImplicitOp); |
1590 | |
1591 | NewMI = addOffset(MIB, -Imm); |
1592 | break; |
1593 | } |
1594 | |
1595 | case X86::SUB64ri8: |
1596 | case X86::SUB64ri32: { |
1597 | if (!MI.getOperand(2).isImm()) |
1598 | return nullptr; |
1599 | int64_t Imm = MI.getOperand(2).getImm(); |
1600 | if (!isInt<32>(-Imm)) |
1601 | return nullptr; |
1602 | |
1603 | assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
1604 | |
1605 | MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), |
1606 | get(X86::LEA64r)).add(Dest).add(Src); |
1607 | NewMI = addOffset(MIB, -Imm); |
1608 | break; |
1609 | } |
1610 | |
1611 | case X86::VMOVDQU8Z128rmk: |
1612 | case X86::VMOVDQU8Z256rmk: |
1613 | case X86::VMOVDQU8Zrmk: |
1614 | case X86::VMOVDQU16Z128rmk: |
1615 | case X86::VMOVDQU16Z256rmk: |
1616 | case X86::VMOVDQU16Zrmk: |
1617 | case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk: |
1618 | case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk: |
1619 | case X86::VMOVDQU32Zrmk: case X86::VMOVDQA32Zrmk: |
1620 | case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk: |
1621 | case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk: |
1622 | case X86::VMOVDQU64Zrmk: case X86::VMOVDQA64Zrmk: |
1623 | case X86::VMOVUPDZ128rmk: case X86::VMOVAPDZ128rmk: |
1624 | case X86::VMOVUPDZ256rmk: case X86::VMOVAPDZ256rmk: |
1625 | case X86::VMOVUPDZrmk: case X86::VMOVAPDZrmk: |
1626 | case X86::VMOVUPSZ128rmk: case X86::VMOVAPSZ128rmk: |
1627 | case X86::VMOVUPSZ256rmk: case X86::VMOVAPSZ256rmk: |
1628 | case X86::VMOVUPSZrmk: case X86::VMOVAPSZrmk: |
1629 | case X86::VBROADCASTSDZ256rmk: |
1630 | case X86::VBROADCASTSDZrmk: |
1631 | case X86::VBROADCASTSSZ128rmk: |
1632 | case X86::VBROADCASTSSZ256rmk: |
1633 | case X86::VBROADCASTSSZrmk: |
1634 | case X86::VPBROADCASTDZ128rmk: |
1635 | case X86::VPBROADCASTDZ256rmk: |
1636 | case X86::VPBROADCASTDZrmk: |
1637 | case X86::VPBROADCASTQZ128rmk: |
1638 | case X86::VPBROADCASTQZ256rmk: |
1639 | case X86::VPBROADCASTQZrmk: { |
1640 | unsigned Opc; |
1641 | switch (MIOpc) { |
1642 | default: llvm_unreachable("Unreachable!");
1643 | case X86::VMOVDQU8Z128rmk: Opc = X86::VPBLENDMBZ128rmk; break; |
1644 | case X86::VMOVDQU8Z256rmk: Opc = X86::VPBLENDMBZ256rmk; break; |
1645 | case X86::VMOVDQU8Zrmk: Opc = X86::VPBLENDMBZrmk; break; |
1646 | case X86::VMOVDQU16Z128rmk: Opc = X86::VPBLENDMWZ128rmk; break; |
1647 | case X86::VMOVDQU16Z256rmk: Opc = X86::VPBLENDMWZ256rmk; break; |
1648 | case X86::VMOVDQU16Zrmk: Opc = X86::VPBLENDMWZrmk; break; |
1649 | case X86::VMOVDQU32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break; |
1650 | case X86::VMOVDQU32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break; |
1651 | case X86::VMOVDQU32Zrmk: Opc = X86::VPBLENDMDZrmk; break; |
1652 | case X86::VMOVDQU64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break; |
1653 | case X86::VMOVDQU64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break; |
1654 | case X86::VMOVDQU64Zrmk: Opc = X86::VPBLENDMQZrmk; break; |
1655 | case X86::VMOVUPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break; |
1656 | case X86::VMOVUPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break; |
1657 | case X86::VMOVUPDZrmk: Opc = X86::VBLENDMPDZrmk; break; |
1658 | case X86::VMOVUPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break; |
1659 | case X86::VMOVUPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break; |
1660 | case X86::VMOVUPSZrmk: Opc = X86::VBLENDMPSZrmk; break; |
1661 | case X86::VMOVDQA32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break; |
1662 | case X86::VMOVDQA32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break; |
1663 | case X86::VMOVDQA32Zrmk: Opc = X86::VPBLENDMDZrmk; break; |
1664 | case X86::VMOVDQA64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break; |
1665 | case X86::VMOVDQA64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break; |
1666 | case X86::VMOVDQA64Zrmk: Opc = X86::VPBLENDMQZrmk; break; |
1667 | case X86::VMOVAPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break; |
1668 | case X86::VMOVAPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break; |
1669 | case X86::VMOVAPDZrmk: Opc = X86::VBLENDMPDZrmk; break; |
1670 | case X86::VMOVAPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break; |
1671 | case X86::VMOVAPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break; |
1672 | case X86::VMOVAPSZrmk: Opc = X86::VBLENDMPSZrmk; break; |
1673 | case X86::VBROADCASTSDZ256rmk: Opc = X86::VBLENDMPDZ256rmbk; break; |
1674 | case X86::VBROADCASTSDZrmk: Opc = X86::VBLENDMPDZrmbk; break; |
1675 | case X86::VBROADCASTSSZ128rmk: Opc = X86::VBLENDMPSZ128rmbk; break; |
1676 | case X86::VBROADCASTSSZ256rmk: Opc = X86::VBLENDMPSZ256rmbk; break; |
1677 | case X86::VBROADCASTSSZrmk: Opc = X86::VBLENDMPSZrmbk; break; |
1678 | case X86::VPBROADCASTDZ128rmk: Opc = X86::VPBLENDMDZ128rmbk; break; |
1679 | case X86::VPBROADCASTDZ256rmk: Opc = X86::VPBLENDMDZ256rmbk; break; |
1680 | case X86::VPBROADCASTDZrmk: Opc = X86::VPBLENDMDZrmbk; break; |
1681 | case X86::VPBROADCASTQZ128rmk: Opc = X86::VPBLENDMQZ128rmbk; break; |
1682 | case X86::VPBROADCASTQZ256rmk: Opc = X86::VPBLENDMQZ256rmbk; break; |
1683 | case X86::VPBROADCASTQZrmk: Opc = X86::VPBLENDMQZrmbk; break; |
1684 | } |
1685 | |
1686 | NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1687 | .add(Dest) |
1688 | .add(MI.getOperand(2)) |
1689 | .add(Src) |
1690 | .add(MI.getOperand(3)) |
1691 | .add(MI.getOperand(4)) |
1692 | .add(MI.getOperand(5)) |
1693 | .add(MI.getOperand(6)) |
1694 | .add(MI.getOperand(7)); |
1695 | break; |
1696 | } |
1697 | |
1698 | case X86::VMOVDQU8Z128rrk: |
1699 | case X86::VMOVDQU8Z256rrk: |
1700 | case X86::VMOVDQU8Zrrk: |
1701 | case X86::VMOVDQU16Z128rrk: |
1702 | case X86::VMOVDQU16Z256rrk: |
1703 | case X86::VMOVDQU16Zrrk: |
1704 | case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk: |
1705 | case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk: |
1706 | case X86::VMOVDQU32Zrrk: case X86::VMOVDQA32Zrrk: |
1707 | case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk: |
1708 | case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk: |
1709 | case X86::VMOVDQU64Zrrk: case X86::VMOVDQA64Zrrk: |
1710 | case X86::VMOVUPDZ128rrk: case X86::VMOVAPDZ128rrk: |
1711 | case X86::VMOVUPDZ256rrk: case X86::VMOVAPDZ256rrk: |
1712 | case X86::VMOVUPDZrrk: case X86::VMOVAPDZrrk: |
1713 | case X86::VMOVUPSZ128rrk: case X86::VMOVAPSZ128rrk: |
1714 | case X86::VMOVUPSZ256rrk: case X86::VMOVAPSZ256rrk: |
1715 | case X86::VMOVUPSZrrk: case X86::VMOVAPSZrrk: { |
1716 | unsigned Opc; |
1717 | switch (MIOpc) { |
1718 | default: llvm_unreachable("Unreachable!");
1719 | case X86::VMOVDQU8Z128rrk: Opc = X86::VPBLENDMBZ128rrk; break; |
1720 | case X86::VMOVDQU8Z256rrk: Opc = X86::VPBLENDMBZ256rrk; break; |
1721 | case X86::VMOVDQU8Zrrk: Opc = X86::VPBLENDMBZrrk; break; |
1722 | case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break; |
1723 | case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break; |
1724 | case X86::VMOVDQU16Zrrk: Opc = X86::VPBLENDMWZrrk; break; |
1725 | case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break; |
1726 | case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break; |
1727 | case X86::VMOVDQU32Zrrk: Opc = X86::VPBLENDMDZrrk; break; |
1728 | case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break; |
1729 | case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break; |
1730 | case X86::VMOVDQU64Zrrk: Opc = X86::VPBLENDMQZrrk; break; |
1731 | case X86::VMOVUPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break; |
1732 | case X86::VMOVUPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break; |
1733 | case X86::VMOVUPDZrrk: Opc = X86::VBLENDMPDZrrk; break; |
1734 | case X86::VMOVUPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break; |
1735 | case X86::VMOVUPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break; |
1736 | case X86::VMOVUPSZrrk: Opc = X86::VBLENDMPSZrrk; break; |
1737 | case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break; |
1738 | case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break; |
1739 | case X86::VMOVDQA32Zrrk: Opc = X86::VPBLENDMDZrrk; break; |
1740 | case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break; |
1741 | case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break; |
1742 | case X86::VMOVDQA64Zrrk: Opc = X86::VPBLENDMQZrrk; break; |
1743 | case X86::VMOVAPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break; |
1744 | case X86::VMOVAPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break; |
1745 | case X86::VMOVAPDZrrk: Opc = X86::VBLENDMPDZrrk; break; |
1746 | case X86::VMOVAPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break; |
1747 | case X86::VMOVAPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break; |
1748 | case X86::VMOVAPSZrrk: Opc = X86::VBLENDMPSZrrk; break; |
1749 | } |
1750 | |
1751 | NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1752 | .add(Dest) |
1753 | .add(MI.getOperand(2)) |
1754 | .add(Src) |
1755 | .add(MI.getOperand(3)); |
1756 | NumRegOperands = 4; |
1757 | break; |
1758 | } |
1759 | } |
1760 | |
1761 | if (!NewMI) return nullptr; |
1762 | |
1763 | if (LV) { // Update live variables |
1764 | for (unsigned I = 0; I < NumRegOperands; ++I) { |
1765 | MachineOperand &Op = MI.getOperand(I); |
1766 | if (Op.isDead() || Op.isKill()) |
1767 | LV->replaceKillInstruction(Op.getReg(), MI, *NewMI); |
1768 | } |
1769 | } |
1770 | |
1771 | MachineBasicBlock &MBB = *MI.getParent(); |
1772 | MBB.insert(MI.getIterator(), NewMI); // Insert the new inst |
1773 | |
1774 | if (LIS) { |
1775 | LIS->ReplaceMachineInstrInMaps(MI, *NewMI); |
1776 | if (SrcReg) |
1777 | LIS->getInterval(SrcReg); |
1778 | if (SrcReg2) |
1779 | LIS->getInterval(SrcReg2); |
1780 | } |
1781 | |
1782 | return NewMI; |
1783 | } |
1784 | |
1785 | /// This determines which of the three possible cases of a three-source
1786 | /// commute the source indexes correspond to, taking any mask operands into
1787 | /// account. Commuting a passthru operand is not allowed; index pairs that
1788 | /// match none of the cases below are unreachable.
1789 | /// Case 0 - Possible to commute the first and second operands. |
1790 | /// Case 1 - Possible to commute the first and third operands. |
1791 | /// Case 2 - Possible to commute the second and third operands. |
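| /// For example (assumed operand layout, for illustration): an unmasked op
| /// has sources at indexes 1, 2, 3, so commuting 2 and 3 is Case 2; with a
| /// k-mask at index 2, the sources sit at 1, 3, 4, and commuting 1 and 4 is
| /// Case 1.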
1792 | static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1, |
1793 | unsigned SrcOpIdx2) { |
1794 | // Put the lowest index to SrcOpIdx1 to simplify the checks below. |
1795 | if (SrcOpIdx1 > SrcOpIdx2) |
1796 | std::swap(SrcOpIdx1, SrcOpIdx2); |
1797 | |
1798 | unsigned Op1 = 1, Op2 = 2, Op3 = 3; |
1799 | if (X86II::isKMasked(TSFlags)) { |
1800 | Op2++; |
1801 | Op3++; |
1802 | } |
1803 | |
1804 | if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2) |
1805 | return 0; |
1806 | if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3) |
1807 | return 1; |
1808 | if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3) |
1809 | return 2; |
1810 | llvm_unreachable("Unknown three src commute case.");
1811 | } |
1812 | |
1813 | unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands( |
1814 | const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2, |
1815 | const X86InstrFMA3Group &FMA3Group) const { |
1816 | |
1817 | unsigned Opc = MI.getOpcode(); |
1818 | |
1819 | // TODO: Commuting the 1st operand of FMA*_Int requires some additional |
1820 | // analysis. The commute optimization is legal only if all users of FMA*_Int |
1821 | // use only the lowest element of the FMA*_Int instruction. Such analysis is
1822 | // not implemented yet, so just return 0 in that case.
1823 | // When such analysis becomes available, this will be the right place to
1824 | // call it.
1825 | assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
1826 | "Intrinsic instructions can't commute operand 1");
1827 | |
1828 | // Determine which case this commute is or if it can't be done. |
1829 | unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, |
1830 | SrcOpIdx2); |
1831 | assert(Case < 3 && "Unexpected case number!");
1832 | |
1833 | // Define the FMA forms mapping array that helps to map input FMA form |
1834 | // to output FMA form to preserve the operation semantics after |
1835 | // commuting the operands. |
1836 | const unsigned Form132Index = 0; |
1837 | const unsigned Form213Index = 1; |
1838 | const unsigned Form231Index = 2; |
1839 | static const unsigned FormMapping[][3] = { |
1840 | // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2; |
1841 | // FMA132 A, C, b; ==> FMA231 C, A, b; |
1842 | // FMA213 B, A, c; ==> FMA213 A, B, c; |
1843 | // FMA231 C, A, b; ==> FMA132 A, C, b; |
1844 | { Form231Index, Form213Index, Form132Index }, |
1845 | // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3; |
1846 | // FMA132 A, c, B; ==> FMA132 B, c, A; |
1847 | // FMA213 B, a, C; ==> FMA231 C, a, B; |
1848 | // FMA231 C, a, B; ==> FMA213 B, a, C; |
1849 | { Form132Index, Form231Index, Form213Index }, |
1850 | // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3; |
1851 | // FMA132 a, C, B; ==> FMA213 a, B, C; |
1852 | // FMA213 b, A, C; ==> FMA132 b, C, A; |
1853 | // FMA231 c, A, B; ==> FMA231 c, B, A; |
1854 | { Form213Index, Form132Index, Form231Index } |
1855 | }; |
1856 | |
1857 | unsigned FMAForms[3]; |
1858 | FMAForms[0] = FMA3Group.get132Opcode(); |
1859 | FMAForms[1] = FMA3Group.get213Opcode(); |
1860 | FMAForms[2] = FMA3Group.get231Opcode(); |
1861 | |
1862 | // Everything is ready, just adjust the FMA opcode and return it. |
1863 | for (unsigned FormIndex = 0; FormIndex < 3; FormIndex++) |
1864 | if (Opc == FMAForms[FormIndex]) |
1865 | return FMAForms[FormMapping[Case][FormIndex]]; |
1866 | |
1867 | llvm_unreachable("Illegal FMA3 format");
1868 | } |
1869 | |
1870 | static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1, |
1871 | unsigned SrcOpIdx2) { |
1872 | // Determine which case this commute is or if it can't be done. |
1873 | unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, |
1874 | SrcOpIdx2); |
1875 | assert(Case < 3 && "Unexpected case value!");
1876 | |
1877 | // For each case we need to swap two pairs of bits in the final immediate. |
1878 | static const uint8_t SwapMasks[3][4] = { |
1879 | { 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5. |
1880 | { 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6. |
1881 | { 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6. |
1882 | }; |
1883 | |
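| // Worked example (illustrative, not from the original source): commuting
| // sources 1 and 2 (Case 0) swaps truth-table index bits 2 and 1, i.e. bits
| // 2<->4 and 3<->5 of the immediate. E.g. Imm = 0xCA (A ? B : C) has bit 3
| // set and bits 2, 4, 5 clear, so only bit 5 gets turned on: NewImm = 0xE2,
| // i.e. B ? A : C -- the same operation with the first two sources swapped.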
1884 | uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm(); |
1885 | // Clear out the bits we are swapping. |
1886 | uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] | |
1887 | SwapMasks[Case][2] | SwapMasks[Case][3]); |
1888 | // If the immediate had a bit of the pair set, then set the opposite bit. |
1889 | if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1]; |
1890 | if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0]; |
1891 | if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3]; |
1892 | if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2]; |
1893 | MI.getOperand(MI.getNumOperands()-1).setImm(NewImm); |
1894 | } |
1895 | |
1896 | // Returns true if this is a VPERMI2 or VPERMT2 instruction that can be |
1897 | // commuted. |
1898 | static bool isCommutableVPERMV3Instruction(unsigned Opcode) { |
1899 | #define VPERM_CASES(Suffix) \ |
1900 | case X86::VPERMI2##Suffix##128rr: case X86::VPERMT2##Suffix##128rr: \ |
1901 | case X86::VPERMI2##Suffix##256rr: case X86::VPERMT2##Suffix##256rr: \ |
1902 | case X86::VPERMI2##Suffix##rr: case X86::VPERMT2##Suffix##rr: \ |
1903 | case X86::VPERMI2##Suffix##128rm: case X86::VPERMT2##Suffix##128rm: \ |
1904 | case X86::VPERMI2##Suffix##256rm: case X86::VPERMT2##Suffix##256rm: \ |
1905 | case X86::VPERMI2##Suffix##rm: case X86::VPERMT2##Suffix##rm: \ |
1906 | case X86::VPERMI2##Suffix##128rrkz: case X86::VPERMT2##Suffix##128rrkz: \ |
1907 | case X86::VPERMI2##Suffix##256rrkz: case X86::VPERMT2##Suffix##256rrkz: \ |
1908 | case X86::VPERMI2##Suffix##rrkz: case X86::VPERMT2##Suffix##rrkz: \ |
1909 | case X86::VPERMI2##Suffix##128rmkz: case X86::VPERMT2##Suffix##128rmkz: \ |
1910 | case X86::VPERMI2##Suffix##256rmkz: case X86::VPERMT2##Suffix##256rmkz: \ |
1911 | case X86::VPERMI2##Suffix##rmkz: case X86::VPERMT2##Suffix##rmkz: |
1912 | |
1913 | #define VPERM_CASES_BROADCAST(Suffix) \ |
1914 | VPERM_CASES(Suffix) \ |
1915 | case X86::VPERMI2##Suffix##128rmb: case X86::VPERMT2##Suffix##128rmb: \ |
1916 | case X86::VPERMI2##Suffix##256rmb: case X86::VPERMT2##Suffix##256rmb: \ |
1917 | case X86::VPERMI2##Suffix##rmb: case X86::VPERMT2##Suffix##rmb: \ |
1918 | case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \ |
1919 | case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \ |
1920 | case X86::VPERMI2##Suffix##rmbkz: case X86::VPERMT2##Suffix##rmbkz: |
1921 | |
1922 | switch (Opcode) { |
1923 | default: return false; |
1924 | VPERM_CASES(B) |
1925 | VPERM_CASES_BROADCAST(D) |
1926 | VPERM_CASES_BROADCAST(PD) |
1927 | VPERM_CASES_BROADCAST(PS) |
1928 | VPERM_CASES_BROADCAST(Q) |
1929 | VPERM_CASES(W) |
1930 | return true; |
1931 | } |
1932 | #undef VPERM_CASES_BROADCAST |
1933 | #undef VPERM_CASES |
1934 | } |
1935 | |
1936 | // Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching |
1937 | // from the I opcode to the T opcode and vice versa. |
1938 | static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) { |
1939 | #define VPERM_CASES(Orig, New) \ |
1940 | case X86::Orig##128rr: return X86::New##128rr; \ |
1941 | case X86::Orig##128rrkz: return X86::New##128rrkz; \ |
1942 | case X86::Orig##128rm: return X86::New##128rm; \ |
1943 | case X86::Orig##128rmkz: return X86::New##128rmkz; \ |
1944 | case X86::Orig##256rr: return X86::New##256rr; \ |
1945 | case X86::Orig##256rrkz: return X86::New##256rrkz; \ |
1946 | case X86::Orig##256rm: return X86::New##256rm; \ |
1947 | case X86::Orig##256rmkz: return X86::New##256rmkz; \ |
1948 | case X86::Orig##rr: return X86::New##rr; \ |
1949 | case X86::Orig##rrkz: return X86::New##rrkz; \ |
1950 | case X86::Orig##rm: return X86::New##rm; \ |
1951 | case X86::Orig##rmkz: return X86::New##rmkz; |
1952 | |
1953 | #define VPERM_CASES_BROADCAST(Orig, New) \ |
1954 | VPERM_CASES(Orig, New) \ |
1955 | case X86::Orig##128rmb: return X86::New##128rmb; \ |
1956 | case X86::Orig##128rmbkz: return X86::New##128rmbkz; \ |
1957 | case X86::Orig##256rmb: return X86::New##256rmb; \ |
1958 | case X86::Orig##256rmbkz: return X86::New##256rmbkz; \ |
1959 | case X86::Orig##rmb: return X86::New##rmb; \ |
1960 | case X86::Orig##rmbkz: return X86::New##rmbkz; |
1961 | |
1962 | switch (Opcode) { |
1963 | VPERM_CASES(VPERMI2B, VPERMT2B) |
1964 | VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D) |
1965 | VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD) |
1966 | VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS) |
1967 | VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q) |
1968 | VPERM_CASES(VPERMI2W, VPERMT2W) |
1969 | VPERM_CASES(VPERMT2B, VPERMI2B) |
1970 | VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D) |
1971 | VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD) |
1972 | VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS) |
1973 | VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q) |
1974 | VPERM_CASES(VPERMT2W, VPERMI2W) |
1975 | } |
1976 | |
1977 | llvm_unreachable("Unreachable!");
1978 | #undef VPERM_CASES_BROADCAST |
1979 | #undef VPERM_CASES |
1980 | } |
1981 | |
1982 | MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, |
1983 | unsigned OpIdx1, |
1984 | unsigned OpIdx2) const { |
1985 | auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & { |
1986 | if (NewMI) |
1987 | return *MI.getParent()->getParent()->CloneMachineInstr(&MI); |
1988 | return MI; |
1989 | }; |
1990 | |
1991 | switch (MI.getOpcode()) { |
1992 | case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I) |
1993 | case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I) |
1994 | case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I) |
1995 | case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I) |
1996 | case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I) |
1997 | case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I) |
1998 | unsigned Opc; |
1999 | unsigned Size; |
2000 | switch (MI.getOpcode()) { |
2001 | default: llvm_unreachable("Unreachable!");
2002 | case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break; |
2003 | case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break; |
2004 | case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break; |
2005 | case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break; |
2006 | case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break; |
2007 | case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break; |
2008 | } |
2009 | unsigned Amt = MI.getOperand(3).getImm(); |
2010 | auto &WorkingMI = cloneIfNew(MI); |
2011 | WorkingMI.setDesc(get(Opc)); |
2012 | WorkingMI.getOperand(3).setImm(Size - Amt); |
2013 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2014 | OpIdx1, OpIdx2); |
2015 | } |
2016 | case X86::PFSUBrr: |
2017 | case X86::PFSUBRrr: { |
2018 | // PFSUB x, y: x = x - y |
2019 | // PFSUBR x, y: x = y - x |
2020 | unsigned Opc = |
2021 | (X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr); |
2022 | auto &WorkingMI = cloneIfNew(MI); |
2023 | WorkingMI.setDesc(get(Opc)); |
2024 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2025 | OpIdx1, OpIdx2); |
2026 | } |
2027 | case X86::BLENDPDrri: |
2028 | case X86::BLENDPSrri: |
2029 | case X86::VBLENDPDrri: |
2030 | case X86::VBLENDPSrri: |
2031 | // If we're optimizing for size, try to use MOVSD/MOVSS. |
2032 | if (MI.getParent()->getParent()->getFunction().hasOptSize()) { |
2033 | unsigned Mask, Opc; |
2034 | switch (MI.getOpcode()) { |
2035 | default: llvm_unreachable("Unreachable!");
2036 | case X86::BLENDPDrri: Opc = X86::MOVSDrr; Mask = 0x03; break; |
2037 | case X86::BLENDPSrri: Opc = X86::MOVSSrr; Mask = 0x0F; break; |
2038 | case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break; |
2039 | case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break; |
2040 | } |
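| // (Imm ^ Mask) == 1 means the commuted immediate (Mask ^ Imm) selects only
| // the low element from the second source, which is exactly what MOVSS/MOVSD
| // do, so the blend can be replaced after commuting the operands.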
2041 | if ((MI.getOperand(3).getImm() ^ Mask) == 1) { |
2042 | auto &WorkingMI = cloneIfNew(MI); |
2043 | WorkingMI.setDesc(get(Opc)); |
2044 | WorkingMI.removeOperand(3); |
2045 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, |
2046 | /*NewMI=*/false, |
2047 | OpIdx1, OpIdx2); |
2048 | } |
2049 | } |
2050 | [[fallthrough]]; |
2051 | case X86::PBLENDWrri: |
2052 | case X86::VBLENDPDYrri: |
2053 | case X86::VBLENDPSYrri: |
2054 | case X86::VPBLENDDrri: |
2055 | case X86::VPBLENDWrri: |
2056 | case X86::VPBLENDDYrri: |
2057 | case X86::VPBLENDWYrri:{ |
2058 | int8_t Mask; |
2059 | switch (MI.getOpcode()) { |
2060 | default: llvm_unreachable("Unreachable!");
2061 | case X86::BLENDPDrri: Mask = (int8_t)0x03; break; |
2062 | case X86::BLENDPSrri: Mask = (int8_t)0x0F; break; |
2063 | case X86::PBLENDWrri: Mask = (int8_t)0xFF; break; |
2064 | case X86::VBLENDPDrri: Mask = (int8_t)0x03; break; |
2065 | case X86::VBLENDPSrri: Mask = (int8_t)0x0F; break; |
2066 | case X86::VBLENDPDYrri: Mask = (int8_t)0x0F; break; |
2067 | case X86::VBLENDPSYrri: Mask = (int8_t)0xFF; break; |
2068 | case X86::VPBLENDDrri: Mask = (int8_t)0x0F; break; |
2069 | case X86::VPBLENDWrri: Mask = (int8_t)0xFF; break; |
2070 | case X86::VPBLENDDYrri: Mask = (int8_t)0xFF; break; |
2071 | case X86::VPBLENDWYrri: Mask = (int8_t)0xFF; break; |
2072 | } |
2073 | // Only the least significant bits of Imm are used. |
2074 | // Using int8_t to ensure it will be sign extended to the int64_t that |
2075 | // setImm takes in order to match isel behavior. |
2076 | int8_t Imm = MI.getOperand(3).getImm() & Mask; |
2077 | auto &WorkingMI = cloneIfNew(MI); |
2078 | WorkingMI.getOperand(3).setImm(Mask ^ Imm); |
2079 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2080 | OpIdx1, OpIdx2); |
2081 | } |
2082 | case X86::INSERTPSrr: |
2083 | case X86::VINSERTPSrr: |
2084 | case X86::VINSERTPSZrr: { |
2085 | unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm(); |
2086 | unsigned ZMask = Imm & 15; |
2087 | unsigned DstIdx = (Imm >> 4) & 3; |
2088 | unsigned SrcIdx = (Imm >> 6) & 3; |
2089 | |
2090 | // We can commute insertps if we zero two of the elements, the insertion
2091 | // is "inline", and we don't override the insertion with a zero.
2092 | if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 && |
2093 | countPopulation(ZMask) == 2) { |
2094 | unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15); |
2095 | assert(AltIdx < 4 && "Illegal insertion index");
2096 | unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask; |
2097 | auto &WorkingMI = cloneIfNew(MI); |
2098 | WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm); |
2099 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2100 | OpIdx1, OpIdx2); |
2101 | } |
2102 | return nullptr; |
2103 | } |
2104 | case X86::MOVSDrr: |
2105 | case X86::MOVSSrr: |
2106 | case X86::VMOVSDrr: |
2107 | case X86::VMOVSSrr:{ |
2108 | // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD. |
2109 | if (Subtarget.hasSSE41()) { |
2110 | unsigned Mask, Opc; |
2111 | switch (MI.getOpcode()) { |
2112 | default: llvm_unreachable("Unreachable!");
2113 | case X86::MOVSDrr: Opc = X86::BLENDPDrri; Mask = 0x02; break; |
2114 | case X86::MOVSSrr: Opc = X86::BLENDPSrri; Mask = 0x0E; break; |
2115 | case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break; |
2116 | case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break; |
2117 | } |
2118 | |
2119 | auto &WorkingMI = cloneIfNew(MI); |
2120 | WorkingMI.setDesc(get(Opc)); |
2121 | WorkingMI.addOperand(MachineOperand::CreateImm(Mask)); |
2122 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2123 | OpIdx1, OpIdx2); |
2124 | } |
2125 | |
2126 | // Convert to SHUFPD. |
2127 | assert(MI.getOpcode() == X86::MOVSDrr &&
2128 | "Can only commute MOVSDrr without SSE4.1");
2129 | |
2130 | auto &WorkingMI = cloneIfNew(MI); |
2131 | WorkingMI.setDesc(get(X86::SHUFPDrri)); |
2132 | WorkingMI.addOperand(MachineOperand::CreateImm(0x02)); |
2133 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2134 | OpIdx1, OpIdx2); |
2135 | } |
2136 | case X86::SHUFPDrri: { |
2137 | // Commute to MOVSD. |
2138 | assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
2139 | auto &WorkingMI = cloneIfNew(MI); |
2140 | WorkingMI.setDesc(get(X86::MOVSDrr)); |
2141 | WorkingMI.removeOperand(3); |
2142 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2143 | OpIdx1, OpIdx2); |
2144 | } |
2145 | case X86::PCLMULQDQrr: |
2146 | case X86::VPCLMULQDQrr: |
2147 | case X86::VPCLMULQDQYrr: |
2148 | case X86::VPCLMULQDQZrr: |
2149 | case X86::VPCLMULQDQZ128rr: |
2150 | case X86::VPCLMULQDQZ256rr: { |
2151 | // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0] |
2152 | // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0] |
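| // e.g. Imm = 0x10 (SRC1.lo * SRC2.hi) commutes to 0x01 (SRC1.hi * SRC2.lo);
| // carry-less multiplication is commutative, so the product is unchanged.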
2153 | unsigned Imm = MI.getOperand(3).getImm(); |
2154 | unsigned Src1Hi = Imm & 0x01; |
2155 | unsigned Src2Hi = Imm & 0x10; |
2156 | auto &WorkingMI = cloneIfNew(MI); |
2157 | WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4)); |
2158 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2159 | OpIdx1, OpIdx2); |
2160 | } |
2161 | case X86::VPCMPBZ128rri: case X86::VPCMPUBZ128rri: |
2162 | case X86::VPCMPBZ256rri: case X86::VPCMPUBZ256rri: |
2163 | case X86::VPCMPBZrri: case X86::VPCMPUBZrri: |
2164 | case X86::VPCMPDZ128rri: case X86::VPCMPUDZ128rri: |
2165 | case X86::VPCMPDZ256rri: case X86::VPCMPUDZ256rri: |
2166 | case X86::VPCMPDZrri: case X86::VPCMPUDZrri: |
2167 | case X86::VPCMPQZ128rri: case X86::VPCMPUQZ128rri: |
2168 | case X86::VPCMPQZ256rri: case X86::VPCMPUQZ256rri: |
2169 | case X86::VPCMPQZrri: case X86::VPCMPUQZrri: |
2170 | case X86::VPCMPWZ128rri: case X86::VPCMPUWZ128rri: |
2171 | case X86::VPCMPWZ256rri: case X86::VPCMPUWZ256rri: |
2172 | case X86::VPCMPWZrri: case X86::VPCMPUWZrri: |
2173 | case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik: |
2174 | case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik: |
2175 | case X86::VPCMPBZrrik: case X86::VPCMPUBZrrik: |
2176 | case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik: |
2177 | case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik: |
2178 | case X86::VPCMPDZrrik: case X86::VPCMPUDZrrik: |
2179 | case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik: |
2180 | case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik: |
2181 | case X86::VPCMPQZrrik: case X86::VPCMPUQZrrik: |
2182 | case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik: |
2183 | case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik: |
2184 | case X86::VPCMPWZrrik: case X86::VPCMPUWZrrik: { |
2185 | // Flip comparison mode immediate (if necessary). |
2186 | unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7; |
2187 | Imm = X86::getSwappedVPCMPImm(Imm); |
2188 | auto &WorkingMI = cloneIfNew(MI); |
2189 | WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm); |
2190 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2191 | OpIdx1, OpIdx2); |
2192 | } |
2193 | case X86::VPCOMBri: case X86::VPCOMUBri: |
2194 | case X86::VPCOMDri: case X86::VPCOMUDri: |
2195 | case X86::VPCOMQri: case X86::VPCOMUQri: |
2196 | case X86::VPCOMWri: case X86::VPCOMUWri: { |
2197 | // Flip comparison mode immediate (if necessary). |
2198 | unsigned Imm = MI.getOperand(3).getImm() & 0x7; |
2199 | Imm = X86::getSwappedVPCOMImm(Imm); |
2200 | auto &WorkingMI = cloneIfNew(MI); |
2201 | WorkingMI.getOperand(3).setImm(Imm); |
2202 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2203 | OpIdx1, OpIdx2); |
2204 | } |
2205 | case X86::VCMPSDZrr: |
2206 | case X86::VCMPSSZrr: |
2207 | case X86::VCMPPDZrri: |
2208 | case X86::VCMPPSZrri: |
2209 | case X86::VCMPSHZrr: |
2210 | case X86::VCMPPHZrri: |
2211 | case X86::VCMPPHZ128rri: |
2212 | case X86::VCMPPHZ256rri: |
2213 | case X86::VCMPPDZ128rri: |
2214 | case X86::VCMPPSZ128rri: |
2215 | case X86::VCMPPDZ256rri: |
2216 | case X86::VCMPPSZ256rri: |
2217 | case X86::VCMPPDZrrik: |
2218 | case X86::VCMPPSZrrik: |
2219 | case X86::VCMPPDZ128rrik: |
2220 | case X86::VCMPPSZ128rrik: |
2221 | case X86::VCMPPDZ256rrik: |
2222 | case X86::VCMPPSZ256rrik: { |
2223 | unsigned Imm = |
2224 | MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f; |
2225 | Imm = X86::getSwappedVCMPImm(Imm); |
2226 | auto &WorkingMI = cloneIfNew(MI); |
2227 | WorkingMI.getOperand(MI.getNumExplicitOperands() - 1).setImm(Imm); |
2228 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2229 | OpIdx1, OpIdx2); |
2230 | } |
2231 | case X86::VPERM2F128rr: |
2232 | case X86::VPERM2I128rr: { |
2233 | // Flip permute source immediate. |
2234 | // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi. |
2235 | // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi. |
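| // e.g. Imm = 0x31 selects {Op0.hi, Op1.hi}; after commuting the sources,
| // 0x31 ^ 0x22 = 0x13 picks the same lanes from the swapped operands.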
2236 | int8_t Imm = MI.getOperand(3).getImm() & 0xFF; |
2237 | auto &WorkingMI = cloneIfNew(MI); |
2238 | WorkingMI.getOperand(3).setImm(Imm ^ 0x22); |
2239 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2240 | OpIdx1, OpIdx2); |
2241 | } |
2242 | case X86::MOVHLPSrr: |
2243 | case X86::UNPCKHPDrr: |
2244 | case X86::VMOVHLPSrr: |
2245 | case X86::VUNPCKHPDrr: |
2246 | case X86::VMOVHLPSZrr: |
2247 | case X86::VUNPCKHPDZ128rr: { |
2248 | assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");
2249 | |
2250 | unsigned Opc = MI.getOpcode(); |
2251 | switch (Opc) { |
2252 | default: llvm_unreachable("Unreachable!");
2253 | case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break; |
2254 | case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break; |
2255 | case X86::VMOVHLPSrr: Opc = X86::VUNPCKHPDrr; break; |
2256 | case X86::VUNPCKHPDrr: Opc = X86::VMOVHLPSrr; break; |
2257 | case X86::VMOVHLPSZrr: Opc = X86::VUNPCKHPDZ128rr; break; |
2258 | case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr; break; |
2259 | } |
2260 | auto &WorkingMI = cloneIfNew(MI); |
2261 | WorkingMI.setDesc(get(Opc)); |
2262 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2263 | OpIdx1, OpIdx2); |
2264 | } |
2265 | case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: { |
2266 | auto &WorkingMI = cloneIfNew(MI); |
2267 | unsigned OpNo = MI.getDesc().getNumOperands() - 1; |
2268 | X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm()); |
2269 | WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC)); |
2270 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2271 | OpIdx1, OpIdx2); |
2272 | } |
2273 | case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi: |
2274 | case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi: |
2275 | case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi: |
2276 | case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi: |
2277 | case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi: |
2278 | case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi: |
2279 | case X86::VPTERNLOGDZrrik: |
2280 | case X86::VPTERNLOGDZ128rrik: |
2281 | case X86::VPTERNLOGDZ256rrik: |
2282 | case X86::VPTERNLOGQZrrik: |
2283 | case X86::VPTERNLOGQZ128rrik: |
2284 | case X86::VPTERNLOGQZ256rrik: |
2285 | case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz: |
2286 | case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz: |
2287 | case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz: |
2288 | case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz: |
2289 | case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz: |
2290 | case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz: |
2291 | case X86::VPTERNLOGDZ128rmbi: |
2292 | case X86::VPTERNLOGDZ256rmbi: |
2293 | case X86::VPTERNLOGDZrmbi: |
2294 | case X86::VPTERNLOGQZ128rmbi: |
2295 | case X86::VPTERNLOGQZ256rmbi: |
2296 | case X86::VPTERNLOGQZrmbi: |
2297 | case X86::VPTERNLOGDZ128rmbikz: |
2298 | case X86::VPTERNLOGDZ256rmbikz: |
2299 | case X86::VPTERNLOGDZrmbikz: |
2300 | case X86::VPTERNLOGQZ128rmbikz: |
2301 | case X86::VPTERNLOGQZ256rmbikz: |
2302 | case X86::VPTERNLOGQZrmbikz: { |
2303 | auto &WorkingMI = cloneIfNew(MI); |
2304 | commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2); |
2305 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2306 | OpIdx1, OpIdx2); |
2307 | } |
2308 | default: { |
2309 | if (isCommutableVPERMV3Instruction(MI.getOpcode())) { |
2310 | unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode()); |
2311 | auto &WorkingMI = cloneIfNew(MI); |
2312 | WorkingMI.setDesc(get(Opc)); |
2313 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2314 | OpIdx1, OpIdx2); |
2315 | } |
2316 | |
2317 | const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(), |
2318 | MI.getDesc().TSFlags); |
2319 | if (FMA3Group) { |
2320 | unsigned Opc = |
2321 | getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group); |
2322 | auto &WorkingMI = cloneIfNew(MI); |
2323 | WorkingMI.setDesc(get(Opc)); |
2324 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false, |
2325 | OpIdx1, OpIdx2); |
2326 | } |
2327 | |
2328 | return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); |
2329 | } |
2330 | } |
2331 | } |
2332 | |
2333 | bool |
2334 | X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI, |
2335 | unsigned &SrcOpIdx1, |
2336 | unsigned &SrcOpIdx2, |
2337 | bool IsIntrinsic) const { |
2338 | uint64_t TSFlags = MI.getDesc().TSFlags; |
2339 | |
2340 | unsigned FirstCommutableVecOp = 1; |
2341 | unsigned LastCommutableVecOp = 3; |
2342 | unsigned KMaskOp = -1U; |
2343 | if (X86II::isKMasked(TSFlags)) { |
2344 | // For k-zero-masked operations it is OK to commute the first vector
2345 | // operand, unless this is an intrinsic instruction.
2346 | // For regular k-masked operations a conservative choice is made, as the
2347 | // elements of the first vector operand, for which the corresponding bit |
2348 | // in the k-mask operand is set to 0, are copied to the result of the |
2349 | // instruction. |
2350 | // TODO/FIXME: The commute still may be legal if it is known that the |
2351 | // k-mask operand is set to either all ones or all zeroes. |
2352 | // It is also OK to commute the 1st operand if all users of MI use only
2353 | // the elements enabled by the k-mask operand. For example, |
2354 | // v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i] |
2355 | // : v1[i]; |
2356 | // VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 -> |
2357 | // // Ok, to commute v1 in FMADD213PSZrk. |
2358 | |
2359 | // The k-mask operand has index = 2 for masked and zero-masked operations. |
2360 | KMaskOp = 2; |
2361 | |
2362 | // The operand with index = 1 is used as a source for those elements for |
2363 | // which the corresponding bit in the k-mask is set to 0. |
2364 | if (X86II::isKMergeMasked(TSFlags) || IsIntrinsic) |
2365 | FirstCommutableVecOp = 3; |
2366 | |
2367 | LastCommutableVecOp++; |
2368 | } else if (IsIntrinsic) { |
2369 | // Commuting the first operand of an intrinsic instruction isn't possible |
2370 | // unless we can prove that only the lowest element of the result is used. |
2371 | FirstCommutableVecOp = 2; |
2372 | } |
2373 | |
2374 | if (isMem(MI, LastCommutableVecOp)) |
2375 | LastCommutableVecOp--; |
2376 | |
2377 | // Only operands in [FirstCommutableVecOp, LastCommutableVecOp] are commutable.
2378 | // Also, the value 'CommuteAnyOperandIndex' is valid here as it means |
2379 | // that the operand is not specified/fixed. |
2380 | if (SrcOpIdx1 != CommuteAnyOperandIndex && |
2381 | (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp || |
2382 | SrcOpIdx1 == KMaskOp)) |
2383 | return false; |
2384 | if (SrcOpIdx2 != CommuteAnyOperandIndex && |
2385 | (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp || |
2386 | SrcOpIdx2 == KMaskOp)) |
2387 | return false; |
2388 | |
2389 | // Look for two different register operands assumed to be commutable |
2390 | // regardless of the FMA opcode. The FMA opcode is adjusted later. |
2391 | if (SrcOpIdx1 == CommuteAnyOperandIndex || |
2392 | SrcOpIdx2 == CommuteAnyOperandIndex) { |
2393 | unsigned CommutableOpIdx2 = SrcOpIdx2; |
2394 | |
2395 | // At least one of the operands to be commuted is not specified, and
2396 | // this method is free to choose appropriate commutable operands.
2397 | if (SrcOpIdx1 == SrcOpIdx2) |
2398 | // Neither operand is fixed. By default, set one of the commutable
2399 | // operands to the last register operand of the instruction.
2400 | CommutableOpIdx2 = LastCommutableVecOp; |
2401 | else if (SrcOpIdx2 == CommuteAnyOperandIndex) |
2402 | // Only one of the operands is not fixed.
2403 | CommutableOpIdx2 = SrcOpIdx1; |
2404 | |
2405 | // CommutableOpIdx2 is well defined now. Let's choose another commutable |
2406 | // operand and assign its index to CommutableOpIdx1. |
2407 | Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg(); |
2408 | |
2409 | unsigned CommutableOpIdx1; |
2410 | for (CommutableOpIdx1 = LastCommutableVecOp; |
2411 | CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) { |
2412 | // Just ignore and skip the k-mask operand. |
2413 | if (CommutableOpIdx1 == KMaskOp) |
2414 | continue; |
2415 | |
2416 | // The commuted operands must have different registers. |
2417 | // Otherwise, the commute transformation does not change anything
2418 | // and is useless.
2419 | if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg()) |
2420 | break; |
2421 | } |
2422 | |
2423 | // No appropriate commutable operands were found. |
2424 | if (CommutableOpIdx1 < FirstCommutableVecOp) |
2425 | return false; |
2426 | |
2427 | // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2
2428 | // to return those values. |
2429 | if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, |
2430 | CommutableOpIdx1, CommutableOpIdx2)) |
2431 | return false; |
2432 | } |
2433 | |
2434 | return true; |
2435 | } |
2436 | |
2437 | bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI, |
2438 | unsigned &SrcOpIdx1, |
2439 | unsigned &SrcOpIdx2) const { |
2440 | const MCInstrDesc &Desc = MI.getDesc(); |
2441 | if (!Desc.isCommutable()) |
2442 | return false; |
2443 | |
2444 | switch (MI.getOpcode()) { |
2445 | case X86::CMPSDrr: |
2446 | case X86::CMPSSrr: |
2447 | case X86::CMPPDrri: |
2448 | case X86::CMPPSrri: |
2449 | case X86::VCMPSDrr: |
2450 | case X86::VCMPSSrr: |
2451 | case X86::VCMPPDrri: |
2452 | case X86::VCMPPSrri: |
2453 | case X86::VCMPPDYrri: |
2454 | case X86::VCMPPSYrri: |
2455 | case X86::VCMPSDZrr: |
2456 | case X86::VCMPSSZrr: |
2457 | case X86::VCMPPDZrri: |
2458 | case X86::VCMPPSZrri: |
2459 | case X86::VCMPSHZrr: |
2460 | case X86::VCMPPHZrri: |
2461 | case X86::VCMPPHZ128rri: |
2462 | case X86::VCMPPHZ256rri: |
2463 | case X86::VCMPPDZ128rri: |
2464 | case X86::VCMPPSZ128rri: |
2465 | case X86::VCMPPDZ256rri: |
2466 | case X86::VCMPPSZ256rri: |
2467 | case X86::VCMPPDZrrik: |
2468 | case X86::VCMPPSZrrik: |
2469 | case X86::VCMPPDZ128rrik: |
2470 | case X86::VCMPPSZ128rrik: |
2471 | case X86::VCMPPDZ256rrik: |
2472 | case X86::VCMPPSZ256rrik: { |
2473 | unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0; |
2474 | |
2475 | // A float comparison can safely be commuted for
2476 | // Ordered/Unordered/Equal/NotEqual tests.
2477 | unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7; |
2478 | switch (Imm) { |
2479 | default: |
2480 | // EVEX versions can be commuted. |
2481 | if ((Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX) |
2482 | break; |
2483 | return false; |
2484 | case 0x00: // EQUAL |
2485 | case 0x03: // UNORDERED |
2486 | case 0x04: // NOT EQUAL |
2487 | case 0x07: // ORDERED |
2488 | break; |
2489 | } |
2490 | |
2491 | // The indices of the commutable operands are 1 and 2 (or 2 and 3 |
2492 | // when masked). |
2493 | // Assign them to the returned operand indices here. |
2494 | return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset, |
2495 | 2 + OpOffset); |
2496 | } |
2497 | case X86::MOVSSrr: |
2498 | // X86::MOVSDrr is always commutable. MOVSS is only commutable if we can
2499 | // form an SSE4.1 blend. We assume VMOVSSrr/VMOVSDrr are always commutable
2500 | // since AVX implies SSE4.1.
2501 | if (Subtarget.hasSSE41()) |
2502 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
2503 | return false; |
2504 | case X86::SHUFPDrri: |
2505 | // We can commute this to MOVSD. |
2506 | if (MI.getOperand(3).getImm() == 0x02) |
2507 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
2508 | return false; |
2509 | case X86::MOVHLPSrr: |
2510 | case X86::UNPCKHPDrr: |
2511 | case X86::VMOVHLPSrr: |
2512 | case X86::VUNPCKHPDrr: |
2513 | case X86::VMOVHLPSZrr: |
2514 | case X86::VUNPCKHPDZ128rr: |
2515 | if (Subtarget.hasSSE2()) |
2516 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
2517 | return false; |
2518 | case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi: |
2519 | case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi: |
2520 | case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi: |
2521 | case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi: |
2522 | case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi: |
2523 | case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi: |
2524 | case X86::VPTERNLOGDZrrik: |
2525 | case X86::VPTERNLOGDZ128rrik: |
2526 | case X86::VPTERNLOGDZ256rrik: |
2527 | case X86::VPTERNLOGQZrrik: |
2528 | case X86::VPTERNLOGQZ128rrik: |
2529 | case X86::VPTERNLOGQZ256rrik: |
2530 | case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz: |
2531 | case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz: |
2532 | case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz: |
2533 | case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz: |
2534 | case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz: |
2535 | case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz: |
2536 | case X86::VPTERNLOGDZ128rmbi: |
2537 | case X86::VPTERNLOGDZ256rmbi: |
2538 | case X86::VPTERNLOGDZrmbi: |
2539 | case X86::VPTERNLOGQZ128rmbi: |
2540 | case X86::VPTERNLOGQZ256rmbi: |
2541 | case X86::VPTERNLOGQZrmbi: |
2542 | case X86::VPTERNLOGDZ128rmbikz: |
2543 | case X86::VPTERNLOGDZ256rmbikz: |
2544 | case X86::VPTERNLOGDZrmbikz: |
2545 | case X86::VPTERNLOGQZ128rmbikz: |
2546 | case X86::VPTERNLOGQZ256rmbikz: |
2547 | case X86::VPTERNLOGQZrmbikz: |
2548 | return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
2549 | case X86::VPDPWSSDYrr: |
2550 | case X86::VPDPWSSDrr: |
2551 | case X86::VPDPWSSDSYrr: |
2552 | case X86::VPDPWSSDSrr: |
2553 | case X86::VPDPWSSDZ128r: |
2554 | case X86::VPDPWSSDZ128rk: |
2555 | case X86::VPDPWSSDZ128rkz: |
2556 | case X86::VPDPWSSDZ256r: |
2557 | case X86::VPDPWSSDZ256rk: |
2558 | case X86::VPDPWSSDZ256rkz: |
2559 | case X86::VPDPWSSDZr: |
2560 | case X86::VPDPWSSDZrk: |
2561 | case X86::VPDPWSSDZrkz: |
2562 | case X86::VPDPWSSDSZ128r: |
2563 | case X86::VPDPWSSDSZ128rk: |
2564 | case X86::VPDPWSSDSZ128rkz: |
2565 | case X86::VPDPWSSDSZ256r: |
2566 | case X86::VPDPWSSDSZ256rk: |
2567 | case X86::VPDPWSSDSZ256rkz: |
2568 | case X86::VPDPWSSDSZr: |
2569 | case X86::VPDPWSSDSZrk: |
2570 | case X86::VPDPWSSDSZrkz: |
2571 | case X86::VPMADD52HUQZ128r: |
2572 | case X86::VPMADD52HUQZ128rk: |
2573 | case X86::VPMADD52HUQZ128rkz: |
2574 | case X86::VPMADD52HUQZ256r: |
2575 | case X86::VPMADD52HUQZ256rk: |
2576 | case X86::VPMADD52HUQZ256rkz: |
2577 | case X86::VPMADD52HUQZr: |
2578 | case X86::VPMADD52HUQZrk: |
2579 | case X86::VPMADD52HUQZrkz: |
2580 | case X86::VPMADD52LUQZ128r: |
2581 | case X86::VPMADD52LUQZ128rk: |
2582 | case X86::VPMADD52LUQZ128rkz: |
2583 | case X86::VPMADD52LUQZ256r: |
2584 | case X86::VPMADD52LUQZ256rk: |
2585 | case X86::VPMADD52LUQZ256rkz: |
2586 | case X86::VPMADD52LUQZr: |
2587 | case X86::VPMADD52LUQZrk: |
2588 | case X86::VPMADD52LUQZrkz: |
2589 | case X86::VFMADDCPHZr: |
2590 | case X86::VFMADDCPHZrk: |
2591 | case X86::VFMADDCPHZrkz: |
2592 | case X86::VFMADDCPHZ128r: |
2593 | case X86::VFMADDCPHZ128rk: |
2594 | case X86::VFMADDCPHZ128rkz: |
2595 | case X86::VFMADDCPHZ256r: |
2596 | case X86::VFMADDCPHZ256rk: |
2597 | case X86::VFMADDCPHZ256rkz: |
2598 | case X86::VFMADDCSHZr: |
2599 | case X86::VFMADDCSHZrk: |
2600 | case X86::VFMADDCSHZrkz: { |
2601 | unsigned CommutableOpIdx1 = 2; |
2602 | unsigned CommutableOpIdx2 = 3; |
2603 | if (X86II::isKMasked(Desc.TSFlags)) { |
2604 | // Skip the mask register. |
2605 | ++CommutableOpIdx1; |
2606 | ++CommutableOpIdx2; |
2607 | } |
2608 | if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, |
2609 | CommutableOpIdx1, CommutableOpIdx2)) |
2610 | return false; |
2611 | if (!MI.getOperand(SrcOpIdx1).isReg() || |
2612 | !MI.getOperand(SrcOpIdx2).isReg()) |
2613 | // One of the chosen operands is not a register; we cannot commute.
2614 | return false; |
2615 | return true; |
2616 | } |
2617 | |
2618 | default: |
2619 | const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(), |
2620 | MI.getDesc().TSFlags); |
2621 | if (FMA3Group) |
2622 | return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2, |
2623 | FMA3Group->isIntrinsic()); |
2624 | |
2625 | // Handle masked instructions, since we need to skip over the mask input
2626 | // and the preserved input.
2627 | if (X86II::isKMasked(Desc.TSFlags)) { |
2628 | // First assume that the first input is the mask operand and skip past it. |
2629 | unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1; |
2630 | unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2; |
2631 | // Check if the first input is tied. If there isn't one then we only |
2632 | // need to skip the mask operand which we did above. |
2633 | if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(), |
2634 | MCOI::TIED_TO) != -1)) { |
2635 | // If this is zero masking instruction with a tied operand, we need to |
2636 | // move the first index back to the first input since this must |
2637 | // be a 3 input instruction and we want the first two non-mask inputs. |
2638 | // Otherwise this is a 2 input instruction with a preserved input and |
2639 | // mask, so we need to move the indices to skip one more input. |
2640 | if (X86II::isKMergeMasked(Desc.TSFlags)) { |
2641 | ++CommutableOpIdx1; |
2642 | ++CommutableOpIdx2; |
2643 | } else { |
2644 | --CommutableOpIdx1; |
2645 | } |
2646 | } |
2647 | |
2648 | if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, |
2649 | CommutableOpIdx1, CommutableOpIdx2)) |
2650 | return false; |
2651 | |
2652 | if (!MI.getOperand(SrcOpIdx1).isReg() || |
2653 | !MI.getOperand(SrcOpIdx2).isReg()) |
2654 | // One of the chosen operands is not a register; we cannot commute.
2655 | return false; |
2656 | return true; |
2657 | } |
2658 | |
2659 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
2660 | } |
2661 | return false; |
2662 | } |
2663 | |
2664 | static bool isConvertibleLEA(MachineInstr *MI) { |
2665 | unsigned Opcode = MI->getOpcode(); |
2666 | if (Opcode != X86::LEA32r && Opcode != X86::LEA64r && |
2667 | Opcode != X86::LEA64_32r) |
2668 | return false; |
2669 | |
2670 | const MachineOperand &Scale = MI->getOperand(1 + X86::AddrScaleAmt); |
2671 | const MachineOperand &Disp = MI->getOperand(1 + X86::AddrDisp); |
2672 | const MachineOperand &Segment = MI->getOperand(1 + X86::AddrSegmentReg); |
2673 | |
2674 | if (Segment.getReg() != 0 || !Disp.isImm() || Disp.getImm() != 0 || |
2675 | Scale.getImm() > 1) |
2676 | return false; |
2677 | |
2678 | return true; |
2679 | } |
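|
| // Illustrative inputs for the checks above (Intel-style syntax, assumed for
| // illustration):
| //   lea r3, [r1 + r2]       ; scale 1, disp 0, no segment -> convertible
| //   lea r3, [r1 + 4*r2]     ; scale > 1                   -> rejected
| //   lea r3, [r1 + r2 + 8]   ; non-zero displacement       -> rejected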
2680 | |
2681 | bool X86InstrInfo::hasCommutePreference(MachineInstr &MI, bool &Commute) const { |
2682 | // Currently we're only interested in the following sequence:
2683 | // r3 = lea r1, r2
2684 | // r5 = add r3, r4
2685 | // Both r3 and r4 are killed in the add. We prefer the add to have the
2686 | // operand order
2687 | // r5 = add r4, r3
2688 | // so that later X86FixupLEAs can rewrite the lea instruction as an add.
2689 | unsigned Opcode = MI.getOpcode(); |
2690 | if (Opcode != X86::ADD32rr && Opcode != X86::ADD64rr) |
2691 | return false; |
2692 | |
2693 | const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
2694 | Register Reg1 = MI.getOperand(1).getReg(); |
2695 | Register Reg2 = MI.getOperand(2).getReg(); |
2696 | |
2697 | // Check if Reg1 comes from LEA in the same MBB. |
2698 | if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg1)) { |
2699 | if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) { |
2700 | Commute = true; |
2701 | return true; |
2702 | } |
2703 | } |
2704 | |
2705 | // Check if Reg2 comes from LEA in the same MBB. |
2706 | if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg2)) { |
2707 | if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) { |
2708 | Commute = false; |
2709 | return true; |
2710 | } |
2711 | } |
2712 | |
2713 | return false; |
2714 | } |
2715 | |
2716 | int X86::getCondSrcNoFromDesc(const MCInstrDesc &MCID) { |
2717 | unsigned Opcode = MCID.getOpcode(); |
2718 | if (!(X86::isJCC(Opcode) || X86::isSETCC(Opcode) || X86::isCMOVCC(Opcode))) |
2719 | return -1; |
2720 | // Assume that the condition code is always the last use operand.
2721 | unsigned NumUses = MCID.getNumOperands() - MCID.getNumDefs(); |
2722 | return NumUses - 1; |
2723 | } |
2724 | |
2725 | X86::CondCode X86::getCondFromMI(const MachineInstr &MI) { |
2726 | const MCInstrDesc &MCID = MI.getDesc(); |
2727 | int CondNo = getCondSrcNoFromDesc(MCID); |
2728 | if (CondNo < 0) |
2729 | return X86::COND_INVALID; |
2730 | CondNo += MCID.getNumDefs(); |
2731 | return static_cast<X86::CondCode>(MI.getOperand(CondNo).getImm()); |
2732 | } |
2733 | |
2734 | X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) { |
2735 | return X86::isJCC(MI.getOpcode()) ? X86::getCondFromMI(MI) |
2736 | : X86::COND_INVALID; |
2737 | } |
2738 | |
2739 | X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) { |
2740 | return X86::isSETCC(MI.getOpcode()) ? X86::getCondFromMI(MI) |
2741 | : X86::COND_INVALID; |
2742 | } |
2743 | |
2744 | X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) { |
2745 | return X86::isCMOVCC(MI.getOpcode()) ? X86::getCondFromMI(MI) |
2746 | : X86::COND_INVALID; |
2747 | } |
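|
| // Usage sketch for the three wrappers above: each filters by opcode class
| // before delegating to getCondFromMI. For example, for `JCC_1 %bb, 4` the
| // condition immediate is the last use operand, so (assuming the standard
| // X86::CondCode numbering, where 4 is COND_E):
| //   if (X86::getCondFromBranch(MI) == X86::COND_E)
| //     ...; // the branch is taken when ZF == 1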
2748 | |
2749 | /// Return the inverse of the specified condition, |
2750 | /// e.g. turning COND_E to COND_NE. |
2751 | X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) { |
2752 | switch (CC) { |
2753 | default: llvm_unreachable("Illegal condition code!");
2754 | case X86::COND_E: return X86::COND_NE; |
2755 | case X86::COND_NE: return X86::COND_E; |
2756 | case X86::COND_L: return X86::COND_GE; |
2757 | case X86::COND_LE: return X86::COND_G; |
2758 | case X86::COND_G: return X86::COND_LE; |
2759 | case X86::COND_GE: return X86::COND_L; |
2760 | case X86::COND_B: return X86::COND_AE; |
2761 | case X86::COND_BE: return X86::COND_A; |
2762 | case X86::COND_A: return X86::COND_BE; |
2763 | case X86::COND_AE: return X86::COND_B; |
2764 | case X86::COND_S: return X86::COND_NS; |
2765 | case X86::COND_NS: return X86::COND_S; |
2766 | case X86::COND_P: return X86::COND_NP; |
2767 | case X86::COND_NP: return X86::COND_P; |
2768 | case X86::COND_O: return X86::COND_NO; |
2769 | case X86::COND_NO: return X86::COND_O; |
2770 | case X86::COND_NE_OR_P: return X86::COND_E_AND_NP; |
2771 | case X86::COND_E_AND_NP: return X86::COND_NE_OR_P; |
2772 | } |
2773 | } |
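|
| // Worked example: when reversing `jcc TBB; jmp FBB` so that TBB becomes the
| // fall-through, only the condition needs to change:
| //   X86::CondCode Inv = X86::GetOppositeBranchCondition(X86::COND_E);
| //   // Inv == X86::COND_NE, i.e. `je` becomes `jne`.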
2774 | |
2775 | /// Assuming the flags are set by MI(a,b), return the condition code if we |
2776 | /// modify the instructions such that flags are set by MI(b,a). |
2777 | static X86::CondCode getSwappedCondition(X86::CondCode CC) { |
2778 | switch (CC) { |
2779 | default: return X86::COND_INVALID; |
2780 | case X86::COND_E: return X86::COND_E; |
2781 | case X86::COND_NE: return X86::COND_NE; |
2782 | case X86::COND_L: return X86::COND_G; |
2783 | case X86::COND_LE: return X86::COND_GE; |
2784 | case X86::COND_G: return X86::COND_L; |
2785 | case X86::COND_GE: return X86::COND_LE; |
2786 | case X86::COND_B: return X86::COND_A; |
2787 | case X86::COND_BE: return X86::COND_AE; |
2788 | case X86::COND_A: return X86::COND_B; |
2789 | case X86::COND_AE: return X86::COND_BE; |
2790 | } |
2791 | } |
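|
| // Worked example: if EFLAGS were set by `cmp a, b`, COND_L tests a < b.
| // After rewriting to `cmp b, a`, the same fact is tested by COND_G, hence
| // getSwappedCondition(X86::COND_L) == X86::COND_G. Sign, parity and overflow
| // conditions have no operand-swapped equivalent and map to COND_INVALID.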
2792 | |
2793 | std::pair<X86::CondCode, bool> |
2794 | X86::getX86ConditionCode(CmpInst::Predicate Predicate) { |
2795 | X86::CondCode CC = X86::COND_INVALID; |
2796 | bool NeedSwap = false; |
2797 | switch (Predicate) { |
2798 | default: break; |
2799 | // Floating-point Predicates |
2800 | case CmpInst::FCMP_UEQ: CC = X86::COND_E; break; |
2801 | case CmpInst::FCMP_OLT: NeedSwap = true; [[fallthrough]]; |
2802 | case CmpInst::FCMP_OGT: CC = X86::COND_A; break; |
2803 | case CmpInst::FCMP_OLE: NeedSwap = true; [[fallthrough]]; |
2804 | case CmpInst::FCMP_OGE: CC = X86::COND_AE; break; |
2805 | case CmpInst::FCMP_UGT: NeedSwap = true; [[fallthrough]]; |
2806 | case CmpInst::FCMP_ULT: CC = X86::COND_B; break; |
2807 | case CmpInst::FCMP_UGE: NeedSwap = true; [[fallthrough]]; |
2808 | case CmpInst::FCMP_ULE: CC = X86::COND_BE; break; |
2809 | case CmpInst::FCMP_ONE: CC = X86::COND_NE; break; |
2810 | case CmpInst::FCMP_UNO: CC = X86::COND_P; break; |
2811 | case CmpInst::FCMP_ORD: CC = X86::COND_NP; break; |
2812 | case CmpInst::FCMP_OEQ: [[fallthrough]]; |
2813 | case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break; |
2814 | |
2815 | // Integer Predicates |
2816 | case CmpInst::ICMP_EQ: CC = X86::COND_E; break; |
2817 | case CmpInst::ICMP_NE: CC = X86::COND_NE; break; |
2818 | case CmpInst::ICMP_UGT: CC = X86::COND_A; break; |
2819 | case CmpInst::ICMP_UGE: CC = X86::COND_AE; break; |
2820 | case CmpInst::ICMP_ULT: CC = X86::COND_B; break; |
2821 | case CmpInst::ICMP_ULE: CC = X86::COND_BE; break; |
2822 | case CmpInst::ICMP_SGT: CC = X86::COND_G; break; |
2823 | case CmpInst::ICMP_SGE: CC = X86::COND_GE; break; |
2824 | case CmpInst::ICMP_SLT: CC = X86::COND_L; break; |
2825 | case CmpInst::ICMP_SLE: CC = X86::COND_LE; break; |
2826 | } |
2827 | |
2828 | return std::make_pair(CC, NeedSwap); |
2829 | } |
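|
| // Example: FCMP_OLT(a, b) cannot be tested with the operands in place, so
| // the helper returns {X86::COND_A, NeedSwap=true}: emit the compare with the
| // operands swapped and test "above" (b > a  <=>  a < b).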
2830 | |
2831 | /// Return a cmov opcode for the given register size in bytes and operand type.
2832 | unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) { |
2833 | switch(RegBytes) { |
2834 | default: llvm_unreachable("Illegal register size!");
2835 | case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr; |
2836 | case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr; |
2837 | case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr; |
2838 | } |
2839 | } |
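|
| // Usage sketch: selecting between two 64-bit registers resolves to
| //   unsigned Opc = X86::getCMovOpcode(8, /*HasMemoryOperand=*/false);
| //   // Opc == X86::CMOV64rr. There is no 8-bit cmov, hence the
| //   // unreachable default above.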
2840 | |
2841 | /// Get the VPCMP immediate for the given condition. |
2842 | unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) { |
2843 | switch (CC) { |
2844 | default: llvm_unreachable("Unexpected SETCC condition");
2845 | case ISD::SETNE: return 4; |
2846 | case ISD::SETEQ: return 0; |
2847 | case ISD::SETULT: |
2848 | case ISD::SETLT: return 1; |
2849 | case ISD::SETUGT: |
2850 | case ISD::SETGT: return 6; |
2851 | case ISD::SETUGE: |
2852 | case ISD::SETGE: return 5; |
2853 | case ISD::SETULE: |
2854 | case ISD::SETLE: return 2; |
2855 | } |
2856 | } |
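|
| // Note that the signed/unsigned pairs above share an immediate: signedness
| // is carried by the opcode chosen elsewhere (e.g. VPCMPD vs. VPCMPUD), so
| // SETLT and SETULT both encode as 1.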
2857 | |
2858 | /// Get the VPCMP immediate if the operands are swapped. |
2859 | unsigned X86::getSwappedVPCMPImm(unsigned Imm) { |
2860 | switch (Imm) { |
2861 | default: llvm_unreachable("Unreachable!");
2862 | case 0x01: Imm = 0x06; break; // LT -> NLE |
2863 | case 0x02: Imm = 0x05; break; // LE -> NLT |
2864 | case 0x05: Imm = 0x02; break; // NLT -> LE |
2865 | case 0x06: Imm = 0x01; break; // NLE -> LT |
2866 | case 0x00: // EQ |
2867 | case 0x03: // FALSE |
2868 | case 0x04: // NE |
2869 | case 0x07: // TRUE |
2870 | break; |
2871 | } |
2872 | |
2873 | return Imm; |
2874 | } |
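|
| // Worked example: `vpcmpd` with immediate 1 tests src1 < src2 (LT). With the
| // operands commuted, the same predicate becomes src2 > src1, i.e. NLE, so
| // getSwappedVPCMPImm(1) == 6. EQ/NE/TRUE/FALSE are symmetric and keep their
| // immediate.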
2875 | |
2876 | /// Get the VPCOM immediate if the operands are swapped. |
2877 | unsigned X86::getSwappedVPCOMImm(unsigned Imm) { |
2878 | switch (Imm) { |
2879 | default: llvm_unreachable("Unreachable!");
2880 | case 0x00: Imm = 0x02; break; // LT -> GT |
2881 | case 0x01: Imm = 0x03; break; // LE -> GE |
2882 | case 0x02: Imm = 0x00; break; // GT -> LT |
2883 | case 0x03: Imm = 0x01; break; // GE -> LE |
2884 | case 0x04: // EQ |
2885 | case 0x05: // NE |
2886 | case 0x06: // FALSE |
2887 | case 0x07: // TRUE |
2888 | break; |
2889 | } |
2890 | |
2891 | return Imm; |
2892 | } |
2893 | |
2894 | /// Get the VCMP immediate if the operands are swapped. |
2895 | unsigned X86::getSwappedVCMPImm(unsigned Imm) { |
2896 | // Only need the lower 2 bits to distinguish.
2897 | switch (Imm & 0x3) { |
2898 | default: llvm_unreachable("Unreachable!");
2899 | case 0x00: case 0x03: |
2900 | // EQ/NE/TRUE/FALSE/ORD/UNORD don't change immediate when commuted. |
2901 | break; |
2902 | case 0x01: case 0x02: |
2903 | // Need to toggle bits 3:0. Bit 4 stays the same. |
2904 | Imm ^= 0xf; |
2905 | break; |
2906 | } |
2907 | |
2908 | return Imm; |
2909 | } |
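|
| // Bit trick used above: in the VCMP encoding, the predicates with low bits
| // 0x1/0x2 (LT/LE) and 0xe/0xd (GT/GE) are complements in bits 3:0, so an
| // XOR with 0xf maps one to the other, e.g. 0x01 (LT_OS) ^ 0xf == 0x0e
| // (GT_OS), the correct predicate once the operands are commuted.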
2910 | |
2911 | /// Return true if Reg is an X87 register.
2912 | static bool isX87Reg(unsigned Reg) { |
2913 | return (Reg == X86::FPCW || Reg == X86::FPSW || |
2914 | (Reg >= X86::ST0 && Reg <= X86::ST7)); |
2915 | } |
2916 | |
2917 | /// Check if the instruction is an X87 instruction.
2918 | bool X86::isX87Instruction(MachineInstr &MI) { |
2919 | for (const MachineOperand &MO : MI.operands()) { |
2920 | if (!MO.isReg()) |
2921 | continue; |
2922 | if (isX87Reg(MO.getReg())) |
2923 | return true; |
2924 | } |
2925 | return false; |
2926 | } |
2927 | |
2928 | bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const { |
2929 | switch (MI.getOpcode()) { |
2930 | case X86::TCRETURNdi: |
2931 | case X86::TCRETURNri: |
2932 | case X86::TCRETURNmi: |
2933 | case X86::TCRETURNdi64: |
2934 | case X86::TCRETURNri64: |
2935 | case X86::TCRETURNmi64: |
2936 | return true; |
2937 | default: |
2938 | return false; |
2939 | } |
2940 | } |
2941 | |
2942 | bool X86InstrInfo::canMakeTailCallConditional( |
2943 | SmallVectorImpl<MachineOperand> &BranchCond, |
2944 | const MachineInstr &TailCall) const { |
2945 | if (TailCall.getOpcode() != X86::TCRETURNdi && |
2946 | TailCall.getOpcode() != X86::TCRETURNdi64) { |
2947 | // Only direct calls can be done with a conditional branch. |
2948 | return false; |
2949 | } |
2950 | |
2951 | const MachineFunction *MF = TailCall.getParent()->getParent(); |
2952 | if (Subtarget.isTargetWin64() && MF->hasWinCFI()) { |
2953 | // Conditional tail calls confuse the Win64 unwinder. |
2954 | return false; |
2955 | } |
2956 | |
2957 | assert(BranchCond.size() == 1);
2958 | if (BranchCond[0].getImm() > X86::LAST_VALID_COND) { |
2959 | // Can't make a conditional tail call with this condition. |
2960 | return false; |
2961 | } |
2962 | |
2963 | const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); |
2964 | if (X86FI->getTCReturnAddrDelta() != 0 || |
2965 | TailCall.getOperand(1).getImm() != 0) { |
2966 | // A conditional tail call cannot do any stack adjustment. |
2967 | return false; |
2968 | } |
2969 | |
2970 | return true; |
2971 | } |
2972 | |
2973 | void X86InstrInfo::replaceBranchWithTailCall( |
2974 | MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond, |
2975 | const MachineInstr &TailCall) const { |
2976 | assert(canMakeTailCallConditional(BranchCond, TailCall));
2977 | |
2978 | MachineBasicBlock::iterator I = MBB.end(); |
2979 | while (I != MBB.begin()) { |
2980 | --I; |
2981 | if (I->isDebugInstr()) |
2982 | continue; |
2983 | if (!I->isBranch()) |
2984 | assert(0 && "Can't find the branch to replace!");
2985 | |
2986 | X86::CondCode CC = X86::getCondFromBranch(*I); |
2987 | assert(BranchCond.size() == 1);
2988 | if (CC != BranchCond[0].getImm()) |
2989 | continue; |
2990 | |
2991 | break; |
2992 | } |
2993 | |
2994 | unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc |
2995 | : X86::TCRETURNdi64cc; |
2996 | |
2997 | auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc)); |
2998 | MIB->addOperand(TailCall.getOperand(0)); // Destination. |
2999 | MIB.addImm(0); // Stack offset (not used). |
3000 | MIB->addOperand(BranchCond[0]); // Condition. |
3001 | MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters. |
3002 | |
3003 | // Add implicit uses and defs of all live regs potentially clobbered by the |
3004 | // call. This way they still appear live across the call. |
3005 | LivePhysRegs LiveRegs(getRegisterInfo()); |
3006 | LiveRegs.addLiveOuts(MBB); |
3007 | SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers; |
3008 | LiveRegs.stepForward(*MIB, Clobbers); |
3009 | for (const auto &C : Clobbers) { |
3010 | MIB.addReg(C.first, RegState::Implicit); |
3011 | MIB.addReg(C.first, RegState::Implicit | RegState::Define); |
3012 | } |
3013 | |
3014 | I->eraseFromParent(); |
3015 | } |
3016 | |
3017 | // Given an MBB and its TBB, find the FBB which was a fallthrough MBB (it may
3018 | // not be a fallthrough MBB now due to layout changes). Return nullptr if the |
3019 | // fallthrough MBB cannot be identified. |
3020 | static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB, |
3021 | MachineBasicBlock *TBB) { |
3022 | // Look for non-EHPad successors other than TBB. If we find exactly one, it |
3023 | // is the fallthrough MBB. If we find zero, then TBB is both the target MBB |
3024 | // and fallthrough MBB. If we find more than one, we cannot identify the |
3025 | // fallthrough MBB and should return nullptr. |
3026 | MachineBasicBlock *FallthroughBB = nullptr; |
3027 | for (MachineBasicBlock *Succ : MBB->successors()) { |
3028 | if (Succ->isEHPad() || (Succ == TBB && FallthroughBB)) |
3029 | continue; |
3030 | // Return a nullptr if we found more than one fallthrough successor. |
3031 | if (FallthroughBB && FallthroughBB != TBB) |
3032 | return nullptr; |
3033 | FallthroughBB = Succ; |
3034 | } |
3035 | return FallthroughBB; |
3036 | } |
3037 | |
3038 | bool X86InstrInfo::AnalyzeBranchImpl( |
3039 | MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, |
3040 | SmallVectorImpl<MachineOperand> &Cond, |
3041 | SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const { |
3042 | |
3043 | // Start from the bottom of the block and work up, examining the |
3044 | // terminator instructions. |
3045 | MachineBasicBlock::iterator I = MBB.end(); |
3046 | MachineBasicBlock::iterator UnCondBrIter = MBB.end(); |
3047 | while (I != MBB.begin()) { |
3048 | --I; |
3049 | if (I->isDebugInstr()) |
3050 | continue; |
3051 | |
3052 | // Working from the bottom, when we see a non-terminator instruction, we're |
3053 | // done. |
3054 | if (!isUnpredicatedTerminator(*I)) |
3055 | break; |
3056 | |
3057 | // A terminator that isn't a branch can't easily be handled by this |
3058 | // analysis. |
3059 | if (!I->isBranch()) |
3060 | return true; |
3061 | |
3062 | // Handle unconditional branches. |
3063 | if (I->getOpcode() == X86::JMP_1) { |
3064 | UnCondBrIter = I; |
3065 | |
3066 | if (!AllowModify) { |
3067 | TBB = I->getOperand(0).getMBB(); |
3068 | continue; |
3069 | } |
3070 | |
3071 | // If the block has any instructions after a JMP, delete them. |
3072 | MBB.erase(std::next(I), MBB.end()); |
3073 | |
3074 | Cond.clear(); |
3075 | FBB = nullptr; |
3076 | |
3077 | // Delete the JMP if it's equivalent to a fall-through. |
3078 | if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { |
3079 | TBB = nullptr; |
3080 | I->eraseFromParent(); |
3081 | I = MBB.end(); |
3082 | UnCondBrIter = MBB.end(); |
3083 | continue; |
3084 | } |
3085 | |
3086 | // TBB is used to indicate the unconditional destination. |
3087 | TBB = I->getOperand(0).getMBB(); |
3088 | continue; |
3089 | } |
3090 | |
3091 | // Handle conditional branches. |
3092 | X86::CondCode BranchCode = X86::getCondFromBranch(*I); |
3093 | if (BranchCode == X86::COND_INVALID) |
3094 | return true; // Can't handle indirect branch. |
3095 | |
3096 | // In practice we should never have an undef EFLAGS operand; if we do,
3097 | // abort here as we are not prepared to preserve the flag.
3098 | if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef()) |
3099 | return true; |
3100 | |
3101 | // Working from the bottom, handle the first conditional branch. |
3102 | if (Cond.empty()) { |
3103 | FBB = TBB; |
3104 | TBB = I->getOperand(0).getMBB(); |
3105 | Cond.push_back(MachineOperand::CreateImm(BranchCode)); |
3106 | CondBranches.push_back(&*I); |
3107 | continue; |
3108 | } |
3109 | |
3110 | // Handle subsequent conditional branches. Only handle the case where all |
3111 | // conditional branches branch to the same destination and their condition |
3112 | // opcodes fit one of the special multi-branch idioms. |
3113 | assert(Cond.size() == 1);
3114 | assert(TBB);
3115 | |
3116 | // If the conditions are the same, we can leave them alone. |
3117 | X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm(); |
3118 | auto NewTBB = I->getOperand(0).getMBB(); |
3119 | if (OldBranchCode == BranchCode && TBB == NewTBB) |
3120 | continue; |
3121 | |
3122 | // If they differ, see if they fit one of the known patterns. Theoretically, |
3123 | // we could handle more patterns here, but we shouldn't expect to see them |
3124 | // if instruction selection has done a reasonable job. |
3125 | if (TBB == NewTBB && |
3126 | ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) || |
3127 | (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) { |
3128 | BranchCode = X86::COND_NE_OR_P; |
3129 | } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) || |
3130 | (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) { |
3131 | if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB))) |
3132 | return true; |
3133 | |
3134 | // X86::COND_E_AND_NP usually has two different branch destinations. |
3135 | // |
3136 | // JP B1 |
3137 | // JE B2 |
3138 | // JMP B1 |
3139 | // B1: |
3140 | // B2: |
3141 | // |
3142 | // Here this condition branches to B2 only if NP && E. It has another |
3143 | // equivalent form: |
3144 | // |
3145 | // JNE B1 |
3146 | // JNP B2 |
3147 | // JMP B1 |
3148 | // B1: |
3149 | // B2: |
3150 | // |
3151 | // Similarly it branches to B2 only if E && NP. That is why this condition |
3152 | // is named with COND_E_AND_NP. |
3153 | BranchCode = X86::COND_E_AND_NP; |
3154 | } else |
3155 | return true; |
3156 | |
3157 | // Update the MachineOperand. |
3158 | Cond[0].setImm(BranchCode); |
3159 | CondBranches.push_back(&*I); |
3160 | } |
3161 | |
3162 | return false; |
3163 | } |
3164 | |
3165 | bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB, |
3166 | MachineBasicBlock *&TBB, |
3167 | MachineBasicBlock *&FBB, |
3168 | SmallVectorImpl<MachineOperand> &Cond, |
3169 | bool AllowModify) const { |
3170 | SmallVector<MachineInstr *, 4> CondBranches; |
3171 | return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify); |
3172 | } |
3173 | |
3174 | bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB, |
3175 | MachineBranchPredicate &MBP, |
3176 | bool AllowModify) const { |
3177 | using namespace std::placeholders; |
3178 | |
3179 | SmallVector<MachineOperand, 4> Cond; |
3180 | SmallVector<MachineInstr *, 4> CondBranches; |
3181 | if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches, |
3182 | AllowModify)) |
3183 | return true; |
3184 | |
3185 | if (Cond.size() != 1) |
3186 | return true; |
3187 | |
3188 | assert(MBP.TrueDest && "expected!");
3189 | |
3190 | if (!MBP.FalseDest) |
3191 | MBP.FalseDest = MBB.getNextNode(); |
3192 | |
3193 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
3194 | |
3195 | MachineInstr *ConditionDef = nullptr; |
3196 | bool SingleUseCondition = true; |
3197 | |
3198 | for (MachineInstr &MI : llvm::drop_begin(llvm::reverse(MBB))) { |
3199 | if (MI.modifiesRegister(X86::EFLAGS, TRI)) { |
3200 | ConditionDef = &MI; |
3201 | break; |
3202 | } |
3203 | |
3204 | if (MI.readsRegister(X86::EFLAGS, TRI)) |
3205 | SingleUseCondition = false; |
3206 | } |
3207 | |
3208 | if (!ConditionDef) |
3209 | return true; |
3210 | |
3211 | if (SingleUseCondition) { |
3212 | for (auto *Succ : MBB.successors()) |
3213 | if (Succ->isLiveIn(X86::EFLAGS)) |
3214 | SingleUseCondition = false; |
3215 | } |
3216 | |
3217 | MBP.ConditionDef = ConditionDef; |
3218 | MBP.SingleUseCondition = SingleUseCondition; |
3219 | |
3220 | // Currently we only recognize the simple pattern: |
3221 | // |
3222 | // test %reg, %reg |
3223 | // je %label |
3224 | // |
3225 | const unsigned TestOpcode = |
3226 | Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr; |
3227 | |
3228 | if (ConditionDef->getOpcode() == TestOpcode && |
3229 | ConditionDef->getNumOperands() == 3 && |
3230 | ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) && |
3231 | (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) { |
3232 | MBP.LHS = ConditionDef->getOperand(0); |
3233 | MBP.RHS = MachineOperand::CreateImm(0); |
3234 | MBP.Predicate = Cond[0].getImm() == X86::COND_NE |
3235 | ? MachineBranchPredicate::PRED_NE |
3236 | : MachineBranchPredicate::PRED_EQ; |
3237 | return false; |
3238 | } |
3239 | |
3240 | return true; |
3241 | } |
3242 | |
3243 | unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB, |
3244 | int *BytesRemoved) const { |
3245 | assert(!BytesRemoved && "code size not handled");
3246 | |
3247 | MachineBasicBlock::iterator I = MBB.end(); |
3248 | unsigned Count = 0; |
3249 | |
3250 | while (I != MBB.begin()) { |
3251 | --I; |
3252 | if (I->isDebugInstr()) |
3253 | continue; |
3254 | if (I->getOpcode() != X86::JMP_1 && |
3255 | X86::getCondFromBranch(*I) == X86::COND_INVALID) |
3256 | break; |
3257 | // Remove the branch. |
3258 | I->eraseFromParent(); |
3259 | I = MBB.end(); |
3260 | ++Count; |
3261 | } |
3262 | |
3263 | return Count; |
3264 | } |
3265 | |
3266 | unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB, |
3267 | MachineBasicBlock *TBB, |
3268 | MachineBasicBlock *FBB, |
3269 | ArrayRef<MachineOperand> Cond, |
3270 | const DebugLoc &DL, |
3271 | int *BytesAdded) const { |
3272 | // Shouldn't be a fall through. |
3273 | assert(TBB && "insertBranch must not be told to insert a fallthrough");
3274 | assert((Cond.size() == 1 || Cond.size() == 0) &&
3275 | "X86 branch conditions have one component!");
3276 | assert(!BytesAdded && "code size not handled");
3277 | |
3278 | if (Cond.empty()) { |
3279 | // Unconditional branch? |
3280 | assert(!FBB && "Unconditional branch with multiple successors!");
3281 | BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB); |
3282 | return 1; |
3283 | } |
3284 | |
3285 | // If FBB is null, it is implied to be a fall-through block. |
3286 | bool FallThru = FBB == nullptr; |
3287 | |
3288 | // Conditional branch. |
3289 | unsigned Count = 0; |
3290 | X86::CondCode CC = (X86::CondCode)Cond[0].getImm(); |
3291 | switch (CC) { |
3292 | case X86::COND_NE_OR_P: |
3293 | // Synthesize NE_OR_P with two branches. |
3294 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE); |
3295 | ++Count; |
3296 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P); |
3297 | ++Count; |
3298 | break; |
3299 | case X86::COND_E_AND_NP: |
3300 | // Use the next block of MBB as FBB if it is null. |
3301 | if (FBB == nullptr) { |
3302 | FBB = getFallThroughMBB(&MBB, TBB); |
3303 | assert(FBB && "MBB cannot be the last block in function when the false "
3304 | "body is a fall-through.");
3305 | } |
3306 | // Synthesize COND_E_AND_NP with two branches. |
3307 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE); |
3308 | ++Count; |
3309 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP); |
3310 | ++Count; |
3311 | break; |
3312 | default: { |
3313 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC); |
3314 | ++Count; |
3315 | } |
3316 | } |
3317 | if (!FallThru) { |
3318 | // Two-way Conditional branch. Insert the second branch. |
3319 | BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB); |
3320 | ++Count; |
3321 | } |
3322 | return Count; |
3323 | } |
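|
| // Worked example: with Cond == {COND_NE_OR_P} and a non-fall-through FBB,
| // the routine above emits three branches and returns 3:
| //   JCC_1 TBB, COND_NE
| //   JCC_1 TBB, COND_P
| //   JMP_1 FBB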
3324 | |
3325 | bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB, |
3326 | ArrayRef<MachineOperand> Cond, |
3327 | Register DstReg, Register TrueReg, |
3328 | Register FalseReg, int &CondCycles, |
3329 | int &TrueCycles, int &FalseCycles) const { |
3330 | // Not all subtargets have cmov instructions. |
3331 | if (!Subtarget.canUseCMOV()) |
3332 | return false; |
3333 | if (Cond.size() != 1) |
3334 | return false; |
3335 | // We cannot do the composite conditions, at least not in SSA form. |
3336 | if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND) |
3337 | return false; |
3338 | |
3339 | // Check register classes. |
3340 | const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
3341 | const TargetRegisterClass *RC = |
3342 | RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); |
3343 | if (!RC) |
3344 | return false; |
3345 | |
3346 | // We have cmov instructions for 16-, 32-, and 64-bit general-purpose registers.
3347 | if (X86::GR16RegClass.hasSubClassEq(RC) || |
3348 | X86::GR32RegClass.hasSubClassEq(RC) || |
3349 | X86::GR64RegClass.hasSubClassEq(RC)) { |
3350 | // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy |
3351 | // Bridge. Probably Ivy Bridge as well. |
3352 | CondCycles = 2; |
3353 | TrueCycles = 2; |
3354 | FalseCycles = 2; |
3355 | return true; |
3356 | } |
3357 | |
3358 | // Can't do vectors. |
3359 | return false; |
3360 | } |
3361 | |
3362 | void X86InstrInfo::insertSelect(MachineBasicBlock &MBB, |
3363 | MachineBasicBlock::iterator I, |
3364 | const DebugLoc &DL, Register DstReg, |
3365 | ArrayRef<MachineOperand> Cond, Register TrueReg, |
3366 | Register FalseReg) const { |
3367 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
3368 | const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); |
3369 | const TargetRegisterClass &RC = *MRI.getRegClass(DstReg); |
3370 | assert(Cond.size() == 1 && "Invalid Cond array");
3371 | unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8, |
3372 | false /*HasMemoryOperand*/); |
3373 | BuildMI(MBB, I, DL, get(Opc), DstReg) |
3374 | .addReg(FalseReg) |
3375 | .addReg(TrueReg) |
3376 | .addImm(Cond[0].getImm()); |
3377 | } |
3378 | |
3379 | /// Test if the given register is a physical h register. |
3380 | static bool isHReg(unsigned Reg) { |
3381 | return X86::GR8_ABCD_HRegClass.contains(Reg); |
3382 | } |
3383 | |
3384 | // Try and copy between VR128/VR64 and GR64 registers. |
3385 | static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg, |
3386 | const X86Subtarget &Subtarget) { |
3387 | bool HasAVX = Subtarget.hasAVX(); |
3388 | bool HasAVX512 = Subtarget.hasAVX512(); |
3389 | |
3390 | // SrcReg(MaskReg) -> DestReg(GR64) |
3391 | // SrcReg(MaskReg) -> DestReg(GR32) |
3392 | |
3393 | // All KMASK RegClasses hold the same k registers; any of them can be tested.
3394 | if (X86::VK16RegClass.contains(SrcReg)) { |
3395 | if (X86::GR64RegClass.contains(DestReg)) { |
3396 | assert(Subtarget.hasBWI());
3397 | return X86::KMOVQrk; |
3398 | } |
3399 | if (X86::GR32RegClass.contains(DestReg)) |
3400 | return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk; |
3401 | } |
3402 | |
3403 | // SrcReg(GR64) -> DestReg(MaskReg) |
3404 | // SrcReg(GR32) -> DestReg(MaskReg) |
3405 | |
3406 | // All KMASK RegClasses hold the same k registers; any of them can be tested.
3407 | if (X86::VK16RegClass.contains(DestReg)) { |
3408 | if (X86::GR64RegClass.contains(SrcReg)) { |
3409 | assert(Subtarget.hasBWI());
3410 | return X86::KMOVQkr; |
3411 | } |
3412 | if (X86::GR32RegClass.contains(SrcReg)) |
3413 | return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr; |
3414 | } |
3415 | |
3416 | |
3417 | // SrcReg(VR128) -> DestReg(GR64) |
3418 | // SrcReg(VR64) -> DestReg(GR64) |
3419 | // SrcReg(GR64) -> DestReg(VR128) |
3420 | // SrcReg(GR64) -> DestReg(VR64) |
3421 | |
3422 | if (X86::GR64RegClass.contains(DestReg)) { |
3423 | if (X86::VR128XRegClass.contains(SrcReg)) |
3424 | // Copy from a VR128 register to a GR64 register. |
3425 | return HasAVX512 ? X86::VMOVPQIto64Zrr : |
3426 | HasAVX ? X86::VMOVPQIto64rr : |
3427 | X86::MOVPQIto64rr; |
3428 | if (X86::VR64RegClass.contains(SrcReg)) |
3429 | // Copy from a VR64 register to a GR64 register. |
3430 | return X86::MMX_MOVD64from64rr; |
3431 | } else if (X86::GR64RegClass.contains(SrcReg)) { |
3432 | // Copy from a GR64 register to a VR128 register. |
3433 | if (X86::VR128XRegClass.contains(DestReg)) |
3434 | return HasAVX512 ? X86::VMOV64toPQIZrr : |
3435 | HasAVX ? X86::VMOV64toPQIrr : |
3436 | X86::MOV64toPQIrr; |
3437 | // Copy from a GR64 register to a VR64 register. |
3438 | if (X86::VR64RegClass.contains(DestReg)) |
3439 | return X86::MMX_MOVD64to64rr; |
3440 | } |
3441 | |
3442 | // SrcReg(VR128) -> DestReg(GR32) |
3443 | // SrcReg(GR32) -> DestReg(VR128) |
3444 | |
3445 | if (X86::GR32RegClass.contains(DestReg) && |
3446 | X86::VR128XRegClass.contains(SrcReg)) |
3447 | // Copy from a VR128 register to a GR32 register. |
3448 | return HasAVX512 ? X86::VMOVPDI2DIZrr : |
3449 | HasAVX ? X86::VMOVPDI2DIrr : |
3450 | X86::MOVPDI2DIrr; |
3451 | |
3452 | if (X86::VR128XRegClass.contains(DestReg) && |
3453 | X86::GR32RegClass.contains(SrcReg)) |
3454 | // Copy from a GR32 register to a VR128 register.
3455 | return HasAVX512 ? X86::VMOVDI2PDIZrr : |
3456 | HasAVX ? X86::VMOVDI2PDIrr : |
3457 | X86::MOVDI2PDIrr; |
3458 | return 0; |
3459 | } |
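|
| // Example resolutions under the rules above (required subtarget features in
| // parentheses, assumed for illustration):
| //   k0  -> rax : KMOVQrk (BWI)       xmm0 -> rax : VMOVPQIto64Zrr (AVX512)
| //   eax -> k1  : KMOVWkr (no BWI)    rax  -> mm0 : MMX_MOVD64to64rr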
3460 | |
3461 | void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, |
3462 | MachineBasicBlock::iterator MI, |
3463 | const DebugLoc &DL, MCRegister DestReg, |
3464 | MCRegister SrcReg, bool KillSrc) const { |
3465 | // First deal with the normal symmetric copies. |
3466 | bool HasAVX = Subtarget.hasAVX(); |
3467 | bool HasVLX = Subtarget.hasVLX(); |
3468 | unsigned Opc = 0; |
3469 | if (X86::GR64RegClass.contains(DestReg, SrcReg)) |
3470 | Opc = X86::MOV64rr; |
3471 | else if (X86::GR32RegClass.contains(DestReg, SrcReg)) |
3472 | Opc = X86::MOV32rr; |
3473 | else if (X86::GR16RegClass.contains(DestReg, SrcReg)) |
3474 | Opc = X86::MOV16rr; |
3475 | else if (X86::GR8RegClass.contains(DestReg, SrcReg)) { |
3476 | // Copying to or from a physical H register on x86-64 requires a NOREX |
3477 | // move. Otherwise use a normal move. |
3478 | if ((isHReg(DestReg) || isHReg(SrcReg)) && |
3479 | Subtarget.is64Bit()) { |
3480 | Opc = X86::MOV8rr_NOREX; |
3481 | // Both operands must be encodable without a REX prefix.
3482 | assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
3483 | "8-bit H register can not be copied outside GR8_NOREX");
3484 | } else |
3485 | Opc = X86::MOV8rr; |
3486 | } |
3487 | else if (X86::VR64RegClass.contains(DestReg, SrcReg)) |
3488 | Opc = X86::MMX_MOVQ64rr; |
3489 | else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) { |
3490 | if (HasVLX) |
3491 | Opc = X86::VMOVAPSZ128rr; |
3492 | else if (X86::VR128RegClass.contains(DestReg, SrcReg)) |
3493 | Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr; |
3494 | else { |
3495 | // If this is an extended register and we don't have VLX, we need to use a
3496 | // 512-bit move.
3497 | Opc = X86::VMOVAPSZrr; |
3498 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
3499 | DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm, |
3500 | &X86::VR512RegClass); |
3501 | SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, |
3502 | &X86::VR512RegClass); |
3503 | } |
3504 | } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) { |
3505 | if (HasVLX) |
3506 | Opc = X86::VMOVAPSZ256rr; |
3507 | else if (X86::VR256RegClass.contains(DestReg, SrcReg)) |
3508 | Opc = X86::VMOVAPSYrr; |
3509 | else { |
3510 | // If this is an extended register and we don't have VLX, we need to use a
3511 | // 512-bit move.
3512 | Opc = X86::VMOVAPSZrr; |
3513 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
3514 | DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm, |
3515 | &X86::VR512RegClass); |
3516 | SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, |
3517 | &X86::VR512RegClass); |
3518 | } |
3519 | } else if (X86::VR512RegClass.contains(DestReg, SrcReg)) |
3520 | Opc = X86::VMOVAPSZrr; |
3521 | // All KMASK RegClasses hold the same k registers; any of them can be tested.
3522 | else if (X86::VK16RegClass.contains(DestReg, SrcReg)) |
3523 | Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk; |
3524 | if (!Opc) |
3525 | Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget); |
3526 | |
3527 | if (Opc) { |
3528 | BuildMI(MBB, MI, DL, get(Opc), DestReg) |
3529 | .addReg(SrcReg, getKillRegState(KillSrc)); |
3530 | return; |
3531 | } |
3532 | |
3533 | if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) { |
3534 | // FIXME: We use a fatal error here because historically LLVM has tried |
3535 | // lower some of these physreg copies and we want to ensure we get |
3536 | // reasonable bug reports if someone encounters a case no other testing |
3537 | // found. This path should be removed after the LLVM 7 release. |
3538 | report_fatal_error("Unable to copy EFLAGS physical register!"); |
3539 | } |
3540 | |
3541 | LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
3542 | << RI.getName(DestReg) << '\n');
3543 | report_fatal_error("Cannot emit physreg copy instruction"); |
3544 | } |
3545 | |
3546 | Optional<DestSourcePair> |
3547 | X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { |
3548 | if (MI.isMoveReg()) |
3549 | return DestSourcePair{MI.getOperand(0), MI.getOperand(1)}; |
3550 | return None; |
3551 | } |
3552 | |
3553 | static unsigned getLoadStoreRegOpcode(Register Reg, |
3554 | const TargetRegisterClass *RC, |
3555 | bool IsStackAligned, |
3556 | const X86Subtarget &STI, bool load) { |
3557 | bool HasAVX = STI.hasAVX(); |
3558 | bool HasAVX512 = STI.hasAVX512(); |
3559 | bool HasVLX = STI.hasVLX(); |
3560 | |
3561 | switch (STI.getRegisterInfo()->getSpillSize(*RC)) { |
3562 | default: |
3563 | llvm_unreachable("Unknown spill size");
3564 | case 1: |
3565 | assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
3566 | if (STI.is64Bit()) |
3567 | // Loading or storing a physical H register on x86-64 requires a NOREX
3568 | // move. Otherwise use a normal move.
3569 | if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC)) |
3570 | return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX; |
3571 | return load ? X86::MOV8rm : X86::MOV8mr; |
3572 | case 2: |
3573 | if (X86::VK16RegClass.hasSubClassEq(RC)) |
3574 | return load ? X86::KMOVWkm : X86::KMOVWmk; |
3575 | assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
3576 | return load ? X86::MOV16rm : X86::MOV16mr; |
3577 | case 4: |
3578 | if (X86::GR32RegClass.hasSubClassEq(RC)) |
3579 | return load ? X86::MOV32rm : X86::MOV32mr; |
3580 | if (X86::FR32XRegClass.hasSubClassEq(RC)) |
3581 | return load ? |
3582 | (HasAVX512 ? X86::VMOVSSZrm_alt : |
3583 | HasAVX ? X86::VMOVSSrm_alt : |
3584 | X86::MOVSSrm_alt) : |
3585 | (HasAVX512 ? X86::VMOVSSZmr : |
3586 | HasAVX ? X86::VMOVSSmr : |
3587 | X86::MOVSSmr); |
3588 | if (X86::RFP32RegClass.hasSubClassEq(RC)) |
3589 | return load ? X86::LD_Fp32m : X86::ST_Fp32m; |
3590 | if (X86::VK32RegClass.hasSubClassEq(RC)) { |
3591 | assert(STI.hasBWI() && "KMOVD requires BWI");
3592 | return load ? X86::KMOVDkm : X86::KMOVDmk; |
3593 | } |
3594 | // All of these mask pair classes have the same spill size, so the same kind
3595 | // of kmov instructions can be used with all of them.
3596 | if (X86::VK1PAIRRegClass.hasSubClassEq(RC) || |
3597 | X86::VK2PAIRRegClass.hasSubClassEq(RC) || |
3598 | X86::VK4PAIRRegClass.hasSubClassEq(RC) || |
3599 | X86::VK8PAIRRegClass.hasSubClassEq(RC) || |
3600 | X86::VK16PAIRRegClass.hasSubClassEq(RC)) |
3601 | return load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE; |
3602 | if ((X86::FR16RegClass.hasSubClassEq(RC) || |
3603 | X86::FR16XRegClass.hasSubClassEq(RC)) && |
3604 | STI.hasFP16()) |
3605 | return load ? X86::VMOVSHZrm_alt : X86::VMOVSHZmr; |
3606 | llvm_unreachable("Unknown 4-byte regclass");
3607 | case 8: |
3608 | if (X86::GR64RegClass.hasSubClassEq(RC)) |
3609 | return load ? X86::MOV64rm : X86::MOV64mr; |
3610 | if (X86::FR64XRegClass.hasSubClassEq(RC)) |
3611 | return load ? |
3612 | (HasAVX512 ? X86::VMOVSDZrm_alt : |
3613 | HasAVX ? X86::VMOVSDrm_alt : |
3614 | X86::MOVSDrm_alt) : |
3615 | (HasAVX512 ? X86::VMOVSDZmr : |
3616 | HasAVX ? X86::VMOVSDmr : |
3617 | X86::MOVSDmr); |
3618 | if (X86::VR64RegClass.hasSubClassEq(RC)) |
3619 | return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr; |
3620 | if (X86::RFP64RegClass.hasSubClassEq(RC)) |
3621 | return load ? X86::LD_Fp64m : X86::ST_Fp64m; |
3622 | if (X86::VK64RegClass.hasSubClassEq(RC)) { |
3623 | assert(STI.hasBWI() && "KMOVQ requires BWI");
3624 | return load ? X86::KMOVQkm : X86::KMOVQmk; |
3625 | } |
3626 | llvm_unreachable("Unknown 8-byte regclass");
3627 | case 10: |
3628 | assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
3629 | return load ? X86::LD_Fp80m : X86::ST_FpP80m; |
3630 | case 16: { |
3631 | if (X86::VR128XRegClass.hasSubClassEq(RC)) { |
3632 | // If stack is realigned we can use aligned stores. |
3633 | if (IsStackAligned) |
3634 | return load ? |
3635 | (HasVLX ? X86::VMOVAPSZ128rm : |
3636 | HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX : |
3637 | HasAVX ? X86::VMOVAPSrm : |
3638 | X86::MOVAPSrm): |
3639 | (HasVLX ? X86::VMOVAPSZ128mr : |
3640 | HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX : |
3641 | HasAVX ? X86::VMOVAPSmr : |
3642 | X86::MOVAPSmr); |
3643 | else |
3644 | return load ? |
3645 | (HasVLX ? X86::VMOVUPSZ128rm : |
3646 | HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX : |
3647 | HasAVX ? X86::VMOVUPSrm : |
3648 | X86::MOVUPSrm): |
3649 | (HasVLX ? X86::VMOVUPSZ128mr : |
3650 | HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX : |
3651 | HasAVX ? X86::VMOVUPSmr : |
3652 | X86::MOVUPSmr); |
3653 | } |
3654 | llvm_unreachable("Unknown 16-byte regclass");
3655 | } |
3656 | case 32: |
3657 | assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
3658 | // If stack is realigned we can use aligned stores. |
3659 | if (IsStackAligned) |
3660 | return load ? |
3661 | (HasVLX ? X86::VMOVAPSZ256rm : |
3662 | HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX : |
3663 | X86::VMOVAPSYrm) : |
3664 | (HasVLX ? X86::VMOVAPSZ256mr : |
3665 | HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX : |
3666 | X86::VMOVAPSYmr); |
3667 | else |
3668 | return load ? |
3669 | (HasVLX ? X86::VMOVUPSZ256rm : |
3670 | HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX : |
3671 | X86::VMOVUPSYrm) : |
3672 | (HasVLX ? X86::VMOVUPSZ256mr : |
3673 | HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX : |
3674 | X86::VMOVUPSYmr); |
3675 | case 64: |
3676 | assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
3677 | assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
3678 | if (IsStackAligned) |
3679 | return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr; |
3680 | else |
3681 | return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr; |
3682 | } |
3683 | } |
3684 | |
3685 | Optional<ExtAddrMode> |
3686 | X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI, |
3687 | const TargetRegisterInfo *TRI) const { |
3688 | const MCInstrDesc &Desc = MemI.getDesc(); |
3689 | int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags); |
3690 | if (MemRefBegin < 0) |
3691 | return None; |
3692 | |
3693 | MemRefBegin += X86II::getOperandBias(Desc); |
3694 | |
3695 | auto &BaseOp = MemI.getOperand(MemRefBegin + X86::AddrBaseReg); |
3696 | if (!BaseOp.isReg()) // Can be an MO_FrameIndex |
3697 | return None; |
3698 | |
3699 | const MachineOperand &DispMO = MemI.getOperand(MemRefBegin + X86::AddrDisp); |
3700 | // Displacement can be symbolic |
3701 | if (!DispMO.isImm()) |
3702 | return None; |
3703 | |
3704 | ExtAddrMode AM; |
3705 | AM.BaseReg = BaseOp.getReg(); |
3706 | AM.ScaledReg = MemI.getOperand(MemRefBegin + X86::AddrIndexReg).getReg(); |
3707 | AM.Scale = MemI.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm(); |
3708 | AM.Displacement = DispMO.getImm(); |
3709 | return AM; |
3710 | } |
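The five memory operands decoded above appear in a fixed order (base register, scale amount, index register, displacement, segment) and describe x86's base + index*scale + disp addressing form. A minimal standalone sketch of how the decoded fields combine; the names are illustrative, not LLVM code:

#include <cstdint>

// Mirrors the ExtAddrMode fields filled in above.
struct AddrSketch {
  int64_t Base = 0;   // value held by AddrBaseReg
  int64_t Index = 0;  // value held by AddrIndexReg (0 when X86::NoRegister)
  int64_t Scale = 1;  // AddrScaleAmt: must be 1, 2, 4, or 8
  int64_t Disp = 0;   // the AddrDisp immediate
};

int64_t effectiveAddress(const AddrSketch &AM) {
  return AM.Base + AM.Index * AM.Scale + AM.Disp;
}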
3711 | |
3712 | bool X86InstrInfo::verifyInstruction(const MachineInstr &MI, |
3713 | StringRef &ErrInfo) const { |
3714 | Optional<ExtAddrMode> AMOrNone = getAddrModeFromMemoryOp(MI, nullptr); |
3715 | if (!AMOrNone) |
3716 | return true; |
3717 | |
3718 | ExtAddrMode AM = *AMOrNone; |
3719 | |
3720 | if (AM.ScaledReg != X86::NoRegister) { |
3721 | switch (AM.Scale) { |
3722 | case 1: |
3723 | case 2: |
3724 | case 4: |
3725 | case 8: |
3726 | break; |
3727 | default: |
3728 | ErrInfo = "Scale factor in address must be 1, 2, 4 or 8"; |
3729 | return false; |
3730 | } |
3731 | } |
3732 | if (!isInt<32>(AM.Displacement)) { |
3733 | ErrInfo = "Displacement in address must fit into 32-bit signed " |
3734 | "integer"; |
3735 | return false; |
3736 | } |
3737 | |
3738 | return true; |
3739 | } |
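A self-contained model of the two encodability checks above, assuming a hypothetical stand-in struct for ExtAddrMode:

#include <cstdint>
#include <limits>

struct AddrCheck { int64_t Scale; int64_t Disp; bool HasIndexReg; };

// The scale must fit the SIB byte's 2-bit scale field (1/2/4/8) and the
// displacement must fit a 32-bit signed immediate.
bool isEncodableX86Address(const AddrCheck &AM) {
  if (AM.HasIndexReg && AM.Scale != 1 && AM.Scale != 2 && AM.Scale != 4 &&
      AM.Scale != 8)
    return false;
  return AM.Disp >= std::numeric_limits<int32_t>::min() &&
         AM.Disp <= std::numeric_limits<int32_t>::max();
}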
3740 | |
3741 | bool X86InstrInfo::getConstValDefinedInReg(const MachineInstr &MI, |
3742 | const Register Reg, |
3743 | int64_t &ImmVal) const { |
3744 | if (MI.getOpcode() != X86::MOV32ri && MI.getOpcode() != X86::MOV64ri) |
3745 | return false; |
3746 | // Mov Src can be a global address. |
3747 | if (!MI.getOperand(1).isImm() || MI.getOperand(0).getReg() != Reg) |
3748 | return false; |
3749 | ImmVal = MI.getOperand(1).getImm(); |
3750 | return true; |
3751 | } |
3752 | |
3753 | bool X86InstrInfo::preservesZeroValueInReg( |
3754 | const MachineInstr *MI, const Register NullValueReg, |
3755 | const TargetRegisterInfo *TRI) const { |
3756 | if (!MI->modifiesRegister(NullValueReg, TRI)) |
3757 | return true; |
3758 | switch (MI->getOpcode()) { |
3759 | // Shifting a null value right or left leaves it null, i.e.
3760 | // rax = shl rax, X.
3761 | case X86::SHR64ri: |
3762 | case X86::SHR32ri: |
3763 | case X86::SHL64ri: |
3764 | case X86::SHL32ri: |
3765 | assert(MI->getOperand(0).isDef() && MI->getOperand(1).isUse() &&
3766 |        "expected for shift opcode!");
3767 | return MI->getOperand(0).getReg() == NullValueReg && |
3768 | MI->getOperand(1).getReg() == NullValueReg; |
3769 | // Zero extend of a sub-reg of NullValueReg into itself does not change the |
3770 | // null value. |
3771 | case X86::MOV32rr: |
3772 | return llvm::all_of(MI->operands(), [&](const MachineOperand &MO) { |
3773 | return TRI->isSubRegisterEq(NullValueReg, MO.getReg()); |
3774 | }); |
3775 | default: |
3776 | return false; |
3777 | } |
3778 | llvm_unreachable("Should be handled above!");
3779 | } |
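The shift cases rely on a simple invariant: shifting zero by any amount yields zero. A standalone snippet that checks it exhaustively:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t Null = 0;
  for (unsigned Sh = 0; Sh < 64; ++Sh)
    assert((Null << Sh) == 0 && (Null >> Sh) == 0); // shl/shr of null is null
  return 0;
}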
3780 | |
3781 | bool X86InstrInfo::getMemOperandsWithOffsetWidth( |
3782 | const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps, |
3783 | int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, |
3784 | const TargetRegisterInfo *TRI) const { |
3785 | const MCInstrDesc &Desc = MemOp.getDesc(); |
3786 | int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags); |
3787 | if (MemRefBegin < 0) |
3788 | return false; |
3789 | |
3790 | MemRefBegin += X86II::getOperandBias(Desc); |
3791 | |
3792 | const MachineOperand *BaseOp = |
3793 | &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg); |
3794 | if (!BaseOp->isReg()) // Can be an MO_FrameIndex |
3795 | return false; |
3796 | |
3797 | if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1) |
3798 | return false; |
3799 | |
3800 | if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() != |
3801 | X86::NoRegister) |
3802 | return false; |
3803 | |
3804 | const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp); |
3805 | |
3806 | // Displacement can be symbolic |
3807 | if (!DispMO.isImm()) |
3808 | return false; |
3809 | |
3810 | Offset = DispMO.getImm(); |
3811 | |
3812 | if (!BaseOp->isReg()) |
3813 | return false; |
3814 | |
3815 | OffsetIsScalable = false; |
3816 | // FIXME: Relying on memoperands() may not be the right thing to do here.
3817 | // Check with the X86 maintainers and fix it accordingly. For now it is OK,
3818 | // since `Width` is currently unused by the X86 back-end.
3819 | Width = |
3820 | !MemOp.memoperands_empty() ? MemOp.memoperands().front()->getSize() : 0; |
3821 | BaseOps.push_back(BaseOp); |
3822 | return true; |
3823 | } |
3824 | |
3825 | static unsigned getStoreRegOpcode(Register SrcReg, |
3826 | const TargetRegisterClass *RC, |
3827 | bool IsStackAligned, |
3828 | const X86Subtarget &STI) { |
3829 | return getLoadStoreRegOpcode(SrcReg, RC, IsStackAligned, STI, false); |
3830 | } |
3831 | |
3832 | static unsigned getLoadRegOpcode(Register DestReg, |
3833 | const TargetRegisterClass *RC, |
3834 | bool IsStackAligned, const X86Subtarget &STI) { |
3835 | return getLoadStoreRegOpcode(DestReg, RC, IsStackAligned, STI, true); |
3836 | } |
3837 | |
3838 | void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, |
3839 | MachineBasicBlock::iterator MI, |
3840 | Register SrcReg, bool isKill, int FrameIdx, |
3841 | const TargetRegisterClass *RC, |
3842 | const TargetRegisterInfo *TRI) const { |
3843 | const MachineFunction &MF = *MBB.getParent(); |
3844 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
3845 | MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); |
3846 | assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
3847 |        "Stack slot too small for store");
3848 | if (RC->getID() == X86::TILERegClassID) { |
3849 | unsigned Opc = X86::TILESTORED; |
3850 | // tilestored %tmm, (%sp, %idx) |
3851 | Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); |
3852 | BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64); |
3853 | MachineInstr *NewMI = |
3854 | addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx) |
3855 | .addReg(SrcReg, getKillRegState(isKill)); |
3856 | MachineOperand &MO = NewMI->getOperand(2); |
3857 | MO.setReg(VirtReg); |
3858 | MO.setIsKill(true); |
3859 | } else if ((RC->getID() == X86::FR16RegClassID || |
3860 | RC->getID() == X86::FR16XRegClassID) && |
3861 | !Subtarget.hasFP16()) { |
3862 | unsigned Opc = Subtarget.hasAVX512() ? X86::VMOVSSZmr |
3863 | : Subtarget.hasAVX() ? X86::VMOVSSmr |
3864 | : X86::MOVSSmr; |
3865 | addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx) |
3866 | .addReg(SrcReg, getKillRegState(isKill)); |
3867 | } else { |
3868 | unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16); |
3869 | bool isAligned = |
3870 | (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) || |
3871 | (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx)); |
3872 | unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); |
3873 | addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx) |
3874 | .addReg(SrcReg, getKillRegState(isKill)); |
3875 | } |
3876 | } |
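The generic path's alignment decision, pulled out as a standalone predicate for clarity (parameter names are illustrative, not LLVM API):

#include <algorithm>
#include <cstdint>

// A spill slot counts as aligned if the stack alignment already covers the
// (at least 16-byte) spill size, or if the frame can be realigned and the
// slot is not at a fixed offset.
bool isAlignedSpillSlot(uint32_t SpillSize, uint32_t StackAlign,
                        bool CanRealignStack, bool IsFixedObject) {
  const uint32_t Needed = std::max<uint32_t>(SpillSize, 16);
  return StackAlign >= Needed || (CanRealignStack && !IsFixedObject);
}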
3877 | |
3878 | void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, |
3879 | MachineBasicBlock::iterator MI, |
3880 | Register DestReg, int FrameIdx, |
3881 | const TargetRegisterClass *RC, |
3882 | const TargetRegisterInfo *TRI) const { |
3883 | const MachineFunction &MF = *MBB.getParent(); |
3884 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
3885 | assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
3886 |        "Load size exceeds stack slot");
3887 | if (RC->getID() == X86::TILERegClassID) { |
3888 | unsigned Opc = X86::TILELOADD; |
3889 | // tileloadd (%sp, %idx), %tmm |
3890 | MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); |
3891 | Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); |
3892 | MachineInstr *NewMI = |
Value stored to 'NewMI' during its initialization is never read | |
3893 | BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64); |
3894 | NewMI = addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), |
3895 | FrameIdx); |
3896 | MachineOperand &MO = NewMI->getOperand(3); |
3897 | MO.setReg(VirtReg); |
3898 | MO.setIsKill(true); |
3899 | } else if ((RC->getID() == X86::FR16RegClassID || |
3900 | RC->getID() == X86::FR16XRegClassID) && |
3901 | !Subtarget.hasFP16()) { |
3902 | unsigned Opc = Subtarget.hasAVX512() ? X86::VMOVSSZrm |
3903 | : Subtarget.hasAVX() ? X86::VMOVSSrm |
3904 | : X86::MOVSSrm; |
3905 | addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), |
3906 | FrameIdx); |
3907 | } else { |
3908 | unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16); |
3909 | bool isAligned = |
3910 | (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) || |
3911 | (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx)); |
3912 | unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); |
3913 | addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), |
3914 | FrameIdx); |
3915 | } |
3916 | } |
3917 | |
3918 | bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, |
3919 | Register &SrcReg2, int64_t &CmpMask, |
3920 | int64_t &CmpValue) const { |
3921 | switch (MI.getOpcode()) { |
3922 | default: break; |
3923 | case X86::CMP64ri32: |
3924 | case X86::CMP64ri8: |
3925 | case X86::CMP32ri: |
3926 | case X86::CMP32ri8: |
3927 | case X86::CMP16ri: |
3928 | case X86::CMP16ri8: |
3929 | case X86::CMP8ri: |
3930 | SrcReg = MI.getOperand(0).getReg(); |
3931 | SrcReg2 = 0; |
3932 | if (MI.getOperand(1).isImm()) { |
3933 | CmpMask = ~0; |
3934 | CmpValue = MI.getOperand(1).getImm(); |
3935 | } else { |
3936 | CmpMask = CmpValue = 0; |
3937 | } |
3938 | return true; |
3939 | // A SUB can be used to perform a comparison.
3940 | case X86::SUB64rm: |
3941 | case X86::SUB32rm: |
3942 | case X86::SUB16rm: |
3943 | case X86::SUB8rm: |
3944 | SrcReg = MI.getOperand(1).getReg(); |
3945 | SrcReg2 = 0; |
3946 | CmpMask = 0; |
3947 | CmpValue = 0; |
3948 | return true; |
3949 | case X86::SUB64rr: |
3950 | case X86::SUB32rr: |
3951 | case X86::SUB16rr: |
3952 | case X86::SUB8rr: |
3953 | SrcReg = MI.getOperand(1).getReg(); |
3954 | SrcReg2 = MI.getOperand(2).getReg(); |
3955 | CmpMask = 0; |
3956 | CmpValue = 0; |
3957 | return true; |
3958 | case X86::SUB64ri32: |
3959 | case X86::SUB64ri8: |
3960 | case X86::SUB32ri: |
3961 | case X86::SUB32ri8: |
3962 | case X86::SUB16ri: |
3963 | case X86::SUB16ri8: |
3964 | case X86::SUB8ri: |
3965 | SrcReg = MI.getOperand(1).getReg(); |
3966 | SrcReg2 = 0; |
3967 | if (MI.getOperand(2).isImm()) { |
3968 | CmpMask = ~0; |
3969 | CmpValue = MI.getOperand(2).getImm(); |
3970 | } else { |
3971 | CmpMask = CmpValue = 0; |
3972 | } |
3973 | return true; |
3974 | case X86::CMP64rr: |
3975 | case X86::CMP32rr: |
3976 | case X86::CMP16rr: |
3977 | case X86::CMP8rr: |
3978 | SrcReg = MI.getOperand(0).getReg(); |
3979 | SrcReg2 = MI.getOperand(1).getReg(); |
3980 | CmpMask = 0; |
3981 | CmpValue = 0; |
3982 | return true; |
3983 | case X86::TEST8rr: |
3984 | case X86::TEST16rr: |
3985 | case X86::TEST32rr: |
3986 | case X86::TEST64rr: |
3987 | SrcReg = MI.getOperand(0).getReg(); |
3988 | if (MI.getOperand(1).getReg() != SrcReg) |
3989 | return false; |
3990 | // Compare against zero. |
3991 | SrcReg2 = 0; |
3992 | CmpMask = ~0; |
3993 | CmpValue = 0; |
3994 | return true; |
3995 | } |
3996 | return false; |
3997 | } |
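The SUB cases are sound because SUB and CMP set EFLAGS identically; only the write of the difference differs. A scalar model of the equivalence:

#include <cassert>
#include <cstdint>

int main() {
  int32_t X = 7, Y = 7;
  int32_t Diff = X - Y;    // the result SUB additionally stores
  bool ZF = (Diff == 0);   // the flag a later consumer would test
  assert(ZF == (X == Y));  // the same answer CMP would produce
  return 0;
}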
3998 | |
3999 | bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI, |
4000 | Register SrcReg, Register SrcReg2, |
4001 | int64_t ImmMask, int64_t ImmValue, |
4002 | const MachineInstr &OI, bool *IsSwapped, |
4003 | int64_t *ImmDelta) const { |
4004 | switch (OI.getOpcode()) { |
4005 | case X86::CMP64rr: |
4006 | case X86::CMP32rr: |
4007 | case X86::CMP16rr: |
4008 | case X86::CMP8rr: |
4009 | case X86::SUB64rr: |
4010 | case X86::SUB32rr: |
4011 | case X86::SUB16rr: |
4012 | case X86::SUB8rr: { |
4013 | Register OISrcReg; |
4014 | Register OISrcReg2; |
4015 | int64_t OIMask; |
4016 | int64_t OIValue; |
4017 | if (!analyzeCompare(OI, OISrcReg, OISrcReg2, OIMask, OIValue) || |
4018 | OIMask != ImmMask || OIValue != ImmValue) |
4019 | return false; |
4020 | if (SrcReg == OISrcReg && SrcReg2 == OISrcReg2) { |
4021 | *IsSwapped = false; |
4022 | return true; |
4023 | } |
4024 | if (SrcReg == OISrcReg2 && SrcReg2 == OISrcReg) { |
4025 | *IsSwapped = true; |
4026 | return true; |
4027 | } |
4028 | return false; |
4029 | } |
4030 | case X86::CMP64ri32: |
4031 | case X86::CMP64ri8: |
4032 | case X86::CMP32ri: |
4033 | case X86::CMP32ri8: |
4034 | case X86::CMP16ri: |
4035 | case X86::CMP16ri8: |
4036 | case X86::CMP8ri: |
4037 | case X86::SUB64ri32: |
4038 | case X86::SUB64ri8: |
4039 | case X86::SUB32ri: |
4040 | case X86::SUB32ri8: |
4041 | case X86::SUB16ri: |
4042 | case X86::SUB16ri8: |
4043 | case X86::SUB8ri: |
4044 | case X86::TEST64rr: |
4045 | case X86::TEST32rr: |
4046 | case X86::TEST16rr: |
4047 | case X86::TEST8rr: { |
4048 | if (ImmMask != 0) { |
4049 | Register OISrcReg; |
4050 | Register OISrcReg2; |
4051 | int64_t OIMask; |
4052 | int64_t OIValue; |
4053 | if (analyzeCompare(OI, OISrcReg, OISrcReg2, OIMask, OIValue) && |
4054 | SrcReg == OISrcReg && ImmMask == OIMask) { |
4055 | if (OIValue == ImmValue) { |
4056 | *ImmDelta = 0; |
4057 | return true; |
4058 | } else if (static_cast<uint64_t>(ImmValue) == |
4059 | static_cast<uint64_t>(OIValue) - 1) { |
4060 | *ImmDelta = -1; |
4061 | return true; |
4062 | } else if (static_cast<uint64_t>(ImmValue) == |
4063 | static_cast<uint64_t>(OIValue) + 1) { |
4064 | *ImmDelta = 1; |
4065 | return true; |
4066 | } else { |
4067 | return false; |
4068 | } |
4069 | } |
4070 | } |
4071 | return FlagI.isIdenticalTo(OI); |
4072 | } |
4073 | default: |
4074 | return false; |
4075 | } |
4076 | } |
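The ImmDelta cases treat comparisons against C - 1, C, and C + 1 as near-duplicates; optimizeCompareInstr later compensates by adjusting the condition code (for example, <u against C + 1 becomes <=u against C, provided the constant did not wrap). A standalone check of that identity:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C = 100; // arbitrary, as long as C + 1 does not wrap
  for (uint32_t X = 0; X < 300; ++X)
    assert((X < C + 1) == (X <= C)); // COND_B vs. COND_BE, ImmDelta == 1
  return 0;
}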
4077 | |
4078 | /// Check whether the definition can be converted |
4079 | /// to remove a comparison against zero. |
4080 | inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag, |
4081 | bool &ClearsOverflowFlag) { |
4082 | NoSignFlag = false; |
4083 | ClearsOverflowFlag = false; |
4084 | |
4085 | // "ELF Handling for Thread-Local Storage" specifies that x86-64 GOTTPOFF, and |
4086 | // i386 GOTNTPOFF/INDNTPOFF relocations can convert an ADD to a LEA during |
4087 | // Initial Exec to Local Exec relaxation. In these cases, we must not depend |
4088 | // on the EFLAGS modification of ADD actually happening in the final binary. |
4089 | if (MI.getOpcode() == X86::ADD64rm || MI.getOpcode() == X86::ADD32rm) { |
4090 | unsigned Flags = MI.getOperand(5).getTargetFlags(); |
4091 | if (Flags == X86II::MO_GOTTPOFF || Flags == X86II::MO_INDNTPOFF || |
4092 | Flags == X86II::MO_GOTNTPOFF) |
4093 | return false; |
4094 | } |
4095 | |
4096 | switch (MI.getOpcode()) { |
4097 | default: return false; |
4098 | |
4099 | // The shift instructions only modify ZF if their shift count is non-zero. |
4100 | // N.B.: The processor truncates the shift count depending on the encoding. |
4101 | case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri: |
4102 | case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri: |
4103 | return getTruncatedShiftCount(MI, 2) != 0; |
4104 | |
4105 | // Some left shift instructions can be turned into LEA instructions but only |
4106 | // if their flags aren't used. Avoid transforming such instructions. |
4107 | case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{ |
4108 | unsigned ShAmt = getTruncatedShiftCount(MI, 2); |
4109 | if (isTruncatedShiftCountForLEA(ShAmt)) return false; |
4110 | return ShAmt != 0; |
4111 | } |
4112 | |
4113 | case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8: |
4114 | case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8: |
4115 | return getTruncatedShiftCount(MI, 3) != 0; |
4116 | |
4117 | case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri: |
4118 | case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8: |
4119 | case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr: |
4120 | case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm: |
4121 | case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm: |
4122 | case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r: |
4123 | case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri: |
4124 | case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8: |
4125 | case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr: |
4126 | case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm: |
4127 | case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm: |
4128 | case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r: |
4129 | case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri: |
4130 | case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8: |
4131 | case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr: |
4132 | case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm: |
4133 | case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm: |
4134 | case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri: |
4135 | case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8: |
4136 | case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr: |
4137 | case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm: |
4138 | case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm: |
4139 | case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r: |
4140 | case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1: |
4141 | case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1: |
4142 | case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1: |
4143 | case X86::LZCNT16rr: case X86::LZCNT16rm: |
4144 | case X86::LZCNT32rr: case X86::LZCNT32rm: |
4145 | case X86::LZCNT64rr: case X86::LZCNT64rm: |
4146 | case X86::POPCNT16rr:case X86::POPCNT16rm: |
4147 | case X86::POPCNT32rr:case X86::POPCNT32rm: |
4148 | case X86::POPCNT64rr:case X86::POPCNT64rm: |
4149 | case X86::TZCNT16rr: case X86::TZCNT16rm: |
4150 | case X86::TZCNT32rr: case X86::TZCNT32rm: |
4151 | case X86::TZCNT64rr: case X86::TZCNT64rm: |
4152 | return true; |
4153 | case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri: |
4154 | case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8: |
4155 | case X86::AND8ri: case X86::AND64rr: case X86::AND32rr: |
4156 | case X86::AND16rr: case X86::AND8rr: case X86::AND64rm: |
4157 | case X86::AND32rm: case X86::AND16rm: case X86::AND8rm: |
4158 | case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri: |
4159 | case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8: |
4160 | case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr: |
4161 | case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm: |
4162 | case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm: |
4163 | case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri: |
4164 | case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8: |
4165 | case X86::OR8ri: case X86::OR64rr: case X86::OR32rr: |
4166 | case X86::OR16rr: case X86::OR8rr: case X86::OR64rm: |
4167 | case X86::OR32rm: case X86::OR16rm: case X86::OR8rm: |
4168 | case X86::ANDN32rr: case X86::ANDN32rm: |
4169 | case X86::ANDN64rr: case X86::ANDN64rm: |
4170 | case X86::BLSI32rr: case X86::BLSI32rm: |
4171 | case X86::BLSI64rr: case X86::BLSI64rm: |
4172 | case X86::BLSMSK32rr: case X86::BLSMSK32rm: |
4173 | case X86::BLSMSK64rr: case X86::BLSMSK64rm: |
4174 | case X86::BLSR32rr: case X86::BLSR32rm: |
4175 | case X86::BLSR64rr: case X86::BLSR64rm: |
4176 | case X86::BLCFILL32rr: case X86::BLCFILL32rm: |
4177 | case X86::BLCFILL64rr: case X86::BLCFILL64rm: |
4178 | case X86::BLCI32rr: case X86::BLCI32rm: |
4179 | case X86::BLCI64rr: case X86::BLCI64rm: |
4180 | case X86::BLCIC32rr: case X86::BLCIC32rm: |
4181 | case X86::BLCIC64rr: case X86::BLCIC64rm: |
4182 | case X86::BLCMSK32rr: case X86::BLCMSK32rm: |
4183 | case X86::BLCMSK64rr: case X86::BLCMSK64rm: |
4184 | case X86::BLCS32rr: case X86::BLCS32rm: |
4185 | case X86::BLCS64rr: case X86::BLCS64rm: |
4186 | case X86::BLSFILL32rr: case X86::BLSFILL32rm: |
4187 | case X86::BLSFILL64rr: case X86::BLSFILL64rm: |
4188 | case X86::BLSIC32rr: case X86::BLSIC32rm: |
4189 | case X86::BLSIC64rr: case X86::BLSIC64rm: |
4190 | case X86::BZHI32rr: case X86::BZHI32rm: |
4191 | case X86::BZHI64rr: case X86::BZHI64rm: |
4192 | case X86::T1MSKC32rr: case X86::T1MSKC32rm: |
4193 | case X86::T1MSKC64rr: case X86::T1MSKC64rm: |
4194 | case X86::TZMSK32rr: case X86::TZMSK32rm: |
4195 | case X86::TZMSK64rr: case X86::TZMSK64rm: |
4196 | // These instructions clear the overflow flag just like TEST. |
4197 | // FIXME: These are not the only instructions in this switch that clear the |
4198 | // overflow flag. |
4199 | ClearsOverflowFlag = true; |
4200 | return true; |
4201 | case X86::BEXTR32rr: case X86::BEXTR64rr: |
4202 | case X86::BEXTR32rm: case X86::BEXTR64rm: |
4203 | case X86::BEXTRI32ri: case X86::BEXTRI32mi: |
4204 | case X86::BEXTRI64ri: case X86::BEXTRI64mi: |
4205 | // BEXTR doesn't update the sign flag so we can't use it. It does clear |
4206 | // the overflow flag, but that's not useful without the sign flag. |
4207 | NoSignFlag = true; |
4208 | return true; |
4209 | } |
4210 | } |
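The note about truncation above matters because x86 masks 8/16/32-bit shift counts with 31 (and 64-bit counts with 63), so an encoded count of 32 shifts a 32-bit register by zero bits and leaves EFLAGS untouched. A quick illustration of the masking rule:

#include <cassert>

int main() {
  assert((32u & 31u) == 0); // why getTruncatedShiftCount can return 0 here
  assert((65u & 63u) == 1); // 64-bit shifts use a 6-bit mask instead
  return 0;
}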
4211 | |
4212 | /// Check whether the use can be converted to remove a comparison against zero. |
4213 | static X86::CondCode isUseDefConvertible(const MachineInstr &MI) { |
4214 | switch (MI.getOpcode()) { |
4215 | default: return X86::COND_INVALID; |
4216 | case X86::NEG8r: |
4217 | case X86::NEG16r: |
4218 | case X86::NEG32r: |
4219 | case X86::NEG64r: |
4220 | return X86::COND_AE; |
4221 | case X86::LZCNT16rr: |
4222 | case X86::LZCNT32rr: |
4223 | case X86::LZCNT64rr: |
4224 | return X86::COND_B; |
4225 | case X86::POPCNT16rr: |
4226 | case X86::POPCNT32rr: |
4227 | case X86::POPCNT64rr: |
4228 | return X86::COND_E; |
4229 | case X86::TZCNT16rr: |
4230 | case X86::TZCNT32rr: |
4231 | case X86::TZCNT64rr: |
4232 | return X86::COND_B; |
4233 | case X86::BSF16rr: |
4234 | case X86::BSF32rr: |
4235 | case X86::BSF64rr: |
4236 | case X86::BSR16rr: |
4237 | case X86::BSR32rr: |
4238 | case X86::BSR64rr: |
4239 | return X86::COND_E; |
4240 | case X86::BLSI32rr: |
4241 | case X86::BLSI64rr: |
4242 | return X86::COND_AE; |
4243 | case X86::BLSR32rr: |
4244 | case X86::BLSR64rr: |
4245 | case X86::BLSMSK32rr: |
4246 | case X86::BLSMSK64rr: |
4247 | return X86::COND_B; |
4248 | // TODO: TBM instructions. |
4249 | } |
4250 | } |
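As an example of these mappings, POPCNT pairs with COND_E because it sets ZF exactly when its input is zero, which makes a following test-against-zero redundant. A portable model:

#include <bitset>
#include <cassert>
#include <initializer_list>

int main() {
  for (unsigned X : {0u, 1u, 0x80000000u}) {
    bool PopcntZF = std::bitset<32>(X).count() == 0; // ZF after POPCNT32rr
    bool TestZF = (X == 0);                          // ZF after TEST32rr x, x
    assert(PopcntZF == TestZF);
  }
  return 0;
}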
4251 | |
4252 | /// Check if there exists an earlier instruction that |
4253 | /// operates on the same source operands and sets flags in the same way as |
4254 | /// Compare; remove Compare if possible. |
4255 | bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, |
4256 | Register SrcReg2, int64_t CmpMask, |
4257 | int64_t CmpValue, |
4258 | const MachineRegisterInfo *MRI) const { |
4259 | // Check whether we can replace SUB with CMP. |
4260 | switch (CmpInstr.getOpcode()) { |
4261 | default: break; |
4262 | case X86::SUB64ri32: |
4263 | case X86::SUB64ri8: |
4264 | case X86::SUB32ri: |
4265 | case X86::SUB32ri8: |
4266 | case X86::SUB16ri: |
4267 | case X86::SUB16ri8: |
4268 | case X86::SUB8ri: |
4269 | case X86::SUB64rm: |
4270 | case X86::SUB32rm: |
4271 | case X86::SUB16rm: |
4272 | case X86::SUB8rm: |
4273 | case X86::SUB64rr: |
4274 | case X86::SUB32rr: |
4275 | case X86::SUB16rr: |
4276 | case X86::SUB8rr: { |
4277 | if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg())) |
4278 | return false; |
4279 | // The destination register is unused, so we can replace the SUB with a CMP.
4280 | unsigned NewOpcode = 0; |
4281 | switch (CmpInstr.getOpcode()) { |
4282 | default: llvm_unreachable("Unreachable!");
4283 | case X86::SUB64rm: NewOpcode = X86::CMP64rm; break; |
4284 | case X86::SUB32rm: NewOpcode = X86::CMP32rm; break; |
4285 | case X86::SUB16rm: NewOpcode = X86::CMP16rm; break; |
4286 | case X86::SUB8rm: NewOpcode = X86::CMP8rm; break; |
4287 | case X86::SUB64rr: NewOpcode = X86::CMP64rr; break; |
4288 | case X86::SUB32rr: NewOpcode = X86::CMP32rr; break; |
4289 | case X86::SUB16rr: NewOpcode = X86::CMP16rr; break; |
4290 | case X86::SUB8rr: NewOpcode = X86::CMP8rr; break; |
4291 | case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break; |
4292 | case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break; |
4293 | case X86::SUB32ri: NewOpcode = X86::CMP32ri; break; |
4294 | case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break; |
4295 | case X86::SUB16ri: NewOpcode = X86::CMP16ri; break; |
4296 | case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break; |
4297 | case X86::SUB8ri: NewOpcode = X86::CMP8ri; break; |
4298 | } |
4299 | CmpInstr.setDesc(get(NewOpcode)); |
4300 | CmpInstr.removeOperand(0); |
4301 | // Mutating this instruction invalidates any debug data associated with it. |
4302 | CmpInstr.dropDebugNumber(); |
4303 | // Fall through to optimize Cmp if Cmp is CMPrr or CMPri. |
4304 | if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm || |
4305 | NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm) |
4306 | return false; |
4307 | } |
4308 | } |
4309 | |
4310 | // The following code tries to remove the comparison by re-using EFLAGS |
4311 | // from earlier instructions. |
4312 | |
4313 | bool IsCmpZero = (CmpMask != 0 && CmpValue == 0); |
4314 | |
4315 | // Transformation currently requires SSA values. |
4316 | if (SrcReg2.isPhysical()) |
4317 | return false; |
4318 | MachineInstr *SrcRegDef = MRI->getVRegDef(SrcReg); |
4319 | assert(SrcRegDef && "Must have a definition (SSA)");
4320 | |
4321 | MachineInstr *MI = nullptr; |
4322 | MachineInstr *Sub = nullptr; |
4323 | MachineInstr *Movr0Inst = nullptr; |
4324 | bool NoSignFlag = false; |
4325 | bool ClearsOverflowFlag = false; |
4326 | bool ShouldUpdateCC = false; |
4327 | bool IsSwapped = false; |
4328 | X86::CondCode NewCC = X86::COND_INVALID; |
4329 | int64_t ImmDelta = 0; |
4330 | |
4331 | // Search backward from CmpInstr for the next instruction defining EFLAGS. |
4332 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
4333 | MachineBasicBlock &CmpMBB = *CmpInstr.getParent(); |
4334 | MachineBasicBlock::reverse_iterator From = |
4335 | std::next(MachineBasicBlock::reverse_iterator(CmpInstr)); |
4336 | for (MachineBasicBlock *MBB = &CmpMBB;;) { |
4337 | for (MachineInstr &Inst : make_range(From, MBB->rend())) { |
4338 | // Try to use EFLAGS from the instruction defining %SrcReg. Example: |
4339 | // %eax = addl ... |
4340 | // ... // EFLAGS not changed |
4341 | // testl %eax, %eax // <-- can be removed |
4342 | if (&Inst == SrcRegDef) { |
4343 | if (IsCmpZero && |
4344 | isDefConvertible(Inst, NoSignFlag, ClearsOverflowFlag)) { |
4345 | MI = &Inst; |
4346 | break; |
4347 | } |
4348 | |
4349 | // Look back for the following pattern, in which case the test64rr |
4350 | // instruction could be erased. |
4351 | // |
4352 | // Example: |
4353 | // %reg = and32ri %in_reg, 5 |
4354 | // ... // EFLAGS not changed. |
4355 | // %src_reg = subreg_to_reg 0, %reg, %subreg.sub_index |
4356 | // test64rr %src_reg, %src_reg, implicit-def $eflags |
4357 | MachineInstr *AndInstr = nullptr; |
4358 | if (IsCmpZero && |
4359 | findRedundantFlagInstr(CmpInstr, Inst, MRI, &AndInstr, TRI, |
4360 | NoSignFlag, ClearsOverflowFlag)) { |
4361 | assert(AndInstr != nullptr && X86::isAND(AndInstr->getOpcode()));
4362 | MI = AndInstr; |
4363 | break; |
4364 | } |
4365 | // Cannot find other candidates before definition of SrcReg. |
4366 | return false; |
4367 | } |
4368 | |
4369 | if (Inst.modifiesRegister(X86::EFLAGS, TRI)) { |
4370 | // Try to use EFLAGS produced by an instruction reading %SrcReg. |
4371 | // Example: |
4372 | // %eax = ... |
4373 | // ... |
4374 | // popcntl %eax |
4375 | // ... // EFLAGS not changed |
4376 | // testl %eax, %eax // <-- can be removed |
4377 | if (IsCmpZero) { |
4378 | NewCC = isUseDefConvertible(Inst); |
4379 | if (NewCC != X86::COND_INVALID && Inst.getOperand(1).isReg() && |
4380 | Inst.getOperand(1).getReg() == SrcReg) { |
4381 | ShouldUpdateCC = true; |
4382 | MI = &Inst; |
4383 | break; |
4384 | } |
4385 | } |
4386 | |
4387 | // Try to use EFLAGS from an instruction with similar flag results. |
4388 | // Example: |
4389 | // sub x, y or cmp x, y |
4390 | // ... // EFLAGS not changed |
4391 | // cmp x, y // <-- can be removed |
4392 | if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, CmpValue, |
4393 | Inst, &IsSwapped, &ImmDelta)) { |
4394 | Sub = &Inst; |
4395 | break; |
4396 | } |
4397 | |
4398 | // MOV32r0 is implemented with an XOR, which clobbers the condition codes. It
4399 | // is safe to move it up if the definition of EFLAGS is dead and earlier
4400 | // instructions do not read or write EFLAGS.
4401 | if (!Movr0Inst && Inst.getOpcode() == X86::MOV32r0 && |
4402 | Inst.registerDefIsDead(X86::EFLAGS, TRI)) { |
4403 | Movr0Inst = &Inst; |
4404 | continue; |
4405 | } |
4406 | |
4407 | // Cannot do anything for any other EFLAG changes. |
4408 | return false; |
4409 | } |
4410 | } |
4411 | |
4412 | if (MI || Sub) |
4413 | break; |
4414 | |
4415 | // Reached the beginning of the basic block. Continue in the predecessor if
4416 | // there is exactly one.
4417 | if (MBB->pred_size() != 1) |
4418 | return false; |
4419 | MBB = *MBB->pred_begin(); |
4420 | From = MBB->rbegin(); |
4421 | } |
4422 | |
4423 | // Scan forward from the instruction after CmpInstr for uses of EFLAGS. |
4424 | // It is safe to remove CmpInstr if EFLAGS is redefined or killed. |
4425 | // If we are done with the basic block, we need to check whether EFLAGS is |
4426 | // live-out. |
4427 | bool FlagsMayLiveOut = true; |
4428 | SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate; |
4429 | MachineBasicBlock::iterator AfterCmpInstr = |
4430 | std::next(MachineBasicBlock::iterator(CmpInstr)); |
4431 | for (MachineInstr &Instr : make_range(AfterCmpInstr, CmpMBB.end())) { |
4432 | bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI); |
4433 | bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI); |
4434 | // We should check the usage if this instruction uses and updates EFLAGS. |
4435 | if (!UseEFLAGS && ModifyEFLAGS) { |
4436 | // It is safe to remove CmpInstr if EFLAGS is updated again. |
4437 | FlagsMayLiveOut = false; |
4438 | break; |
4439 | } |
4440 | if (!UseEFLAGS && !ModifyEFLAGS) |
4441 | continue; |
4442 | |
4443 | // EFLAGS is used by this instruction. |
4444 | X86::CondCode OldCC = X86::COND_INVALID; |
4445 | if (MI || IsSwapped || ImmDelta != 0) { |
4446 | // We decode the condition code from opcode. |
4447 | if (Instr.isBranch()) |
4448 | OldCC = X86::getCondFromBranch(Instr); |
4449 | else { |
4450 | OldCC = X86::getCondFromSETCC(Instr); |
4451 | if (OldCC == X86::COND_INVALID) |
4452 | OldCC = X86::getCondFromCMov(Instr); |
4453 | } |
4454 | if (OldCC == X86::COND_INVALID) return false; |
4455 | } |
4456 | X86::CondCode ReplacementCC = X86::COND_INVALID; |
4457 | if (MI) { |
4458 | switch (OldCC) { |
4459 | default: break; |
4460 | case X86::COND_A: case X86::COND_AE: |
4461 | case X86::COND_B: case X86::COND_BE: |
4462 | // CF is used, we can't perform this optimization. |
4463 | return false; |
4464 | case X86::COND_G: case X86::COND_GE: |
4465 | case X86::COND_L: case X86::COND_LE: |
4466 | // If SF is used, but the instruction doesn't update the SF, then we |
4467 | // can't do the optimization. |
4468 | if (NoSignFlag) |
4469 | return false; |
4470 | [[fallthrough]]; |
4471 | case X86::COND_O: case X86::COND_NO: |
4472 | // If OF is used, the instruction needs to clear it like CmpZero does. |
4473 | if (!ClearsOverflowFlag) |
4474 | return false; |
4475 | break; |
4476 | case X86::COND_S: case X86::COND_NS: |
4477 | // If SF is used, but the instruction doesn't update the SF, then we |
4478 | // can't do the optimization. |
4479 | if (NoSignFlag) |
4480 | return false; |
4481 | break; |
4482 | } |
4483 | |
4484 | // If we're updating the condition code check if we have to reverse the |
4485 | // condition. |
4486 | if (ShouldUpdateCC) |
4487 | switch (OldCC) { |
4488 | default: |
4489 | return false; |
4490 | case X86::COND_E: |
4491 | ReplacementCC = NewCC; |
4492 | break; |
4493 | case X86::COND_NE: |
4494 | ReplacementCC = GetOppositeBranchCondition(NewCC); |
4495 | break; |
4496 | } |
4497 | } else if (IsSwapped) { |
4498 | // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs |
4499 | // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. |
4500 | // We swap the condition code and synthesize the new opcode. |
4501 | ReplacementCC = getSwappedCondition(OldCC); |
4502 | if (ReplacementCC == X86::COND_INVALID) |
4503 | return false; |
4504 | ShouldUpdateCC = true; |
4505 | } else if (ImmDelta != 0) { |
4506 | unsigned BitWidth = TRI->getRegSizeInBits(*MRI->getRegClass(SrcReg)); |
4507 | // Shift amount for min/max constants to adjust for 8/16/32 instruction |
4508 | // sizes. |
4509 | switch (OldCC) { |
4510 | case X86::COND_L: // x <s (C + 1) --> x <=s C |
4511 | if (ImmDelta != 1 || APInt::getSignedMinValue(BitWidth) == CmpValue) |
4512 | return false; |
4513 | ReplacementCC = X86::COND_LE; |
4514 | break; |
4515 | case X86::COND_B: // x <u (C + 1) --> x <=u C |
4516 | if (ImmDelta != 1 || CmpValue == 0) |
4517 | return false; |
4518 | ReplacementCC = X86::COND_BE; |
4519 | break; |
4520 | case X86::COND_GE: // x >=s (C + 1) --> x >s C |
4521 | if (ImmDelta != 1 || APInt::getSignedMinValue(BitWidth) == CmpValue) |
4522 | return false; |
4523 | ReplacementCC = X86::COND_G; |
4524 | break; |
4525 | case X86::COND_AE: // x >=u (C + 1) --> x >u C |
4526 | if (ImmDelta != 1 || CmpValue == 0) |
4527 | return false; |
4528 | ReplacementCC = X86::COND_A; |
4529 | break; |
4530 | case X86::COND_G: // x >s (C - 1) --> x >=s C |
4531 | if (ImmDelta != -1 || APInt::getSignedMaxValue(BitWidth) == CmpValue) |
4532 | return false; |
4533 | ReplacementCC = X86::COND_GE; |
4534 | break; |
4535 | case X86::COND_A: // x >u (C - 1) --> x >=u C |
4536 | if (ImmDelta != -1 || APInt::getMaxValue(BitWidth) == CmpValue) |
4537 | return false; |
4538 | ReplacementCC = X86::COND_AE; |
4539 | break; |
4540 | case X86::COND_LE: // x <=s (C - 1) --> x <s C |
4541 | if (ImmDelta != -1 || APInt::getSignedMaxValue(BitWidth) == CmpValue) |
4542 | return false; |
4543 | ReplacementCC = X86::COND_L; |
4544 | break; |
4545 | case X86::COND_BE: // x <=u (C - 1) --> x <u C |
4546 | if (ImmDelta != -1 || APInt::getMaxValue(BitWidth) == CmpValue) |
4547 | return false; |
4548 | ReplacementCC = X86::COND_B; |
4549 | break; |
4550 | default: |
4551 | return false; |
4552 | } |
4553 | ShouldUpdateCC = true; |
4554 | } |
4555 | |
4556 | if (ShouldUpdateCC && ReplacementCC != OldCC) { |
4557 | // Push the MachineInstr to OpsToUpdate. |
4558 | // If it is safe to remove CmpInstr, the condition code of these |
4559 | // instructions will be modified. |
4560 | OpsToUpdate.push_back(std::make_pair(&Instr, ReplacementCC)); |
4561 | } |
4562 | if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) { |
4563 | // It is safe to remove CmpInstr if EFLAGS is updated again or killed. |
4564 | FlagsMayLiveOut = false; |
4565 | break; |
4566 | } |
4567 | } |
4568 | |
4569 | // If we have to update users but EFLAGS is live-out, abort, since we cannot
4570 | // easily find all of the users.
4571 | if ((MI != nullptr || ShouldUpdateCC) && FlagsMayLiveOut) { |
4572 | for (MachineBasicBlock *Successor : CmpMBB.successors()) |
4573 | if (Successor->isLiveIn(X86::EFLAGS)) |
4574 | return false; |
4575 | } |
4576 | |
4577 | // The instruction to be updated is either Sub or MI. |
4578 | assert((MI == nullptr || Sub == nullptr) && "Should not have Sub and MI set");
4579 | Sub = MI != nullptr ? MI : Sub; |
4580 | MachineBasicBlock *SubBB = Sub->getParent(); |
4581 | // Move Movr0Inst to the appropriate place before Sub. |
4582 | if (Movr0Inst) { |
4583 | // Only move within the same block so we don't accidentally move to a |
4584 | // block with higher execution frequency. |
4585 | if (&CmpMBB != SubBB) |
4586 | return false; |
4587 | // Look backwards until we find a def that doesn't use the current EFLAGS. |
4588 | MachineBasicBlock::reverse_iterator InsertI = Sub, |
4589 | InsertE = Sub->getParent()->rend(); |
4590 | for (; InsertI != InsertE; ++InsertI) { |
4591 | MachineInstr *Instr = &*InsertI; |
4592 | if (!Instr->readsRegister(X86::EFLAGS, TRI) && |
4593 | Instr->modifiesRegister(X86::EFLAGS, TRI)) { |
4594 | Movr0Inst->getParent()->remove(Movr0Inst); |
4595 | Instr->getParent()->insert(MachineBasicBlock::iterator(Instr), |
4596 | Movr0Inst); |
4597 | break; |
4598 | } |
4599 | } |
4600 | if (InsertI == InsertE) |
4601 | return false; |
4602 | } |
4603 | |
4604 | // Make sure Sub instruction defines EFLAGS and mark the def live. |
4605 | MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS); |
4606 | assert(FlagDef && "Unable to locate a def EFLAGS operand");
4607 | FlagDef->setIsDead(false); |
4608 | |
4609 | CmpInstr.eraseFromParent(); |
4610 | |
4611 | // Modify the condition code of instructions in OpsToUpdate. |
4612 | for (auto &Op : OpsToUpdate) { |
4613 | Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1) |
4614 | .setImm(Op.second); |
4615 | } |
4616 | // Add EFLAGS to block live-ins between CmpBB and block of flags producer. |
4617 | for (MachineBasicBlock *MBB = &CmpMBB; MBB != SubBB; |
4618 | MBB = *MBB->pred_begin()) { |
4619 | assert(MBB->pred_size() == 1 && "Expected exactly one predecessor");
4620 | if (!MBB->isLiveIn(X86::EFLAGS)) |
4621 | MBB->addLiveIn(X86::EFLAGS); |
4622 | } |
4623 | return true; |
4624 | } |
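The IsSwapped path relies on comparison being antisymmetric: the flags of cmp y, x answer the mirrored question about cmp x, y, which is why getSwappedCondition can turn COND_L into COND_G and so on. A scalar sanity check:

#include <cassert>

int main() {
  int X = 3, Y = 9;
  assert((X < Y) == (Y > X));   // COND_L for cmp x,y matches COND_G for cmp y,x
  assert((X >= Y) == (Y <= X)); // COND_GE matches COND_LE
  return 0;
}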
4625 | |
4626 | /// Try to remove the load by folding it to a register
4627 | /// operand at the use. We fold the load if it defines a virtual
4628 | /// register, the virtual register is used once in the same BB, and the
4629 | /// instructions in between do not load or store and have no side effects.
4630 | MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI, |
4631 | const MachineRegisterInfo *MRI, |
4632 | Register &FoldAsLoadDefReg, |
4633 | MachineInstr *&DefMI) const { |
4634 | // Check whether we can move DefMI here. |
4635 | DefMI = MRI->getVRegDef(FoldAsLoadDefReg); |
4636 | assert(DefMI);
4637 | bool SawStore = false; |
4638 | if (!DefMI->isSafeToMove(nullptr, SawStore)) |
4639 | return nullptr; |
4640 | |
4641 | // Collect information about virtual register operands of MI. |
4642 | SmallVector<unsigned, 1> SrcOperandIds; |
4643 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
4644 | MachineOperand &MO = MI.getOperand(i); |
4645 | if (!MO.isReg()) |
4646 | continue; |
4647 | Register Reg = MO.getReg(); |
4648 | if (Reg != FoldAsLoadDefReg) |
4649 | continue; |
4650 | // Do not fold if we have a subreg use or a def. |
4651 | if (MO.getSubReg() || MO.isDef()) |
4652 | return nullptr; |
4653 | SrcOperandIds.push_back(i); |
4654 | } |
4655 | if (SrcOperandIds.empty()) |
4656 | return nullptr; |
4657 | |
4658 | // Check whether we can fold the def into SrcOperandId. |
4659 | if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) { |
4660 | FoldAsLoadDefReg = 0; |
4661 | return FoldMI; |
4662 | } |
4663 | |
4664 | return nullptr; |
4665 | } |
4666 | |
4667 | /// Expand a single-def pseudo instruction to a two-addr |
4668 | /// instruction with two undef reads of the register being defined. |
4669 | /// This is used for mapping: |
4670 | /// %xmm4 = V_SET0 |
4671 | /// to: |
4672 | /// %xmm4 = PXORrr undef %xmm4, undef %xmm4 |
4673 | /// |
4674 | static bool Expand2AddrUndef(MachineInstrBuilder &MIB, |
4675 | const MCInstrDesc &Desc) { |
4676 | assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
4677 | Register Reg = MIB.getReg(0); |
4678 | MIB->setDesc(Desc); |
4679 | |
4680 | // MachineInstr::addOperand() will insert explicit operands before any |
4681 | // implicit operands. |
4682 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); |
4683 | // But we don't trust that. |
4684 | assert(MIB.getReg(1) == Reg &&
4685 |        MIB.getReg(2) == Reg && "Misplaced operand");
4686 | return true; |
4687 | } |
4688 | |
4689 | /// Expand a single-def pseudo instruction to a two-addr |
4690 | /// instruction with two %k0 reads. |
4691 | /// This is used for mapping: |
4692 | /// %k4 = K_SET1 |
4693 | /// to: |
4694 | /// %k4 = KXNORrr %k0, %k0 |
4695 | static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc, |
4696 | Register Reg) { |
4697 | assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
4698 | MIB->setDesc(Desc); |
4699 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); |
4700 | return true; |
4701 | } |
4702 | |
4703 | static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, |
4704 | bool MinusOne) { |
4705 | MachineBasicBlock &MBB = *MIB->getParent(); |
4706 | const DebugLoc &DL = MIB->getDebugLoc(); |
4707 | Register Reg = MIB.getReg(0); |
4708 | |
4709 | // Insert the XOR. |
4710 | BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg) |
4711 | .addReg(Reg, RegState::Undef) |
4712 | .addReg(Reg, RegState::Undef); |
4713 | |
4714 | // Turn the pseudo into an INC or DEC. |
4715 | MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r)); |
4716 | MIB.addReg(Reg); |
4717 | |
4718 | return true; |
4719 | } |
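The two-instruction sequence avoids encoding a full 32-bit immediate. A scalar model of what executes:

#include <cassert>

int main() {
  int Reg = 0xDEAD; // arbitrary prior contents; the xor makes them irrelevant
  Reg ^= Reg;       // XOR32rr reg, reg -> 0
  ++Reg;            // INC32r -> 1 for MOV32r1 (DEC32r would give -1)
  assert(Reg == 1);
  return 0;
}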
4720 | |
4721 | static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB, |
4722 | const TargetInstrInfo &TII, |
4723 | const X86Subtarget &Subtarget) { |
4724 | MachineBasicBlock &MBB = *MIB->getParent(); |
4725 | const DebugLoc &DL = MIB->getDebugLoc(); |
4726 | int64_t Imm = MIB->getOperand(1).getImm(); |
4727 | assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
4728 | MachineBasicBlock::iterator I = MIB.getInstr(); |
4729 | |
4730 | int StackAdjustment; |
4731 | |
4732 | if (Subtarget.is64Bit()) { |
4733 | assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
4734 |        MIB->getOpcode() == X86::MOV32ImmSExti8);
4735 | |
4736 | // Can't use push/pop lowering if the function might write to the red zone. |
4737 | X86MachineFunctionInfo *X86FI = |
4738 | MBB.getParent()->getInfo<X86MachineFunctionInfo>(); |
4739 | if (X86FI->getUsesRedZone()) { |
4740 | MIB->setDesc(TII.get(MIB->getOpcode() == |
4741 | X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri)); |
4742 | return true; |
4743 | } |
4744 | |
4745 | // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and |
4746 | // widen the register if necessary. |
4747 | StackAdjustment = 8; |
4748 | BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm); |
4749 | MIB->setDesc(TII.get(X86::POP64r)); |
4750 | MIB->getOperand(0) |
4751 | .setReg(getX86SubSuperRegister(MIB.getReg(0), 64)); |
4752 | } else { |
4753 | assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
4754 | StackAdjustment = 4; |
4755 | BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm); |
4756 | MIB->setDesc(TII.get(X86::POP32r)); |
4757 | } |
4758 | MIB->removeOperand(1); |
4759 | MIB->addImplicitDefUseOperands(*MBB.getParent()); |
4760 | |
4761 | // Build CFI if necessary. |
4762 | MachineFunction &MF = *MBB.getParent(); |
4763 | const X86FrameLowering *TFL = Subtarget.getFrameLowering(); |
4764 | bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); |
4765 | bool NeedsDwarfCFI = !IsWin64Prologue && MF.needsFrameMoves(); |
4766 | bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI; |
4767 | if (EmitCFI) { |
4768 | TFL->BuildCFI(MBB, I, DL, |
4769 | MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment)); |
4770 | TFL->BuildCFI(MBB, std::next(I), DL, |
4771 | MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment)); |
4772 | } |
4773 | |
4774 | return true; |
4775 | } |
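The push/pop lowering trades a long immediate encoding for a sign-extending 8-bit push. A scalar model of the round trip:

#include <cassert>
#include <cstdint>

int main() {
  int8_t Imm = -1;                          // any value in [-128, 127]
  int64_t Slot = static_cast<int64_t>(Imm); // PUSH64i8 sign-extends to 64 bits
  int64_t Reg = Slot;                       // POP64r reads it back
  assert(Reg == -1);
  return 0;
}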
4776 | |
4777 | // LoadStackGuard has so far only been implemented for 64-bit MachO. A
4778 | // different code sequence is needed for other targets.
4779 | static void expandLoadStackGuard(MachineInstrBuilder &MIB, |
4780 | const TargetInstrInfo &TII) { |
4781 | MachineBasicBlock &MBB = *MIB->getParent(); |
4782 | const DebugLoc &DL = MIB->getDebugLoc(); |
4783 | Register Reg = MIB.getReg(0); |
4784 | const GlobalValue *GV = |
4785 | cast<GlobalValue>((*MIB->memoperands_begin())->getValue()); |
4786 | auto Flags = MachineMemOperand::MOLoad | |
4787 | MachineMemOperand::MODereferenceable | |
4788 | MachineMemOperand::MOInvariant; |
4789 | MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( |
4790 | MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, Align(8)); |
4791 | MachineBasicBlock::iterator I = MIB.getInstr(); |
4792 | |
4793 | BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1) |
4794 | .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0) |
4795 | .addMemOperand(MMO); |
4796 | MIB->setDebugLoc(DL); |
4797 | MIB->setDesc(TII.get(X86::MOV64rm)); |
4798 | MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0); |
4799 | } |
4800 | |
4801 | static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) { |
4802 | MachineBasicBlock &MBB = *MIB->getParent(); |
4803 | MachineFunction &MF = *MBB.getParent(); |
4804 | const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>(); |
4805 | const X86RegisterInfo *TRI = Subtarget.getRegisterInfo(); |
4806 | unsigned XorOp = |
4807 | MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr; |
4808 | MIB->setDesc(TII.get(XorOp)); |
4809 | MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef); |
4810 | return true; |
4811 | } |
4812 | |
4813 | // This is used to handle spills of 128/256-bit registers when we have AVX512
4814 | // but not VLX. If the spill uses an extended register, we need an instruction
4815 | // that loads the lower 128/256 bits but is available with only AVX512F.
4816 | static bool expandNOVLXLoad(MachineInstrBuilder &MIB, |
4817 | const TargetRegisterInfo *TRI, |
4818 | const MCInstrDesc &LoadDesc, |
4819 | const MCInstrDesc &BroadcastDesc, |
4820 | unsigned SubIdx) { |
4821 | Register DestReg = MIB.getReg(0); |
4822 | // Check if DestReg is XMM16-31 or YMM16-31. |
4823 | if (TRI->getEncodingValue(DestReg) < 16) { |
4824 | // We can use a normal VEX encoded load. |
4825 | MIB->setDesc(LoadDesc); |
4826 | } else { |
4827 | // Use a 128/256-bit VBROADCAST instruction. |
4828 | MIB->setDesc(BroadcastDesc); |
4829 | // Change the destination to a 512-bit register. |
4830 | DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass); |
4831 | MIB->getOperand(0).setReg(DestReg); |
4832 | } |
4833 | return true; |
4834 | } |
4835 | |
4836 | // This is used to handle spills of 128/256-bit registers when we have AVX512
4837 | // but not VLX. If the spill uses an extended register, we need an instruction
4838 | // that stores the lower 128/256 bits but is available with only AVX512F.
4839 | static bool expandNOVLXStore(MachineInstrBuilder &MIB, |
4840 | const TargetRegisterInfo *TRI, |
4841 | const MCInstrDesc &StoreDesc, |
4842 | const MCInstrDesc &ExtractDesc, |
4843 | unsigned SubIdx) { |
4844 | Register SrcReg = MIB.getReg(X86::AddrNumOperands); |
4845 | // Check if SrcReg is XMM16-31 or YMM16-31.
4846 | if (TRI->getEncodingValue(SrcReg) < 16) { |
4847 | // We can use a normal VEX encoded store. |
4848 | MIB->setDesc(StoreDesc); |
4849 | } else { |
4850 | // Use a VEXTRACTF instruction. |
4851 | MIB->setDesc(ExtractDesc); |
4852 | // Change the source to a 512-bit register.
4853 | SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass); |
4854 | MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg); |
4855 | MIB.addImm(0x0); // Append immediate to extract from the lower bits. |
4856 | } |
4857 | |
4858 | return true; |
4859 | } |
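Both NOVLX expansions hinge on the same register test: encodings 0-15 (XMM0-15 / YMM0-15) are reachable with a VEX prefix, while encodings 16-31 exist only under EVEX, forcing the 512-bit broadcast/extract detour when VLX is unavailable. As a standalone predicate (the name is illustrative):

// Without VLX there is no EVEX encoding of the 128/256-bit move itself,
// so extended registers must go through a full ZMM instruction.
bool needsZMMDetour(unsigned RegEncodingValue, bool HasVLX) {
  return !HasVLX && RegEncodingValue >= 16;
}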
4860 | |
4861 | static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) { |
4862 | MIB->setDesc(Desc); |
4863 | int64_t ShiftAmt = MIB->getOperand(2).getImm(); |
4864 | // Temporarily remove the immediate so we can add another source register. |
4865 | MIB->removeOperand(2); |
4866 | // Add the register. Don't copy the kill flag if there is one. |
4867 | MIB.addReg(MIB.getReg(1), |
4868 | getUndefRegState(MIB->getOperand(1).isUndef())); |
4869 | // Add back the immediate. |
4870 | MIB.addImm(ShiftAmt); |
4871 | return true; |
4872 | } |
4873 | |
4874 | bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { |
4875 | bool HasAVX = Subtarget.hasAVX(); |
4876 | MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); |
4877 | switch (MI.getOpcode()) { |
4878 | case X86::MOV32r0: |
4879 | return Expand2AddrUndef(MIB, get(X86::XOR32rr)); |
4880 | case X86::MOV32r1: |
4881 | return expandMOV32r1(MIB, *this, /*MinusOne=*/ false); |
4882 | case X86::MOV32r_1: |
4883 | return expandMOV32r1(MIB, *this, /*MinusOne=*/ true); |
4884 | case X86::MOV32ImmSExti8: |
4885 | case X86::MOV64ImmSExti8: |
4886 | return ExpandMOVImmSExti8(MIB, *this, Subtarget); |
4887 | case X86::SETB_C32r: |
4888 | return Expand2AddrUndef(MIB, get(X86::SBB32rr)); |
4889 | case X86::SETB_C64r: |
4890 | return Expand2AddrUndef(MIB, get(X86::SBB64rr)); |
4891 | case X86::MMX_SET0: |
4892 | return Expand2AddrUndef(MIB, get(X86::MMX_PXORrr)); |
4893 | case X86::V_SET0: |
4894 | case X86::FsFLD0SS: |
4895 | case X86::FsFLD0SD: |
4896 | case X86::FsFLD0SH: |
4897 | case X86::FsFLD0F128: |
4898 | return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr)); |
4899 | case X86::AVX_SET0: { |
4900 | assert(HasAVX && "AVX not supported");
4901 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
4902 | Register SrcReg = MIB.getReg(0); |
4903 | Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); |
4904 | MIB->getOperand(0).setReg(XReg); |
4905 | Expand2AddrUndef(MIB, get(X86::VXORPSrr)); |
4906 | MIB.addReg(SrcReg, RegState::ImplicitDefine); |
4907 | return true; |
4908 | } |
4909 | case X86::AVX512_128_SET0: |
4910 | case X86::AVX512_FsFLD0SH: |
4911 | case X86::AVX512_FsFLD0SS: |
4912 | case X86::AVX512_FsFLD0SD: |
4913 | case X86::AVX512_FsFLD0F128: { |
4914 | bool HasVLX = Subtarget.hasVLX(); |
4915 | Register SrcReg = MIB.getReg(0); |
4916 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
4917 | if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) |
4918 | return Expand2AddrUndef(MIB, |
4919 | get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); |
4920 | // Extended register without VLX. Use a larger XOR. |
4921 | SrcReg = |
4922 | TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass); |
4923 | MIB->getOperand(0).setReg(SrcReg); |
4924 | return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); |
4925 | } |
4926 | case X86::AVX512_256_SET0: |
4927 | case X86::AVX512_512_SET0: { |
4928 | bool HasVLX = Subtarget.hasVLX(); |
4929 | Register SrcReg = MIB.getReg(0); |
4930 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
4931 | if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) { |
4932 | Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); |
4933 | MIB->getOperand(0).setReg(XReg); |
4934 | Expand2AddrUndef(MIB, |
4935 | get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); |
4936 | MIB.addReg(SrcReg, RegState::ImplicitDefine); |
4937 | return true; |
4938 | } |
4939 | if (MI.getOpcode() == X86::AVX512_256_SET0) { |
4940 | // No VLX so we must reference a zmm. |
4941 | unsigned ZReg = |
4942 | TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass); |
4943 | MIB->getOperand(0).setReg(ZReg); |
4944 | } |
4945 | return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); |
4946 | } |
4947 | case X86::V_SETALLONES: |
4948 | return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr)); |
4949 | case X86::AVX2_SETALLONES: |
4950 | return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr)); |
4951 | case X86::AVX1_SETALLONES: { |
4952 | Register Reg = MIB.getReg(0); |
4953 | // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS. |
4954 | MIB->setDesc(get(X86::VCMPPSYrri)); |
4955 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf); |
4956 | return true; |
4957 | } |
4958 | case X86::AVX512_512_SETALLONES: { |
4959 | Register Reg = MIB.getReg(0); |
4960 | MIB->setDesc(get(X86::VPTERNLOGDZrri)); |
4961 | // VPTERNLOGD needs 3 register inputs and an immediate. |
4962 | // 0xff will return 1s for any input. |
4963 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef) |
4964 | .addReg(Reg, RegState::Undef).addImm(0xff); |
4965 | return true; |
4966 | } |
4967 | case X86::AVX512_512_SEXT_MASK_32: |
4968 | case X86::AVX512_512_SEXT_MASK_64: { |
4969 | Register Reg = MIB.getReg(0); |
4970 | Register MaskReg = MIB.getReg(1); |
4971 | unsigned MaskState = getRegState(MIB->getOperand(1)); |
4972 | unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ? |
4973 | X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz; |
4974 | MI.removeOperand(1); |
4975 | MIB->setDesc(get(Opc)); |
4976 | // VPTERNLOG needs 3 register inputs and an immediate. |
4977 | // 0xff will return 1s for any input. |
4978 | MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState) |
4979 | .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff); |
4980 | return true; |
4981 | } |
4982 | case X86::VMOVAPSZ128rm_NOVLX: |
4983 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm), |
4984 | get(X86::VBROADCASTF32X4rm), X86::sub_xmm); |
4985 | case X86::VMOVUPSZ128rm_NOVLX: |
4986 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm), |
4987 | get(X86::VBROADCASTF32X4rm), X86::sub_xmm); |
4988 | case X86::VMOVAPSZ256rm_NOVLX: |
4989 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm), |
4990 | get(X86::VBROADCASTF64X4rm), X86::sub_ymm); |
4991 | case X86::VMOVUPSZ256rm_NOVLX: |
4992 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm), |
4993 | get(X86::VBROADCASTF64X4rm), X86::sub_ymm); |
4994 | case X86::VMOVAPSZ128mr_NOVLX: |
4995 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr), |
4996 | get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm); |
4997 | case X86::VMOVUPSZ128mr_NOVLX: |
4998 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr), |
4999 | get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm); |
5000 | case X86::VMOVAPSZ256mr_NOVLX: |
5001 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr), |
5002 | get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); |
5003 | case X86::VMOVUPSZ256mr_NOVLX: |
5004 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr), |
5005 | get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); |
5006 | case X86::MOV32ri64: { |
5007 | Register Reg = MIB.getReg(0); |
5008 | Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit); |
5009 | MI.setDesc(get(X86::MOV32ri)); |
5010 | MIB->getOperand(0).setReg(Reg32); |
5011 | MIB.addReg(Reg, RegState::ImplicitDefine); |
5012 | return true; |
5013 | } |
5014 | |
5015 | // KNL does not recognize dependency-breaking idioms for mask registers, |
5016 | // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1. |
5017 | // Using %k0 as the undef input register is a performance heuristic based |
5018 | // on the assumption that %k0 is used less frequently than the other mask |
5019 | // registers, since it is not usable as a write mask. |
5020 | // FIXME: A more advanced approach would be to choose the best input mask |
5021 | // register based on context. |
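     | // e.g. (a sketch; the register choices are illustrative):
     | //   %k2 = KSET0W   -->   %k2 = KXORWrr undef %k0, undef %k0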
5022 | case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0); |
5023 | case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0); |
5024 | case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0); |
5025 | case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0); |
5026 | case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0); |
5027 | case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0); |
5028 | case TargetOpcode::LOAD_STACK_GUARD: |
5029 | expandLoadStackGuard(MIB, *this); |
5030 | return true; |
5031 | case X86::XOR64_FP: |
5032 | case X86::XOR32_FP: |
5033 | return expandXorFP(MIB, *this); |
5034 | case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8)); |
5035 | case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8)); |
5036 | case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8)); |
5037 | case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8)); |
5038 | case X86::ADD8rr_DB: MIB->setDesc(get(X86::OR8rr)); break; |
5039 | case X86::ADD16rr_DB: MIB->setDesc(get(X86::OR16rr)); break; |
5040 | case X86::ADD32rr_DB: MIB->setDesc(get(X86::OR32rr)); break; |
5041 | case X86::ADD64rr_DB: MIB->setDesc(get(X86::OR64rr)); break; |
5042 | case X86::ADD8ri_DB: MIB->setDesc(get(X86::OR8ri)); break; |
5043 | case X86::ADD16ri_DB: MIB->setDesc(get(X86::OR16ri)); break; |
5044 | case X86::ADD32ri_DB: MIB->setDesc(get(X86::OR32ri)); break; |
5045 | case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break; |
5046 | case X86::ADD16ri8_DB: MIB->setDesc(get(X86::OR16ri8)); break; |
5047 | case X86::ADD32ri8_DB: MIB->setDesc(get(X86::OR32ri8)); break; |
5048 | case X86::ADD64ri8_DB: MIB->setDesc(get(X86::OR64ri8)); break; |
5049 | } |
5050 | return false; |
5051 | } |
5052 | |
5053 | /// Return true for all instructions that only update |
5054 | /// the first 32 or 64 bits of the destination register and leave the rest
5055 | /// unmodified. This can be used to avoid folding loads if the instructions |
5056 | /// only update part of the destination register, and the non-updated part is |
5057 | /// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these |
5058 | /// instructions breaks the partial register dependency and can improve
5059 | /// performance. e.g.: |
5060 | /// |
5061 | /// movss (%rdi), %xmm0 |
5062 | /// cvtss2sd %xmm0, %xmm0 |
5063 | /// |
5064 | /// Instead of |
5065 | /// cvtss2sd (%rdi), %xmm0 |
5066 | /// |
5067 | /// FIXME: This should be turned into a TSFlags. |
5068 | /// |
5069 | static bool hasPartialRegUpdate(unsigned Opcode, |
5070 | const X86Subtarget &Subtarget, |
5071 | bool ForLoadFold = false) { |
5072 | switch (Opcode) { |
5073 | case X86::CVTSI2SSrr: |
5074 | case X86::CVTSI2SSrm: |
5075 | case X86::CVTSI642SSrr: |
5076 | case X86::CVTSI642SSrm: |
5077 | case X86::CVTSI2SDrr: |
5078 | case X86::CVTSI2SDrm: |
5079 | case X86::CVTSI642SDrr: |
5080 | case X86::CVTSI642SDrm: |
5081 | // Load folding won't affect the undef register update since the input is
5082 | // a GPR. |
5083 | return !ForLoadFold; |
5084 | case X86::CVTSD2SSrr: |
5085 | case X86::CVTSD2SSrm: |
5086 | case X86::CVTSS2SDrr: |
5087 | case X86::CVTSS2SDrm: |
5088 | case X86::MOVHPDrm: |
5089 | case X86::MOVHPSrm: |
5090 | case X86::MOVLPDrm: |
5091 | case X86::MOVLPSrm: |
5092 | case X86::RCPSSr: |
5093 | case X86::RCPSSm: |
5094 | case X86::RCPSSr_Int: |
5095 | case X86::RCPSSm_Int: |
5096 | case X86::ROUNDSDr: |
5097 | case X86::ROUNDSDm: |
5098 | case X86::ROUNDSSr: |
5099 | case X86::ROUNDSSm: |
5100 | case X86::RSQRTSSr: |
5101 | case X86::RSQRTSSm: |
5102 | case X86::RSQRTSSr_Int: |
5103 | case X86::RSQRTSSm_Int: |
5104 | case X86::SQRTSSr: |
5105 | case X86::SQRTSSm: |
5106 | case X86::SQRTSSr_Int: |
5107 | case X86::SQRTSSm_Int: |
5108 | case X86::SQRTSDr: |
5109 | case X86::SQRTSDm: |
5110 | case X86::SQRTSDr_Int: |
5111 | case X86::SQRTSDm_Int: |
5112 | return true; |
5113 | case X86::VFCMULCPHZ128rm: |
5114 | case X86::VFCMULCPHZ128rmb: |
5115 | case X86::VFCMULCPHZ128rmbkz: |
5116 | case X86::VFCMULCPHZ128rmkz: |
5117 | case X86::VFCMULCPHZ128rr: |
5118 | case X86::VFCMULCPHZ128rrkz: |
5119 | case X86::VFCMULCPHZ256rm: |
5120 | case X86::VFCMULCPHZ256rmb: |
5121 | case X86::VFCMULCPHZ256rmbkz: |
5122 | case X86::VFCMULCPHZ256rmkz: |
5123 | case X86::VFCMULCPHZ256rr: |
5124 | case X86::VFCMULCPHZ256rrkz: |
5125 | case X86::VFCMULCPHZrm: |
5126 | case X86::VFCMULCPHZrmb: |
5127 | case X86::VFCMULCPHZrmbkz: |
5128 | case X86::VFCMULCPHZrmkz: |
5129 | case X86::VFCMULCPHZrr: |
5130 | case X86::VFCMULCPHZrrb: |
5131 | case X86::VFCMULCPHZrrbkz: |
5132 | case X86::VFCMULCPHZrrkz: |
5133 | case X86::VFMULCPHZ128rm: |
5134 | case X86::VFMULCPHZ128rmb: |
5135 | case X86::VFMULCPHZ128rmbkz: |
5136 | case X86::VFMULCPHZ128rmkz: |
5137 | case X86::VFMULCPHZ128rr: |
5138 | case X86::VFMULCPHZ128rrkz: |
5139 | case X86::VFMULCPHZ256rm: |
5140 | case X86::VFMULCPHZ256rmb: |
5141 | case X86::VFMULCPHZ256rmbkz: |
5142 | case X86::VFMULCPHZ256rmkz: |
5143 | case X86::VFMULCPHZ256rr: |
5144 | case X86::VFMULCPHZ256rrkz: |
5145 | case X86::VFMULCPHZrm: |
5146 | case X86::VFMULCPHZrmb: |
5147 | case X86::VFMULCPHZrmbkz: |
5148 | case X86::VFMULCPHZrmkz: |
5149 | case X86::VFMULCPHZrr: |
5150 | case X86::VFMULCPHZrrb: |
5151 | case X86::VFMULCPHZrrbkz: |
5152 | case X86::VFMULCPHZrrkz: |
5153 | case X86::VFCMULCSHZrm: |
5154 | case X86::VFCMULCSHZrmkz: |
5155 | case X86::VFCMULCSHZrr: |
5156 | case X86::VFCMULCSHZrrb: |
5157 | case X86::VFCMULCSHZrrbkz: |
5158 | case X86::VFCMULCSHZrrkz: |
5159 | case X86::VFMULCSHZrm: |
5160 | case X86::VFMULCSHZrmkz: |
5161 | case X86::VFMULCSHZrr: |
5162 | case X86::VFMULCSHZrrb: |
5163 | case X86::VFMULCSHZrrbkz: |
5164 | case X86::VFMULCSHZrrkz: |
5165 | return Subtarget.hasMULCFalseDeps(); |
5166 | case X86::VPERMDYrm: |
5167 | case X86::VPERMDYrr: |
5168 | case X86::VPERMQYmi: |
5169 | case X86::VPERMQYri: |
5170 | case X86::VPERMPSYrm: |
5171 | case X86::VPERMPSYrr: |
5172 | case X86::VPERMPDYmi: |
5173 | case X86::VPERMPDYri: |
5174 | case X86::VPERMDZ256rm: |
5175 | case X86::VPERMDZ256rmb: |
5176 | case X86::VPERMDZ256rmbkz: |
5177 | case X86::VPERMDZ256rmkz: |
5178 | case X86::VPERMDZ256rr: |
5179 | case X86::VPERMDZ256rrkz: |
5180 | case X86::VPERMDZrm: |
5181 | case X86::VPERMDZrmb: |
5182 | case X86::VPERMDZrmbkz: |
5183 | case X86::VPERMDZrmkz: |
5184 | case X86::VPERMDZrr: |
5185 | case X86::VPERMDZrrkz: |
5186 | case X86::VPERMQZ256mbi: |
5187 | case X86::VPERMQZ256mbikz: |
5188 | case X86::VPERMQZ256mi: |
5189 | case X86::VPERMQZ256mikz: |
5190 | case X86::VPERMQZ256ri: |
5191 | case X86::VPERMQZ256rikz: |
5192 | case X86::VPERMQZ256rm: |
5193 | case X86::VPERMQZ256rmb: |
5194 | case X86::VPERMQZ256rmbkz: |
5195 | case X86::VPERMQZ256rmkz: |
5196 | case X86::VPERMQZ256rr: |
5197 | case X86::VPERMQZ256rrkz: |
5198 | case X86::VPERMQZmbi: |
5199 | case X86::VPERMQZmbikz: |
5200 | case X86::VPERMQZmi: |
5201 | case X86::VPERMQZmikz: |
5202 | case X86::VPERMQZri: |
5203 | case X86::VPERMQZrikz: |
5204 | case X86::VPERMQZrm: |
5205 | case X86::VPERMQZrmb: |
5206 | case X86::VPERMQZrmbkz: |
5207 | case X86::VPERMQZrmkz: |
5208 | case X86::VPERMQZrr: |
5209 | case X86::VPERMQZrrkz: |
5210 | case X86::VPERMPSZ256rm: |
5211 | case X86::VPERMPSZ256rmb: |
5212 | case X86::VPERMPSZ256rmbkz: |
5213 | case X86::VPERMPSZ256rmkz: |
5214 | case X86::VPERMPSZ256rr: |
5215 | case X86::VPERMPSZ256rrkz: |
5216 | case X86::VPERMPSZrm: |
5217 | case X86::VPERMPSZrmb: |
5218 | case X86::VPERMPSZrmbkz: |
5219 | case X86::VPERMPSZrmkz: |
5220 | case X86::VPERMPSZrr: |
5221 | case X86::VPERMPSZrrkz: |
5222 | case X86::VPERMPDZ256mbi: |
5223 | case X86::VPERMPDZ256mbikz: |
5224 | case X86::VPERMPDZ256mi: |
5225 | case X86::VPERMPDZ256mikz: |
5226 | case X86::VPERMPDZ256ri: |
5227 | case X86::VPERMPDZ256rikz: |
5228 | case X86::VPERMPDZ256rm: |
5229 | case X86::VPERMPDZ256rmb: |
5230 | case X86::VPERMPDZ256rmbkz: |
5231 | case X86::VPERMPDZ256rmkz: |
5232 | case X86::VPERMPDZ256rr: |
5233 | case X86::VPERMPDZ256rrkz: |
5234 | case X86::VPERMPDZmbi: |
5235 | case X86::VPERMPDZmbikz: |
5236 | case X86::VPERMPDZmi: |
5237 | case X86::VPERMPDZmikz: |
5238 | case X86::VPERMPDZri: |
5239 | case X86::VPERMPDZrikz: |
5240 | case X86::VPERMPDZrm: |
5241 | case X86::VPERMPDZrmb: |
5242 | case X86::VPERMPDZrmbkz: |
5243 | case X86::VPERMPDZrmkz: |
5244 | case X86::VPERMPDZrr: |
5245 | case X86::VPERMPDZrrkz: |
5246 | return Subtarget.hasPERMFalseDeps(); |
5247 | case X86::VRANGEPDZ128rmbi: |
5248 | case X86::VRANGEPDZ128rmbikz: |
5249 | case X86::VRANGEPDZ128rmi: |
5250 | case X86::VRANGEPDZ128rmikz: |
5251 | case X86::VRANGEPDZ128rri: |
5252 | case X86::VRANGEPDZ128rrikz: |
5253 | case X86::VRANGEPDZ256rmbi: |
5254 | case X86::VRANGEPDZ256rmbikz: |
5255 | case X86::VRANGEPDZ256rmi: |
5256 | case X86::VRANGEPDZ256rmikz: |
5257 | case X86::VRANGEPDZ256rri: |
5258 | case X86::VRANGEPDZ256rrikz: |
5259 | case X86::VRANGEPDZrmbi: |
5260 | case X86::VRANGEPDZrmbikz: |
5261 | case X86::VRANGEPDZrmi: |
5262 | case X86::VRANGEPDZrmikz: |
5263 | case X86::VRANGEPDZrri: |
5264 | case X86::VRANGEPDZrrib: |
5265 | case X86::VRANGEPDZrribkz: |
5266 | case X86::VRANGEPDZrrikz: |
5267 | case X86::VRANGEPSZ128rmbi: |
5268 | case X86::VRANGEPSZ128rmbikz: |
5269 | case X86::VRANGEPSZ128rmi: |
5270 | case X86::VRANGEPSZ128rmikz: |
5271 | case X86::VRANGEPSZ128rri: |
5272 | case X86::VRANGEPSZ128rrikz: |
5273 | case X86::VRANGEPSZ256rmbi: |
5274 | case X86::VRANGEPSZ256rmbikz: |
5275 | case X86::VRANGEPSZ256rmi: |
5276 | case X86::VRANGEPSZ256rmikz: |
5277 | case X86::VRANGEPSZ256rri: |
5278 | case X86::VRANGEPSZ256rrikz: |
5279 | case X86::VRANGEPSZrmbi: |
5280 | case X86::VRANGEPSZrmbikz: |
5281 | case X86::VRANGEPSZrmi: |
5282 | case X86::VRANGEPSZrmikz: |
5283 | case X86::VRANGEPSZrri: |
5284 | case X86::VRANGEPSZrrib: |
5285 | case X86::VRANGEPSZrribkz: |
5286 | case X86::VRANGEPSZrrikz: |
5287 | case X86::VRANGESDZrmi: |
5288 | case X86::VRANGESDZrmikz: |
5289 | case X86::VRANGESDZrri: |
5290 | case X86::VRANGESDZrrib: |
5291 | case X86::VRANGESDZrribkz: |
5292 | case X86::VRANGESDZrrikz: |
5293 | case X86::VRANGESSZrmi: |
5294 | case X86::VRANGESSZrmikz: |
5295 | case X86::VRANGESSZrri: |
5296 | case X86::VRANGESSZrrib: |
5297 | case X86::VRANGESSZrribkz: |
5298 | case X86::VRANGESSZrrikz: |
5299 | return Subtarget.hasRANGEFalseDeps(); |
5300 | case X86::VGETMANTSSZrmi: |
5301 | case X86::VGETMANTSSZrmikz: |
5302 | case X86::VGETMANTSSZrri: |
5303 | case X86::VGETMANTSSZrrib: |
5304 | case X86::VGETMANTSSZrribkz: |
5305 | case X86::VGETMANTSSZrrikz: |
5306 | case X86::VGETMANTSDZrmi: |
5307 | case X86::VGETMANTSDZrmikz: |
5308 | case X86::VGETMANTSDZrri: |
5309 | case X86::VGETMANTSDZrrib: |
5310 | case X86::VGETMANTSDZrribkz: |
5311 | case X86::VGETMANTSDZrrikz: |
5312 | case X86::VGETMANTSHZrmi: |
5313 | case X86::VGETMANTSHZrmikz: |
5314 | case X86::VGETMANTSHZrri: |
5315 | case X86::VGETMANTSHZrrib: |
5316 | case X86::VGETMANTSHZrribkz: |
5317 | case X86::VGETMANTSHZrrikz: |
5318 | case X86::VGETMANTPSZ128rmbi: |
5319 | case X86::VGETMANTPSZ128rmbikz: |
5320 | case X86::VGETMANTPSZ128rmi: |
5321 | case X86::VGETMANTPSZ128rmikz: |
5322 | case X86::VGETMANTPSZ256rmbi: |
5323 | case X86::VGETMANTPSZ256rmbikz: |
5324 | case X86::VGETMANTPSZ256rmi: |
5325 | case X86::VGETMANTPSZ256rmikz: |
5326 | case X86::VGETMANTPSZrmbi: |
5327 | case X86::VGETMANTPSZrmbikz: |
5328 | case X86::VGETMANTPSZrmi: |
5329 | case X86::VGETMANTPSZrmikz: |
5330 | case X86::VGETMANTPDZ128rmbi: |
5331 | case X86::VGETMANTPDZ128rmbikz: |
5332 | case X86::VGETMANTPDZ128rmi: |
5333 | case X86::VGETMANTPDZ128rmikz: |
5334 | case X86::VGETMANTPDZ256rmbi: |
5335 | case X86::VGETMANTPDZ256rmbikz: |
5336 | case X86::VGETMANTPDZ256rmi: |
5337 | case X86::VGETMANTPDZ256rmikz: |
5338 | case X86::VGETMANTPDZrmbi: |
5339 | case X86::VGETMANTPDZrmbikz: |
5340 | case X86::VGETMANTPDZrmi: |
5341 | case X86::VGETMANTPDZrmikz: |
5342 | return Subtarget.hasGETMANTFalseDeps(); |
5343 | case X86::VPMULLQZ128rm: |
5344 | case X86::VPMULLQZ128rmb: |
5345 | case X86::VPMULLQZ128rmbkz: |
5346 | case X86::VPMULLQZ128rmkz: |
5347 | case X86::VPMULLQZ128rr: |
5348 | case X86::VPMULLQZ128rrkz: |
5349 | case X86::VPMULLQZ256rm: |
5350 | case X86::VPMULLQZ256rmb: |
5351 | case X86::VPMULLQZ256rmbkz: |
5352 | case X86::VPMULLQZ256rmkz: |
5353 | case X86::VPMULLQZ256rr: |
5354 | case X86::VPMULLQZ256rrkz: |
5355 | case X86::VPMULLQZrm: |
5356 | case X86::VPMULLQZrmb: |
5357 | case X86::VPMULLQZrmbkz: |
5358 | case X86::VPMULLQZrmkz: |
5359 | case X86::VPMULLQZrr: |
5360 | case X86::VPMULLQZrrkz: |
5361 | return Subtarget.hasMULLQFalseDeps(); |
5362 | // GPR |
5363 | case X86::POPCNT32rm: |
5364 | case X86::POPCNT32rr: |
5365 | case X86::POPCNT64rm: |
5366 | case X86::POPCNT64rr: |
5367 | return Subtarget.hasPOPCNTFalseDeps(); |
5368 | case X86::LZCNT32rm: |
5369 | case X86::LZCNT32rr: |
5370 | case X86::LZCNT64rm: |
5371 | case X86::LZCNT64rr: |
5372 | case X86::TZCNT32rm: |
5373 | case X86::TZCNT32rr: |
5374 | case X86::TZCNT64rm: |
5375 | case X86::TZCNT64rr: |
5376 | return Subtarget.hasLZCNTFalseDeps(); |
5377 | } |
5378 | |
5379 | return false; |
5380 | } |
5381 | |
5382 | /// Inform the BreakFalseDeps pass how many idle |
5383 | /// instructions we would like before a partial register update. |
5384 | unsigned X86InstrInfo::getPartialRegUpdateClearance( |
5385 | const MachineInstr &MI, unsigned OpNum, |
5386 | const TargetRegisterInfo *TRI) const { |
5387 | if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget)) |
5388 | return 0; |
5389 | |
5390 | // If MI is marked as reading Reg, the partial register update is wanted. |
5391 | const MachineOperand &MO = MI.getOperand(0); |
5392 | Register Reg = MO.getReg(); |
5393 | if (Reg.isVirtual()) { |
5394 | if (MO.readsReg() || MI.readsVirtualRegister(Reg)) |
5395 | return 0; |
5396 | } else { |
5397 | if (MI.readsRegister(Reg, TRI)) |
5398 | return 0; |
5399 | } |
5400 | |
5401 | // If any instructions in the clearance range are reading Reg, insert a |
5402 | // dependency breaking instruction, which is inexpensive and is likely to |
5403 | // be hidden in other instructions' cycles.
5404 | return PartialRegUpdateClearance; |
5405 | } |
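     | // A sketch (hypothetical registers) of the transformation this enables: if
     | // nothing within the clearance window wrote %xmm0, BreakFalseDeps inserts a
     | // dependency-breaking idiom before the partial writer:
     | //   %xmm0 = XORPSrr undef %xmm0, undef %xmm0
     | //   %xmm0 = CVTSI2SSrr %eax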
5406 | |
5407 | // Return true for any instruction that copies the high bits of the first source
5408 | // operand into the unused high bits of the destination operand. |
5409 | // Also returns true for instructions that have two inputs where one may |
5410 | // be undef and we want it to use the same register as the other input. |
5411 | static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum, |
5412 | bool ForLoadFold = false) { |
5413 | // Set the OpNum parameter to the first source operand. |
5414 | switch (Opcode) { |
5415 | case X86::MMX_PUNPCKHBWrr: |
5416 | case X86::MMX_PUNPCKHWDrr: |
5417 | case X86::MMX_PUNPCKHDQrr: |
5418 | case X86::MMX_PUNPCKLBWrr: |
5419 | case X86::MMX_PUNPCKLWDrr: |
5420 | case X86::MMX_PUNPCKLDQrr: |
5421 | case X86::MOVHLPSrr: |
5422 | case X86::PACKSSWBrr: |
5423 | case X86::PACKUSWBrr: |
5424 | case X86::PACKSSDWrr: |
5425 | case X86::PACKUSDWrr: |
5426 | case X86::PUNPCKHBWrr: |
5427 | case X86::PUNPCKLBWrr: |
5428 | case X86::PUNPCKHWDrr: |
5429 | case X86::PUNPCKLWDrr: |
5430 | case X86::PUNPCKHDQrr: |
5431 | case X86::PUNPCKLDQrr: |
5432 | case X86::PUNPCKHQDQrr: |
5433 | case X86::PUNPCKLQDQrr: |
5434 | case X86::SHUFPDrri: |
5435 | case X86::SHUFPSrri: |
5436 | // These instructions are sometimes used with an undef first or second |
5437 | // source. Return true here so BreakFalseDeps will assign this source to the |
5438 | // same register as the first source to avoid a false dependency. |
5439 | // Operand 1 of these instructions is tied so they're separate from their |
5440 | // VEX counterparts. |
5441 | return OpNum == 2 && !ForLoadFold; |
5442 | |
5443 | case X86::VMOVLHPSrr: |
5444 | case X86::VMOVLHPSZrr: |
5445 | case X86::VPACKSSWBrr: |
5446 | case X86::VPACKUSWBrr: |
5447 | case X86::VPACKSSDWrr: |
5448 | case X86::VPACKUSDWrr: |
5449 | case X86::VPACKSSWBZ128rr: |
5450 | case X86::VPACKUSWBZ128rr: |
5451 | case X86::VPACKSSDWZ128rr: |
5452 | case X86::VPACKUSDWZ128rr: |
5453 | case X86::VPERM2F128rr: |
5454 | case X86::VPERM2I128rr: |
5455 | case X86::VSHUFF32X4Z256rri: |
5456 | case X86::VSHUFF32X4Zrri: |
5457 | case X86::VSHUFF64X2Z256rri: |
5458 | case X86::VSHUFF64X2Zrri: |
5459 | case X86::VSHUFI32X4Z256rri: |
5460 | case X86::VSHUFI32X4Zrri: |
5461 | case X86::VSHUFI64X2Z256rri: |
5462 | case X86::VSHUFI64X2Zrri: |
5463 | case X86::VPUNPCKHBWrr: |
5464 | case X86::VPUNPCKLBWrr: |
5465 | case X86::VPUNPCKHBWYrr: |
5466 | case X86::VPUNPCKLBWYrr: |
5467 | case X86::VPUNPCKHBWZ128rr: |
5468 | case X86::VPUNPCKLBWZ128rr: |
5469 | case X86::VPUNPCKHBWZ256rr: |
5470 | case X86::VPUNPCKLBWZ256rr: |
5471 | case X86::VPUNPCKHBWZrr: |
5472 | case X86::VPUNPCKLBWZrr: |
5473 | case X86::VPUNPCKHWDrr: |
5474 | case X86::VPUNPCKLWDrr: |
5475 | case X86::VPUNPCKHWDYrr: |
5476 | case X86::VPUNPCKLWDYrr: |
5477 | case X86::VPUNPCKHWDZ128rr: |
5478 | case X86::VPUNPCKLWDZ128rr: |
5479 | case X86::VPUNPCKHWDZ256rr: |
5480 | case X86::VPUNPCKLWDZ256rr: |
5481 | case X86::VPUNPCKHWDZrr: |
5482 | case X86::VPUNPCKLWDZrr: |
5483 | case X86::VPUNPCKHDQrr: |
5484 | case X86::VPUNPCKLDQrr: |
5485 | case X86::VPUNPCKHDQYrr: |
5486 | case X86::VPUNPCKLDQYrr: |
5487 | case X86::VPUNPCKHDQZ128rr: |
5488 | case X86::VPUNPCKLDQZ128rr: |
5489 | case X86::VPUNPCKHDQZ256rr: |
5490 | case X86::VPUNPCKLDQZ256rr: |
5491 | case X86::VPUNPCKHDQZrr: |
5492 | case X86::VPUNPCKLDQZrr: |
5493 | case X86::VPUNPCKHQDQrr: |
5494 | case X86::VPUNPCKLQDQrr: |
5495 | case X86::VPUNPCKHQDQYrr: |
5496 | case X86::VPUNPCKLQDQYrr: |
5497 | case X86::VPUNPCKHQDQZ128rr: |
5498 | case X86::VPUNPCKLQDQZ128rr: |
5499 | case X86::VPUNPCKHQDQZ256rr: |
5500 | case X86::VPUNPCKLQDQZ256rr: |
5501 | case X86::VPUNPCKHQDQZrr: |
5502 | case X86::VPUNPCKLQDQZrr: |
5503 | // These instructions are sometimes used with an undef first or second |
5504 | // source. Return true here so BreakFalseDeps will assign this source to the |
5505 | // same register as the first source to avoid a false dependency. |
5506 | return (OpNum == 1 || OpNum == 2) && !ForLoadFold; |
5507 | |
5508 | case X86::VCVTSI2SSrr: |
5509 | case X86::VCVTSI2SSrm: |
5510 | case X86::VCVTSI2SSrr_Int: |
5511 | case X86::VCVTSI2SSrm_Int: |
5512 | case X86::VCVTSI642SSrr: |
5513 | case X86::VCVTSI642SSrm: |
5514 | case X86::VCVTSI642SSrr_Int: |
5515 | case X86::VCVTSI642SSrm_Int: |
5516 | case X86::VCVTSI2SDrr: |
5517 | case X86::VCVTSI2SDrm: |
5518 | case X86::VCVTSI2SDrr_Int: |
5519 | case X86::VCVTSI2SDrm_Int: |
5520 | case X86::VCVTSI642SDrr: |
5521 | case X86::VCVTSI642SDrm: |
5522 | case X86::VCVTSI642SDrr_Int: |
5523 | case X86::VCVTSI642SDrm_Int: |
5524 | // AVX-512 |
5525 | case X86::VCVTSI2SSZrr: |
5526 | case X86::VCVTSI2SSZrm: |
5527 | case X86::VCVTSI2SSZrr_Int: |
5528 | case X86::VCVTSI2SSZrrb_Int: |
5529 | case X86::VCVTSI2SSZrm_Int: |
5530 | case X86::VCVTSI642SSZrr: |
5531 | case X86::VCVTSI642SSZrm: |
5532 | case X86::VCVTSI642SSZrr_Int: |
5533 | case X86::VCVTSI642SSZrrb_Int: |
5534 | case X86::VCVTSI642SSZrm_Int: |
5535 | case X86::VCVTSI2SDZrr: |
5536 | case X86::VCVTSI2SDZrm: |
5537 | case X86::VCVTSI2SDZrr_Int: |
5538 | case X86::VCVTSI2SDZrm_Int: |
5539 | case X86::VCVTSI642SDZrr: |
5540 | case X86::VCVTSI642SDZrm: |
5541 | case X86::VCVTSI642SDZrr_Int: |
5542 | case X86::VCVTSI642SDZrrb_Int: |
5543 | case X86::VCVTSI642SDZrm_Int: |
5544 | case X86::VCVTUSI2SSZrr: |
5545 | case X86::VCVTUSI2SSZrm: |
5546 | case X86::VCVTUSI2SSZrr_Int: |
5547 | case X86::VCVTUSI2SSZrrb_Int: |
5548 | case X86::VCVTUSI2SSZrm_Int: |
5549 | case X86::VCVTUSI642SSZrr: |
5550 | case X86::VCVTUSI642SSZrm: |
5551 | case X86::VCVTUSI642SSZrr_Int: |
5552 | case X86::VCVTUSI642SSZrrb_Int: |
5553 | case X86::VCVTUSI642SSZrm_Int: |
5554 | case X86::VCVTUSI2SDZrr: |
5555 | case X86::VCVTUSI2SDZrm: |
5556 | case X86::VCVTUSI2SDZrr_Int: |
5557 | case X86::VCVTUSI2SDZrm_Int: |
5558 | case X86::VCVTUSI642SDZrr: |
5559 | case X86::VCVTUSI642SDZrm: |
5560 | case X86::VCVTUSI642SDZrr_Int: |
5561 | case X86::VCVTUSI642SDZrrb_Int: |
5562 | case X86::VCVTUSI642SDZrm_Int: |
5563 | case X86::VCVTSI2SHZrr: |
5564 | case X86::VCVTSI2SHZrm: |
5565 | case X86::VCVTSI2SHZrr_Int: |
5566 | case X86::VCVTSI2SHZrrb_Int: |
5567 | case X86::VCVTSI2SHZrm_Int: |
5568 | case X86::VCVTSI642SHZrr: |
5569 | case X86::VCVTSI642SHZrm: |
5570 | case X86::VCVTSI642SHZrr_Int: |
5571 | case X86::VCVTSI642SHZrrb_Int: |
5572 | case X86::VCVTSI642SHZrm_Int: |
5573 | case X86::VCVTUSI2SHZrr: |
5574 | case X86::VCVTUSI2SHZrm: |
5575 | case X86::VCVTUSI2SHZrr_Int: |
5576 | case X86::VCVTUSI2SHZrrb_Int: |
5577 | case X86::VCVTUSI2SHZrm_Int: |
5578 | case X86::VCVTUSI642SHZrr: |
5579 | case X86::VCVTUSI642SHZrm: |
5580 | case X86::VCVTUSI642SHZrr_Int: |
5581 | case X86::VCVTUSI642SHZrrb_Int: |
5582 | case X86::VCVTUSI642SHZrm_Int: |
5583 | // Load folding won't affect the undef register update since the input is
5584 | // a GPR. |
5585 | return OpNum == 1 && !ForLoadFold; |
5586 | case X86::VCVTSD2SSrr: |
5587 | case X86::VCVTSD2SSrm: |
5588 | case X86::VCVTSD2SSrr_Int: |
5589 | case X86::VCVTSD2SSrm_Int: |
5590 | case X86::VCVTSS2SDrr: |
5591 | case X86::VCVTSS2SDrm: |
5592 | case X86::VCVTSS2SDrr_Int: |
5593 | case X86::VCVTSS2SDrm_Int: |
5594 | case X86::VRCPSSr: |
5595 | case X86::VRCPSSr_Int: |
5596 | case X86::VRCPSSm: |
5597 | case X86::VRCPSSm_Int: |
5598 | case X86::VROUNDSDr: |
5599 | case X86::VROUNDSDm: |
5600 | case X86::VROUNDSDr_Int: |
5601 | case X86::VROUNDSDm_Int: |
5602 | case X86::VROUNDSSr: |
5603 | case X86::VROUNDSSm: |
5604 | case X86::VROUNDSSr_Int: |
5605 | case X86::VROUNDSSm_Int: |
5606 | case X86::VRSQRTSSr: |
5607 | case X86::VRSQRTSSr_Int: |
5608 | case X86::VRSQRTSSm: |
5609 | case X86::VRSQRTSSm_Int: |
5610 | case X86::VSQRTSSr: |
5611 | case X86::VSQRTSSr_Int: |
5612 | case X86::VSQRTSSm: |
5613 | case X86::VSQRTSSm_Int: |
5614 | case X86::VSQRTSDr: |
5615 | case X86::VSQRTSDr_Int: |
5616 | case X86::VSQRTSDm: |
5617 | case X86::VSQRTSDm_Int: |
5618 | // AVX-512 |
5619 | case X86::VCVTSD2SSZrr: |
5620 | case X86::VCVTSD2SSZrr_Int: |
5621 | case X86::VCVTSD2SSZrrb_Int: |
5622 | case X86::VCVTSD2SSZrm: |
5623 | case X86::VCVTSD2SSZrm_Int: |
5624 | case X86::VCVTSS2SDZrr: |
5625 | case X86::VCVTSS2SDZrr_Int: |
5626 | case X86::VCVTSS2SDZrrb_Int: |
5627 | case X86::VCVTSS2SDZrm: |
5628 | case X86::VCVTSS2SDZrm_Int: |
5629 | case X86::VGETEXPSDZr: |
5630 | case X86::VGETEXPSDZrb: |
5631 | case X86::VGETEXPSDZm: |
5632 | case X86::VGETEXPSSZr: |
5633 | case X86::VGETEXPSSZrb: |
5634 | case X86::VGETEXPSSZm: |
5635 | case X86::VGETMANTSDZrri: |
5636 | case X86::VGETMANTSDZrrib: |
5637 | case X86::VGETMANTSDZrmi: |
5638 | case X86::VGETMANTSSZrri: |
5639 | case X86::VGETMANTSSZrrib: |
5640 | case X86::VGETMANTSSZrmi: |
5641 | case X86::VRNDSCALESDZr: |
5642 | case X86::VRNDSCALESDZr_Int: |
5643 | case X86::VRNDSCALESDZrb_Int: |
5644 | case X86::VRNDSCALESDZm: |
5645 | case X86::VRNDSCALESDZm_Int: |
5646 | case X86::VRNDSCALESSZr: |
5647 | case X86::VRNDSCALESSZr_Int: |
5648 | case X86::VRNDSCALESSZrb_Int: |
5649 | case X86::VRNDSCALESSZm: |
5650 | case X86::VRNDSCALESSZm_Int: |
5651 | case X86::VRCP14SDZrr: |
5652 | case X86::VRCP14SDZrm: |
5653 | case X86::VRCP14SSZrr: |
5654 | case X86::VRCP14SSZrm: |
5655 | case X86::VRCPSHZrr: |
5656 | case X86::VRCPSHZrm: |
5657 | case X86::VRSQRTSHZrr: |
5658 | case X86::VRSQRTSHZrm: |
5659 | case X86::VREDUCESHZrmi: |
5660 | case X86::VREDUCESHZrri: |
5661 | case X86::VREDUCESHZrrib: |
5662 | case X86::VGETEXPSHZr: |
5663 | case X86::VGETEXPSHZrb: |
5664 | case X86::VGETEXPSHZm: |
5665 | case X86::VGETMANTSHZrri: |
5666 | case X86::VGETMANTSHZrrib: |
5667 | case X86::VGETMANTSHZrmi: |
5668 | case X86::VRNDSCALESHZr: |
5669 | case X86::VRNDSCALESHZr_Int: |
5670 | case X86::VRNDSCALESHZrb_Int: |
5671 | case X86::VRNDSCALESHZm: |
5672 | case X86::VRNDSCALESHZm_Int: |
5673 | case X86::VSQRTSHZr: |
5674 | case X86::VSQRTSHZr_Int: |
5675 | case X86::VSQRTSHZrb_Int: |
5676 | case X86::VSQRTSHZm: |
5677 | case X86::VSQRTSHZm_Int: |
5678 | case X86::VRCP28SDZr: |
5679 | case X86::VRCP28SDZrb: |
5680 | case X86::VRCP28SDZm: |
5681 | case X86::VRCP28SSZr: |
5682 | case X86::VRCP28SSZrb: |
5683 | case X86::VRCP28SSZm: |
5684 | case X86::VREDUCESSZrmi: |
5685 | case X86::VREDUCESSZrri: |
5686 | case X86::VREDUCESSZrrib: |
5687 | case X86::VRSQRT14SDZrr: |
5688 | case X86::VRSQRT14SDZrm: |
5689 | case X86::VRSQRT14SSZrr: |
5690 | case X86::VRSQRT14SSZrm: |
5691 | case X86::VRSQRT28SDZr: |
5692 | case X86::VRSQRT28SDZrb: |
5693 | case X86::VRSQRT28SDZm: |
5694 | case X86::VRSQRT28SSZr: |
5695 | case X86::VRSQRT28SSZrb: |
5696 | case X86::VRSQRT28SSZm: |
5697 | case X86::VSQRTSSZr: |
5698 | case X86::VSQRTSSZr_Int: |
5699 | case X86::VSQRTSSZrb_Int: |
5700 | case X86::VSQRTSSZm: |
5701 | case X86::VSQRTSSZm_Int: |
5702 | case X86::VSQRTSDZr: |
5703 | case X86::VSQRTSDZr_Int: |
5704 | case X86::VSQRTSDZrb_Int: |
5705 | case X86::VSQRTSDZm: |
5706 | case X86::VSQRTSDZm_Int: |
5707 | case X86::VCVTSD2SHZrr: |
5708 | case X86::VCVTSD2SHZrr_Int: |
5709 | case X86::VCVTSD2SHZrrb_Int: |
5710 | case X86::VCVTSD2SHZrm: |
5711 | case X86::VCVTSD2SHZrm_Int: |
5712 | case X86::VCVTSS2SHZrr: |
5713 | case X86::VCVTSS2SHZrr_Int: |
5714 | case X86::VCVTSS2SHZrrb_Int: |
5715 | case X86::VCVTSS2SHZrm: |
5716 | case X86::VCVTSS2SHZrm_Int: |
5717 | case X86::VCVTSH2SDZrr: |
5718 | case X86::VCVTSH2SDZrr_Int: |
5719 | case X86::VCVTSH2SDZrrb_Int: |
5720 | case X86::VCVTSH2SDZrm: |
5721 | case X86::VCVTSH2SDZrm_Int: |
5722 | case X86::VCVTSH2SSZrr: |
5723 | case X86::VCVTSH2SSZrr_Int: |
5724 | case X86::VCVTSH2SSZrrb_Int: |
5725 | case X86::VCVTSH2SSZrm: |
5726 | case X86::VCVTSH2SSZrm_Int: |
5727 | return OpNum == 1; |
5728 | case X86::VMOVSSZrrk: |
5729 | case X86::VMOVSDZrrk: |
5730 | return OpNum == 3 && !ForLoadFold; |
5731 | case X86::VMOVSSZrrkz: |
5732 | case X86::VMOVSDZrrkz: |
5733 | return OpNum == 2 && !ForLoadFold; |
5734 | } |
5735 | |
5736 | return false; |
5737 | } |
5738 | |
5739 | /// Inform the BreakFalseDeps pass how many idle instructions we would like |
5740 | /// before certain undef register reads. |
5741 | /// |
5742 | /// This catches the VCVTSI2SD family of instructions: |
5743 | /// |
5744 | /// vcvtsi2sdq %rax, undef %xmm0, %xmm14 |
5745 | /// |
5746 | /// We should be careful *not* to catch VXOR idioms, which are presumably
5747 | /// handled specially in the pipeline: |
5748 | /// |
5749 | /// vxorps undef %xmm1, undef %xmm1, %xmm1 |
5750 | /// |
5751 | /// Like getPartialRegUpdateClearance, this makes a strong assumption that the |
5752 | /// high bits that are passed-through are not live. |
5753 | unsigned |
5754 | X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, |
5755 | const TargetRegisterInfo *TRI) const { |
5756 | const MachineOperand &MO = MI.getOperand(OpNum); |
5757 | if (Register::isPhysicalRegister(MO.getReg()) && |
5758 | hasUndefRegUpdate(MI.getOpcode(), OpNum)) |
5759 | return UndefRegClearance; |
5760 | |
5761 | return 0; |
5762 | } |
5763 | |
5764 | void X86InstrInfo::breakPartialRegDependency( |
5765 | MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const { |
5766 | Register Reg = MI.getOperand(OpNum).getReg(); |
5767 | // If MI kills this register, the false dependence is already broken. |
5768 | if (MI.killsRegister(Reg, TRI)) |
5769 | return; |
5770 | |
5771 | if (X86::VR128RegClass.contains(Reg)) { |
5772 | // These instructions are all floating point domain, so xorps is the best |
5773 | // choice. |
5774 | unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr; |
5775 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg) |
5776 | .addReg(Reg, RegState::Undef) |
5777 | .addReg(Reg, RegState::Undef); |
5778 | MI.addRegisterKilled(Reg, TRI, true); |
5779 | } else if (X86::VR256RegClass.contains(Reg)) { |
5780 | // Use vxorps to clear the full ymm register. |
5781 | // It wants to read and write the xmm sub-register. |
5782 | Register XReg = TRI->getSubReg(Reg, X86::sub_xmm); |
5783 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg) |
5784 | .addReg(XReg, RegState::Undef) |
5785 | .addReg(XReg, RegState::Undef) |
5786 | .addReg(Reg, RegState::ImplicitDefine); |
5787 | MI.addRegisterKilled(Reg, TRI, true); |
5788 | } else if (X86::VR128XRegClass.contains(Reg)) { |
5789 | // Only handle VLX targets. |
5790 | if (!Subtarget.hasVLX()) |
5791 | return; |
5792 | // Since vxorps requires AVX512DQ, vpxord should be the best choice. |
5793 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VPXORDZ128rr), Reg) |
5794 | .addReg(Reg, RegState::Undef) |
5795 | .addReg(Reg, RegState::Undef); |
5796 | MI.addRegisterKilled(Reg, TRI, true); |
5797 | } else if (X86::VR256XRegClass.contains(Reg) || |
5798 | X86::VR512RegClass.contains(Reg)) { |
5799 | // Only handle VLX targets. |
5800 | if (!Subtarget.hasVLX()) |
5801 | return; |
5802 | // Use vpxord to clear the full ymm/zmm register. |
5803 | // It wants to read and write the xmm sub-register. |
5804 | Register XReg = TRI->getSubReg(Reg, X86::sub_xmm); |
5805 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VPXORDZ128rr), XReg) |
5806 | .addReg(XReg, RegState::Undef) |
5807 | .addReg(XReg, RegState::Undef) |
5808 | .addReg(Reg, RegState::ImplicitDefine); |
5809 | MI.addRegisterKilled(Reg, TRI, true); |
5810 | } else if (X86::GR64RegClass.contains(Reg)) { |
5811 | // Using XOR32rr because it has a shorter encoding and zeros the upper
5812 | // 32 bits as well.
5813 | Register XReg = TRI->getSubReg(Reg, X86::sub_32bit); |
5814 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg) |
5815 | .addReg(XReg, RegState::Undef) |
5816 | .addReg(XReg, RegState::Undef) |
5817 | .addReg(Reg, RegState::ImplicitDefine); |
5818 | MI.addRegisterKilled(Reg, TRI, true); |
5819 | } else if (X86::GR32RegClass.contains(Reg)) { |
5820 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg) |
5821 | .addReg(Reg, RegState::Undef) |
5822 | .addReg(Reg, RegState::Undef); |
5823 | MI.addRegisterKilled(Reg, TRI, true); |
5824 | } |
5825 | } |
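     | // e.g. (sketch): for a GR64 register, the branch above emits
     | //   %eax = XOR32rr undef %eax, undef %eax, implicit-def %rax
     | // relying on the implicit zero-extension of 32-bit writes to clear all of
     | // %rax with the shorter encoding.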
5826 | |
5827 | static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs, |
5828 | int PtrOffset = 0) { |
5829 | unsigned NumAddrOps = MOs.size(); |
5830 | |
5831 | if (NumAddrOps < 4) { |
5832 | // FrameIndex only - add an immediate offset (whether it's zero or not).
5833 | for (unsigned i = 0; i != NumAddrOps; ++i) |
5834 | MIB.add(MOs[i]); |
5835 | addOffset(MIB, PtrOffset); |
5836 | } else { |
5837 | // General Memory Addressing - we need to add any offset to an existing |
5838 | // offset. |
5839 | assert(MOs.size() == 5 && "Unexpected memory operand list length");
5840 | for (unsigned i = 0; i != NumAddrOps; ++i) { |
5841 | const MachineOperand &MO = MOs[i]; |
5842 | if (i == 3 && PtrOffset != 0) { |
5843 | MIB.addDisp(MO, PtrOffset); |
5844 | } else { |
5845 | MIB.add(MO); |
5846 | } |
5847 | } |
5848 | } |
5849 | } |
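     | // Context for the branches above (a summary, not new behavior): a short MOs
     | // list is a bare FrameIndex that needs an explicit offset appended, while a
     | // full x86 memory reference is the five-operand sequence {base, scale,
     | // index, disp, segment}; i == 3 is the displacement slot that absorbs
     | // PtrOffset.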
5850 | |
5851 | static void updateOperandRegConstraints(MachineFunction &MF, |
5852 | MachineInstr &NewMI, |
5853 | const TargetInstrInfo &TII) { |
5854 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
5855 | const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); |
5856 | |
5857 | for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) { |
5858 | MachineOperand &MO = NewMI.getOperand(Idx); |
5859 | // We only need to update constraints on virtual register operands. |
5860 | if (!MO.isReg()) |
5861 | continue; |
5862 | Register Reg = MO.getReg(); |
5863 | if (!Reg.isVirtual()) |
5864 | continue; |
5865 | |
5866 | auto *NewRC = MRI.constrainRegClass( |
5867 | Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF)); |
5868 | if (!NewRC) { |
5869 | LLVM_DEBUG(
5870 |     dbgs() << "WARNING: Unable to update register constraint for operand "
5871 |            << Idx << " of instruction:\n";
5872 |     NewMI.dump(); dbgs() << "\n");
5873 | } |
5874 | } |
5875 | } |
5876 | |
5877 | static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, |
5878 | ArrayRef<MachineOperand> MOs, |
5879 | MachineBasicBlock::iterator InsertPt, |
5880 | MachineInstr &MI, |
5881 | const TargetInstrInfo &TII) { |
5882 | // Create the base instruction with the memory operand as the first part. |
5883 | // Omit the implicit operands, something BuildMI can't do. |
5884 | MachineInstr *NewMI = |
5885 | MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true); |
5886 | MachineInstrBuilder MIB(MF, NewMI); |
5887 | addOperands(MIB, MOs); |
5888 | |
5889 | // Loop over the remaining operands (the "ri" part), converting them over.
5890 | unsigned NumOps = MI.getDesc().getNumOperands() - 2; |
5891 | for (unsigned i = 0; i != NumOps; ++i) { |
5892 | MachineOperand &MO = MI.getOperand(i + 2); |
5893 | MIB.add(MO); |
5894 | } |
5895 | for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), NumOps + 2)) |
5896 | MIB.add(MO); |
5897 | |
5898 | updateOperandRegConstraints(MF, *NewMI, TII); |
5899 | |
5900 | MachineBasicBlock *MBB = InsertPt->getParent(); |
5901 | MBB->insert(InsertPt, NewMI); |
5902 | |
5903 | return MIB; |
5904 | } |
5905 | |
5906 | static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode, |
5907 | unsigned OpNo, ArrayRef<MachineOperand> MOs, |
5908 | MachineBasicBlock::iterator InsertPt, |
5909 | MachineInstr &MI, const TargetInstrInfo &TII, |
5910 | int PtrOffset = 0) { |
5911 | // Omit the implicit operands, something BuildMI can't do. |
5912 | MachineInstr *NewMI = |
5913 | MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true); |
5914 | MachineInstrBuilder MIB(MF, NewMI); |
5915 | |
5916 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
5917 | MachineOperand &MO = MI.getOperand(i); |
5918 | if (i == OpNo) { |
5919 | assert(MO.isReg() && "Expected to fold into reg operand!");
5920 | addOperands(MIB, MOs, PtrOffset); |
5921 | } else { |
5922 | MIB.add(MO); |
5923 | } |
5924 | } |
5925 | |
5926 | updateOperandRegConstraints(MF, *NewMI, TII); |
5927 | |
5928 | // Copy the NoFPExcept flag from the instruction we're fusing. |
5929 | if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept)) |
5930 | NewMI->setFlag(MachineInstr::MIFlag::NoFPExcept); |
5931 | |
5932 | MachineBasicBlock *MBB = InsertPt->getParent(); |
5933 | MBB->insert(InsertPt, NewMI); |
5934 | |
5935 | return MIB; |
5936 | } |
5937 | |
5938 | static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, |
5939 | ArrayRef<MachineOperand> MOs, |
5940 | MachineBasicBlock::iterator InsertPt, |
5941 | MachineInstr &MI) { |
5942 | MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt, |
5943 | MI.getDebugLoc(), TII.get(Opcode)); |
5944 | addOperands(MIB, MOs); |
5945 | return MIB.addImm(0); |
5946 | } |
5947 | |
5948 | MachineInstr *X86InstrInfo::foldMemoryOperandCustom( |
5949 | MachineFunction &MF, MachineInstr &MI, unsigned OpNum, |
5950 | ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, |
5951 | unsigned Size, Align Alignment) const { |
5952 | switch (MI.getOpcode()) { |
5953 | case X86::INSERTPSrr: |
5954 | case X86::VINSERTPSrr: |
5955 | case X86::VINSERTPSZrr: |
5956 | // Attempt to convert the load of the inserted vector into a folded load
5957 | // of a single float.
5958 | if (OpNum == 2) { |
5959 | unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm(); |
5960 | unsigned ZMask = Imm & 15; |
5961 | unsigned DstIdx = (Imm >> 4) & 3; |
5962 | unsigned SrcIdx = (Imm >> 6) & 3; |
5963 | |
5964 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
5965 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); |
5966 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
5967 | if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(4)) { |
5968 | int PtrOffset = SrcIdx * 4; |
5969 | unsigned NewImm = (DstIdx << 4) | ZMask; |
5970 | unsigned NewOpCode = |
5971 | (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm : |
5972 | (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm : |
5973 | X86::INSERTPSrm; |
5974 | MachineInstr *NewMI = |
5975 | FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset); |
5976 | NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm); |
5977 | return NewMI; |
5978 | } |
5979 | } |
5980 | break; |
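     | // A worked example for the INSERTPS fold above (the value is illustrative):
     | // Imm = 0x98 gives SrcIdx = 2, DstIdx = 1, ZMask = 0x8, so the fold loads
     | // the float at offset SrcIdx * 4 = 8 and rewrites the immediate to
     | // (DstIdx << 4) | ZMask = 0x18; the loaded scalar always arrives in
     | // source lane 0.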
5981 | case X86::MOVHLPSrr: |
5982 | case X86::VMOVHLPSrr: |
5983 | case X86::VMOVHLPSZrr: |
5984 | // Move the upper 64 bits of the second operand to the lower 64 bits.
5985 | // To fold the load, adjust the pointer to the upper and use (V)MOVLPS. |
5986 | // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
5987 | if (OpNum == 2) { |
5988 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
5989 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); |
5990 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
5991 | if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) { |
5992 | unsigned NewOpCode = |
5993 | (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm : |
5994 | (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm : |
5995 | X86::MOVLPSrm; |
5996 | MachineInstr *NewMI = |
5997 | FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8); |
5998 | return NewMI; |
5999 | } |
6000 | } |
6001 | break; |
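     | // e.g. (sketch, AT&T syntax, illustrative address): when the second operand
     | // is reloaded from a spill slot, the fold above turns
     | //   movhlps %xmm1, %xmm0   into   movlps 8(%rsp), %xmm0
     | // i.e. it reads only the upper 64 bits directly from memory.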
6002 | case X86::UNPCKLPDrr: |
6003 | // If we won't be able to fold this to the memory form of UNPCKL, use |
6004 | // MOVHPD instead. Done as custom because we can't have this in the load |
6005 | // table twice. |
6006 | if (OpNum == 2) { |
6007 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
6008 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); |
6009 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
6010 | if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) { |
6011 | MachineInstr *NewMI = |
6012 | FuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this); |
6013 | return NewMI; |
6014 | } |
6015 | } |
6016 | break; |
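     | // e.g. (sketch): movhpd loads 8 bytes into the upper half of the
     | // destination, which matches where unpcklpd would have placed element 0 of
     | // its under-aligned memory operand.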
6017 | } |
6018 | |
6019 | return nullptr; |
6020 | } |
6021 | |
6022 | static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF, |
6023 | MachineInstr &MI) { |
6024 | if (!hasUndefRegUpdate(MI.getOpcode(), 1, /*ForLoadFold*/true) || |
6025 | !MI.getOperand(1).isReg()) |
6026 | return false; |
6027 | |
6028 | // There are two cases we need to handle, depending on where in the pipeline
6029 | // the folding attempt is being made. |
6030 | // -Register has the undef flag set. |
6031 | // -Register is produced by the IMPLICIT_DEF instruction. |
6032 | |
6033 | if (MI.getOperand(1).isUndef()) |
6034 | return true; |
6035 | |
6036 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); |
6037 | MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg()); |
6038 | return VRegDef && VRegDef->isImplicitDef(); |
6039 | } |
6040 | |
6041 | MachineInstr *X86InstrInfo::foldMemoryOperandImpl( |
6042 | MachineFunction &MF, MachineInstr &MI, unsigned OpNum, |
6043 | ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, |
6044 | unsigned Size, Align Alignment, bool AllowCommute) const { |
6045 | bool isSlowTwoMemOps = Subtarget.slowTwoMemOps(); |
6046 | bool isTwoAddrFold = false; |
6047 | |
6048 | // For CPUs that favor the register form of a call or push, |
6049 | // do not fold loads into calls or pushes, unless optimizing for size |
6050 | // aggressively. |
6051 | if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() && |
6052 | (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r || |
6053 | MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r || |
6054 | MI.getOpcode() == X86::PUSH64r)) |
6055 | return nullptr; |
6056 | |
6057 | // Avoid partial and undef register update stalls unless optimizing for size. |
6058 | if (!MF.getFunction().hasOptSize() && |
6059 | (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || |
6060 | shouldPreventUndefRegUpdateMemFold(MF, MI))) |
6061 | return nullptr; |
6062 | |
6063 | unsigned NumOps = MI.getDesc().getNumOperands(); |
6064 | bool isTwoAddr = |
6065 | NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1; |
6066 | |
6067 | // FIXME: AsmPrinter doesn't know how to handle |
6068 | // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding. |
6069 | if (MI.getOpcode() == X86::ADD32ri && |
6070 | MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS) |
6071 | return nullptr; |
6072 | |
6073 | // GOTTPOFF relocation loads can only be folded into add instructions. |
6074 | // FIXME: Need to exclude other relocations that only support specific |
6075 | // instructions. |
6076 | if (MOs.size() == X86::AddrNumOperands && |
6077 | MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF && |
6078 | MI.getOpcode() != X86::ADD64rr) |
6079 | return nullptr; |
6080 | |
6081 | MachineInstr *NewMI = nullptr; |
6082 | |
6083 | // Attempt to fold any custom cases we have. |
6084 | if (MachineInstr *CustomMI = foldMemoryOperandCustom( |
6085 | MF, MI, OpNum, MOs, InsertPt, Size, Alignment)) |
6086 | return CustomMI; |
6087 | |
6088 | const X86MemoryFoldTableEntry *I = nullptr; |
6089 | |
6090 | // Folding a memory location into the two-address part of a two-address |
6091 | // instruction is different from folding it elsewhere. It requires
6092 | // replacing the *two* registers with the memory location. |
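     | // e.g. (sketch, illustrative slot): with %eax spilled to %stack.0,
     | //   %eax = ADD32rr %eax(tied), %ebx
     | // folds to the read-modify-write form
     | //   ADD32mr %stack.0, 1, $noreg, 0, $noreg, %ebx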
6093 | if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() && |
6094 | MI.getOperand(1).isReg() && |
6095 | MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) { |
6096 | I = lookupTwoAddrFoldTable(MI.getOpcode()); |
6097 | isTwoAddrFold = true; |
6098 | } else { |
6099 | if (OpNum == 0) { |
6100 | if (MI.getOpcode() == X86::MOV32r0) { |
6101 | NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI); |
6102 | if (NewMI) |
6103 | return NewMI; |
6104 | } |
6105 | } |
6106 | |
6107 | I = lookupFoldTable(MI.getOpcode(), OpNum); |
6108 | } |
6109 | |
6110 | if (I != nullptr) { |
6111 | unsigned Opcode = I->DstOp; |
6112 | bool FoldedLoad = |
6113 | isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_LOAD) || OpNum > 0; |
6114 | bool FoldedStore = |
6115 | isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_STORE); |
6116 | MaybeAlign MinAlign = |
6117 | decodeMaybeAlign((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT); |
6118 | if (MinAlign && Alignment < *MinAlign) |
6119 | return nullptr; |
6120 | bool NarrowToMOV32rm = false; |
6121 | if (Size) { |
6122 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
6123 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, |
6124 | &RI, MF); |
6125 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
6126 | // Check if it's safe to fold the load. If the size of the object is |
6127 | // narrower than the load width, then it's not. |
6128 | // FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int. |
6129 | if (FoldedLoad && Size < RCSize) { |
6130 | // If this is a 64-bit load, but the spill slot is only 32 bits, then we can
6131 | // do a 32-bit load, which is implicitly zero-extended. This is likely
6132 | // due to live interval analysis remat'ing a load from a stack slot.
6133 | if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4) |
6134 | return nullptr; |
6135 | if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg()) |
6136 | return nullptr; |
6137 | Opcode = X86::MOV32rm; |
6138 | NarrowToMOV32rm = true; |
6139 | } |
6140 | // For stores, make sure the size of the object is equal to the size of |
6141 | // the store. If the object is larger, the extra bits would be garbage. If |
6142 | // the object is smaller, we might overwrite another object or fault.
6143 | if (FoldedStore && Size != RCSize) |
6144 | return nullptr; |
6145 | } |
6146 | |
6147 | if (isTwoAddrFold) |
6148 | NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this); |
6149 | else |
6150 | NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this); |
6151 | |
6152 | if (NarrowToMOV32rm) { |
6153 | // This is the special case where we use a MOV32rm to load a 32-bit
6154 | // value and zero-extend the top bits; change the destination register
6155 | // to a 32-bit one.
6156 | Register DstReg = NewMI->getOperand(0).getReg(); |
6157 | if (DstReg.isPhysical()) |
6158 | NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit)); |
6159 | else |
6160 | NewMI->getOperand(0).setSubReg(X86::sub_32bit); |
6161 | } |
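     | // e.g. (sketch): a remat'd  %rax = MOV64rm <4-byte slot>  becomes
     | //   %eax = MOV32rm <slot>
     | // and the architectural zero-extension of 32-bit writes supplies the upper
     | // 32 bits of %rax.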
6162 | return NewMI; |
6163 | } |
6164 | |
6165 | // If the instruction and target operand are commutable, commute the |
6166 | // instruction and try again. |
6167 | if (AllowCommute) { |
6168 | unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex; |
6169 | if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) { |
6170 | bool HasDef = MI.getDesc().getNumDefs(); |
6171 | Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register(); |
6172 | Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg(); |
6173 | Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg(); |
6174 | bool Tied1 = |
6175 | 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO); |
6176 | bool Tied2 = |
6177 | 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO); |
6178 | |
6179 | // If either of the commutable operands is tied to the destination,
6180 | // then we cannot commute + fold.
6181 | if ((HasDef && Reg0 == Reg1 && Tied1) || |
6182 | (HasDef && Reg0 == Reg2 && Tied2)) |
6183 | return nullptr; |
6184 | |
6185 | MachineInstr *CommutedMI = |
6186 | commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); |
6187 | if (!CommutedMI) { |
6188 | // Unable to commute. |
6189 | return nullptr; |
6190 | } |
6191 | if (CommutedMI != &MI) { |
6192 | // New instruction. We can't fold from this. |
6193 | CommutedMI->eraseFromParent(); |
6194 | return nullptr; |
6195 | } |
6196 | |
6197 | // Attempt to fold with the commuted version of the instruction. |
6198 | NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, Size, |
6199 | Alignment, /*AllowCommute=*/false); |
6200 | if (NewMI) |
6201 | return NewMI; |
6202 | |
6203 | // Folding failed again - undo the commute before returning. |
6204 | MachineInstr *UncommutedMI = |
6205 | commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); |
6206 | if (!UncommutedMI) { |
6207 | // Unable to commute. |
6208 | return nullptr; |
6209 | } |
6210 | if (UncommutedMI != &MI) { |
6211 | // New instruction. It doesn't need to be kept. |
6212 | UncommutedMI->eraseFromParent(); |
6213 | return nullptr; |
6214 | } |
6215 | |
6216 | // Return here to prevent a duplicate fuse-failure report.
6217 | return nullptr; |
6218 | } |
6219 | } |
6220 | |
6221 | // No fusion |
6222 | if (PrintFailedFusing && !MI.isCopy()) |
6223 | dbgs() << "We failed to fuse operand " << OpNum << " in " << MI; |
6224 | return nullptr; |
6225 | } |
6226 | |
6227 | MachineInstr * |
6228 | X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, |
6229 | ArrayRef<unsigned> Ops, |
6230 | MachineBasicBlock::iterator InsertPt, |
6231 | int FrameIndex, LiveIntervals *LIS, |
6232 | VirtRegMap *VRM) const { |
6233 | // Check switch flag |
6234 | if (NoFusing) |
6235 | return nullptr; |
6236 | |
6237 | // Avoid partial and undef register update stalls unless optimizing for size. |
6238 | if (!MF.getFunction().hasOptSize() && |
6239 | (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || |
6240 | shouldPreventUndefRegUpdateMemFold(MF, MI))) |
6241 | return nullptr; |
6242 | |
6243 | // Don't fold subreg spills, or reloads that use a high subreg. |
6244 | for (auto Op : Ops) { |
6245 | MachineOperand &MO = MI.getOperand(Op); |
6246 | auto SubReg = MO.getSubReg(); |
6247 | if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi)) |
6248 | return nullptr; |
6249 | } |
6250 | |
6251 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
6252 | unsigned Size = MFI.getObjectSize(FrameIndex); |
6253 | Align Alignment = MFI.getObjectAlign(FrameIndex); |
6254 | // If the function stack isn't realigned, we don't want to fold instructions
6255 | // that need increased alignment. |
6256 | if (!RI.hasStackRealignment(MF)) |
6257 | Alignment = |
6258 | std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign()); |
6259 | if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { |
6260 | unsigned NewOpc = 0; |
6261 | unsigned RCSize = 0; |
6262 | switch (MI.getOpcode()) { |
6263 | default: return nullptr; |
6264 | case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break; |
6265 | case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break; |
6266 | case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break; |
6267 | case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break; |
6268 | } |
6269 | // Check if it's safe to fold the load. If the size of the object is |
6270 | // narrower than the load width, then it's not. |
6271 | if (Size < RCSize) |
6272 | return nullptr; |
6273 | // Change to CMPXXri r, 0 first. |
6274 | MI.setDesc(get(NewOpc)); |
6275 | MI.getOperand(1).ChangeToImmediate(0); |
6276 | } else if (Ops.size() != 1) |
6277 | return nullptr; |
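     | // e.g. (sketch): when both operands of  TEST32rr %eax, %eax  refer to the
     | // same spill slot, the code above first rewrites it to  CMP32ri8 %eax, 0,
     | // and the remaining register use is then folded below into
     | //   CMP32mi8 <slot>, 0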
6278 | |
6279 | return foldMemoryOperandImpl(MF, MI, Ops[0], |
6280 | MachineOperand::CreateFI(FrameIndex), InsertPt, |
6281 | Size, Alignment, /*AllowCommute=*/true); |
6282 | } |
6283 | |
6284 | /// Check if \p LoadMI is a partial register load that we can't fold into \p MI |
6285 | /// because the latter uses contents that wouldn't be defined in the folded |
6286 | /// version. For instance, this transformation isn't legal: |
6287 | /// movss (%rdi), %xmm0 |
6288 | /// addps %xmm0, %xmm0 |
6289 | /// -> |
6290 | /// addps (%rdi), %xmm0 |
6291 | /// |
6292 | /// But this one is: |
6293 | /// movss (%rdi), %xmm0 |
6294 | /// addss %xmm0, %xmm0 |
6295 | /// -> |
6296 | /// addss (%rdi), %xmm0 |
6297 | /// |
6298 | static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, |
6299 | const MachineInstr &UserMI, |
6300 | const MachineFunction &MF) { |
6301 | unsigned Opc = LoadMI.getOpcode(); |
6302 | unsigned UserOpc = UserMI.getOpcode(); |
6303 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
6304 | const TargetRegisterClass *RC = |
6305 | MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg()); |
6306 | unsigned RegSize = TRI.getRegSizeInBits(*RC); |
6307 | |
6308 | if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm || |
6309 | Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt || |
6310 | Opc == X86::VMOVSSZrm_alt) && |
6311 | RegSize > 32) { |
6312 |     // These instructions only load 32 bits, so we can't fold them if the
6313 |     // destination register is wider than 32 bits (4 bytes) and the user
6314 |     // instruction isn't scalar (SS).
6315 | switch (UserOpc) { |
6316 | case X86::CVTSS2SDrr_Int: |
6317 | case X86::VCVTSS2SDrr_Int: |
6318 | case X86::VCVTSS2SDZrr_Int: |
6319 | case X86::VCVTSS2SDZrr_Intk: |
6320 | case X86::VCVTSS2SDZrr_Intkz: |
6321 | case X86::CVTSS2SIrr_Int: case X86::CVTSS2SI64rr_Int: |
6322 | case X86::VCVTSS2SIrr_Int: case X86::VCVTSS2SI64rr_Int: |
6323 | case X86::VCVTSS2SIZrr_Int: case X86::VCVTSS2SI64Zrr_Int: |
6324 | case X86::CVTTSS2SIrr_Int: case X86::CVTTSS2SI64rr_Int: |
6325 | case X86::VCVTTSS2SIrr_Int: case X86::VCVTTSS2SI64rr_Int: |
6326 | case X86::VCVTTSS2SIZrr_Int: case X86::VCVTTSS2SI64Zrr_Int: |
6327 | case X86::VCVTSS2USIZrr_Int: case X86::VCVTSS2USI64Zrr_Int: |
6328 | case X86::VCVTTSS2USIZrr_Int: case X86::VCVTTSS2USI64Zrr_Int: |
6329 | case X86::RCPSSr_Int: case X86::VRCPSSr_Int: |
6330 | case X86::RSQRTSSr_Int: case X86::VRSQRTSSr_Int: |
6331 | case X86::ROUNDSSr_Int: case X86::VROUNDSSr_Int: |
6332 | case X86::COMISSrr_Int: case X86::VCOMISSrr_Int: case X86::VCOMISSZrr_Int: |
6333 | case X86::UCOMISSrr_Int:case X86::VUCOMISSrr_Int:case X86::VUCOMISSZrr_Int: |
6334 | case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int: |
6335 | case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int: |
6336 | case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int: |
6337 | case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int: |
6338 | case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int: |
6339 | case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int: |
6340 | case X86::SQRTSSr_Int: case X86::VSQRTSSr_Int: case X86::VSQRTSSZr_Int: |
6341 | case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int: |
6342 | case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz: |
6343 | case X86::VCMPSSZrr_Intk: |
6344 | case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz: |
6345 | case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz: |
6346 | case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz: |
6347 | case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz: |
6348 | case X86::VSQRTSSZr_Intk: case X86::VSQRTSSZr_Intkz: |
6349 | case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz: |
6350 | case X86::VFMADDSS4rr_Int: case X86::VFNMADDSS4rr_Int: |
6351 | case X86::VFMSUBSS4rr_Int: case X86::VFNMSUBSS4rr_Int: |
6352 | case X86::VFMADD132SSr_Int: case X86::VFNMADD132SSr_Int: |
6353 | case X86::VFMADD213SSr_Int: case X86::VFNMADD213SSr_Int: |
6354 | case X86::VFMADD231SSr_Int: case X86::VFNMADD231SSr_Int: |
6355 | case X86::VFMSUB132SSr_Int: case X86::VFNMSUB132SSr_Int: |
6356 | case X86::VFMSUB213SSr_Int: case X86::VFNMSUB213SSr_Int: |
6357 | case X86::VFMSUB231SSr_Int: case X86::VFNMSUB231SSr_Int: |
6358 | case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int: |
6359 | case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int: |
6360 | case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int: |
6361 | case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int: |
6362 | case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int: |
6363 | case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int: |
6364 | case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk: |
6365 | case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk: |
6366 | case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk: |
6367 | case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk: |
6368 | case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk: |
6369 | case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk: |
6370 | case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz: |
6371 | case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz: |
6372 | case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz: |
6373 | case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz: |
6374 | case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz: |
6375 | case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz: |
6376 | case X86::VFIXUPIMMSSZrri: |
6377 | case X86::VFIXUPIMMSSZrrik: |
6378 | case X86::VFIXUPIMMSSZrrikz: |
6379 | case X86::VFPCLASSSSZrr: |
6380 | case X86::VFPCLASSSSZrrk: |
6381 | case X86::VGETEXPSSZr: |
6382 | case X86::VGETEXPSSZrk: |
6383 | case X86::VGETEXPSSZrkz: |
6384 | case X86::VGETMANTSSZrri: |
6385 | case X86::VGETMANTSSZrrik: |
6386 | case X86::VGETMANTSSZrrikz: |
6387 | case X86::VRANGESSZrri: |
6388 | case X86::VRANGESSZrrik: |
6389 | case X86::VRANGESSZrrikz: |
6390 | case X86::VRCP14SSZrr: |
6391 | case X86::VRCP14SSZrrk: |
6392 | case X86::VRCP14SSZrrkz: |
6393 | case X86::VRCP28SSZr: |
6394 | case X86::VRCP28SSZrk: |
6395 | case X86::VRCP28SSZrkz: |
6396 | case X86::VREDUCESSZrri: |
6397 | case X86::VREDUCESSZrrik: |
6398 | case X86::VREDUCESSZrrikz: |
6399 | case X86::VRNDSCALESSZr_Int: |
6400 | case X86::VRNDSCALESSZr_Intk: |
6401 | case X86::VRNDSCALESSZr_Intkz: |
6402 | case X86::VRSQRT14SSZrr: |
6403 | case X86::VRSQRT14SSZrrk: |
6404 | case X86::VRSQRT14SSZrrkz: |
6405 | case X86::VRSQRT28SSZr: |
6406 | case X86::VRSQRT28SSZrk: |
6407 | case X86::VRSQRT28SSZrkz: |
6408 | case X86::VSCALEFSSZrr: |
6409 | case X86::VSCALEFSSZrrk: |
6410 | case X86::VSCALEFSSZrrkz: |
6411 | return false; |
6412 | default: |
6413 | return true; |
6414 | } |
6415 | } |
6416 | |
6417 | if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm || |
6418 | Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt || |
6419 | Opc == X86::VMOVSDZrm_alt) && |
6420 | RegSize > 64) { |
6421 |     // These instructions only load 64 bits, so we can't fold them if the
6422 |     // destination register is wider than 64 bits (8 bytes) and the user
6423 |     // instruction isn't scalar (SD).
6424 | switch (UserOpc) { |
6425 | case X86::CVTSD2SSrr_Int: |
6426 | case X86::VCVTSD2SSrr_Int: |
6427 | case X86::VCVTSD2SSZrr_Int: |
6428 | case X86::VCVTSD2SSZrr_Intk: |
6429 | case X86::VCVTSD2SSZrr_Intkz: |
6430 | case X86::CVTSD2SIrr_Int: case X86::CVTSD2SI64rr_Int: |
6431 | case X86::VCVTSD2SIrr_Int: case X86::VCVTSD2SI64rr_Int: |
6432 | case X86::VCVTSD2SIZrr_Int: case X86::VCVTSD2SI64Zrr_Int: |
6433 | case X86::CVTTSD2SIrr_Int: case X86::CVTTSD2SI64rr_Int: |
6434 | case X86::VCVTTSD2SIrr_Int: case X86::VCVTTSD2SI64rr_Int: |
6435 | case X86::VCVTTSD2SIZrr_Int: case X86::VCVTTSD2SI64Zrr_Int: |
6436 | case X86::VCVTSD2USIZrr_Int: case X86::VCVTSD2USI64Zrr_Int: |
6437 | case X86::VCVTTSD2USIZrr_Int: case X86::VCVTTSD2USI64Zrr_Int: |
6438 | case X86::ROUNDSDr_Int: case X86::VROUNDSDr_Int: |
6439 | case X86::COMISDrr_Int: case X86::VCOMISDrr_Int: case X86::VCOMISDZrr_Int: |
6440 | case X86::UCOMISDrr_Int:case X86::VUCOMISDrr_Int:case X86::VUCOMISDZrr_Int: |
6441 | case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int: |
6442 | case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int: |
6443 | case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int: |
6444 | case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int: |
6445 | case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int: |
6446 | case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int: |
6447 | case X86::SQRTSDr_Int: case X86::VSQRTSDr_Int: case X86::VSQRTSDZr_Int: |
6448 | case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int: |
6449 | case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz: |
6450 | case X86::VCMPSDZrr_Intk: |
6451 | case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz: |
6452 | case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz: |
6453 | case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz: |
6454 | case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz: |
6455 | case X86::VSQRTSDZr_Intk: case X86::VSQRTSDZr_Intkz: |
6456 | case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz: |
6457 | case X86::VFMADDSD4rr_Int: case X86::VFNMADDSD4rr_Int: |
6458 | case X86::VFMSUBSD4rr_Int: case X86::VFNMSUBSD4rr_Int: |
6459 | case X86::VFMADD132SDr_Int: case X86::VFNMADD132SDr_Int: |
6460 | case X86::VFMADD213SDr_Int: case X86::VFNMADD213SDr_Int: |
6461 | case X86::VFMADD231SDr_Int: case X86::VFNMADD231SDr_Int: |
6462 | case X86::VFMSUB132SDr_Int: case X86::VFNMSUB132SDr_Int: |
6463 | case X86::VFMSUB213SDr_Int: case X86::VFNMSUB213SDr_Int: |
6464 | case X86::VFMSUB231SDr_Int: case X86::VFNMSUB231SDr_Int: |
6465 | case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int: |
6466 | case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int: |
6467 | case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int: |
6468 | case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int: |
6469 | case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int: |
6470 | case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int: |
6471 | case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk: |
6472 | case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk: |
6473 | case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk: |
6474 | case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk: |
6475 | case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk: |
6476 | case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk: |
6477 | case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz: |
6478 | case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz: |
6479 | case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz: |
6480 | case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz: |
6481 | case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz: |
6482 | case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz: |
6483 | case X86::VFIXUPIMMSDZrri: |
6484 | case X86::VFIXUPIMMSDZrrik: |
6485 | case X86::VFIXUPIMMSDZrrikz: |
6486 | case X86::VFPCLASSSDZrr: |
6487 | case X86::VFPCLASSSDZrrk: |
6488 | case X86::VGETEXPSDZr: |
6489 | case X86::VGETEXPSDZrk: |
6490 | case X86::VGETEXPSDZrkz: |
6491 | case X86::VGETMANTSDZrri: |
6492 | case X86::VGETMANTSDZrrik: |
6493 | case X86::VGETMANTSDZrrikz: |
6494 | case X86::VRANGESDZrri: |
6495 | case X86::VRANGESDZrrik: |
6496 | case X86::VRANGESDZrrikz: |
6497 | case X86::VRCP14SDZrr: |
6498 | case X86::VRCP14SDZrrk: |
6499 | case X86::VRCP14SDZrrkz: |
6500 | case X86::VRCP28SDZr: |
6501 | case X86::VRCP28SDZrk: |
6502 | case X86::VRCP28SDZrkz: |
6503 | case X86::VREDUCESDZrri: |
6504 | case X86::VREDUCESDZrrik: |
6505 | case X86::VREDUCESDZrrikz: |
6506 | case X86::VRNDSCALESDZr_Int: |
6507 | case X86::VRNDSCALESDZr_Intk: |
6508 | case X86::VRNDSCALESDZr_Intkz: |
6509 | case X86::VRSQRT14SDZrr: |
6510 | case X86::VRSQRT14SDZrrk: |
6511 | case X86::VRSQRT14SDZrrkz: |
6512 | case X86::VRSQRT28SDZr: |
6513 | case X86::VRSQRT28SDZrk: |
6514 | case X86::VRSQRT28SDZrkz: |
6515 | case X86::VSCALEFSDZrr: |
6516 | case X86::VSCALEFSDZrrk: |
6517 | case X86::VSCALEFSDZrrkz: |
6518 | return false; |
6519 | default: |
6520 | return true; |
6521 | } |
6522 | } |
6523 | |
6524 | if ((Opc == X86::VMOVSHZrm || Opc == X86::VMOVSHZrm_alt) && RegSize > 16) { |
6525 |     // These instructions only load 16 bits, so we can't fold them if the
6526 |     // destination register is wider than 16 bits (2 bytes) and the user
6527 |     // instruction isn't scalar (SH).
6528 | switch (UserOpc) { |
6529 | case X86::VADDSHZrr_Int: |
6530 | case X86::VCMPSHZrr_Int: |
6531 | case X86::VDIVSHZrr_Int: |
6532 | case X86::VMAXSHZrr_Int: |
6533 | case X86::VMINSHZrr_Int: |
6534 | case X86::VMULSHZrr_Int: |
6535 | case X86::VSUBSHZrr_Int: |
6536 | case X86::VADDSHZrr_Intk: case X86::VADDSHZrr_Intkz: |
6537 | case X86::VCMPSHZrr_Intk: |
6538 | case X86::VDIVSHZrr_Intk: case X86::VDIVSHZrr_Intkz: |
6539 | case X86::VMAXSHZrr_Intk: case X86::VMAXSHZrr_Intkz: |
6540 | case X86::VMINSHZrr_Intk: case X86::VMINSHZrr_Intkz: |
6541 | case X86::VMULSHZrr_Intk: case X86::VMULSHZrr_Intkz: |
6542 | case X86::VSUBSHZrr_Intk: case X86::VSUBSHZrr_Intkz: |
6543 | case X86::VFMADD132SHZr_Int: case X86::VFNMADD132SHZr_Int: |
6544 | case X86::VFMADD213SHZr_Int: case X86::VFNMADD213SHZr_Int: |
6545 | case X86::VFMADD231SHZr_Int: case X86::VFNMADD231SHZr_Int: |
6546 | case X86::VFMSUB132SHZr_Int: case X86::VFNMSUB132SHZr_Int: |
6547 | case X86::VFMSUB213SHZr_Int: case X86::VFNMSUB213SHZr_Int: |
6548 | case X86::VFMSUB231SHZr_Int: case X86::VFNMSUB231SHZr_Int: |
6549 | case X86::VFMADD132SHZr_Intk: case X86::VFNMADD132SHZr_Intk: |
6550 | case X86::VFMADD213SHZr_Intk: case X86::VFNMADD213SHZr_Intk: |
6551 | case X86::VFMADD231SHZr_Intk: case X86::VFNMADD231SHZr_Intk: |
6552 | case X86::VFMSUB132SHZr_Intk: case X86::VFNMSUB132SHZr_Intk: |
6553 | case X86::VFMSUB213SHZr_Intk: case X86::VFNMSUB213SHZr_Intk: |
6554 | case X86::VFMSUB231SHZr_Intk: case X86::VFNMSUB231SHZr_Intk: |
6555 | case X86::VFMADD132SHZr_Intkz: case X86::VFNMADD132SHZr_Intkz: |
6556 | case X86::VFMADD213SHZr_Intkz: case X86::VFNMADD213SHZr_Intkz: |
6557 | case X86::VFMADD231SHZr_Intkz: case X86::VFNMADD231SHZr_Intkz: |
6558 | case X86::VFMSUB132SHZr_Intkz: case X86::VFNMSUB132SHZr_Intkz: |
6559 | case X86::VFMSUB213SHZr_Intkz: case X86::VFNMSUB213SHZr_Intkz: |
6560 | case X86::VFMSUB231SHZr_Intkz: case X86::VFNMSUB231SHZr_Intkz: |
6561 | return false; |
6562 | default: |
6563 | return true; |
6564 | } |
6565 | } |
6566 | |
6567 | return false; |
6568 | } |
6569 | |
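     | /// Fold the load \p LoadMI into \p MI. A reload from a stack slot goes
     | /// through the frame-index path above; the zero/all-ones pseudos
     | /// (V_SET0, V_SETALLONES, FsFLD0*, ...) are folded by materializing a
     | /// constant-pool entry; any other load contributes its address operands.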
6570 | MachineInstr *X86InstrInfo::foldMemoryOperandImpl( |
6571 | MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, |
6572 | MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, |
6573 | LiveIntervals *LIS) const { |
6574 | |
6575 | // TODO: Support the case where LoadMI loads a wide register, but MI |
6576 | // only uses a subreg. |
6577 | for (auto Op : Ops) { |
6578 | if (MI.getOperand(Op).getSubReg()) |
6579 | return nullptr; |
6580 | } |
6581 | |
6582 | // If loading from a FrameIndex, fold directly from the FrameIndex. |
6583 | unsigned NumOps = LoadMI.getDesc().getNumOperands(); |
6584 | int FrameIndex; |
6585 | if (isLoadFromStackSlot(LoadMI, FrameIndex)) { |
6586 | if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) |
6587 | return nullptr; |
6588 | return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS); |
6589 | } |
6590 | |
6591 | // Check switch flag |
6592 | if (NoFusing) return nullptr; |
6593 | |
6594 | // Avoid partial and undef register update stalls unless optimizing for size. |
6595 | if (!MF.getFunction().hasOptSize() && |
6596 | (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || |
6597 | shouldPreventUndefRegUpdateMemFold(MF, MI))) |
6598 | return nullptr; |
6599 | |
6600 | // Determine the alignment of the load. |
6601 | Align Alignment; |
6602 | if (LoadMI.hasOneMemOperand()) |
6603 | Alignment = (*LoadMI.memoperands_begin())->getAlign(); |
6604 | else |
6605 | switch (LoadMI.getOpcode()) { |
6606 | case X86::AVX512_512_SET0: |
6607 | case X86::AVX512_512_SETALLONES: |
6608 | Alignment = Align(64); |
6609 | break; |
6610 | case X86::AVX2_SETALLONES: |
6611 | case X86::AVX1_SETALLONES: |
6612 | case X86::AVX_SET0: |
6613 | case X86::AVX512_256_SET0: |
6614 | Alignment = Align(32); |
6615 | break; |
6616 | case X86::V_SET0: |
6617 | case X86::V_SETALLONES: |
6618 | case X86::AVX512_128_SET0: |
6619 | case X86::FsFLD0F128: |
6620 | case X86::AVX512_FsFLD0F128: |
6621 | Alignment = Align(16); |
6622 | break; |
6623 | case X86::MMX_SET0: |
6624 | case X86::FsFLD0SD: |
6625 | case X86::AVX512_FsFLD0SD: |
6626 | Alignment = Align(8); |
6627 | break; |
6628 | case X86::FsFLD0SS: |
6629 | case X86::AVX512_FsFLD0SS: |
6630 | Alignment = Align(4); |
6631 | break; |
6632 | case X86::FsFLD0SH: |
6633 | case X86::AVX512_FsFLD0SH: |
6634 | Alignment = Align(2); |
6635 | break; |
6636 | default: |
6637 | return nullptr; |
6638 | } |
6639 | if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { |
6640 | unsigned NewOpc = 0; |
6641 | switch (MI.getOpcode()) { |
6642 | default: return nullptr; |
6643 | case X86::TEST8rr: NewOpc = X86::CMP8ri; break; |
6644 | case X86::TEST16rr: NewOpc = X86::CMP16ri8; break; |
6645 | case X86::TEST32rr: NewOpc = X86::CMP32ri8; break; |
6646 | case X86::TEST64rr: NewOpc = X86::CMP64ri8; break; |
6647 | } |
6648 | // Change to CMPXXri r, 0 first. |
6649 | MI.setDesc(get(NewOpc)); |
6650 | MI.getOperand(1).ChangeToImmediate(0); |
6651 | } else if (Ops.size() != 1) |
6652 | return nullptr; |
6653 | |
6654 | // Make sure the subregisters match. |
6655 | // Otherwise we risk changing the size of the load. |
6656 | if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg()) |
6657 | return nullptr; |
6658 | |
6659 |   SmallVector<MachineOperand, X86::AddrNumOperands> MOs;
6660 | switch (LoadMI.getOpcode()) { |
6661 | case X86::MMX_SET0: |
6662 | case X86::V_SET0: |
6663 | case X86::V_SETALLONES: |
6664 | case X86::AVX2_SETALLONES: |
6665 | case X86::AVX1_SETALLONES: |
6666 | case X86::AVX_SET0: |
6667 | case X86::AVX512_128_SET0: |
6668 | case X86::AVX512_256_SET0: |
6669 | case X86::AVX512_512_SET0: |
6670 | case X86::AVX512_512_SETALLONES: |
6671 | case X86::FsFLD0SH: |
6672 | case X86::AVX512_FsFLD0SH: |
6673 | case X86::FsFLD0SD: |
6674 | case X86::AVX512_FsFLD0SD: |
6675 | case X86::FsFLD0SS: |
6676 | case X86::AVX512_FsFLD0SS: |
6677 | case X86::FsFLD0F128: |
6678 | case X86::AVX512_FsFLD0F128: { |
6679 | // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure. |
6680 | // Create a constant-pool entry and operands to load from it. |
6681 | |
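     |     // For example (schematic, AT&T syntax): a zero idiom feeding an add,
     |     //   V_SET0 %xmm0;  addps %xmm0, %xmm1
     |     // can instead fold a RIP-relative load of a pooled zero constant:
     |     //   addps .LCPI0_0(%rip), %xmm1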
6682 |     // The medium and large code models can't fold loads this way.
6683 | if (MF.getTarget().getCodeModel() != CodeModel::Small && |
6684 | MF.getTarget().getCodeModel() != CodeModel::Kernel) |
6685 | return nullptr; |
6686 | |
6687 | // x86-32 PIC requires a PIC base register for constant pools. |
6688 | unsigned PICBase = 0; |
6689 | // Since we're using Small or Kernel code model, we can always use |
6690 | // RIP-relative addressing for a smaller encoding. |
6691 | if (Subtarget.is64Bit()) { |
6692 | PICBase = X86::RIP; |
6693 | } else if (MF.getTarget().isPositionIndependent()) { |
6694 | // FIXME: PICBase = getGlobalBaseReg(&MF); |
6695 | // This doesn't work for several reasons. |
6696 | // 1. GlobalBaseReg may have been spilled. |
6697 | // 2. It may not be live at MI. |
6698 | return nullptr; |
6699 | } |
6700 | |
6701 | // Create a constant-pool entry. |
6702 | MachineConstantPool &MCP = *MF.getConstantPool(); |
6703 | Type *Ty; |
6704 | unsigned Opc = LoadMI.getOpcode(); |
6705 | if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS) |
6706 | Ty = Type::getFloatTy(MF.getFunction().getContext()); |
6707 | else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD) |
6708 | Ty = Type::getDoubleTy(MF.getFunction().getContext()); |
6709 | else if (Opc == X86::FsFLD0F128 || Opc == X86::AVX512_FsFLD0F128) |
6710 | Ty = Type::getFP128Ty(MF.getFunction().getContext()); |
6711 | else if (Opc == X86::FsFLD0SH || Opc == X86::AVX512_FsFLD0SH) |
6712 | Ty = Type::getHalfTy(MF.getFunction().getContext()); |
6713 | else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES) |
6714 | Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), |
6715 | 16); |
6716 | else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 || |
6717 | Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES) |
6718 | Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), |
6719 | 8); |
6720 | else if (Opc == X86::MMX_SET0) |
6721 | Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), |
6722 | 2); |
6723 | else |
6724 | Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), |
6725 | 4); |
6726 | |
6727 | bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES || |
6728 | Opc == X86::AVX512_512_SETALLONES || |
6729 | Opc == X86::AVX1_SETALLONES); |
6730 | const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) : |
6731 | Constant::getNullValue(Ty); |
6732 | unsigned CPI = MCP.getConstantPoolIndex(C, Alignment); |
6733 | |
6734 | // Create operands to load from the constant pool entry. |
6735 | MOs.push_back(MachineOperand::CreateReg(PICBase, false)); |
6736 | MOs.push_back(MachineOperand::CreateImm(1)); |
6737 | MOs.push_back(MachineOperand::CreateReg(0, false)); |
6738 | MOs.push_back(MachineOperand::CreateCPI(CPI, 0)); |
6739 | MOs.push_back(MachineOperand::CreateReg(0, false)); |
6740 | break; |
6741 | } |
6742 | default: { |
6743 | if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) |
6744 | return nullptr; |
6745 | |
6746 | // Folding a normal load. Just copy the load's address operands. |
6747 | MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands, |
6748 | LoadMI.operands_begin() + NumOps); |
6749 | break; |
6750 | } |
6751 | } |
6752 | return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt, |
6753 | /*Size=*/0, Alignment, /*AllowCommute=*/true); |
6754 | } |
6755 | |
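     | /// Collect the load memory operands from \p MMOs; a combined load/store
     | /// MMO is cloned with its store flag cleared.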
6756 | static SmallVector<MachineMemOperand *, 2> |
6757 | extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) { |
6758 | SmallVector<MachineMemOperand *, 2> LoadMMOs; |
6759 | |
6760 | for (MachineMemOperand *MMO : MMOs) { |
6761 | if (!MMO->isLoad()) |
6762 | continue; |
6763 | |
6764 | if (!MMO->isStore()) { |
6765 | // Reuse the MMO. |
6766 | LoadMMOs.push_back(MMO); |
6767 | } else { |
6768 | // Clone the MMO and unset the store flag. |
6769 | LoadMMOs.push_back(MF.getMachineMemOperand( |
6770 | MMO, MMO->getFlags() & ~MachineMemOperand::MOStore)); |
6771 | } |
6772 | } |
6773 | |
6774 | return LoadMMOs; |
6775 | } |
6776 | |
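     | /// Collect the store memory operands from \p MMOs; a combined load/store
     | /// MMO is cloned with its load flag cleared.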
6777 | static SmallVector<MachineMemOperand *, 2> |
6778 | extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) { |
6779 | SmallVector<MachineMemOperand *, 2> StoreMMOs; |
6780 | |
6781 | for (MachineMemOperand *MMO : MMOs) { |
6782 | if (!MMO->isStore()) |
6783 | continue; |
6784 | |
6785 | if (!MMO->isLoad()) { |
6786 | // Reuse the MMO. |
6787 | StoreMMOs.push_back(MMO); |
6788 | } else { |
6789 | // Clone the MMO and unset the load flag. |
6790 | StoreMMOs.push_back(MF.getMachineMemOperand( |
6791 | MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad)); |
6792 | } |
6793 | } |
6794 | |
6795 | return StoreMMOs; |
6796 | } |
6797 | |
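     | /// Pick the broadcast load opcode matching the fold-table entry's
     | /// broadcast width (TB_BCAST_D/Q/SS/SD) and the spill size of \p RC.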
6798 | static unsigned getBroadcastOpcode(const X86MemoryFoldTableEntry *I, |
6799 | const TargetRegisterClass *RC, |
6800 | const X86Subtarget &STI) { |
6801 |   assert(STI.hasAVX512() && "Expected at least AVX512!");
6802 | unsigned SpillSize = STI.getRegisterInfo()->getSpillSize(*RC); |
6803 |   assert((SpillSize == 64 || STI.hasVLX()) &&
6804 |          "Can't broadcast less than 64 bytes without AVX512VL!");
6805 | |
6806 | switch (I->Flags & TB_BCAST_MASK) { |
6807 |   default: llvm_unreachable("Unexpected broadcast type!");
6808 | case TB_BCAST_D: |
6809 | switch (SpillSize) { |
6810 |     default: llvm_unreachable("Unknown spill size");
6811 | case 16: return X86::VPBROADCASTDZ128rm; |
6812 | case 32: return X86::VPBROADCASTDZ256rm; |
6813 | case 64: return X86::VPBROADCASTDZrm; |
6814 | } |
6815 | break; |
6816 | case TB_BCAST_Q: |
6817 | switch (SpillSize) { |
6818 |     default: llvm_unreachable("Unknown spill size");
6819 | case 16: return X86::VPBROADCASTQZ128rm; |
6820 | case 32: return X86::VPBROADCASTQZ256rm; |
6821 | case 64: return X86::VPBROADCASTQZrm; |
6822 | } |
6823 | break; |
6824 | case TB_BCAST_SS: |
6825 | switch (SpillSize) { |
6826 |     default: llvm_unreachable("Unknown spill size");
6827 | case 16: return X86::VBROADCASTSSZ128rm; |
6828 | case 32: return X86::VBROADCASTSSZ256rm; |
6829 | case 64: return X86::VBROADCASTSSZrm; |
6830 | } |
6831 | break; |
6832 | case TB_BCAST_SD: |
6833 | switch (SpillSize) { |
6834 |     default: llvm_unreachable("Unknown spill size");
6835 | case 16: return X86::VMOVDDUPZ128rm; |
6836 | case 32: return X86::VBROADCASTSDZ256rm; |
6837 | case 64: return X86::VBROADCASTSDZrm; |
6838 | } |
6839 | break; |
6840 | } |
6841 | } |
6842 | |
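     | /// Split the folded instruction \p MI into up to three instructions in
     | /// \p NewMIs: an optional (broadcast) load into \p Reg, the register-form
     | /// data processing instruction, and an optional store of the result.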
6843 | bool X86InstrInfo::unfoldMemoryOperand( |
6844 | MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad, |
6845 | bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const { |
6846 | const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode()); |
6847 | if (I == nullptr) |
6848 | return false; |
6849 | unsigned Opc = I->DstOp; |
6850 | unsigned Index = I->Flags & TB_INDEX_MASK; |
6851 | bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; |
6852 | bool FoldedStore = I->Flags & TB_FOLDED_STORE; |
6853 | bool FoldedBCast = I->Flags & TB_FOLDED_BCAST; |
6854 | if (UnfoldLoad && !FoldedLoad) |
6855 | return false; |
6856 | UnfoldLoad &= FoldedLoad; |
6857 | if (UnfoldStore && !FoldedStore) |
6858 | return false; |
6859 | UnfoldStore &= FoldedStore; |
6860 | |
6861 | const MCInstrDesc &MCID = get(Opc); |
6862 | |
6863 | const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); |
6864 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
6865 | // TODO: Check if 32-byte or greater accesses are slow too? |
6866 | if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass && |
6867 | Subtarget.isUnalignedMem16Slow()) |
6868 | // Without memoperands, loadRegFromAddr and storeRegToStackSlot will |
6869 | // conservatively assume the address is unaligned. That's bad for |
6870 | // performance. |
6871 | return false; |
6872 | SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps; |
6873 |   SmallVector<MachineOperand, 2> BeforeOps;
6874 |   SmallVector<MachineOperand, 2> AfterOps;
6875 |   SmallVector<MachineOperand, 4> ImpOps;
6876 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
6877 | MachineOperand &Op = MI.getOperand(i); |
6878 | if (i >= Index && i < Index + X86::AddrNumOperands) |
6879 | AddrOps.push_back(Op); |
6880 | else if (Op.isReg() && Op.isImplicit()) |
6881 | ImpOps.push_back(Op); |
6882 | else if (i < Index) |
6883 | BeforeOps.push_back(Op); |
6884 | else if (i > Index) |
6885 | AfterOps.push_back(Op); |
6886 | } |
6887 | |
6888 | // Emit the load or broadcast instruction. |
6889 | if (UnfoldLoad) { |
6890 | auto MMOs = extractLoadMMOs(MI.memoperands(), MF); |
6891 | |
6892 | unsigned Opc; |
6893 | if (FoldedBCast) { |
6894 | Opc = getBroadcastOpcode(I, RC, Subtarget); |
6895 | } else { |
6896 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); |
6897 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment; |
6898 | Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget); |
6899 | } |
6900 | |
6901 | DebugLoc DL; |
6902 | MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), Reg); |
6903 | for (unsigned i = 0, e = AddrOps.size(); i != e; ++i) |
6904 | MIB.add(AddrOps[i]); |
6905 | MIB.setMemRefs(MMOs); |
6906 | NewMIs.push_back(MIB); |
6907 | |
6908 | if (UnfoldStore) { |
6909 | // Address operands cannot be marked isKill. |
6910 | for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) { |
6911 | MachineOperand &MO = NewMIs[0]->getOperand(i); |
6912 | if (MO.isReg()) |
6913 | MO.setIsKill(false); |
6914 | } |
6915 | } |
6916 | } |
6917 | |
6918 | // Emit the data processing instruction. |
6919 | MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true); |
6920 | MachineInstrBuilder MIB(MF, DataMI); |
6921 | |
6922 | if (FoldedStore) |
6923 | MIB.addReg(Reg, RegState::Define); |
6924 | for (MachineOperand &BeforeOp : BeforeOps) |
6925 | MIB.add(BeforeOp); |
6926 | if (FoldedLoad) |
6927 | MIB.addReg(Reg); |
6928 | for (MachineOperand &AfterOp : AfterOps) |
6929 | MIB.add(AfterOp); |
6930 | for (MachineOperand &ImpOp : ImpOps) { |
6931 | MIB.addReg(ImpOp.getReg(), |
6932 | getDefRegState(ImpOp.isDef()) | |
6933 | RegState::Implicit | |
6934 | getKillRegState(ImpOp.isKill()) | |
6935 | getDeadRegState(ImpOp.isDead()) | |
6936 | getUndefRegState(ImpOp.isUndef())); |
6937 | } |
6938 | // Change CMP32ri r, 0 back to TEST32rr r, r, etc. |
6939 | switch (DataMI->getOpcode()) { |
6940 | default: break; |
6941 | case X86::CMP64ri32: |
6942 | case X86::CMP64ri8: |
6943 | case X86::CMP32ri: |
6944 | case X86::CMP32ri8: |
6945 | case X86::CMP16ri: |
6946 | case X86::CMP16ri8: |
6947 | case X86::CMP8ri: { |
6948 | MachineOperand &MO0 = DataMI->getOperand(0); |
6949 | MachineOperand &MO1 = DataMI->getOperand(1); |
6950 | if (MO1.isImm() && MO1.getImm() == 0) { |
6951 | unsigned NewOpc; |
6952 | switch (DataMI->getOpcode()) { |
6953 |       default: llvm_unreachable("Unreachable!");
6954 | case X86::CMP64ri8: |
6955 | case X86::CMP64ri32: NewOpc = X86::TEST64rr; break; |
6956 | case X86::CMP32ri8: |
6957 | case X86::CMP32ri: NewOpc = X86::TEST32rr; break; |
6958 | case X86::CMP16ri8: |
6959 | case X86::CMP16ri: NewOpc = X86::TEST16rr; break; |
6960 | case X86::CMP8ri: NewOpc = X86::TEST8rr; break; |
6961 | } |
6962 | DataMI->setDesc(get(NewOpc)); |
6963 | MO1.ChangeToRegister(MO0.getReg(), false); |
6964 | } |
6965 | } |
6966 | } |
6967 | NewMIs.push_back(DataMI); |
6968 | |
6969 | // Emit the store instruction. |
6970 | if (UnfoldStore) { |
6971 | const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF); |
6972 | auto MMOs = extractStoreMMOs(MI.memoperands(), MF); |
6973 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16); |
6974 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment; |
6975 | unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget); |
6976 | DebugLoc DL; |
6977 | MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc)); |
6978 | for (unsigned i = 0, e = AddrOps.size(); i != e; ++i) |
6979 | MIB.add(AddrOps[i]); |
6980 | MIB.addReg(Reg, RegState::Kill); |
6981 | MIB.setMemRefs(MMOs); |
6982 | NewMIs.push_back(MIB); |
6983 | } |
6984 | |
6985 | return true; |
6986 | } |
6987 | |
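     | /// SelectionDAG counterpart of the MachineInstr unfolding above: split the
     | /// folded machine node \p N into separate load, compute, and store nodes.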
6988 | bool |
6989 | X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, |
6990 | SmallVectorImpl<SDNode*> &NewNodes) const { |
6991 | if (!N->isMachineOpcode()) |
6992 | return false; |
6993 | |
6994 | const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode()); |
6995 | if (I == nullptr) |
6996 | return false; |
6997 | unsigned Opc = I->DstOp; |
6998 | unsigned Index = I->Flags & TB_INDEX_MASK; |
6999 | bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; |
7000 | bool FoldedStore = I->Flags & TB_FOLDED_STORE; |
7001 | bool FoldedBCast = I->Flags & TB_FOLDED_BCAST; |
7002 | const MCInstrDesc &MCID = get(Opc); |
7003 | MachineFunction &MF = DAG.getMachineFunction(); |
7004 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
7005 | const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); |
7006 | unsigned NumDefs = MCID.NumDefs; |
7007 | std::vector<SDValue> AddrOps; |
7008 | std::vector<SDValue> BeforeOps; |
7009 | std::vector<SDValue> AfterOps; |
7010 | SDLoc dl(N); |
7011 | unsigned NumOps = N->getNumOperands(); |
7012 | for (unsigned i = 0; i != NumOps-1; ++i) { |
7013 | SDValue Op = N->getOperand(i); |
7014 | if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands) |
7015 | AddrOps.push_back(Op); |
7016 | else if (i < Index-NumDefs) |
7017 | BeforeOps.push_back(Op); |
7018 | else if (i > Index-NumDefs) |
7019 | AfterOps.push_back(Op); |
7020 | } |
7021 | SDValue Chain = N->getOperand(NumOps-1); |
7022 | AddrOps.push_back(Chain); |
7023 | |
7024 | // Emit the load instruction. |
7025 | SDNode *Load = nullptr; |
7026 | if (FoldedLoad) { |
7027 | EVT VT = *TRI.legalclasstypes_begin(*RC); |
7028 | auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF); |
7029 | if (MMOs.empty() && RC == &X86::VR128RegClass && |
7030 | Subtarget.isUnalignedMem16Slow()) |
7031 | // Do not introduce a slow unaligned load. |
7032 | return false; |
7033 | // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte |
7034 | // memory access is slow above. |
7035 | |
7036 | unsigned Opc; |
7037 | if (FoldedBCast) { |
7038 | Opc = getBroadcastOpcode(I, RC, Subtarget); |
7039 | } else { |
7040 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); |
7041 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment; |
7042 | Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget); |
7043 | } |
7044 | |
7045 | Load = DAG.getMachineNode(Opc, dl, VT, MVT::Other, AddrOps); |
7046 | NewNodes.push_back(Load); |
7047 | |
7048 | // Preserve memory reference information. |
7049 | DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs); |
7050 | } |
7051 | |
7052 | // Emit the data processing instruction. |
7053 | std::vector<EVT> VTs; |
7054 | const TargetRegisterClass *DstRC = nullptr; |
7055 | if (MCID.getNumDefs() > 0) { |
7056 | DstRC = getRegClass(MCID, 0, &RI, MF); |
7057 | VTs.push_back(*TRI.legalclasstypes_begin(*DstRC)); |
7058 | } |
7059 | for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { |
7060 | EVT VT = N->getValueType(i); |
7061 | if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs()) |
7062 | VTs.push_back(VT); |
7063 | } |
7064 | if (Load) |
7065 | BeforeOps.push_back(SDValue(Load, 0)); |
7066 | llvm::append_range(BeforeOps, AfterOps); |
7067 | // Change CMP32ri r, 0 back to TEST32rr r, r, etc. |
7068 | switch (Opc) { |
7069 | default: break; |
7070 | case X86::CMP64ri32: |
7071 | case X86::CMP64ri8: |
7072 | case X86::CMP32ri: |
7073 | case X86::CMP32ri8: |
7074 | case X86::CMP16ri: |
7075 | case X86::CMP16ri8: |
7076 | case X86::CMP8ri: |
7077 | if (isNullConstant(BeforeOps[1])) { |
7078 | switch (Opc) { |
7079 |       default: llvm_unreachable("Unreachable!");
7080 | case X86::CMP64ri8: |
7081 | case X86::CMP64ri32: Opc = X86::TEST64rr; break; |
7082 | case X86::CMP32ri8: |
7083 | case X86::CMP32ri: Opc = X86::TEST32rr; break; |
7084 | case X86::CMP16ri8: |
7085 | case X86::CMP16ri: Opc = X86::TEST16rr; break; |
7086 | case X86::CMP8ri: Opc = X86::TEST8rr; break; |
7087 | } |
7088 | BeforeOps[1] = BeforeOps[0]; |
7089 | } |
7090 | } |
7091 |   SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
7092 | NewNodes.push_back(NewNode); |
7093 | |
7094 | // Emit the store instruction. |
7095 | if (FoldedStore) { |
7096 | AddrOps.pop_back(); |
7097 | AddrOps.push_back(SDValue(NewNode, 0)); |
7098 | AddrOps.push_back(Chain); |
7099 | auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF); |
7100 | if (MMOs.empty() && RC == &X86::VR128RegClass && |
7101 | Subtarget.isUnalignedMem16Slow()) |
7102 | // Do not introduce a slow unaligned store. |
7103 | return false; |
7104 | // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte |
7105 | // memory access is slow above. |
7106 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); |
7107 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment; |
7108 | SDNode *Store = |
7109 | DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget), |
7110 | dl, MVT::Other, AddrOps); |
7111 | NewNodes.push_back(Store); |
7112 | |
7113 | // Preserve memory reference information. |
7114 | DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs); |
7115 | } |
7116 | |
7117 | return true; |
7118 | } |
7119 | |
7120 | unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, |
7121 | bool UnfoldLoad, bool UnfoldStore, |
7122 | unsigned *LoadRegIndex) const { |
7123 | const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc); |
7124 | if (I == nullptr) |
7125 | return 0; |
7126 | bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; |
7127 | bool FoldedStore = I->Flags & TB_FOLDED_STORE; |
7128 | if (UnfoldLoad && !FoldedLoad) |
7129 | return 0; |
7130 | if (UnfoldStore && !FoldedStore) |
7131 | return 0; |
7132 | if (LoadRegIndex) |
7133 | *LoadRegIndex = I->Flags & TB_INDEX_MASK; |
7134 | return I->DstOp; |
7135 | } |
7136 | |
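     | /// Return true if \p Load1 and \p Load2 are loads whose address operands
     | /// match except for the displacement, and report the two constant
     | /// displacements in \p Offset1 and \p Offset2.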
7137 | bool |
7138 | X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, |
7139 | int64_t &Offset1, int64_t &Offset2) const { |
7140 | if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode()) |
7141 | return false; |
7142 | unsigned Opc1 = Load1->getMachineOpcode(); |
7143 | unsigned Opc2 = Load2->getMachineOpcode(); |
7144 | switch (Opc1) { |
7145 | default: return false; |
7146 | case X86::MOV8rm: |
7147 | case X86::MOV16rm: |
7148 | case X86::MOV32rm: |
7149 | case X86::MOV64rm: |
7150 | case X86::LD_Fp32m: |
7151 | case X86::LD_Fp64m: |
7152 | case X86::LD_Fp80m: |
7153 | case X86::MOVSSrm: |
7154 | case X86::MOVSSrm_alt: |
7155 | case X86::MOVSDrm: |
7156 | case X86::MOVSDrm_alt: |
7157 | case X86::MMX_MOVD64rm: |
7158 | case X86::MMX_MOVQ64rm: |
7159 | case X86::MOVAPSrm: |
7160 | case X86::MOVUPSrm: |
7161 | case X86::MOVAPDrm: |
7162 | case X86::MOVUPDrm: |
7163 | case X86::MOVDQArm: |
7164 | case X86::MOVDQUrm: |
7165 | // AVX load instructions |
7166 | case X86::VMOVSSrm: |
7167 | case X86::VMOVSSrm_alt: |
7168 | case X86::VMOVSDrm: |
7169 | case X86::VMOVSDrm_alt: |
7170 | case X86::VMOVAPSrm: |
7171 | case X86::VMOVUPSrm: |
7172 | case X86::VMOVAPDrm: |
7173 | case X86::VMOVUPDrm: |
7174 | case X86::VMOVDQArm: |
7175 | case X86::VMOVDQUrm: |
7176 | case X86::VMOVAPSYrm: |
7177 | case X86::VMOVUPSYrm: |
7178 | case X86::VMOVAPDYrm: |
7179 | case X86::VMOVUPDYrm: |
7180 | case X86::VMOVDQAYrm: |
7181 | case X86::VMOVDQUYrm: |
7182 | // AVX512 load instructions |
7183 | case X86::VMOVSSZrm: |
7184 | case X86::VMOVSSZrm_alt: |
7185 | case X86::VMOVSDZrm: |
7186 | case X86::VMOVSDZrm_alt: |
7187 | case X86::VMOVAPSZ128rm: |
7188 | case X86::VMOVUPSZ128rm: |
7189 | case X86::VMOVAPSZ128rm_NOVLX: |
7190 | case X86::VMOVUPSZ128rm_NOVLX: |
7191 | case X86::VMOVAPDZ128rm: |
7192 | case X86::VMOVUPDZ128rm: |
7193 | case X86::VMOVDQU8Z128rm: |
7194 | case X86::VMOVDQU16Z128rm: |
7195 | case X86::VMOVDQA32Z128rm: |
7196 | case X86::VMOVDQU32Z128rm: |
7197 | case X86::VMOVDQA64Z128rm: |
7198 | case X86::VMOVDQU64Z128rm: |
7199 | case X86::VMOVAPSZ256rm: |
7200 | case X86::VMOVUPSZ256rm: |
7201 | case X86::VMOVAPSZ256rm_NOVLX: |
7202 | case X86::VMOVUPSZ256rm_NOVLX: |
7203 | case X86::VMOVAPDZ256rm: |
7204 | case X86::VMOVUPDZ256rm: |
7205 | case X86::VMOVDQU8Z256rm: |
7206 | case X86::VMOVDQU16Z256rm: |
7207 | case X86::VMOVDQA32Z256rm: |
7208 | case X86::VMOVDQU32Z256rm: |
7209 | case X86::VMOVDQA64Z256rm: |
7210 | case X86::VMOVDQU64Z256rm: |
7211 | case X86::VMOVAPSZrm: |
7212 | case X86::VMOVUPSZrm: |
7213 | case X86::VMOVAPDZrm: |
7214 | case X86::VMOVUPDZrm: |
7215 | case X86::VMOVDQU8Zrm: |
7216 | case X86::VMOVDQU16Zrm: |
7217 | case X86::VMOVDQA32Zrm: |
7218 | case X86::VMOVDQU32Zrm: |
7219 | case X86::VMOVDQA64Zrm: |
7220 | case X86::VMOVDQU64Zrm: |
7221 | case X86::KMOVBkm: |
7222 | case X86::KMOVWkm: |
7223 | case X86::KMOVDkm: |
7224 | case X86::KMOVQkm: |
7225 | break; |
7226 | } |
7227 | switch (Opc2) { |
7228 | default: return false; |
7229 | case X86::MOV8rm: |
7230 | case X86::MOV16rm: |
7231 | case X86::MOV32rm: |
7232 | case X86::MOV64rm: |
7233 | case X86::LD_Fp32m: |
7234 | case X86::LD_Fp64m: |
7235 | case X86::LD_Fp80m: |
7236 | case X86::MOVSSrm: |
7237 | case X86::MOVSSrm_alt: |
7238 | case X86::MOVSDrm: |
7239 | case X86::MOVSDrm_alt: |
7240 | case X86::MMX_MOVD64rm: |
7241 | case X86::MMX_MOVQ64rm: |
7242 | case X86::MOVAPSrm: |
7243 | case X86::MOVUPSrm: |
7244 | case X86::MOVAPDrm: |
7245 | case X86::MOVUPDrm: |
7246 | case X86::MOVDQArm: |
7247 | case X86::MOVDQUrm: |
7248 | // AVX load instructions |
7249 | case X86::VMOVSSrm: |
7250 | case X86::VMOVSSrm_alt: |
7251 | case X86::VMOVSDrm: |
7252 | case X86::VMOVSDrm_alt: |
7253 | case X86::VMOVAPSrm: |
7254 | case X86::VMOVUPSrm: |
7255 | case X86::VMOVAPDrm: |
7256 | case X86::VMOVUPDrm: |
7257 | case X86::VMOVDQArm: |
7258 | case X86::VMOVDQUrm: |
7259 | case X86::VMOVAPSYrm: |
7260 | case X86::VMOVUPSYrm: |
7261 | case X86::VMOVAPDYrm: |
7262 | case X86::VMOVUPDYrm: |
7263 | case X86::VMOVDQAYrm: |
7264 | case X86::VMOVDQUYrm: |
7265 | // AVX512 load instructions |
7266 | case X86::VMOVSSZrm: |
7267 | case X86::VMOVSSZrm_alt: |
7268 | case X86::VMOVSDZrm: |
7269 | case X86::VMOVSDZrm_alt: |
7270 | case X86::VMOVAPSZ128rm: |
7271 | case X86::VMOVUPSZ128rm: |
7272 | case X86::VMOVAPSZ128rm_NOVLX: |
7273 | case X86::VMOVUPSZ128rm_NOVLX: |
7274 | case X86::VMOVAPDZ128rm: |
7275 | case X86::VMOVUPDZ128rm: |
7276 | case X86::VMOVDQU8Z128rm: |
7277 | case X86::VMOVDQU16Z128rm: |
7278 | case X86::VMOVDQA32Z128rm: |
7279 | case X86::VMOVDQU32Z128rm: |
7280 | case X86::VMOVDQA64Z128rm: |
7281 | case X86::VMOVDQU64Z128rm: |
7282 | case X86::VMOVAPSZ256rm: |
7283 | case X86::VMOVUPSZ256rm: |
7284 | case X86::VMOVAPSZ256rm_NOVLX: |
7285 | case X86::VMOVUPSZ256rm_NOVLX: |
7286 | case X86::VMOVAPDZ256rm: |
7287 | case X86::VMOVUPDZ256rm: |
7288 | case X86::VMOVDQU8Z256rm: |
7289 | case X86::VMOVDQU16Z256rm: |
7290 | case X86::VMOVDQA32Z256rm: |
7291 | case X86::VMOVDQU32Z256rm: |
7292 | case X86::VMOVDQA64Z256rm: |
7293 | case X86::VMOVDQU64Z256rm: |
7294 | case X86::VMOVAPSZrm: |
7295 | case X86::VMOVUPSZrm: |
7296 | case X86::VMOVAPDZrm: |
7297 | case X86::VMOVUPDZrm: |
7298 | case X86::VMOVDQU8Zrm: |
7299 | case X86::VMOVDQU16Zrm: |
7300 | case X86::VMOVDQA32Zrm: |
7301 | case X86::VMOVDQU32Zrm: |
7302 | case X86::VMOVDQA64Zrm: |
7303 | case X86::VMOVDQU64Zrm: |
7304 | case X86::KMOVBkm: |
7305 | case X86::KMOVWkm: |
7306 | case X86::KMOVDkm: |
7307 | case X86::KMOVQkm: |
7308 | break; |
7309 | } |
7310 | |
7311 |   // Lambda to check whether both loads have the same value at an operand index.
7312 | auto HasSameOp = [&](int I) { |
7313 | return Load1->getOperand(I) == Load2->getOperand(I); |
7314 | }; |
7315 | |
7316 | // All operands except the displacement should match. |
7317 | if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) || |
7318 | !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg)) |
7319 | return false; |
7320 | |
7321 |   // The chain operand must be the same.
7322 | if (!HasSameOp(5)) |
7323 | return false; |
7324 | |
7325 | // Now let's examine if the displacements are constants. |
7326 | auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp)); |
7327 | auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp)); |
7328 | if (!Disp1 || !Disp2) |
7329 | return false; |
7330 | |
7331 | Offset1 = Disp1->getSExtValue(); |
7332 | Offset2 = Disp2->getSExtValue(); |
7333 | return true; |
7334 | } |
7335 | |
7336 | bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, |
7337 | int64_t Offset1, int64_t Offset2, |
7338 | unsigned NumLoads) const { |
7339 |   assert(Offset2 > Offset1);
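     |   // Don't cluster loads whose displacements are more than 64 quadwords
     |   // (512 bytes) apart.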
7340 | if ((Offset2 - Offset1) / 8 > 64) |
7341 | return false; |
7342 | |
7343 | unsigned Opc1 = Load1->getMachineOpcode(); |
7344 | unsigned Opc2 = Load2->getMachineOpcode(); |
7345 | if (Opc1 != Opc2) |
7346 | return false; // FIXME: overly conservative? |
7347 | |
7348 | switch (Opc1) { |
7349 | default: break; |
7350 | case X86::LD_Fp32m: |
7351 | case X86::LD_Fp64m: |
7352 | case X86::LD_Fp80m: |
7353 | case X86::MMX_MOVD64rm: |
7354 | case X86::MMX_MOVQ64rm: |
7355 | return false; |
7356 | } |
7357 | |
7358 | EVT VT = Load1->getValueType(0); |
7359 | switch (VT.getSimpleVT().SimpleTy) { |
7360 | default: |
7361 | // XMM registers. In 64-bit mode we can be a bit more aggressive since we |
7362 | // have 16 of them to play with. |
7363 | if (Subtarget.is64Bit()) { |
7364 | if (NumLoads >= 3) |
7365 | return false; |
7366 | } else if (NumLoads) { |
7367 | return false; |
7368 | } |
7369 | break; |
7370 | case MVT::i8: |
7371 | case MVT::i16: |
7372 | case MVT::i32: |
7373 | case MVT::i64: |
7374 | case MVT::f32: |
7375 | case MVT::f64: |
7376 | if (NumLoads) |
7377 | return false; |
7378 | break; |
7379 | } |
7380 | |
7381 | return true; |
7382 | } |
7383 | |
7384 | bool X86InstrInfo::isSchedulingBoundary(const MachineInstr &MI, |
7385 | const MachineBasicBlock *MBB, |
7386 | const MachineFunction &MF) const { |
7387 | |
7388 | // ENDBR instructions should not be scheduled around. |
7389 | unsigned Opcode = MI.getOpcode(); |
7390 | if (Opcode == X86::ENDBR64 || Opcode == X86::ENDBR32 || |
7391 | Opcode == X86::PLDTILECFGV) |
7392 | return true; |
7393 | |
7394 | return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF); |
7395 | } |
7396 | |
7397 | bool X86InstrInfo:: |
7398 | reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { |
7399 |   assert(Cond.size() == 1 && "Invalid X86 branch condition!");
7400 | X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm()); |
7401 | Cond[0].setImm(GetOppositeBranchCondition(CC)); |
7402 | return false; |
7403 | } |
7404 | |
7405 | bool X86InstrInfo:: |
7406 | isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { |
7407 | // FIXME: Return false for x87 stack register classes for now. We can't |
7408 | // allow any loads of these registers before FpGet_ST0_80. |
7409 | return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass || |
7410 | RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass || |
7411 | RC == &X86::RFP80RegClass); |
7412 | } |
7413 | |
7414 | /// Return a virtual register initialized with the global base register
7415 | /// value. Output instructions required to initialize the register in the
7416 | /// function entry block, if necessary.
7417 | /// |
7418 | /// TODO: Eliminate this and move the code to X86MachineFunctionInfo. |
7419 | /// |
7420 | unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const { |
7421 |   assert((!Subtarget.is64Bit() ||
7422 |           MF->getTarget().getCodeModel() == CodeModel::Medium ||
7423 |           MF->getTarget().getCodeModel() == CodeModel::Large) &&
7424 |          "X86-64 PIC uses RIP relative addressing");
7425 | |
7426 | X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); |
7427 | Register GlobalBaseReg = X86FI->getGlobalBaseReg(); |
7428 | if (GlobalBaseReg != 0) |
7429 | return GlobalBaseReg; |
7430 | |
7431 | // Create the register. The code to initialize it is inserted |
7432 | // later, by the CGBR pass (below). |
7433 | MachineRegisterInfo &RegInfo = MF->getRegInfo(); |
7434 | GlobalBaseReg = RegInfo.createVirtualRegister( |
7435 | Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass); |
7436 | X86FI->setGlobalBaseReg(GlobalBaseReg); |
7437 | return GlobalBaseReg; |
7438 | } |
7439 | |
7440 | // These are the replaceable SSE instructions. Some of these have Int variants |
7441 | // that we don't include here. We don't want to replace instructions selected |
7442 | // by intrinsics. |
7443 | static const uint16_t ReplaceableInstrs[][3] = { |
7444 | //PackedSingle PackedDouble PackedInt |
7445 | { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr }, |
7446 | { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm }, |
7447 | { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr }, |
7448 | { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr }, |
7449 | { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm }, |
7450 | { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr }, |
7451 | { X86::MOVSDmr, X86::MOVSDmr, X86::MOVPQI2QImr }, |
7452 | { X86::MOVSSmr, X86::MOVSSmr, X86::MOVPDI2DImr }, |
7453 | { X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm }, |
7454 | { X86::MOVSDrm_alt,X86::MOVSDrm_alt,X86::MOVQI2PQIrm }, |
7455 | { X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm }, |
7456 | { X86::MOVSSrm_alt,X86::MOVSSrm_alt,X86::MOVDI2PDIrm }, |
7457 | { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr }, |
7458 | { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm }, |
7459 | { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr }, |
7460 | { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm }, |
7461 | { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr }, |
7462 | { X86::ORPSrm, X86::ORPDrm, X86::PORrm }, |
7463 | { X86::ORPSrr, X86::ORPDrr, X86::PORrr }, |
7464 | { X86::XORPSrm, X86::XORPDrm, X86::PXORrm }, |
7465 | { X86::XORPSrr, X86::XORPDrr, X86::PXORrr }, |
7466 | { X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm }, |
7467 | { X86::MOVLHPSrr, X86::UNPCKLPDrr, X86::PUNPCKLQDQrr }, |
7468 | { X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm }, |
7469 | { X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr }, |
7470 | { X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm }, |
7471 | { X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr }, |
7472 | { X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm }, |
7473 | { X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr }, |
7474 | { X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr }, |
7475 | { X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr }, |
7476 | // AVX 128-bit support |
7477 | { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr }, |
7478 | { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm }, |
7479 | { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr }, |
7480 | { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr }, |
7481 | { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm }, |
7482 | { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr }, |
7483 | { X86::VMOVSDmr, X86::VMOVSDmr, X86::VMOVPQI2QImr }, |
7484 | { X86::VMOVSSmr, X86::VMOVSSmr, X86::VMOVPDI2DImr }, |
7485 | { X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm }, |
7486 | { X86::VMOVSDrm_alt,X86::VMOVSDrm_alt,X86::VMOVQI2PQIrm }, |
7487 | { X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm }, |
7488 | { X86::VMOVSSrm_alt,X86::VMOVSSrm_alt,X86::VMOVDI2PDIrm }, |
7489 | { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr }, |
7490 | { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm }, |
7491 | { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr }, |
7492 | { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm }, |
7493 | { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr }, |
7494 | { X86::VORPSrm, X86::VORPDrm, X86::VPORrm }, |
7495 | { X86::VORPSrr, X86::VORPDrr, X86::VPORrr }, |
7496 | { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm }, |
7497 | { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr }, |
7498 | { X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm }, |
7499 | { X86::VMOVLHPSrr, X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr }, |
7500 | { X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm }, |
7501 | { X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr }, |
7502 | { X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm }, |
7503 | { X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr }, |
7504 | { X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm }, |
7505 | { X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr }, |
7506 | { X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr }, |
7507 | { X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr }, |
7508 | // AVX 256-bit support |
7509 | { X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr }, |
7510 | { X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm }, |
7511 | { X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr }, |
7512 | { X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr }, |
7513 | { X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm }, |
7514 | { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr }, |
7515 | { X86::VPERMPSYrm, X86::VPERMPSYrm, X86::VPERMDYrm }, |
7516 | { X86::VPERMPSYrr, X86::VPERMPSYrr, X86::VPERMDYrr }, |
7517 | { X86::VPERMPDYmi, X86::VPERMPDYmi, X86::VPERMQYmi }, |
7518 | { X86::VPERMPDYri, X86::VPERMPDYri, X86::VPERMQYri }, |
7519 | // AVX512 support |
7520 | { X86::VMOVLPSZ128mr, X86::VMOVLPDZ128mr, X86::VMOVPQI2QIZmr }, |
7521 | { X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr }, |
7522 | { X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr }, |
7523 | { X86::VMOVNTPSZmr, X86::VMOVNTPDZmr, X86::VMOVNTDQZmr }, |
7524 | { X86::VMOVSDZmr, X86::VMOVSDZmr, X86::VMOVPQI2QIZmr }, |
7525 | { X86::VMOVSSZmr, X86::VMOVSSZmr, X86::VMOVPDI2DIZmr }, |
7526 | { X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm }, |
7527 | { X86::VMOVSDZrm_alt, X86::VMOVSDZrm_alt, X86::VMOVQI2PQIZrm }, |
7528 | { X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm }, |
7529 | { X86::VMOVSSZrm_alt, X86::VMOVSSZrm_alt, X86::VMOVDI2PDIZrm }, |
7530 | { X86::VBROADCASTSSZ128rr,X86::VBROADCASTSSZ128rr,X86::VPBROADCASTDZ128rr }, |
7531 | { X86::VBROADCASTSSZ128rm,X86::VBROADCASTSSZ128rm,X86::VPBROADCASTDZ128rm }, |
7532 | { X86::VBROADCASTSSZ256rr,X86::VBROADCASTSSZ256rr,X86::VPBROADCASTDZ256rr }, |
7533 | { X86::VBROADCASTSSZ256rm,X86::VBROADCASTSSZ256rm,X86::VPBROADCASTDZ256rm }, |
7534 | { X86::VBROADCASTSSZrr, X86::VBROADCASTSSZrr, X86::VPBROADCASTDZrr }, |
7535 | { X86::VBROADCASTSSZrm, X86::VBROADCASTSSZrm, X86::VPBROADCASTDZrm }, |
7536 | { X86::VMOVDDUPZ128rr, X86::VMOVDDUPZ128rr, X86::VPBROADCASTQZ128rr }, |
7537 | { X86::VMOVDDUPZ128rm, X86::VMOVDDUPZ128rm, X86::VPBROADCASTQZ128rm }, |
7538 | { X86::VBROADCASTSDZ256rr,X86::VBROADCASTSDZ256rr,X86::VPBROADCASTQZ256rr }, |
7539 | { X86::VBROADCASTSDZ256rm,X86::VBROADCASTSDZ256rm,X86::VPBROADCASTQZ256rm }, |
7540 | { X86::VBROADCASTSDZrr, X86::VBROADCASTSDZrr, X86::VPBROADCASTQZrr }, |
7541 | { X86::VBROADCASTSDZrm, X86::VBROADCASTSDZrm, X86::VPBROADCASTQZrm }, |
7542 | { X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrr, X86::VINSERTI32x4Zrr }, |
7543 | { X86::VINSERTF32x4Zrm, X86::VINSERTF32x4Zrm, X86::VINSERTI32x4Zrm }, |
7544 | { X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrr, X86::VINSERTI32x8Zrr }, |
7545 | { X86::VINSERTF32x8Zrm, X86::VINSERTF32x8Zrm, X86::VINSERTI32x8Zrm }, |
7546 | { X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrr, X86::VINSERTI64x2Zrr }, |
7547 | { X86::VINSERTF64x2Zrm, X86::VINSERTF64x2Zrm, X86::VINSERTI64x2Zrm }, |
7548 | { X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrr, X86::VINSERTI64x4Zrr }, |
7549 | { X86::VINSERTF64x4Zrm, X86::VINSERTF64x4Zrm, X86::VINSERTI64x4Zrm }, |
7550 | { X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr }, |
7551 | { X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm }, |
7552 | { X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr }, |
7553 | { X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm }, |
7554 | { X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zrr, X86::VEXTRACTI32x4Zrr }, |
7555 | { X86::VEXTRACTF32x4Zmr, X86::VEXTRACTF32x4Zmr, X86::VEXTRACTI32x4Zmr }, |
7556 | { X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zrr, X86::VEXTRACTI32x8Zrr }, |
7557 | { X86::VEXTRACTF32x8Zmr, X86::VEXTRACTF32x8Zmr, X86::VEXTRACTI32x8Zmr }, |
7558 | { X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zrr, X86::VEXTRACTI64x2Zrr }, |
7559 | { X86::VEXTRACTF64x2Zmr, X86::VEXTRACTF64x2Zmr, X86::VEXTRACTI64x2Zmr }, |
7560 | { X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zrr, X86::VEXTRACTI64x4Zrr }, |
7561 | { X86::VEXTRACTF64x4Zmr, X86::VEXTRACTF64x4Zmr, X86::VEXTRACTI64x4Zmr }, |
7562 | { X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr }, |
7563 | { X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr }, |
7564 | { X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr }, |
7565 | { X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr }, |
7566 | { X86::VPERMILPSmi, X86::VPERMILPSmi, X86::VPSHUFDmi }, |
7567 | { X86::VPERMILPSri, X86::VPERMILPSri, X86::VPSHUFDri }, |
7568 | { X86::VPERMILPSZ128mi, X86::VPERMILPSZ128mi, X86::VPSHUFDZ128mi }, |
7569 | { X86::VPERMILPSZ128ri, X86::VPERMILPSZ128ri, X86::VPSHUFDZ128ri }, |
7570 | { X86::VPERMILPSZ256mi, X86::VPERMILPSZ256mi, X86::VPSHUFDZ256mi }, |
7571 | { X86::VPERMILPSZ256ri, X86::VPERMILPSZ256ri, X86::VPSHUFDZ256ri }, |
7572 | { X86::VPERMILPSZmi, X86::VPERMILPSZmi, X86::VPSHUFDZmi }, |
7573 | { X86::VPERMILPSZri, X86::VPERMILPSZri, X86::VPSHUFDZri }, |
7574 | { X86::VPERMPSZ256rm, X86::VPERMPSZ256rm, X86::VPERMDZ256rm }, |
7575 | { X86::VPERMPSZ256rr, X86::VPERMPSZ256rr, X86::VPERMDZ256rr }, |
7576 | { X86::VPERMPDZ256mi, X86::VPERMPDZ256mi, X86::VPERMQZ256mi }, |
7577 | { X86::VPERMPDZ256ri, X86::VPERMPDZ256ri, X86::VPERMQZ256ri }, |
7578 | { X86::VPERMPDZ256rm, X86::VPERMPDZ256rm, X86::VPERMQZ256rm }, |
7579 | { X86::VPERMPDZ256rr, X86::VPERMPDZ256rr, X86::VPERMQZ256rr }, |
7580 | { X86::VPERMPSZrm, X86::VPERMPSZrm, X86::VPERMDZrm }, |
7581 | { X86::VPERMPSZrr, X86::VPERMPSZrr, X86::VPERMDZrr }, |
7582 | { X86::VPERMPDZmi, X86::VPERMPDZmi, X86::VPERMQZmi }, |
7583 | { X86::VPERMPDZri, X86::VPERMPDZri, X86::VPERMQZri }, |
7584 | { X86::VPERMPDZrm, X86::VPERMPDZrm, X86::VPERMQZrm }, |
7585 | { X86::VPERMPDZrr, X86::VPERMPDZrr, X86::VPERMQZrr }, |
7586 | { X86::VUNPCKLPDZ256rm, X86::VUNPCKLPDZ256rm, X86::VPUNPCKLQDQZ256rm }, |
7587 | { X86::VUNPCKLPDZ256rr, X86::VUNPCKLPDZ256rr, X86::VPUNPCKLQDQZ256rr }, |
7588 | { X86::VUNPCKHPDZ256rm, X86::VUNPCKHPDZ256rm, X86::VPUNPCKHQDQZ256rm }, |
7589 | { X86::VUNPCKHPDZ256rr, X86::VUNPCKHPDZ256rr, X86::VPUNPCKHQDQZ256rr }, |
7590 | { X86::VUNPCKLPSZ256rm, X86::VUNPCKLPSZ256rm, X86::VPUNPCKLDQZ256rm }, |
7591 | { X86::VUNPCKLPSZ256rr, X86::VUNPCKLPSZ256rr, X86::VPUNPCKLDQZ256rr }, |
7592 | { X86::VUNPCKHPSZ256rm, X86::VUNPCKHPSZ256rm, X86::VPUNPCKHDQZ256rm }, |
7593 | { X86::VUNPCKHPSZ256rr, X86::VUNPCKHPSZ256rr, X86::VPUNPCKHDQZ256rr }, |
7594 | { X86::VUNPCKLPDZ128rm, X86::VUNPCKLPDZ128rm, X86::VPUNPCKLQDQZ128rm }, |
7595 | { X86::VMOVLHPSZrr, X86::VUNPCKLPDZ128rr, X86::VPUNPCKLQDQZ128rr }, |
7596 | { X86::VUNPCKHPDZ128rm, X86::VUNPCKHPDZ128rm, X86::VPUNPCKHQDQZ128rm }, |
7597 | { X86::VUNPCKHPDZ128rr, X86::VUNPCKHPDZ128rr, X86::VPUNPCKHQDQZ128rr }, |
7598 | { X86::VUNPCKLPSZ128rm, X86::VUNPCKLPSZ128rm, X86::VPUNPCKLDQZ128rm }, |
7599 | { X86::VUNPCKLPSZ128rr, X86::VUNPCKLPSZ128rr, X86::VPUNPCKLDQZ128rr }, |
7600 | { X86::VUNPCKHPSZ128rm, X86::VUNPCKHPSZ128rm, X86::VPUNPCKHDQZ128rm }, |
7601 | { X86::VUNPCKHPSZ128rr, X86::VUNPCKHPSZ128rr, X86::VPUNPCKHDQZ128rr }, |
7602 | { X86::VUNPCKLPDZrm, X86::VUNPCKLPDZrm, X86::VPUNPCKLQDQZrm }, |
7603 | { X86::VUNPCKLPDZrr, X86::VUNPCKLPDZrr, X86::VPUNPCKLQDQZrr }, |
7604 | { X86::VUNPCKHPDZrm, X86::VUNPCKHPDZrm, X86::VPUNPCKHQDQZrm }, |
7605 | { X86::VUNPCKHPDZrr, X86::VUNPCKHPDZrr, X86::VPUNPCKHQDQZrr }, |
7606 | { X86::VUNPCKLPSZrm, X86::VUNPCKLPSZrm, X86::VPUNPCKLDQZrm }, |
7607 | { X86::VUNPCKLPSZrr, X86::VUNPCKLPSZrr, X86::VPUNPCKLDQZrr }, |
7608 | { X86::VUNPCKHPSZrm, X86::VUNPCKHPSZrm, X86::VPUNPCKHDQZrm }, |
7609 | { X86::VUNPCKHPSZrr, X86::VUNPCKHPSZrr, X86::VPUNPCKHDQZrr }, |
7610 | { X86::VEXTRACTPSZmr, X86::VEXTRACTPSZmr, X86::VPEXTRDZmr }, |
7611 | { X86::VEXTRACTPSZrr, X86::VEXTRACTPSZrr, X86::VPEXTRDZrr }, |
7612 | }; |
7613 | |
7614 | static const uint16_t ReplaceableInstrsAVX2[][3] = { |
7615 | //PackedSingle PackedDouble PackedInt |
7616 | { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm }, |
7617 | { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr }, |
7618 | { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm }, |
7619 | { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr }, |
7620 | { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm }, |
7621 | { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr }, |
7622 | { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm }, |
7623 | { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr }, |
7624 | { X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm }, |
7625 | { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr }, |
7626 | { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm}, |
7627 | { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr}, |
7628 | { X86::VMOVDDUPrm, X86::VMOVDDUPrm, X86::VPBROADCASTQrm}, |
7629 | { X86::VMOVDDUPrr, X86::VMOVDDUPrr, X86::VPBROADCASTQrr}, |
7630 | { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr}, |
7631 | { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm}, |
7632 | { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr}, |
7633 | { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm}, |
7634 | { X86::VBROADCASTF128, X86::VBROADCASTF128, X86::VBROADCASTI128 }, |
7635 | { X86::VBLENDPSYrri, X86::VBLENDPSYrri, X86::VPBLENDDYrri }, |
7636 | { X86::VBLENDPSYrmi, X86::VBLENDPSYrmi, X86::VPBLENDDYrmi }, |
7637 | { X86::VPERMILPSYmi, X86::VPERMILPSYmi, X86::VPSHUFDYmi }, |
7638 | { X86::VPERMILPSYri, X86::VPERMILPSYri, X86::VPSHUFDYri }, |
7639 | { X86::VUNPCKLPDYrm, X86::VUNPCKLPDYrm, X86::VPUNPCKLQDQYrm }, |
7640 | { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrr, X86::VPUNPCKLQDQYrr }, |
7641 | { X86::VUNPCKHPDYrm, X86::VUNPCKHPDYrm, X86::VPUNPCKHQDQYrm }, |
7642 | { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrr, X86::VPUNPCKHQDQYrr }, |
7643 | { X86::VUNPCKLPSYrm, X86::VUNPCKLPSYrm, X86::VPUNPCKLDQYrm }, |
7644 | { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrr, X86::VPUNPCKLDQYrr }, |
7645 | { X86::VUNPCKHPSYrm, X86::VUNPCKHPSYrm, X86::VPUNPCKHDQYrm }, |
7646 | { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrr, X86::VPUNPCKHDQYrr }, |
7647 | }; |
7648 | |
7649 | static const uint16_t ReplaceableInstrsFP[][3] = { |
7650 | //PackedSingle PackedDouble (no PackedInt equivalent)
7651 | { X86::MOVLPSrm, X86::MOVLPDrm, X86::INSTRUCTION_LIST_END }, |
7652 | { X86::MOVHPSrm, X86::MOVHPDrm, X86::INSTRUCTION_LIST_END }, |
7653 | { X86::MOVHPSmr, X86::MOVHPDmr, X86::INSTRUCTION_LIST_END }, |
7654 | { X86::VMOVLPSrm, X86::VMOVLPDrm, X86::INSTRUCTION_LIST_END }, |
7655 | { X86::VMOVHPSrm, X86::VMOVHPDrm, X86::INSTRUCTION_LIST_END }, |
7656 | { X86::VMOVHPSmr, X86::VMOVHPDmr, X86::INSTRUCTION_LIST_END }, |
7657 | { X86::VMOVLPSZ128rm, X86::VMOVLPDZ128rm, X86::INSTRUCTION_LIST_END }, |
7658 | { X86::VMOVHPSZ128rm, X86::VMOVHPDZ128rm, X86::INSTRUCTION_LIST_END }, |
7659 | { X86::VMOVHPSZ128mr, X86::VMOVHPDZ128mr, X86::INSTRUCTION_LIST_END }, |
7660 | }; |
7661 | |
7662 | static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = { |
7663 | //PackedSingle PackedDouble PackedInt |
7664 | { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr }, |
7665 | { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr }, |
7666 | { X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm }, |
7667 | { X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr }, |
7668 | }; |
7669 | |
7670 | static const uint16_t ReplaceableInstrsAVX512[][4] = { |
7671 | // Two integer columns for 64-bit and 32-bit elements. |
7672 | //PackedSingle PackedDouble PackedInt PackedInt |
7673 | { X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr }, |
7674 | { X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm }, |
7675 | { X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr }, |
7676 | { X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr }, |
7677 | { X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm }, |
7678 | { X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr }, |
7679 | { X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm }, |
7680 | { X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr }, |
7681 | { X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr }, |
7682 | { X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm }, |
7683 | { X86::VMOVAPSZmr, X86::VMOVAPDZmr, X86::VMOVDQA64Zmr, X86::VMOVDQA32Zmr }, |
7684 | { X86::VMOVAPSZrm, X86::VMOVAPDZrm, X86::VMOVDQA64Zrm, X86::VMOVDQA32Zrm }, |
7685 | { X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr }, |
7686 | { X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr }, |
7687 | { X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm }, |
7688 | }; |
7689 | |
7690 | static const uint16_t ReplaceableInstrsAVX512DQ[][4] = { |
7691 | // Two integer columns for 64-bit and 32-bit elements. |
7692 | //PackedSingle PackedDouble PackedInt PackedInt |
7693 | { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm }, |
7694 | { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr }, |
7695 | { X86::VANDPSZ128rm, X86::VANDPDZ128rm, X86::VPANDQZ128rm, X86::VPANDDZ128rm }, |
7696 | { X86::VANDPSZ128rr, X86::VANDPDZ128rr, X86::VPANDQZ128rr, X86::VPANDDZ128rr }, |
7697 | { X86::VORPSZ128rm, X86::VORPDZ128rm, X86::VPORQZ128rm, X86::VPORDZ128rm }, |
7698 | { X86::VORPSZ128rr, X86::VORPDZ128rr, X86::VPORQZ128rr, X86::VPORDZ128rr }, |
7699 | { X86::VXORPSZ128rm, X86::VXORPDZ128rm, X86::VPXORQZ128rm, X86::VPXORDZ128rm }, |
7700 | { X86::VXORPSZ128rr, X86::VXORPDZ128rr, X86::VPXORQZ128rr, X86::VPXORDZ128rr }, |
7701 | { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm }, |
7702 | { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr }, |
7703 | { X86::VANDPSZ256rm, X86::VANDPDZ256rm, X86::VPANDQZ256rm, X86::VPANDDZ256rm }, |
7704 | { X86::VANDPSZ256rr, X86::VANDPDZ256rr, X86::VPANDQZ256rr, X86::VPANDDZ256rr }, |
7705 | { X86::VORPSZ256rm, X86::VORPDZ256rm, X86::VPORQZ256rm, X86::VPORDZ256rm }, |
7706 | { X86::VORPSZ256rr, X86::VORPDZ256rr, X86::VPORQZ256rr, X86::VPORDZ256rr }, |
7707 | { X86::VXORPSZ256rm, X86::VXORPDZ256rm, X86::VPXORQZ256rm, X86::VPXORDZ256rm }, |
7708 | { X86::VXORPSZ256rr, X86::VXORPDZ256rr, X86::VPXORQZ256rr, X86::VPXORDZ256rr }, |
7709 | { X86::VANDNPSZrm, X86::VANDNPDZrm, X86::VPANDNQZrm, X86::VPANDNDZrm }, |
7710 | { X86::VANDNPSZrr, X86::VANDNPDZrr, X86::VPANDNQZrr, X86::VPANDNDZrr }, |
7711 | { X86::VANDPSZrm, X86::VANDPDZrm, X86::VPANDQZrm, X86::VPANDDZrm }, |
7712 | { X86::VANDPSZrr, X86::VANDPDZrr, X86::VPANDQZrr, X86::VPANDDZrr }, |
7713 | { X86::VORPSZrm, X86::VORPDZrm, X86::VPORQZrm, X86::VPORDZrm }, |
7714 | { X86::VORPSZrr, X86::VORPDZrr, X86::VPORQZrr, X86::VPORDZrr }, |
7715 | { X86::VXORPSZrm, X86::VXORPDZrm, X86::VPXORQZrm, X86::VPXORDZrm }, |
7716 | { X86::VXORPSZrr, X86::VXORPDZrr, X86::VPXORQZrr, X86::VPXORDZrr }, |
7717 | }; |
7718 | |
7719 | static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = { |
7720 | // Two integer columns for 64-bit and 32-bit elements. |
7721 | //PackedSingle PackedDouble |
7722 | //PackedInt PackedInt |
7723 | { X86::VANDNPSZ128rmk, X86::VANDNPDZ128rmk, |
7724 | X86::VPANDNQZ128rmk, X86::VPANDNDZ128rmk }, |
7725 | { X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz, |
7726 | X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz }, |
7727 | { X86::VANDNPSZ128rrk, X86::VANDNPDZ128rrk, |
7728 | X86::VPANDNQZ128rrk, X86::VPANDNDZ128rrk }, |
7729 | { X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz, |
7730 | X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz }, |
7731 | { X86::VANDPSZ128rmk, X86::VANDPDZ128rmk, |
7732 | X86::VPANDQZ128rmk, X86::VPANDDZ128rmk }, |
7733 | { X86::VANDPSZ128rmkz, X86::VANDPDZ128rmkz, |
7734 | X86::VPANDQZ128rmkz, X86::VPANDDZ128rmkz }, |
7735 | { X86::VANDPSZ128rrk, X86::VANDPDZ128rrk, |
7736 | X86::VPANDQZ128rrk, X86::VPANDDZ128rrk }, |
7737 | { X86::VANDPSZ128rrkz, X86::VANDPDZ128rrkz, |
7738 | X86::VPANDQZ128rrkz, X86::VPANDDZ128rrkz }, |
7739 | { X86::VORPSZ128rmk, X86::VORPDZ128rmk, |
7740 | X86::VPORQZ128rmk, X86::VPORDZ128rmk }, |
7741 | { X86::VORPSZ128rmkz, X86::VORPDZ128rmkz, |
7742 | X86::VPORQZ128rmkz, X86::VPORDZ128rmkz }, |
7743 | { X86::VORPSZ128rrk, X86::VORPDZ128rrk, |
7744 | X86::VPORQZ128rrk, X86::VPORDZ128rrk }, |
7745 | { X86::VORPSZ128rrkz, X86::VORPDZ128rrkz, |
7746 | X86::VPORQZ128rrkz, X86::VPORDZ128rrkz }, |
7747 | { X86::VXORPSZ128rmk, X86::VXORPDZ128rmk, |
7748 | X86::VPXORQZ128rmk, X86::VPXORDZ128rmk }, |
7749 | { X86::VXORPSZ128rmkz, X86::VXORPDZ128rmkz, |
7750 | X86::VPXORQZ128rmkz, X86::VPXORDZ128rmkz }, |
7751 | { X86::VXORPSZ128rrk, X86::VXORPDZ128rrk, |
7752 | X86::VPXORQZ128rrk, X86::VPXORDZ128rrk }, |
7753 | { X86::VXORPSZ128rrkz, X86::VXORPDZ128rrkz, |
7754 | X86::VPXORQZ128rrkz, X86::VPXORDZ128rrkz }, |
7755 | { X86::VANDNPSZ256rmk, X86::VANDNPDZ256rmk, |
7756 | X86::VPANDNQZ256rmk, X86::VPANDNDZ256rmk }, |
7757 | { X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz, |
7758 | X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz }, |
7759 | { X86::VANDNPSZ256rrk, X86::VANDNPDZ256rrk, |
7760 | X86::VPANDNQZ256rrk, X86::VPANDNDZ256rrk }, |
7761 | { X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz, |
7762 | X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz }, |
7763 | { X86::VANDPSZ256rmk, X86::VANDPDZ256rmk, |
7764 | X86::VPANDQZ256rmk, X86::VPANDDZ256rmk }, |
7765 | { X86::VANDPSZ256rmkz, X86::VANDPDZ256rmkz, |
7766 | X86::VPANDQZ256rmkz, X86::VPANDDZ256rmkz }, |
7767 | { X86::VANDPSZ256rrk, X86::VANDPDZ256rrk, |
7768 | X86::VPANDQZ256rrk, X86::VPANDDZ256rrk }, |
7769 | { X86::VANDPSZ256rrkz, X86::VANDPDZ256rrkz, |
7770 | X86::VPANDQZ256rrkz, X86::VPANDDZ256rrkz }, |
7771 | { X86::VORPSZ256rmk, X86::VORPDZ256rmk, |
7772 | X86::VPORQZ256rmk, X86::VPORDZ256rmk }, |
7773 | { X86::VORPSZ256rmkz, X86::VORPDZ256rmkz, |
7774 | X86::VPORQZ256rmkz, X86::VPORDZ256rmkz }, |
7775 | { X86::VORPSZ256rrk, X86::VORPDZ256rrk, |
7776 | X86::VPORQZ256rrk, X86::VPORDZ256rrk }, |
7777 | { X86::VORPSZ256rrkz, X86::VORPDZ256rrkz, |
7778 | X86::VPORQZ256rrkz, X86::VPORDZ256rrkz }, |
7779 | { X86::VXORPSZ256rmk, X86::VXORPDZ256rmk, |
7780 | X86::VPXORQZ256rmk, X86::VPXORDZ256rmk }, |
7781 | { X86::VXORPSZ256rmkz, X86::VXORPDZ256rmkz, |
7782 | X86::VPXORQZ256rmkz, X86::VPXORDZ256rmkz }, |
7783 | { X86::VXORPSZ256rrk, X86::VXORPDZ256rrk, |
7784 | X86::VPXORQZ256rrk, X86::VPXORDZ256rrk }, |
7785 | { X86::VXORPSZ256rrkz, X86::VXORPDZ256rrkz, |
7786 | X86::VPXORQZ256rrkz, X86::VPXORDZ256rrkz }, |
7787 | { X86::VANDNPSZrmk, X86::VANDNPDZrmk, |
7788 | X86::VPANDNQZrmk, X86::VPANDNDZrmk }, |
7789 | { X86::VANDNPSZrmkz, X86::VANDNPDZrmkz, |
7790 | X86::VPANDNQZrmkz, X86::VPANDNDZrmkz }, |
7791 | { X86::VANDNPSZrrk, X86::VANDNPDZrrk, |
7792 | X86::VPANDNQZrrk, X86::VPANDNDZrrk }, |
7793 | { X86::VANDNPSZrrkz, X86::VANDNPDZrrkz, |
7794 | X86::VPANDNQZrrkz, X86::VPANDNDZrrkz }, |
7795 | { X86::VANDPSZrmk, X86::VANDPDZrmk, |
7796 | X86::VPANDQZrmk, X86::VPANDDZrmk }, |
7797 | { X86::VANDPSZrmkz, X86::VANDPDZrmkz, |
7798 | X86::VPANDQZrmkz, X86::VPANDDZrmkz }, |
7799 | { X86::VANDPSZrrk, X86::VANDPDZrrk, |
7800 | X86::VPANDQZrrk, X86::VPANDDZrrk }, |
7801 | { X86::VANDPSZrrkz, X86::VANDPDZrrkz, |
7802 | X86::VPANDQZrrkz, X86::VPANDDZrrkz }, |
7803 | { X86::VORPSZrmk, X86::VORPDZrmk, |
7804 | X86::VPORQZrmk, X86::VPORDZrmk }, |
7805 | { X86::VORPSZrmkz, X86::VORPDZrmkz, |
7806 | X86::VPORQZrmkz, X86::VPORDZrmkz }, |
7807 | { X86::VORPSZrrk, X86::VORPDZrrk, |
7808 | X86::VPORQZrrk, X86::VPORDZrrk }, |
7809 | { X86::VORPSZrrkz, X86::VORPDZrrkz, |
7810 | X86::VPORQZrrkz, X86::VPORDZrrkz }, |
7811 | { X86::VXORPSZrmk, X86::VXORPDZrmk, |
7812 | X86::VPXORQZrmk, X86::VPXORDZrmk }, |
7813 | { X86::VXORPSZrmkz, X86::VXORPDZrmkz, |
7814 | X86::VPXORQZrmkz, X86::VPXORDZrmkz }, |
7815 | { X86::VXORPSZrrk, X86::VXORPDZrrk, |
7816 | X86::VPXORQZrrk, X86::VPXORDZrrk }, |
7817 | { X86::VXORPSZrrkz, X86::VXORPDZrrkz, |
7818 | X86::VPXORQZrrkz, X86::VPXORDZrrkz }, |
7819 | // Broadcast loads can be handled the same as masked operations to avoid |
7820 | // changing element size. |
7821 | { X86::VANDNPSZ128rmb, X86::VANDNPDZ128rmb, |
7822 | X86::VPANDNQZ128rmb, X86::VPANDNDZ128rmb }, |
7823 | { X86::VANDPSZ128rmb, X86::VANDPDZ128rmb, |
7824 | X86::VPANDQZ128rmb, X86::VPANDDZ128rmb }, |
7825 | { X86::VORPSZ128rmb, X86::VORPDZ128rmb, |
7826 | X86::VPORQZ128rmb, X86::VPORDZ128rmb }, |
7827 | { X86::VXORPSZ128rmb, X86::VXORPDZ128rmb, |
7828 | X86::VPXORQZ128rmb, X86::VPXORDZ128rmb }, |
7829 | { X86::VANDNPSZ256rmb, X86::VANDNPDZ256rmb, |
7830 | X86::VPANDNQZ256rmb, X86::VPANDNDZ256rmb }, |
7831 | { X86::VANDPSZ256rmb, X86::VANDPDZ256rmb, |
7832 | X86::VPANDQZ256rmb, X86::VPANDDZ256rmb }, |
7833 | { X86::VORPSZ256rmb, X86::VORPDZ256rmb, |
7834 | X86::VPORQZ256rmb, X86::VPORDZ256rmb }, |
7835 | { X86::VXORPSZ256rmb, X86::VXORPDZ256rmb, |
7836 | X86::VPXORQZ256rmb, X86::VPXORDZ256rmb }, |
7837 | { X86::VANDNPSZrmb, X86::VANDNPDZrmb, |
7838 | X86::VPANDNQZrmb, X86::VPANDNDZrmb }, |
7839 | { X86::VANDPSZrmb, X86::VANDPDZrmb, |
7840 | X86::VPANDQZrmb, X86::VPANDDZrmb }, |
7843 | { X86::VORPSZrmb, X86::VORPDZrmb, |
7844 | X86::VPORQZrmb, X86::VPORDZrmb }, |
7845 | { X86::VXORPSZrmb, X86::VXORPDZrmb, |
7846 | X86::VPXORQZrmb, X86::VPXORDZrmb }, |
7847 | { X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk, |
7848 | X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk }, |
7849 | { X86::VANDPSZ128rmbk, X86::VANDPDZ128rmbk, |
7850 | X86::VPANDQZ128rmbk, X86::VPANDDZ128rmbk }, |
7851 | { X86::VORPSZ128rmbk, X86::VORPDZ128rmbk, |
7852 | X86::VPORQZ128rmbk, X86::VPORDZ128rmbk }, |
7853 | { X86::VXORPSZ128rmbk, X86::VXORPDZ128rmbk, |
7854 | X86::VPXORQZ128rmbk, X86::VPXORDZ128rmbk }, |
7855 | { X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk, |
7856 | X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk }, |
7857 | { X86::VANDPSZ256rmbk, X86::VANDPDZ256rmbk, |
7858 | X86::VPANDQZ256rmbk, X86::VPANDDZ256rmbk }, |
7859 | { X86::VORPSZ256rmbk, X86::VORPDZ256rmbk, |
7860 | X86::VPORQZ256rmbk, X86::VPORDZ256rmbk }, |
7861 | { X86::VXORPSZ256rmbk, X86::VXORPDZ256rmbk, |
7862 | X86::VPXORQZ256rmbk, X86::VPXORDZ256rmbk }, |
7863 | { X86::VANDNPSZrmbk, X86::VANDNPDZrmbk, |
7864 | X86::VPANDNQZrmbk, X86::VPANDNDZrmbk }, |
7865 | { X86::VANDPSZrmbk, X86::VANDPDZrmbk, |
7866 | X86::VPANDQZrmbk, X86::VPANDDZrmbk }, |
7869 | { X86::VORPSZrmbk, X86::VORPDZrmbk, |
7870 | X86::VPORQZrmbk, X86::VPORDZrmbk }, |
7871 | { X86::VXORPSZrmbk, X86::VXORPDZrmbk, |
7872 | X86::VPXORQZrmbk, X86::VPXORDZrmbk }, |
7873 | { X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz, |
7874 | X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz}, |
7875 | { X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz, |
7876 | X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz }, |
7877 | { X86::VORPSZ128rmbkz, X86::VORPDZ128rmbkz, |
7878 | X86::VPORQZ128rmbkz, X86::VPORDZ128rmbkz }, |
7879 | { X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz, |
7880 | X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz }, |
7881 | { X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz, |
7882 | X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz}, |
7883 | { X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz, |
7884 | X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz }, |
7885 | { X86::VORPSZ256rmbkz, X86::VORPDZ256rmbkz, |
7886 | X86::VPORQZ256rmbkz, X86::VPORDZ256rmbkz }, |
7887 | { X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz, |
7888 | X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz }, |
7889 | { X86::VANDNPSZrmbkz, X86::VANDNPDZrmbkz, |
7890 | X86::VPANDNQZrmbkz, X86::VPANDNDZrmbkz }, |
7891 | { X86::VANDPSZrmbkz, X86::VANDPDZrmbkz, |
7892 | X86::VPANDQZrmbkz, X86::VPANDDZrmbkz }, |
7895 | { X86::VORPSZrmbkz, X86::VORPDZrmbkz, |
7896 | X86::VPORQZrmbkz, X86::VPORDZrmbkz }, |
7897 | { X86::VXORPSZrmbkz, X86::VXORPDZrmbkz, |
7898 | X86::VPXORQZrmbkz, X86::VPXORDZrmbkz }, |
7899 | }; |
7900 | |
7901 | // NOTE: These should only be used by the custom domain methods. |
7902 | static const uint16_t ReplaceableBlendInstrs[][3] = { |
7903 | //PackedSingle PackedDouble PackedInt |
7904 | { X86::BLENDPSrmi, X86::BLENDPDrmi, X86::PBLENDWrmi }, |
7905 | { X86::BLENDPSrri, X86::BLENDPDrri, X86::PBLENDWrri }, |
7906 | { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDWrmi }, |
7907 | { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDWrri }, |
7908 | { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDWYrmi }, |
7909 | { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDWYrri }, |
7910 | }; |
7911 | static const uint16_t ReplaceableBlendAVX2Instrs[][3] = { |
7912 | //PackedSingle PackedDouble PackedInt |
7913 | { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDDrmi }, |
7914 | { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDDrri }, |
7915 | { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDDYrmi }, |
7916 | { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDDYrri }, |
7917 | }; |
7918 | |
7919 | // Special table for changing EVEX logic instructions to VEX. |
7920 | // TODO: Should we run EVEX->VEX earlier? |
7921 | static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = { |
7922 | // Two integer columns for 64-bit and 32-bit elements. |
7923 | //PackedSingle PackedDouble PackedInt PackedInt |
7924 | { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm }, |
7925 | { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr }, |
7926 | { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDQZ128rm, X86::VPANDDZ128rm }, |
7927 | { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDQZ128rr, X86::VPANDDZ128rr }, |
7928 | { X86::VORPSrm, X86::VORPDrm, X86::VPORQZ128rm, X86::VPORDZ128rm }, |
7929 | { X86::VORPSrr, X86::VORPDrr, X86::VPORQZ128rr, X86::VPORDZ128rr }, |
7930 | { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORQZ128rm, X86::VPXORDZ128rm }, |
7931 | { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORQZ128rr, X86::VPXORDZ128rr }, |
7932 | { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm }, |
7933 | { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr }, |
7934 | { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDQZ256rm, X86::VPANDDZ256rm }, |
7935 | { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDQZ256rr, X86::VPANDDZ256rr }, |
7936 | { X86::VORPSYrm, X86::VORPDYrm, X86::VPORQZ256rm, X86::VPORDZ256rm }, |
7937 | { X86::VORPSYrr, X86::VORPDYrr, X86::VPORQZ256rr, X86::VPORDZ256rr }, |
7938 | { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORQZ256rm, X86::VPXORDZ256rm }, |
7939 | { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORQZ256rr, X86::VPXORDZ256rr }, |
7940 | }; |
7941 | |
7942 | // FIXME: Some shuffle and unpack instructions have equivalents in different |
7943 | // domains, but they require a bit more work than just switching opcodes. |
7944 | |
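     | // Each table row is indexed by (domain - 1): PackedSingle = 1,
     | // PackedDouble = 2, PackedInt = 3, matching the TSFlags SSE domain field.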
7945 | static const uint16_t *lookup(unsigned opcode, unsigned domain, |
7946 | ArrayRef<uint16_t[3]> Table) { |
7947 | for (const uint16_t (&Row)[3] : Table) |
7948 | if (Row[domain-1] == opcode) |
7949 | return Row; |
7950 | return nullptr; |
7951 | } |
7952 | |
7953 | static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain, |
7954 | ArrayRef<uint16_t[4]> Table) { |
7955 | // If this is the integer domain, make sure to check both integer columns.
7956 | for (const uint16_t (&Row)[4] : Table) |
7957 | if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode)) |
7958 | return Row; |
7959 | return nullptr; |
7960 | } |
7961 | |
7962 | // Helper to attempt to widen/narrow blend masks. |
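     | // E.g. widening the 4-bit PS mask 0b1100 to a 2-bit PD mask yields 0b10
     | // (each fully-set pair collapses to one bit), while 0b0100 fails because
     | // the upper pair is only half-selected. Narrowing the 2-bit PD mask 0b10
     | // to a 4-bit PS mask yields 0b1100.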
7963 | static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth, |
7964 | unsigned NewWidth, unsigned *pNewMask = nullptr) { |
7965 | assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
7966 |        "Illegal blend mask scale");
7967 | unsigned NewMask = 0; |
7968 | |
7969 | if ((OldWidth % NewWidth) == 0) { |
7970 | unsigned Scale = OldWidth / NewWidth; |
7971 | unsigned SubMask = (1u << Scale) - 1; |
7972 | for (unsigned i = 0; i != NewWidth; ++i) { |
7973 | unsigned Sub = (OldMask >> (i * Scale)) & SubMask; |
7974 | if (Sub == SubMask) |
7975 | NewMask |= (1u << i); |
7976 | else if (Sub != 0x0) |
7977 | return false; |
7978 | } |
7979 | } else { |
7980 | unsigned Scale = NewWidth / OldWidth; |
7981 | unsigned SubMask = (1u << Scale) - 1; |
7982 | for (unsigned i = 0; i != OldWidth; ++i) { |
7983 | if (OldMask & (1 << i)) { |
7984 | NewMask |= (SubMask << (i * Scale)); |
7985 | } |
7986 | } |
7987 | } |
7988 | |
7989 | if (pNewMask) |
7990 | *pNewMask = NewMask; |
7991 | return true; |
7992 | } |
7993 | |
7994 | uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const { |
7995 | unsigned Opcode = MI.getOpcode(); |
7996 | unsigned NumOperands = MI.getDesc().getNumOperands(); |
7997 | |
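     | // Domain bitmasks use bit (1 << domain): 0x2 = PackedSingle (domain 1),
     | // 0x4 = PackedDouble (domain 2), 0x8 = PackedInt (domain 3); 0xe means
     | // all three domains are legal, 0x6 only the two FP domains.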
7998 | auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) { |
7999 | uint16_t validDomains = 0; |
8000 | if (MI.getOperand(NumOperands - 1).isImm()) { |
8001 | unsigned Imm = MI.getOperand(NumOperands - 1).getImm(); |
8002 | if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4)) |
8003 | validDomains |= 0x2; // PackedSingle |
8004 | if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2)) |
8005 | validDomains |= 0x4; // PackedDouble |
8006 | if (!Is256 || Subtarget.hasAVX2()) |
8007 | validDomains |= 0x8; // PackedInt |
8008 | } |
8009 | return validDomains; |
8010 | }; |
8011 | |
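     | // ImmWidth in the cases below is the number of mask bits the opcode's
     | // immediate uses (one per element), e.g. 2 for BLENDPD and 8 for VBLENDPSY.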
8012 | switch (Opcode) { |
8013 | case X86::BLENDPDrmi: |
8014 | case X86::BLENDPDrri: |
8015 | case X86::VBLENDPDrmi: |
8016 | case X86::VBLENDPDrri: |
8017 | return GetBlendDomains(2, false); |
8018 | case X86::VBLENDPDYrmi: |
8019 | case X86::VBLENDPDYrri: |
8020 | return GetBlendDomains(4, true); |
8021 | case X86::BLENDPSrmi: |
8022 | case X86::BLENDPSrri: |
8023 | case X86::VBLENDPSrmi: |
8024 | case X86::VBLENDPSrri: |
8025 | case X86::VPBLENDDrmi: |
8026 | case X86::VPBLENDDrri: |
8027 | return GetBlendDomains(4, false); |
8028 | case X86::VBLENDPSYrmi: |
8029 | case X86::VBLENDPSYrri: |
8030 | case X86::VPBLENDDYrmi: |
8031 | case X86::VPBLENDDYrri: |
8032 | return GetBlendDomains(8, true); |
8033 | case X86::PBLENDWrmi: |
8034 | case X86::PBLENDWrri: |
8035 | case X86::VPBLENDWrmi: |
8036 | case X86::VPBLENDWrri: |
8037 | // Treat VPBLENDWY as a 128-bit vector since it repeats the lo/hi masks.
8038 | case X86::VPBLENDWYrmi: |
8039 | case X86::VPBLENDWYrri: |
8040 | return GetBlendDomains(8, false); |
8041 | case X86::VPANDDZ128rr: case X86::VPANDDZ128rm: |
8042 | case X86::VPANDDZ256rr: case X86::VPANDDZ256rm: |
8043 | case X86::VPANDQZ128rr: case X86::VPANDQZ128rm: |
8044 | case X86::VPANDQZ256rr: case X86::VPANDQZ256rm: |
8045 | case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm: |
8046 | case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm: |
8047 | case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm: |
8048 | case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm: |
8049 | case X86::VPORDZ128rr: case X86::VPORDZ128rm: |
8050 | case X86::VPORDZ256rr: case X86::VPORDZ256rm: |
8051 | case X86::VPORQZ128rr: case X86::VPORQZ128rm: |
8052 | case X86::VPORQZ256rr: case X86::VPORQZ256rm: |
8053 | case X86::VPXORDZ128rr: case X86::VPXORDZ128rm: |
8054 | case X86::VPXORDZ256rr: case X86::VPXORDZ256rm: |
8055 | case X86::VPXORQZ128rr: case X86::VPXORQZ128rm: |
8056 | case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: |
8057 | // If we don't have DQI, see if we can still switch from an EVEX integer
8058 | // instruction to a VEX floating point instruction. |
8059 | if (Subtarget.hasDQI()) |
8060 | return 0; |
8061 | |
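     | // VEX encodings can only address registers xmm0-xmm15, so give up if
     | // any operand uses an extended (xmm16-xmm31) register.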
8062 | if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16) |
8063 | return 0; |
8064 | if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16) |
8065 | return 0; |
8066 | // Register forms will have 3 operands. Memory forms will have more.
8067 | if (NumOperands == 3 && |
8068 | RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16) |
8069 | return 0; |
8070 | |
8071 | // All domains are valid. |
8072 | return 0xe; |
8073 | case X86::MOVHLPSrr: |
8074 | // We can swap domains when both inputs are the same register. |
8075 | // FIXME: This doesn't catch all the cases we would like. If the input |
8076 | // register isn't KILLed by the instruction, the two address instruction |
8077 | // pass puts a COPY on one input. The other input uses the original |
8078 | // register. This prevents the same physical register from being used by |
8079 | // both inputs. |
8080 | if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() && |
8081 | MI.getOperand(0).getSubReg() == 0 && |
8082 | MI.getOperand(1).getSubReg() == 0 && |
8083 | MI.getOperand(2).getSubReg() == 0) |
8084 | return 0x6; |
8085 | return 0; |
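     | // SHUFPDrri can always be rewritten as SHUFPSrri by widening the
     | // immediate (see setExecutionDomainCustom), so both FP domains are legal.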
8086 | case X86::SHUFPDrri: |
8087 | return 0x6; |
8088 | } |
8089 | return 0; |
8090 | } |
8091 | |
8092 | bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI, |
8093 | unsigned Domain) const { |
8094 | assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
8095 | uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; |
8096 | assert(dom && "Not an SSE instruction");
8097 | |
8098 | unsigned Opcode = MI.getOpcode(); |
8099 | unsigned NumOperands = MI.getDesc().getNumOperands(); |
8100 | |
8101 | auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) { |
8102 | if (MI.getOperand(NumOperands - 1).isImm()) { |
8103 | unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255; |
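     | // VPBLENDWY applies its 8-bit mask to both 128-bit lanes, so expand it
     | // to the full 16-element mask before rescaling.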
8104 | Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm); |
8105 | unsigned NewImm = Imm; |
8106 | |
8107 | const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs); |
8108 | if (!table) |
8109 | table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs); |
8110 | |
8111 | if (Domain == 1) { // PackedSingle |
8112 | AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm); |
8113 | } else if (Domain == 2) { // PackedDouble |
8114 | AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm); |
8115 | } else if (Domain == 3) { // PackedInt |
8116 | if (Subtarget.hasAVX2()) { |
8117 | // If this is already a VPBLENDW, keep it; otherwise use VPBLENDD.
8118 | if ((ImmWidth / (Is256 ? 2 : 1)) != 8) { |
8119 | table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs); |
8120 | AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm); |
8121 | } |
8122 | } else { |
8123 | assert(!Is256 && "128-bit vector expected");
8124 | AdjustBlendMask(Imm, ImmWidth, 8, &NewImm); |
8125 | } |
8126 | } |
8127 | |
8128 | assert(table && table[Domain - 1] && "Unknown domain op");
8129 | MI.setDesc(get(table[Domain - 1])); |
8130 | MI.getOperand(NumOperands - 1).setImm(NewImm & 255); |
8131 | } |
8132 | return true; |
8133 | }; |
8134 | |
8135 | switch (Opcode) { |
8136 | case X86::BLENDPDrmi: |
8137 | case X86::BLENDPDrri: |
8138 | case X86::VBLENDPDrmi: |
8139 | case X86::VBLENDPDrri: |
8140 | return SetBlendDomain(2, false); |
8141 | case X86::VBLENDPDYrmi: |
8142 | case X86::VBLENDPDYrri: |
8143 | return SetBlendDomain(4, true); |
8144 | case X86::BLENDPSrmi: |
8145 | case X86::BLENDPSrri: |
8146 | case X86::VBLENDPSrmi: |
8147 | case X86::VBLENDPSrri: |
8148 | case X86::VPBLENDDrmi: |
8149 | case X86::VPBLENDDrri: |
8150 | return SetBlendDomain(4, false); |
8151 | case X86::VBLENDPSYrmi: |
8152 | case X86::VBLENDPSYrri: |
8153 | case X86::VPBLENDDYrmi: |
8154 | case X86::VPBLENDDYrri: |
8155 | return SetBlendDomain(8, true); |
8156 | case X86::PBLENDWrmi: |
8157 | case X86::PBLENDWrri: |
8158 | case X86::VPBLENDWrmi: |
8159 | case X86::VPBLENDWrri: |
8160 | return SetBlendDomain(8, false); |
8161 | case X86::VPBLENDWYrmi: |
8162 | case X86::VPBLENDWYrri: |
8163 | return SetBlendDomain(16, true); |
8164 | case X86::VPANDDZ128rr: case X86::VPANDDZ128rm: |
8165 | case X86::VPANDDZ256rr: case X86::VPANDDZ256rm: |
8166 | case X86::VPANDQZ128rr: case X86::VPANDQZ128rm: |
8167 | case X86::VPANDQZ256rr: case X86::VPANDQZ256rm: |
8168 | case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm: |
8169 | case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm: |
8170 | case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm: |
8171 | case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm: |
8172 | case X86::VPORDZ128rr: case X86::VPORDZ128rm: |
8173 | case X86::VPORDZ256rr: case X86::VPORDZ256rm: |
8174 | case X86::VPORQZ128rr: case X86::VPORQZ128rm: |
8175 | case X86::VPORQZ256rr: case X86::VPORQZ256rm: |
8176 | case X86::VPXORDZ128rr: case X86::VPXORDZ128rm: |
8177 | case X86::VPXORDZ256rr: case X86::VPXORDZ256rm: |
8178 | case X86::VPXORQZ128rr: case X86::VPXORQZ128rm: |
8179 | case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: { |
8180 | // Without DQI, convert EVEX instructions to VEX instructions. |
8181 | if (Subtarget.hasDQI()) |
8182 | return false; |
8183 | |
8184 | const uint16_t *table = lookupAVX512(MI.getOpcode(), dom, |
8185 | ReplaceableCustomAVX512LogicInstrs); |
8186 | assert(table && "Instruction not found in table?");
8187 | // Don't change integer Q instructions to D instructions, and use D
8188 | // instructions if we started with a PS instruction.
8189 | if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode())) |
8190 | Domain = 4; |
8191 | MI.setDesc(get(table[Domain - 1])); |
8192 | return true; |
8193 | } |
8194 | case X86::UNPCKHPDrr: |
8195 | case X86::MOVHLPSrr: |
8196 | // We just need to commute the instruction which will switch the domains. |
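     | // With identical sources, MOVHLPSrr and UNPCKHPDrr compute the same
     | // shuffle, so swapping the operands lets us flip between the PS and PD
     | // opcodes.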
8197 | if (Domain != dom && Domain != 3 && |
8198 | MI.getOperand(1).getReg() == MI.getOperand(2).getReg() && |
8199 | MI.getOperand(0).getSubReg() == 0 && |
8200 | MI.getOperand(1).getSubReg() == 0 && |
8201 | MI.getOperand(2).getSubReg() == 0) { |
8202 | commuteInstruction(MI, false); |
8203 | return true; |
8204 | } |
8205 | // We must always return true for MOVHLPSrr. |
8206 | if (Opcode == X86::MOVHLPSrr) |
8207 | return true; |
8208 | break; |
8209 | case X86::SHUFPDrri: { |
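     | // Rewrite the 1-bit-per-double SHUFPD selector as a 2-bit-per-float
     | // SHUFPS selector: 0x44 takes floats {0,1} (the low double) from each
     | // source; imm bit 0 retargets the result's low half to src1's high
     | // double (floats {2,3}), and imm bit 1 does the same for src2.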
8210 | if (Domain == 1) { |
8211 | unsigned Imm = MI.getOperand(3).getImm(); |
8212 | unsigned NewImm = 0x44; |
8213 | if (Imm & 1) NewImm |= 0x0a; |
8214 | if (Imm & 2) NewImm |= 0xa0; |
8215 | MI.getOperand(3).setImm(NewImm); |
8216 | MI.setDesc(get(X86::SHUFPSrri)); |
8217 | } |
8218 | return true; |
8219 | } |
8220 | } |
8221 | return false; |
8222 | } |
8223 | |
8224 | std::pair<uint16_t, uint16_t> |
8225 | X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const { |
8226 | uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; |
8227 | unsigned opcode = MI.getOpcode(); |
8228 | uint16_t validDomains = 0; |
8229 | if (domain) { |
8230 | // Attempt to match for custom instructions. |
8231 | validDomains = getExecutionDomainCustom(MI); |
8232 | if (validDomains) |
8233 | return std::make_pair(domain, validDomains); |
8234 | |
8235 | if (lookup(opcode, domain, ReplaceableInstrs)) { |
8236 | validDomains = 0xe; |
8237 | } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) { |
8238 | validDomains = Subtarget.hasAVX2() ? 0xe : 0x6; |
8239 | } else if (lookup(opcode, domain, ReplaceableInstrsFP)) { |
8240 | validDomains = 0x6; |
8241 | } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) { |
8242 | // Insert/extract instructions should only affect the domain if AVX2
8243 | // is enabled.
8244 | if (!Subtarget.hasAVX2()) |
8245 | return std::make_pair(0, 0); |
8246 | validDomains = 0xe; |
8247 | } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) { |
8248 | validDomains = 0xe; |
8249 | } else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain, |
8250 | ReplaceableInstrsAVX512DQ)) { |
8251 | validDomains = 0xe; |
8252 | } else if (Subtarget.hasDQI()) { |
8253 | if (const uint16_t *table = lookupAVX512(opcode, domain, |
8254 | ReplaceableInstrsAVX512DQMasked)) { |
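     | // Masking ties the domain to the element size: 32-bit forms (PS, or
     | // the D column) may only use PackedSingle|PackedInt (0xa), and 64-bit
     | // forms only PackedDouble|PackedInt (0xc).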
8255 | if (domain == 1 || (domain == 3 && table[3] == opcode)) |
8256 | validDomains = 0xa; |
8257 | else |
8258 | validDomains = 0xc; |
8259 | } |
8260 | } |
8261 | } |
8262 | return std::make_pair(domain, validDomains); |
8263 | } |
8264 | |
8265 | void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const { |
8266 | assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
8267 | uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; |
8268 | assert(dom && "Not an SSE instruction");
8269 | |
8270 | // Attempt to match for custom instructions. |
8271 | if (setExecutionDomainCustom(MI, Domain)) |
8272 | return; |
8273 | |
8274 | const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs); |
8275 | if (!table) { // try the other table |
8276 | assert((Subtarget.hasAVX2() || Domain < 3) &&
8277 |        "256-bit vector operations only available in AVX2");
8278 | table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2); |
8279 | } |
8280 | if (!table) { // try the FP table |
8281 | table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP); |
8282 | assert((!table || Domain < 3) &&
8283 |        "Can only select PackedSingle or PackedDouble");
8284 | } |
8285 | if (!table) { // try the insert/extract table
8286 | assert(Subtarget.hasAVX2() &&
8287 |        "256-bit insert/extract only available in AVX2");
8288 | table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract); |
8289 | } |
8290 | if (!table) { // try the AVX512 table |
8291 | assert(Subtarget.hasAVX512() && "Requires AVX-512");
8292 | table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512); |
8293 | // Don't change integer Q instructions to D instructions. |
8294 | if (table && Domain == 3 && table[3] == MI.getOpcode()) |
8295 | Domain = 4; |
8296 | } |
8297 | if (!table) { // try the AVX512DQ table |
8298 | assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
8299 | table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ); |
8300 | // Don't change integer Q instructions to D instructions and |
8301 | // use D instructions if we started with a PS instruction. |
8302 | if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode())) |
8303 | Domain = 4; |
8304 | } |
8305 | if (!table) { // try the AVX512DQMasked table |
8306 | assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
8307 | table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked); |
8308 | if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode())) |
8309 | Domain = 4; |
8310 | } |
8311 | assert(table && "Cannot change domain");
8312 | MI.setDesc(get(table[Domain - 1])); |
8313 | } |
8314 | |
8315 | /// Return the noop instruction to use.
8316 | MCInst X86InstrInfo::getNop() const { |
8317 | MCInst Nop; |
8318 | Nop.setOpcode(X86::NOOP); |
8319 | return Nop; |
8320 | } |
8321 | |
8322 | bool X86InstrInfo::isHighLatencyDef(int opc) const { |
8323 | switch (opc) { |
8324 | default: return false; |
8325 | case X86::DIVPDrm: |
8326 | case X86::DIVPDrr: |
8327 | case X86::DIVPSrm: |
8328 | case X86::DIVPSrr: |
8329 | case X86::DIVSDrm: |
8330 | case X86::DIVSDrm_Int: |
8331 | case X86::DIVSDrr: |
8332 | case X86::DIVSDrr_Int: |
8333 | case X86::DIVSSrm: |
8334 | case X86::DIVSSrm_Int: |
8335 | case X86::DIVSSrr: |
8336 | case X86::DIVSSrr_Int: |
8337 | case X86::SQRTPDm: |
8338 | case X86::SQRTPDr: |
8339 | case X86::SQRTPSm: |
8340 | case X86::SQRTPSr: |
8341 | case X86::SQRTSDm: |
8342 | case X86::SQRTSDm_Int: |
8343 | case X86::SQRTSDr: |
8344 | case X86::SQRTSDr_Int: |
8345 | case X86::SQRTSSm: |
8346 | case X86::SQRTSSm_Int: |
8347 | case X86::SQRTSSr: |
8348 | case X86::SQRTSSr_Int: |
8349 | // AVX instructions with high latency |
8350 | case X86::VDIVPDrm: |
8351 | case X86::VDIVPDrr: |
8352 | case X86::VDIVPDYrm: |
8353 | case X86::VDIVPDYrr: |
8354 | case X86::VDIVPSrm: |
8355 | case X86::VDIVPSrr: |
8356 | case X86::VDIVPSYrm: |
8357 | case X86::VDIVPSYrr: |
8358 | case X86::VDIVSDrm: |
8359 | case X86::VDIVSDrm_Int: |
8360 | case X86::VDIVSDrr: |
8361 | case X86::VDIVSDrr_Int: |
8362 | case X86::VDIVSSrm: |
8363 | case X86::VDIVSSrm_Int: |
8364 | case X86::VDIVSSrr: |
8365 | case X86::VDIVSSrr_Int: |
8366 | case X86::VSQRTPDm: |
8367 | case X86::VSQRTPDr: |
8368 | case X86::VSQRTPDYm: |
8369 | case X86::VSQRTPDYr: |
8370 | case X86::VSQRTPSm: |
8371 | case X86::VSQRTPSr: |
8372 | case X86::VSQRTPSYm: |
8373 | case X86::VSQRTPSYr: |
8374 | case X86::VSQRTSDm: |
8375 | case X86::VSQRTSDm_Int: |
8376 | case X86::VSQRTSDr: |
8377 | case X86::VSQRTSDr_Int: |
8378 | case X86::VSQRTSSm: |
8379 | case X86::VSQRTSSm_Int: |
8380 | case X86::VSQRTSSr: |
8381 | case X86::VSQRTSSr_Int: |
8382 | // AVX512 instructions with high latency |
8383 | case X86::VDIVPDZ128rm: |
8384 | case X86::VDIVPDZ128rmb: |
8385 | case X86::VDIVPDZ128rmbk: |
8386 | case X86::VDIVPDZ128rmbkz: |
8387 | case X86::VDIVPDZ128rmk: |
8388 | case X86::VDIVPDZ128rmkz: |
8389 | case X86::VDIVPDZ128rr: |
8390 | case X86::VDIVPDZ128rrk: |
8391 | case X86::VDIVPDZ128rrkz: |
8392 | case X86::VDIVPDZ256rm: |
8393 | case X86::VDIVPDZ256rmb: |
8394 | case X86::VDIVPDZ256rmbk: |
8395 | case X86::VDIVPDZ256rmbkz: |
8396 | case X86::VDIVPDZ256rmk: |
8397 | case X86::VDIVPDZ256rmkz: |
8398 | case X86::VDIVPDZ256rr: |
8399 | case X86::VDIVPDZ256rrk: |
8400 | case X86::VDIVPDZ256rrkz: |
8401 | case X86::VDIVPDZrrb: |
8402 | case X86::VDIVPDZrrbk: |
8403 | case X86::VDIVPDZrrbkz: |
8404 | case X86::VDIVPDZrm: |
8405 | case X86::VDIVPDZrmb: |
8406 | case X86::VDIVPDZrmbk: |
8407 | case X86::VDIVPDZrmbkz: |
8408 | case X86::VDIVPDZrmk: |
8409 | case X86::VDIVPDZrmkz: |
8410 | case X86::VDIVPDZrr: |
8411 | case X86::VDIVPDZrrk: |
8412 | case X86::VDIVPDZrrkz: |
8413 | case X86::VDIVPSZ128rm: |
8414 | case X86::VDIVPSZ128rmb: |
8415 | case X86::VDIVPSZ128rmbk: |
8416 | case X86::VDIVPSZ128rmbkz: |
8417 | case X86::VDIVPSZ128rmk: |
8418 | case X86::VDIVPSZ128rmkz: |
8419 | case X86::VDIVPSZ128rr: |
8420 | case X86::VDIVPSZ128rrk: |
8421 | case X86::VDIVPSZ128rrkz: |
8422 | case X86::VDIVPSZ256rm: |
8423 | case X86::VDIVPSZ256rmb: |
8424 | case X86::VDIVPSZ256rmbk: |
8425 | case X86::VDIVPSZ256rmbkz: |
8426 | case X86::VDIVPSZ256rmk: |
8427 | case X86::VDIVPSZ256rmkz: |
8428 | case X86::VDIVPSZ256rr: |
8429 | case X86::VDIVPSZ256rrk: |
8430 | case X86::VDIVPSZ256rrkz: |
8431 | case X86::VDIVPSZrrb: |
8432 | case X86::VDIVPSZrrbk: |
8433 | case X86::VDIVPSZrrbkz: |
8434 | case X86::VDIVPSZrm: |
8435 | case X86::VDIVPSZrmb: |
8436 | case X86::VDIVPSZrmbk: |
8437 | case X86::VDIVPSZrmbkz: |
8438 | case X86::VDIVPSZrmk: |
8439 | case X86::VDIVPSZrmkz: |
8440 | case X86::VDIVPSZrr: |
8441 | case X86::VDIVPSZrrk: |
8442 | case X86::VDIVPSZrrkz: |
8443 | case X86::VDIVSDZrm: |
8444 | case X86::VDIVSDZrr: |
8445 | case X86::VDIVSDZrm_Int: |
8446 | case X86::VDIVSDZrm_Intk: |
8447 | case X86::VDIVSDZrm_Intkz: |
8448 | case X86::VDIVSDZrr_Int: |
8449 | case X86::VDIVSDZrr_Intk: |
8450 | case X86::VDIVSDZrr_Intkz: |
8451 | case X86::VDIVSDZrrb_Int: |
8452 | case X86::VDIVSDZrrb_Intk: |
8453 | case X86::VDIVSDZrrb_Intkz: |
8454 | case X86::VDIVSSZrm: |
8455 | case X86::VDIVSSZrr: |
8456 | case X86::VDIVSSZrm_Int: |
8457 | case X86::VDIVSSZrm_Intk: |
8458 | case X86::VDIVSSZrm_Intkz: |
8459 | case X86::VDIVSSZrr_Int: |
8460 | case X86::VDIVSSZrr_Intk: |
8461 | case X86::VDIVSSZrr_Intkz: |
8462 | case X86::VDIVSSZrrb_Int: |
8463 | case X86::VDIVSSZrrb_Intk: |
8464 | case X86::VDIVSSZrrb_Intkz: |
8465 | case X86::VSQRTPDZ128m: |
8466 | case X86::VSQRTPDZ128mb: |
8467 | case X86::VSQRTPDZ128mbk: |
8468 | case X86::VSQRTPDZ128mbkz: |
8469 | case X86::VSQRTPDZ128mk: |
8470 | case X86::VSQRTPDZ128mkz: |
8471 | case X86::VSQRTPDZ128r: |
8472 | case X86::VSQRTPDZ128rk: |
8473 | case X86::VSQRTPDZ128rkz: |
8474 | case X86::VSQRTPDZ256m: |
8475 | case X86::VSQRTPDZ256mb: |
8476 | case X86::VSQRTPDZ256mbk: |
8477 | case X86::VSQRTPDZ256mbkz: |
8478 | case X86::VSQRTPDZ256mk: |
8479 | case X86::VSQRTPDZ256mkz: |
8480 | case X86::VSQRTPDZ256r: |
8481 | case X86::VSQRTPDZ256rk: |
8482 | case X86::VSQRTPDZ256rkz: |
8483 | case X86::VSQRTPDZm: |
8484 | case X86::VSQRTPDZmb: |
8485 | case X86::VSQRTPDZmbk: |
8486 | case X86::VSQRTPDZmbkz: |
8487 | case X86::VSQRTPDZmk: |
8488 | case X86::VSQRTPDZmkz: |
8489 | case X86::VSQRTPDZr: |
8490 | case X86::VSQRTPDZrb: |
8491 | case X86::VSQRTPDZrbk: |
8492 | case X86::VSQRTPDZrbkz: |
8493 | case X86::VSQRTPDZrk: |
8494 | case X86::VSQRTPDZrkz: |
8495 | case X86::VSQRTPSZ128m: |
8496 | case X86::VSQRTPSZ128mb: |
8497 | case X86::VSQRTPSZ128mbk: |
8498 | case X86::VSQRTPSZ128mbkz: |
8499 | case X86::VSQRTPSZ128mk: |
8500 | case X86::VSQRTPSZ128mkz: |
8501 | case X86::VSQRTPSZ128r: |
8502 | case X86::VSQRTPSZ128rk: |
8503 | case X86::VSQRTPSZ128rkz: |
8504 | case X86::VSQRTPSZ256m: |
8505 | case X86::VSQRTPSZ256mb: |
8506 | case X86::VSQRTPSZ256mbk: |
8507 | case X86::VSQRTPSZ256mbkz: |
8508 | case X86::VSQRTPSZ256mk: |
8509 | case X86::VSQRTPSZ256mkz: |
8510 | case X86::VSQRTPSZ256r: |
8511 | case X86::VSQRTPSZ256rk: |
8512 | case X86::VSQRTPSZ256rkz: |
8513 | case X86::VSQRTPSZm: |
8514 | case X86::VSQRTPSZmb: |
8515 | case X86::VSQRTPSZmbk: |
8516 | case X86::VSQRTPSZmbkz: |
8517 | case X86::VSQRTPSZmk: |
8518 | case X86::VSQRTPSZmkz: |
8519 | case X86::VSQRTPSZr: |
8520 | case X86::VSQRTPSZrb: |
8521 | case X86::VSQRTPSZrbk: |
8522 | case X86::VSQRTPSZrbkz: |
8523 | case X86::VSQRTPSZrk: |
8524 | case X86::VSQRTPSZrkz: |
8525 | case X86::VSQRTSDZm: |
8526 | case X86::VSQRTSDZm_Int: |
8527 | case X86::VSQRTSDZm_Intk: |
8528 | case X86::VSQRTSDZm_Intkz: |
8529 | case X86::VSQRTSDZr: |
8530 | case X86::VSQRTSDZr_Int: |
8531 | case X86::VSQRTSDZr_Intk: |
8532 | case X86::VSQRTSDZr_Intkz: |
8533 | case X86::VSQRTSDZrb_Int: |
8534 | case X86::VSQRTSDZrb_Intk: |
8535 | case X86::VSQRTSDZrb_Intkz: |
8536 | case X86::VSQRTSSZm: |
8537 | case X86::VSQRTSSZm_Int: |
8538 | case X86::VSQRTSSZm_Intk: |
8539 | case X86::VSQRTSSZm_Intkz: |
8540 | case X86::VSQRTSSZr: |
8541 | case X86::VSQRTSSZr_Int: |
8542 | case X86::VSQRTSSZr_Intk: |
8543 | case X86::VSQRTSSZr_Intkz: |
8544 | case X86::VSQRTSSZrb_Int: |
8545 | case X86::VSQRTSSZrb_Intk: |
8546 | case X86::VSQRTSSZrb_Intkz: |
8547 | |
8548 | case X86::VGATHERDPDYrm: |
8549 | case X86::VGATHERDPDZ128rm: |
8550 | case X86::VGATHERDPDZ256rm: |
8551 | case X86::VGATHERDPDZrm: |
8552 | case X86::VGATHERDPDrm: |
8553 | case X86::VGATHERDPSYrm: |
8554 | case X86::VGATHERDPSZ128rm: |
8555 | case X86::VGATHERDPSZ256rm: |
8556 | case X86::VGATHERDPSZrm: |
8557 | case X86::VGATHERDPSrm: |
8558 | case X86::VGATHERPF0DPDm: |
8559 | case X86::VGATHERPF0DPSm: |
8560 | case X86::VGATHERPF0QPDm: |
8561 | case X86::VGATHERPF0QPSm: |
8562 | case X86::VGATHERPF1DPDm: |
8563 | case X86::VGATHERPF1DPSm: |
8564 | case X86::VGATHERPF1QPDm: |
8565 | case X86::VGATHERPF1QPSm: |
8566 | case X86::VGATHERQPDYrm: |
8567 | case X86::VGATHERQPDZ128rm: |
8568 | case X86::VGATHERQPDZ256rm: |
8569 | case X86::VGATHERQPDZrm: |
8570 | case X86::VGATHERQPDrm: |
8571 | case X86::VGATHERQPSYrm: |
8572 | case X86::VGATHERQPSZ128rm: |
8573 | case X86::VGATHERQPSZ256rm: |
8574 | case X86::VGATHERQPSZrm: |
8575 | case X86::VGATHERQPSrm: |
8576 | case X86::VPGATHERDDYrm: |
8577 | case X86::VPGATHERDDZ128rm: |
8578 | case X86::VPGATHERDDZ256rm: |
8579 | case X86::VPGATHERDDZrm: |
8580 | case X86::VPGATHERDDrm: |
8581 | case X86::VPGATHERDQYrm: |
8582 | case X86::VPGATHERDQZ128rm: |
8583 | case X86::VPGATHERDQZ256rm: |
8584 | case X86::VPGATHERDQZrm: |
8585 | case X86::VPGATHERDQrm: |
8586 | case X86::VPGATHERQDYrm: |
8587 | case X86::VPGATHERQDZ128rm: |
8588 | case X86::VPGATHERQDZ256rm: |
8589 | case X86::VPGATHERQDZrm: |
8590 | case X86::VPGATHERQDrm: |
8591 | case X86::VPGATHERQQYrm: |
8592 | case X86::VPGATHERQQZ128rm: |
8593 | case X86::VPGATHERQQZ256rm: |
8594 | case X86::VPGATHERQQZrm: |
8595 | case X86::VPGATHERQQrm: |
8596 | case X86::VSCATTERDPDZ128mr: |
8597 | case X86::VSCATTERDPDZ256mr: |
8598 | case X86::VSCATTERDPDZmr: |
8599 | case X86::VSCATTERDPSZ128mr: |
8600 | case X86::VSCATTERDPSZ256mr: |
8601 | case X86::VSCATTERDPSZmr: |
8602 | case X86::VSCATTERPF0DPDm: |
8603 | case X86::VSCATTERPF0DPSm: |
8604 | case X86::VSCATTERPF0QPDm: |
8605 | case X86::VSCATTERPF0QPSm: |
8606 | case X86::VSCATTERPF1DPDm: |
8607 | case X86::VSCATTERPF1DPSm: |
8608 | case X86::VSCATTERPF1QPDm: |
8609 | case X86::VSCATTERPF1QPSm: |
8610 | case X86::VSCATTERQPDZ128mr: |
8611 | case X86::VSCATTERQPDZ256mr: |
8612 | case X86::VSCATTERQPDZmr: |
8613 | case X86::VSCATTERQPSZ128mr: |
8614 | case X86::VSCATTERQPSZ256mr: |
8615 | case X86::VSCATTERQPSZmr: |
8616 | case X86::VPSCATTERDDZ128mr: |
8617 | case X86::VPSCATTERDDZ256mr: |
8618 | case X86::VPSCATTERDDZmr: |
8619 | case X86::VPSCATTERDQZ128mr: |
8620 | case X86::VPSCATTERDQZ256mr: |
8621 | case X86::VPSCATTERDQZmr: |
8622 | case X86::VPSCATTERQDZ128mr: |
8623 | case X86::VPSCATTERQDZ256mr: |
8624 | case X86::VPSCATTERQDZmr: |
8625 | case X86::VPSCATTERQQZ128mr: |
8626 | case X86::VPSCATTERQQZ256mr: |
8627 | case X86::VPSCATTERQQZmr: |
8628 | return true; |
8629 | } |
8630 | } |
8631 | |
8632 | bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel, |
8633 | const MachineRegisterInfo *MRI, |
8634 | const MachineInstr &DefMI, |
8635 | unsigned DefIdx, |
8636 | const MachineInstr &UseMI, |
8637 | unsigned UseIdx) const { |
8638 | return isHighLatencyDef(DefMI.getOpcode()); |
8639 | } |
8640 | |
8641 | bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst, |
8642 | const MachineBasicBlock *MBB) const { |
8643 | assert(Inst.getNumExplicitOperands() == 3 && Inst.getNumExplicitDefs() == 1 &&
8644 |        Inst.getNumDefs() <= 2 && "Reassociation needs binary operators");
8645 | |
8646 | // Integer binary math/logic instructions have a third source operand: |
8647 | // the EFLAGS register. That operand must be both defined here and never |
8648 | // used; i.e., it must be dead. If the EFLAGS operand is live, then we |
8649 | // cannot change anything because rearranging the operands could affect other |
8650 | // instructions that depend on the exact status flags (zero, sign, etc.) |
8651 | // that are set by using these particular operands with this operation. |
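| // For example, in rough MIR terms (register numbers are illustrative), |
| // "%2:gr32 = ADD32rr %0, %1, implicit-def dead $eflags" is a reassociation |
| // candidate, while the same instruction with "implicit-def $eflags" followed |
| // by a conditional branch reading $eflags must keep its operand order. |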
8652 | const MachineOperand *FlagDef = Inst.findRegisterDefOperand(X86::EFLAGS); |
8653 | assert((Inst.getNumDefs() == 1 || FlagDef) && |
8654 | "Implicit def isn't flags?"); |
8655 | if (FlagDef && !FlagDef->isDead()) |
8656 | return false; |
8657 | |
8658 | return TargetInstrInfo::hasReassociableOperands(Inst, MBB); |
8659 | } |
8660 | |
8661 | // TODO: There are many more machine instruction opcodes to match: |
8662 | // 1. Other data types (integer, vectors) |
8663 | // 2. Other math / logic operations (xor, or) |
8664 | // 3. Other forms of the same operation (intrinsics and other variants) |
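| // |
| // Note that the cases below fall into two groups: the integer/logic and |
| // commutative ("C"-suffixed) min/max opcodes return true unconditionally, |
| // while the ordinary FP add/mul opcodes are only reassociable when the |
| // instruction carries both the reassoc and nsz fast-math flags. A rough |
| // IR-level sketch of input that yields such flags (names illustrative): |
| // |
| //   %t = fadd reassoc nsz float %a, %b |
| //   %r = fadd reassoc nsz float %t, %c  ; may become %a + (%b + %c) |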
8665 | bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const { |
8666 | switch (Inst.getOpcode()) { |
8667 | case X86::AND8rr: |
8668 | case X86::AND16rr: |
8669 | case X86::AND32rr: |
8670 | case X86::AND64rr: |
8671 | case X86::OR8rr: |
8672 | case X86::OR16rr: |
8673 | case X86::OR32rr: |
8674 | case X86::OR64rr: |
8675 | case X86::XOR8rr: |
8676 | case X86::XOR16rr: |
8677 | case X86::XOR32rr: |
8678 | case X86::XOR64rr: |
8679 | case X86::IMUL16rr: |
8680 | case X86::IMUL32rr: |
8681 | case X86::IMUL64rr: |
8682 | case X86::PANDrr: |
8683 | case X86::PORrr: |
8684 | case X86::PXORrr: |
8685 | case X86::ANDPDrr: |
8686 | case X86::ANDPSrr: |
8687 | case X86::ORPDrr: |
8688 | case X86::ORPSrr: |
8689 | case X86::XORPDrr: |
8690 | case X86::XORPSrr: |
8691 | case X86::PADDBrr: |
8692 | case X86::PADDWrr: |
8693 | case X86::PADDDrr: |
8694 | case X86::PADDQrr: |
8695 | case X86::PMULLWrr: |
8696 | case X86::PMULLDrr: |
8697 | case X86::PMAXSBrr: |
8698 | case X86::PMAXSDrr: |
8699 | case X86::PMAXSWrr: |
8700 | case X86::PMAXUBrr: |
8701 | case X86::PMAXUDrr: |
8702 | case X86::PMAXUWrr: |
8703 | case X86::PMINSBrr: |
8704 | case X86::PMINSDrr: |
8705 | case X86::PMINSWrr: |
8706 | case X86::PMINUBrr: |
8707 | case X86::PMINUDrr: |
8708 | case X86::PMINUWrr: |
8709 | case X86::VPANDrr: |
8710 | case X86::VPANDYrr: |
8711 | case X86::VPANDDZ128rr: |
8712 | case X86::VPANDDZ256rr: |
8713 | case X86::VPANDDZrr: |
8714 | case X86::VPANDQZ128rr: |
8715 | case X86::VPANDQZ256rr: |
8716 | case X86::VPANDQZrr: |
8717 | case X86::VPORrr: |
8718 | case X86::VPORYrr: |
8719 | case X86::VPORDZ128rr: |
8720 | case X86::VPORDZ256rr: |
8721 | case X86::VPORDZrr: |
8722 | case X86::VPORQZ128rr: |
8723 | case X86::VPORQZ256rr: |
8724 | case X86::VPORQZrr: |
8725 | case X86::VPXORrr: |
8726 | case X86::VPXORYrr: |
8727 | case X86::VPXORDZ128rr: |
8728 | case X86::VPXORDZ256rr: |
8729 | case X86::VPXORDZrr: |
8730 | case X86::VPXORQZ128rr: |
8731 | case X86::VPXORQZ256rr: |
8732 | case X86::VPXORQZrr: |
8733 | case X86::VANDPDrr: |
8734 | case X86::VANDPSrr: |
8735 | case X86::VANDPDYrr: |
8736 | case X86::VANDPSYrr: |
8737 | case X86::VANDPDZ128rr: |
8738 | case X86::VANDPSZ128rr: |
8739 | case X86::VANDPDZ256rr: |
8740 | case X86::VANDPSZ256rr: |
8741 | case X86::VANDPDZrr: |
8742 | case X86::VANDPSZrr: |
8743 | case X86::VORPDrr: |
8744 | case X86::VORPSrr: |
8745 | case X86::VORPDYrr: |
8746 | case X86::VORPSYrr: |
8747 | case X86::VORPDZ128rr: |
8748 | case X86::VORPSZ128rr: |
8749 | case X86::VORPDZ256rr: |
8750 | case X86::VORPSZ256rr: |
8751 | case X86::VORPDZrr: |
8752 | case X86::VORPSZrr: |
8753 | case X86::VXORPDrr: |
8754 | case X86::VXORPSrr: |
8755 | case X86::VXORPDYrr: |
8756 | case X86::VXORPSYrr: |
8757 | case X86::VXORPDZ128rr: |
8758 | case X86::VXORPSZ128rr: |
8759 | case X86::VXORPDZ256rr: |
8760 | case X86::VXORPSZ256rr: |
8761 | case X86::VXORPDZrr: |
8762 | case X86::VXORPSZrr: |
8763 | case X86::KADDBrr: |
8764 | case X86::KADDWrr: |
8765 | case X86::KADDDrr: |
8766 | case X86::KADDQrr: |
8767 | case X86::KANDBrr: |
8768 | case X86::KANDWrr: |
8769 | case X86::KANDDrr: |
8770 | case X86::KANDQrr: |
8771 | case X86::KORBrr: |
8772 | case X86::KORWrr: |
8773 | case X86::KORDrr: |
8774 | case X86::KORQrr: |
8775 | case X86::KXORBrr: |
8776 | case X86::KXORWrr: |
8777 | case X86::KXORDrr: |
8778 | case X86::KXORQrr: |
8779 | case X86::VPADDBrr: |
8780 | case X86::VPADDWrr: |
8781 | case X86::VPADDDrr: |
8782 | case X86::VPADDQrr: |
8783 | case X86::VPADDBYrr: |
8784 | case X86::VPADDWYrr: |
8785 | case X86::VPADDDYrr: |
8786 | case X86::VPADDQYrr: |
8787 | case X86::VPADDBZ128rr: |
8788 | case X86::VPADDWZ128rr: |
8789 | case X86::VPADDDZ128rr: |
8790 | case X86::VPADDQZ128rr: |
8791 | case X86::VPADDBZ256rr: |
8792 | case X86::VPADDWZ256rr: |
8793 | case X86::VPADDDZ256rr: |
8794 | case X86::VPADDQZ256rr: |
8795 | case X86::VPADDBZrr: |
8796 | case X86::VPADDWZrr: |
8797 | case X86::VPADDDZrr: |
8798 | case X86::VPADDQZrr: |
8799 | case X86::VPMULLWrr: |
8800 | case X86::VPMULLWYrr: |
8801 | case X86::VPMULLWZ128rr: |
8802 | case X86::VPMULLWZ256rr: |
8803 | case X86::VPMULLWZrr: |
8804 | case X86::VPMULLDrr: |
8805 | case X86::VPMULLDYrr: |
8806 | case X86::VPMULLDZ128rr: |
8807 | case X86::VPMULLDZ256rr: |
8808 | case X86::VPMULLDZrr: |
8809 | case X86::VPMULLQZ128rr: |
8810 | case X86::VPMULLQZ256rr: |
8811 | case X86::VPMULLQZrr: |
8812 | case X86::VPMAXSBrr: |
8813 | case X86::VPMAXSBYrr: |
8814 | case X86::VPMAXSBZ128rr: |
8815 | case X86::VPMAXSBZ256rr: |
8816 | case X86::VPMAXSBZrr: |
8817 | case X86::VPMAXSDrr: |
8818 | case X86::VPMAXSDYrr: |
8819 | case X86::VPMAXSDZ128rr: |
8820 | case X86::VPMAXSDZ256rr: |
8821 | case X86::VPMAXSDZrr: |
8822 | case X86::VPMAXSQZ128rr: |
8823 | case X86::VPMAXSQZ256rr: |
8824 | case X86::VPMAXSQZrr: |
8825 | case X86::VPMAXSWrr: |
8826 | case X86::VPMAXSWYrr: |
8827 | case X86::VPMAXSWZ128rr: |
8828 | case X86::VPMAXSWZ256rr: |
8829 | case X86::VPMAXSWZrr: |
8830 | case X86::VPMAXUBrr: |
8831 | case X86::VPMAXUBYrr: |
8832 | case X86::VPMAXUBZ128rr: |
8833 | case X86::VPMAXUBZ256rr: |
8834 | case X86::VPMAXUBZrr: |
8835 | case X86::VPMAXUDrr: |
8836 | case X86::VPMAXUDYrr: |
8837 | case X86::VPMAXUDZ128rr: |
8838 | case X86::VPMAXUDZ256rr: |
8839 | case X86::VPMAXUDZrr: |
8840 | case X86::VPMAXUQZ128rr: |
8841 | case X86::VPMAXUQZ256rr: |
8842 | case X86::VPMAXUQZrr: |
8843 | case X86::VPMAXUWrr: |
8844 | case X86::VPMAXUWYrr: |
8845 | case X86::VPMAXUWZ128rr: |
8846 | case X86::VPMAXUWZ256rr: |
8847 | case X86::VPMAXUWZrr: |
8848 | case X86::VPMINSBrr: |
8849 | case X86::VPMINSBYrr: |
8850 | case X86::VPMINSBZ128rr: |
8851 | case X86::VPMINSBZ256rr: |
8852 | case X86::VPMINSBZrr: |
8853 | case X86::VPMINSDrr: |
8854 | case X86::VPMINSDYrr: |
8855 | case X86::VPMINSDZ128rr: |
8856 | case X86::VPMINSDZ256rr: |
8857 | case X86::VPMINSDZrr: |
8858 | case X86::VPMINSQZ128rr: |
8859 | case X86::VPMINSQZ256rr: |
8860 | case X86::VPMINSQZrr: |
8861 | case X86::VPMINSWrr: |
8862 | case X86::VPMINSWYrr: |
8863 | case X86::VPMINSWZ128rr: |
8864 | case X86::VPMINSWZ256rr: |
8865 | case X86::VPMINSWZrr: |
8866 | case X86::VPMINUBrr: |
8867 | case X86::VPMINUBYrr: |
8868 | case X86::VPMINUBZ128rr: |
8869 | case X86::VPMINUBZ256rr: |
8870 | case X86::VPMINUBZrr: |
8871 | case X86::VPMINUDrr: |
8872 | case X86::VPMINUDYrr: |
8873 | case X86::VPMINUDZ128rr: |
8874 | case X86::VPMINUDZ256rr: |
8875 | case X86::VPMINUDZrr: |
8876 | case X86::VPMINUQZ128rr: |
8877 | case X86::VPMINUQZ256rr: |
8878 | case X86::VPMINUQZrr: |
8879 | case X86::VPMINUWrr: |
8880 | case X86::VPMINUWYrr: |
8881 | case X86::VPMINUWZ128rr: |
8882 | case X86::VPMINUWZ256rr: |
8883 | case X86::VPMINUWZrr: |
8884 | // Normal min/max instructions are not commutative because of NaN and signed |
8885 | // zero semantics, but these are. Thus, there's no need to check for global |
8886 | // relaxed math; the instructions themselves have the properties we need. |
8887 | case X86::MAXCPDrr: |
8888 | case X86::MAXCPSrr: |
8889 | case X86::MAXCSDrr: |
8890 | case X86::MAXCSSrr: |
8891 | case X86::MINCPDrr: |
8892 | case X86::MINCPSrr: |
8893 | case X86::MINCSDrr: |
8894 | case X86::MINCSSrr: |
8895 | case X86::VMAXCPDrr: |
8896 | case X86::VMAXCPSrr: |
8897 | case X86::VMAXCPDYrr: |
8898 | case X86::VMAXCPSYrr: |
8899 | case X86::VMAXCPDZ128rr: |
8900 | case X86::VMAXCPSZ128rr: |
8901 | case X86::VMAXCPDZ256rr: |
8902 | case X86::VMAXCPSZ256rr: |
8903 | case X86::VMAXCPDZrr: |
8904 | case X86::VMAXCPSZrr: |
8905 | case X86::VMAXCSDrr: |
8906 | case X86::VMAXCSSrr: |
8907 | case X86::VMAXCSDZrr: |
8908 | case X86::VMAXCSSZrr: |
8909 | case X86::VMINCPDrr: |
8910 | case X86::VMINCPSrr: |
8911 | case X86::VMINCPDYrr: |
8912 | case X86::VMINCPSYrr: |
8913 | case X86::VMINCPDZ128rr: |
8914 | case X86::VMINCPSZ128rr: |
8915 | case X86::VMINCPDZ256rr: |
8916 | case X86::VMINCPSZ256rr: |
8917 | case X86::VMINCPDZrr: |
8918 | case X86::VMINCPSZrr: |
8919 | case X86::VMINCSDrr: |
8920 | case X86::VMINCSSrr: |
8921 | case X86::VMINCSDZrr: |
8922 | case X86::VMINCSSZrr: |
8923 | case X86::VMAXCPHZ128rr: |
8924 | case X86::VMAXCPHZ256rr: |
8925 | case X86::VMAXCPHZrr: |
8926 | case X86::VMAXCSHZrr: |
8927 | case X86::VMINCPHZ128rr: |
8928 | case X86::VMINCPHZ256rr: |
8929 | case X86::VMINCPHZrr: |
8930 | case X86::VMINCSHZrr: |
8931 | return true; |
8932 | case X86::ADDPDrr: |
8933 | case X86::ADDPSrr: |
8934 | case X86::ADDSDrr: |
8935 | case X86::ADDSSrr: |
8936 | case X86::MULPDrr: |
8937 | case X86::MULPSrr: |
8938 | case X86::MULSDrr: |
8939 | case X86::MULSSrr: |
8940 | case X86::VADDPDrr: |
8941 | case X86::VADDPSrr: |
8942 | case X86::VADDPDYrr: |
8943 | case X86::VADDPSYrr: |
8944 | case X86::VADDPDZ128rr: |
8945 | case X86::VADDPSZ128rr: |
8946 | case X86::VADDPDZ256rr: |
8947 | case X86::VADDPSZ256rr: |
8948 | case X86::VADDPDZrr: |
8949 | case X86::VADDPSZrr: |
8950 | case X86::VADDSDrr: |
8951 | case X86::VADDSSrr: |
8952 | case X86::VADDSDZrr: |
8953 | case X86::VADDSSZrr: |
8954 | case X86::VMULPDrr: |
8955 | case X86::VMULPSrr: |
8956 | case X86::VMULPDYrr: |
8957 | case X86::VMULPSYrr: |
8958 | case X86::VMULPDZ128rr: |
8959 | case X86::VMULPSZ128rr: |
8960 | case X86::VMULPDZ256rr: |
8961 | case X86::VMULPSZ256rr: |
8962 | case X86::VMULPDZrr: |
8963 | case X86::VMULPSZrr: |
8964 | case X86::VMULSDrr: |
8965 | case X86::VMULSSrr: |
8966 | case X86::VMULSDZrr: |
8967 | case X86::VMULSSZrr: |
8968 | case X86::VADDPHZ128rr: |
8969 | case X86::VADDPHZ256rr: |
8970 | case X86::VADDPHZrr: |
8971 | case X86::VADDSHZrr: |
8972 | case X86::VMULPHZ128rr: |
8973 | case X86::VMULPHZ256rr: |
8974 | case X86::VMULPHZrr: |
8975 | case X86::VMULSHZrr: |
8976 | return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) && |
8977 | Inst.getFlag(MachineInstr::MIFlag::FmNsz); |
8978 | default: |
8979 | return false; |
8980 | } |
8981 | } |
8982 | |
8983 | /// If \p DescribedReg overlaps with the MOVrr instruction's destination |
8984 | /// register then, if possible, describe the value in terms of the source |
8985 | /// register. |
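| /// |
| /// For example (registers are illustrative): given "$ebx = MOV32rr $eax", |
| /// describing $ebx yields $eax, describing $bx yields the corresponding |
| /// sub-register $ax, and describing $rbx also yields $eax, since MOV32rr |
| /// implicitly zeroes the upper 32 bits of its destination. |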
8986 | static Optional<ParamLoadedValue> |
8987 | describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg, |
8988 | const TargetRegisterInfo *TRI) { |
8989 | Register DestReg = MI.getOperand(0).getReg(); |
8990 | Register SrcReg = MI.getOperand(1).getReg(); |
8991 | |
8992 | auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {}); |
8993 | |
8994 | // If the described register is the destination, just return the source. |
8995 | if (DestReg == DescribedReg) |
8996 | return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr); |
8997 | |
8998 | // If the described register is a sub-register of the destination register, |
8999 | // then pick out the source register's corresponding sub-register. |
9000 | if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) { |
9001 | Register SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx); |
9002 | return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr); |
9003 | } |
9004 | |
9005 | // The remaining case to consider is when the described register is a |
9006 | // super-register of the destination register. MOV8rr and MOV16rr do not |
9007 | // write to any of the other bytes in the register, meaning that we'd have to |
9008 | // describe the value using a combination of the source register and the |
9009 | // non-overlapping bits in the described register, which is not currently |
9010 | // possible. |
9011 | if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr || |
9012 | !TRI->isSuperRegister(DestReg, DescribedReg)) |
9013 | return None; |
9014 | |
9015 | assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case"); |
9016 | return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr); |
9017 | } |
9018 | |
9019 | Optional<ParamLoadedValue> |
9020 | X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const { |
9021 | const MachineOperand *Op = nullptr; |
9022 | DIExpression *Expr = nullptr; |
9023 | |
9024 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
9025 | |
9026 | switch (MI.getOpcode()) { |
9027 | case X86::LEA32r: |
9028 | case X86::LEA64r: |
9029 | case X86::LEA64_32r: { |
9030 | // We may need to describe a 64-bit parameter with a 32-bit LEA. |
9031 | if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg)) |
9032 | return None; |
9033 | |
9034 | // Operand 4 could be a global address. For now we do not support |
9035 | // that situation. |
9036 | if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm()) |
9037 | return None; |
9038 | |
9039 | const MachineOperand &Op1 = MI.getOperand(1); |
9040 | const MachineOperand &Op2 = MI.getOperand(3); |
9041 | assert(Op2.isReg() && (Op2.getReg() == X86::NoRegister || |
9042 | Register::isPhysicalRegister(Op2.getReg()))); |
9043 | |
9044 | // Omit situations like: |
9045 | // %rsi = lea %rsi, 4, ... |
9046 | if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) || |
9047 | Op2.getReg() == MI.getOperand(0).getReg()) |
9048 | return None; |
9049 | else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister && |
9050 | TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) || |
9051 | (Op2.getReg() != X86::NoRegister && |
9052 | TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg()))) |
9053 | return None; |
9054 | |
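| // The rest of this case builds a DWARF expression for the address |
| // computation. As a rough example (registers and DWARF numbers are |
| // illustrative), "$rcx = LEA64r $rdi, 4, $rsi, 16, $noreg" is described as |
| // $rdi with the expression |
| //   DW_OP_breg4 0, DW_OP_constu 4, DW_OP_mul, DW_OP_plus, DW_OP_plus_uconst 16 |
| // i.e. rdi + rsi * 4 + 16 (DWARF register 4 is $rsi on x86-64). |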
9055 | int64_t Coef = MI.getOperand(2).getImm(); |
9056 | int64_t Offset = MI.getOperand(4).getImm(); |
9057 | SmallVector<uint64_t, 8> Ops; |
9058 | |
9059 | if (Op1.isReg() && Op1.getReg() != X86::NoRegister) { |
9060 | Op = &Op1; |
9061 | } else if (Op1.isFI()) |
9062 | Op = &Op1; |
9063 | |
9064 | if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) { |
9065 | Ops.push_back(dwarf::DW_OP_constu); |
9066 | Ops.push_back(Coef + 1); |
9067 | Ops.push_back(dwarf::DW_OP_mul); |
9068 | } else { |
9069 | if (Op && Op2.getReg() != X86::NoRegister) { |
9070 | int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false); |
9071 | if (dwarfReg < 0) |
9072 | return None; |
9073 | else if (dwarfReg < 32) { |
9074 | Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg); |
9075 | Ops.push_back(0); |
9076 | } else { |
9077 | Ops.push_back(dwarf::DW_OP_bregx); |
9078 | Ops.push_back(dwarfReg); |
9079 | Ops.push_back(0); |
9080 | } |
9081 | } else if (!Op) { |
9082 | assert(Op2.getReg() != X86::NoRegister); |
9083 | Op = &Op2; |
9084 | } |
9085 | |
9086 | if (Coef > 1) { |
9087 | assert(Op2.getReg() != X86::NoRegister); |
9088 | Ops.push_back(dwarf::DW_OP_constu); |
9089 | Ops.push_back(Coef); |
9090 | Ops.push_back(dwarf::DW_OP_mul); |
9091 | } |
9092 | |
9093 | if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) && |
9094 | Op2.getReg() != X86::NoRegister) { |
9095 | Ops.push_back(dwarf::DW_OP_plus); |
9096 | } |
9097 | } |
9098 | |
9099 | DIExpression::appendOffset(Ops, Offset); |
9100 | Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), Ops); |
9101 | |
9102 | return ParamLoadedValue(*Op, Expr); |
9103 | } |
9104 | case X86::MOV8ri: |
9105 | case X86::MOV16ri: |
9106 | // TODO: Handle MOV8ri and MOV16ri. |
9107 | return None; |
9108 | case X86::MOV32ri: |
9109 | case X86::MOV64ri: |
9110 | case X86::MOV64ri32: |
9111 | // MOV32ri may be used for producing zero-extended 32-bit immediates in |
9112 | // 64-bit parameters, so we need to consider super-registers. |
9113 | if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg)) |
9114 | return None; |
9115 | return ParamLoadedValue(MI.getOperand(1), Expr); |
9116 | case X86::MOV8rr: |
9117 | case X86::MOV16rr: |
9118 | case X86::MOV32rr: |
9119 | case X86::MOV64rr: |
9120 | return describeMOVrrLoadedValue(MI, Reg, TRI); |
9121 | case X86::XOR32rr: { |
9122 | // 64-bit parameters are zero-materialized using XOR32rr, so also consider |
9123 | // super-registers. |
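| // For example, the zero idiom "$eax = XOR32rr $eax, $eax" describes both |
| // $eax and $rax as the constant 0. |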
9124 | if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg)) |
9125 | return None; |
9126 | if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) |
9127 | return ParamLoadedValue(MachineOperand::CreateImm(0), Expr); |
9128 | return None; |
9129 | } |
9130 | case X86::MOVSX64rr32: { |
9131 | // We may need to describe the lower 32 bits of the MOVSX; for example, in |
9132 | // cases like this: |
9133 | // |
9134 | // $ebx = [...] |
9135 | // $rdi = MOVSX64rr32 $ebx |
9136 | // $esi = MOV32rr $edi |
9137 | if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg)) |
9138 | return None; |
9139 | |
9140 | Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {}); |
9141 | |
9142 | // If the described register is the destination register we need to |
9143 | // sign-extend the source register from 32 bits. The other case we handle |
9144 | // is when the described register is the 32-bit sub-register of the |
9145 | // destination register, in which case we just need to return the source |
9146 | // register. |
9147 | if (Reg == MI.getOperand(0).getReg()) |
9148 | Expr = DIExpression::appendExt(Expr, 32, 64, true); |
9149 | else |
9150 | assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) && |
9151 | "Unhandled sub-register case for MOVSX64rr32"); |
9152 | |
9153 | return ParamLoadedValue(MI.getOperand(1), Expr); |
9154 | } |
9155 | default: |
9156 | assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction"); |
9157 | return TargetInstrInfo::describeLoadedValue(MI, Reg); |
9158 | } |
9159 | } |
9160 | |
9161 | /// This is an architecture-specific helper function of reassociateOps. |
9162 | /// Set special operand attributes for new instructions after reassociation. |
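| /// |
| /// For example, if one original instruction carries the reassoc and nsz |
| /// fast-math flags and the other carries reassoc, nsz, and nnan, both new |
| /// instructions keep only the intersection (reassoc and nsz); nsw, nuw, and |
| /// exact are then cleared since they may no longer hold after reassociation. |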
9163 | void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1, |
9164 | MachineInstr &OldMI2, |
9165 | MachineInstr &NewMI1, |
9166 | MachineInstr &NewMI2) const { |
9167 | // Propagate FP flags from the original instructions. |
9168 | // But clear poison-generating flags because those may not be valid now. |
9169 | // TODO: There should be a helper function for copying only fast-math-flags. |
9170 | uint16_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags(); |
9171 | NewMI1.setFlags(IntersectedFlags); |
9172 | NewMI1.clearFlag(MachineInstr::MIFlag::NoSWrap); |
9173 | NewMI1.clearFlag(MachineInstr::MIFlag::NoUWrap); |
9174 | NewMI1.clearFlag(MachineInstr::MIFlag::IsExact); |
9175 | |
9176 | NewMI2.setFlags(IntersectedFlags); |
9177 | NewMI2.clearFlag(MachineInstr::MIFlag::NoSWrap); |
9178 | NewMI2.clearFlag(MachineInstr::MIFlag::NoUWrap); |
9179 | NewMI2.clearFlag(MachineInstr::MIFlag::IsExact); |
9180 | |
9181 | // Integer instructions may define an implicit EFLAGS dest register operand. |
9182 | MachineOperand *OldFlagDef1 = OldMI1.findRegisterDefOperand(X86::EFLAGS); |
9183 | MachineOperand *OldFlagDef2 = OldMI2.findRegisterDefOperand(X86::EFLAGS); |
9184 | |
9185 | assert(!OldFlagDef1 == !OldFlagDef2 && |
9186 | "Unexpected instruction type for reassociation"); |
9187 | |
9188 | if (!OldFlagDef1 || !OldFlagDef2) |
9189 | return; |
9190 | |
9191 | assert(OldFlagDef1->isDead() && OldFlagDef2->isDead() && |
9192 | "Must have dead EFLAGS operand in reassociable instruction"); |
9193 | |
9194 | MachineOperand *NewFlagDef1 = NewMI1.findRegisterDefOperand(X86::EFLAGS); |
9195 | MachineOperand *NewFlagDef2 = NewMI2.findRegisterDefOperand(X86::EFLAGS); |
9196 | |
9197 | assert(NewFlagDef1 && NewFlagDef2 && |
9198 | "Unexpected operand in reassociable instruction"); |
9199 | |
9200 | // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations |
9201 | // of this pass or other passes. The EFLAGS operands must be dead in these new |
9202 | // instructions because the EFLAGS operands in the original instructions must |
9203 | // be dead in order for reassociation to occur. |
9204 | NewFlagDef1->setIsDead(); |
9205 | NewFlagDef2->setIsDead(); |
9206 | } |
9207 | |
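| // X86 operand target flags are plain enumerated values rather than bitmasks, |
| // so the entire flag word is returned as the "direct" component. |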
9208 | std::pair<unsigned, unsigned> |
9209 | X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { |
9210 | return std::make_pair(TF, 0u); |
9211 | } |
9212 | |
9213 | ArrayRef<std::pair<unsigned, const char *>> |
9214 | X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const { |
9215 | using namespace X86II; |
9216 | static const std::pair<unsigned, const char *> TargetFlags[] = { |
9217 | {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"}, |
9218 | {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"}, |
9219 | {MO_GOT, "x86-got"}, |
9220 | {MO_GOTOFF, "x86-gotoff"}, |
9221 | {MO_GOTPCREL, "x86-gotpcrel"}, |
9222 | {MO_GOTPCREL_NORELAX, "x86-gotpcrel-norelax"}, |
9223 | {MO_PLT, "x86-plt"}, |
9224 | {MO_TLSGD, "x86-tlsgd"}, |
9225 | {MO_TLSLD, "x86-tlsld"}, |
9226 | {MO_TLSLDM, "x86-tlsldm"}, |
9227 | {MO_GOTTPOFF, "x86-gottpoff"}, |
9228 | {MO_INDNTPOFF, "x86-indntpoff"}, |
9229 | {MO_TPOFF, "x86-tpoff"}, |
9230 | {MO_DTPOFF, "x86-dtpoff"}, |
9231 | {MO_NTPOFF, "x86-ntpoff"}, |
9232 | {MO_GOTNTPOFF, "x86-gotntpoff"}, |
9233 | {MO_DLLIMPORT, "x86-dllimport"}, |
9234 | {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"}, |
9235 | {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"}, |
9236 | {MO_TLVP, "x86-tlvp"}, |
9237 | {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"}, |
9238 | {MO_SECREL, "x86-secrel"}, |
9239 | {MO_COFFSTUB, "x86-coffstub"}}; |
9240 | return makeArrayRef(TargetFlags); |
9241 | } |
9242 | |
9243 | namespace { |
9244 | /// Create Global Base Reg pass. This initializes the PIC |
9245 | /// global base register for x86-32. |
9246 | struct CGBR : public MachineFunctionPass { |
9247 | static char ID; |
9248 | CGBR() : MachineFunctionPass(ID) {} |
9249 | |
9250 | bool runOnMachineFunction(MachineFunction &MF) override { |
9251 | const X86TargetMachine *TM = |
9252 | static_cast<const X86TargetMachine *>(&MF.getTarget()); |
9253 | const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); |
9254 | |
9255 | // Don't do anything in the 64-bit small and kernel code models. They use |
9256 | // RIP-relative addressing for everything. |
9257 | if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small || |
9258 | TM->getCodeModel() == CodeModel::Kernel)) |
9259 | return false; |
9260 | |
9261 | // Only emit a global base reg in PIC mode. |
9262 | if (!TM->isPositionIndependent()) |
9263 | return false; |
9264 | |
9265 | X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); |
9266 | Register GlobalBaseReg = X86FI->getGlobalBaseReg(); |
9267 | |
9268 | // If we didn't need a GlobalBaseReg, don't insert code. |
9269 | if (GlobalBaseReg == 0) |
9270 | return false; |
9271 | |
9272 | // Insert the set of GlobalBaseReg into the first MBB of the function |
9273 | MachineBasicBlock &FirstMBB = MF.front(); |
9274 | MachineBasicBlock::iterator MBBI = FirstMBB.begin(); |
9275 | DebugLoc DL = FirstMBB.findDebugLoc(MBBI); |
9276 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); |
9277 | const X86InstrInfo *TII = STI.getInstrInfo(); |
9278 | |
9279 | Register PC; |
9280 | if (STI.isPICStyleGOT()) |
9281 | PC = RegInfo.createVirtualRegister(&X86::GR32RegClass); |
9282 | else |
9283 | PC = GlobalBaseReg; |
9284 | |
9285 | if (STI.is64Bit()) { |
9286 | if (TM->getCodeModel() == CodeModel::Medium) { |
9287 | // In the medium code model, use a RIP-relative LEA to materialize the |
9288 | // GOT. |
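| // That is, roughly: leaq _GLOBAL_OFFSET_TABLE_(%rip), %reg |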
9289 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC) |
9290 | .addReg(X86::RIP) |
9291 | .addImm(0) |
9292 | .addReg(0) |
9293 | .addExternalSymbol("_GLOBAL_OFFSET_TABLE_") |
9294 | .addReg(0); |
9295 | } else if (TM->getCodeModel() == CodeModel::Large) { |
9296 | // In the large code model, we are aiming for this code, though the |
9297 | // register allocation may vary: |
9298 | // leaq .LN$pb(%rip), %rax |
9299 | // movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx |
9300 | // addq %rcx, %rax |
9301 | // RAX now holds address of _GLOBAL_OFFSET_TABLE_. |
9302 | Register PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass); |
9303 | Register GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass); |
9304 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg) |
9305 | .addReg(X86::RIP) |
9306 | .addImm(0) |
9307 | .addReg(0) |
9308 | .addSym(MF.getPICBaseSymbol()) |
9309 | .addReg(0); |
9310 | std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol()); |
9311 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg) |
9312 | .addExternalSymbol("_GLOBAL_OFFSET_TABLE_", |
9313 | X86II::MO_PIC_BASE_OFFSET); |
9314 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC) |
9315 | .addReg(PBReg, RegState::Kill) |
9316 | .addReg(GOTReg, RegState::Kill); |
9317 | } else { |
9318 | llvm_unreachable("unexpected code model"); |
9319 | } |
9320 | } else { |
9321 | // The operand of MovePCtoStack is completely ignored by the asm printer. |
9322 | // It's only used in JIT code emission as a displacement to the PC. |
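| // (MOVPC32r is later lowered to roughly "call .Lpb; .Lpb: popl %reg".) |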
9323 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0); |
9324 | |
9325 | // If we're using vanilla 'GOT' PIC style, we should use relative |
9326 | // addressing not to the PC, but to the _GLOBAL_OFFSET_TABLE_ external symbol. |
9327 | if (STI.isPICStyleGOT()) { |
9328 | // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], |
9329 | // %some_register |
9330 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg) |
9331 | .addReg(PC) |
9332 | .addExternalSymbol("_GLOBAL_OFFSET_TABLE_", |
9333 | X86II::MO_GOT_ABSOLUTE_ADDRESS); |
9334 | } |
9335 | } |
9336 | |
9337 | return true; |
9338 | } |
9339 | |
9340 | StringRef getPassName() const override { |
9341 | return "X86 PIC Global Base Reg Initialization"; |
9342 | } |
9343 | |
9344 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
9345 | AU.setPreservesCFG(); |
9346 | MachineFunctionPass::getAnalysisUsage(AU); |
9347 | } |
9348 | }; |
9349 | } // namespace |
9350 | |
9351 | char CGBR::ID = 0; |
9352 | FunctionPass* |
9353 | llvm::createX86GlobalBaseRegPass() { return new CGBR(); } |
9354 | |
9355 | namespace { |
9356 | struct LDTLSCleanup : public MachineFunctionPass { |
9357 | static char ID; |
9358 | LDTLSCleanup() : MachineFunctionPass(ID) {} |
9359 | |
9360 | bool runOnMachineFunction(MachineFunction &MF) override { |
9361 | if (skipFunction(MF.getFunction())) |
9362 | return false; |
9363 | |
9364 | X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
9365 | if (MFI->getNumLocalDynamicTLSAccesses() < 2) { |
9366 | // No point folding accesses if there aren't at least two. |
9367 | return false; |
9368 | } |
9369 | |
9370 | MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>(); |
9371 | return VisitNode(DT->getRootNode(), 0); |
9372 | } |
9373 | |
9374 | // Visit the dominator subtree rooted at Node in pre-order. |
9375 | // If TLSBaseAddrReg is nonzero, then use it to replace any |
9376 | // TLS_base_addr instructions. Otherwise, create the register |
9377 | // when the first such instruction is seen, and then use it |
9378 | // as we encounter more instructions. |
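| // |
| // For example (a rough sketch): |
| // |
| //   $rax = TLS_base_addr64 ...    <-- first access is kept; then |
| //   %tls = COPY $rax              <-- its result is cached in a vreg |
| //   ... |
| //   $rax = COPY %tls              <-- later accesses become plain copies |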
9379 | bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) { |
9380 | MachineBasicBlock *BB = Node->getBlock(); |
9381 | bool Changed = false; |
9382 | |
9383 | // Traverse the current block. |
9384 | for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; |
9385 | ++I) { |
9386 | switch (I->getOpcode()) { |
9387 | case X86::TLS_base_addr32: |
9388 | case X86::TLS_base_addr64: |
9389 | if (TLSBaseAddrReg) |
9390 | I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg); |
9391 | else |
9392 | I = SetRegister(*I, &TLSBaseAddrReg); |
9393 | Changed = true; |
9394 | break; |
9395 | default: |
9396 | break; |
9397 | } |
9398 | } |
9399 | |
9400 | // Visit the children of this block in the dominator tree. |
9401 | for (auto &I : *Node) { |
9402 | Changed |= VisitNode(I, TLSBaseAddrReg); |
9403 | } |
9404 | |
9405 | return Changed; |
9406 | } |
9407 | |
9408 | // Replace the TLS_base_addr instruction I with a copy from |
9409 | // TLSBaseAddrReg, returning the new instruction. |
9410 | MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I, |
9411 | unsigned TLSBaseAddrReg) { |
9412 | MachineFunction *MF = I.getParent()->getParent(); |
9413 | const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>(); |
9414 | const bool is64Bit = STI.is64Bit(); |
9415 | const X86InstrInfo *TII = STI.getInstrInfo(); |
9416 | |
9417 | // Insert a Copy from TLSBaseAddrReg to RAX/EAX. |
9418 | MachineInstr *Copy = |
9419 | BuildMI(*I.getParent(), I, I.getDebugLoc(), |
9420 | TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX) |
9421 | .addReg(TLSBaseAddrReg); |
9422 | |
9423 | // Erase the TLS_base_addr instruction. |
9424 | I.eraseFromParent(); |
9425 | |
9426 | return Copy; |
9427 | } |
9428 | |
9429 | // Create a virtual register in *TLSBaseAddrReg, and populate it by |
9430 | // inserting a copy instruction after I. Returns the new instruction. |
9431 | MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) { |
9432 | MachineFunction *MF = I.getParent()->getParent(); |
9433 | const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>(); |
9434 | const bool is64Bit = STI.is64Bit(); |
9435 | const X86InstrInfo *TII = STI.getInstrInfo(); |
9436 | |
9437 | // Create a virtual register for the TLS base address. |
9438 | MachineRegisterInfo &RegInfo = MF->getRegInfo(); |
9439 | *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit |
9440 | ? &X86::GR64RegClass |
9441 | : &X86::GR32RegClass); |
9442 | |
9443 | // Insert a copy from RAX/EAX to TLSBaseAddrReg. |
9444 | MachineInstr *Next = I.getNextNode(); |
9445 | MachineInstr *Copy = |
9446 | BuildMI(*I.getParent(), Next, I.getDebugLoc(), |
9447 | TII->get(TargetOpcode::COPY), *TLSBaseAddrReg) |
9448 | .addReg(is64Bit ? X86::RAX : X86::EAX); |
9449 | |
9450 | return Copy; |
9451 | } |
9452 | |
9453 | StringRef getPassName() const override { |
9454 | return "Local Dynamic TLS Access Clean-up"; |
9455 | } |
9456 | |
9457 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
9458 | AU.setPreservesCFG(); |
9459 | AU.addRequired<MachineDominatorTree>(); |
9460 | MachineFunctionPass::getAnalysisUsage(AU); |
9461 | } |
9462 | }; |
9463 | } // namespace |
9464 | |
9465 | char LDTLSCleanup::ID = 0; |
9466 | FunctionPass* |
9467 | llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); } |
9468 | |
9469 | /// Constants defining how certain sequences should be outlined. |
9470 | /// |
9471 | /// \p MachineOutlinerDefault implies that the function is called with a call |
9472 | /// instruction, and a return must be emitted for the outlined function frame. |
9473 | /// |
9474 | /// That is, |
9475 | /// |
9476 | /// I1 OUTLINED_FUNCTION: |
9477 | /// I2 --> call OUTLINED_FUNCTION I1 |
9478 | /// I3 I2 |
9479 | /// I3 |
9480 | /// ret |
9481 | /// |
9482 | /// * Call construction overhead: 1 (call instruction) |
9483 | /// * Frame construction overhead: 1 (return instruction) |
9484 | /// |
9485 | /// \p MachineOutlinerTailCall implies that the function is being tail called. |
9486 | /// A jump is emitted instead of a call, and the return is already present in |
9487 | /// the outlined sequence. That is, |
9488 | /// |
9489 | /// I1 OUTLINED_FUNCTION: |
9490 | /// I2 --> jmp OUTLINED_FUNCTION I1 |
9491 | /// ret I2 |
9492 | /// ret |
9493 | /// |
9494 | /// * Call construction overhead: 1 (jump instruction) |
9495 | /// * Frame construction overhead: 0 (don't need to return) |
9496 | /// |
9497 | enum MachineOutlinerClass { |
9498 | MachineOutlinerDefault, |
9499 | MachineOutlinerTailCall |
9500 | }; |
9501 | |
9502 | outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo( |
9503 | std::vector<outliner::Candidate> &RepeatedSequenceLocs) const { |
9504 | unsigned SequenceSize = |
9505 | std::accumulate(RepeatedSequenceLocs[0].front(), |
9506 | std::next(RepeatedSequenceLocs[0].back()), 0, |
9507 | [](unsigned Sum, const MachineInstr &MI) { |
9508 | // FIXME: x86 doesn't implement getInstSizeInBytes, so |
9509 | // we can't tell the cost. Just assume each instruction |
9510 | // is one byte. |
9511 | if (MI.isDebugInstr() || MI.isKill()) |
9512 | return Sum; |
9513 | return Sum + 1; |
9514 | }); |
9515 | |
9516 | // We check whether CFI instructions are present; if they are, we count how |
9517 | // many occur in the candidate sequence. |
9518 | unsigned CFICount = 0; |
9519 | for (auto &I : make_range(RepeatedSequenceLocs[0].front(), |
9520 | std::next(RepeatedSequenceLocs[0].back()))) { |
9521 | if (I.isCFIInstruction()) |
9522 | CFICount++; |
9523 | } |
9524 | |
9525 | // We compare the number of found CFI Instructions to the number of CFI |
9526 | // instructions in the parent function for each candidate. We must check this |
9527 | // since if we outline one of the CFI instructions in a function, we have to |
9528 | // outline them all for correctness. If we do not, the address offsets will be |
9529 | // incorrect between the two sections of the program. |
9530 | for (outliner::Candidate &C : RepeatedSequenceLocs) { |
9531 | std::vector<MCCFIInstruction> CFIInstructions = |
9532 | C.getMF()->getFrameInstructions(); |
9533 | |
9534 | if (CFICount > 0 && CFICount != CFIInstructions.size()) |
9535 | return outliner::OutlinedFunction(); |
9536 | } |
9537 | |
9538 | // FIXME: Use real size in bytes for call and ret instructions. |
9539 | if (RepeatedSequenceLocs[0].back()->isTerminator()) { |
9540 | for (outliner::Candidate &C : RepeatedSequenceLocs) |
9541 | C.setCallInfo(MachineOutlinerTailCall, 1); |
9542 | |
9543 | return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, |
9544 | 0, // Number of bytes to emit frame. |
9545 | MachineOutlinerTailCall // Type of frame. |
9546 | ); |
9547 | } |
9548 | |
9549 | if (CFICount > 0) |
9550 | return outliner::OutlinedFunction(); |
9551 | |
9552 | for (outliner::Candidate &C : RepeatedSequenceLocs) |
9553 | C.setCallInfo(MachineOutlinerDefault, 1); |
9554 | |
9555 | return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1, |
9556 | MachineOutlinerDefault); |
9557 | } |
9558 | |
9559 | bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF, |
9560 | bool OutlineFromLinkOnceODRs) const { |
9561 | const Function &F = MF.getFunction(); |
9562 | |
9563 | // Does the function use a red zone? If it does, then we can't risk messing |
9564 | // with the stack. |
9565 | if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) { |
9566 | // It could have a red zone. If it does, then we don't want to touch it. |
9567 | const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); |
9568 | if (!X86FI || X86FI->getUsesRedZone()) |
9569 | return false; |
9570 | } |
9571 | |
9572 | // If we *don't* want to outline from things that could potentially be |
9573 | // deduplicated, then return false. |
9574 | if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage()) |
9575 | return false; |
9576 | |
9577 | // This function is viable for outlining, so return true. |
9578 | return true; |
9579 | } |
9580 | |
9581 | outliner::InstrType |
9582 | X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const { |
9583 | MachineInstr &MI = *MIT; |
9584 | // Don't allow debug values to impact outlining type. |
9585 | if (MI.isDebugInstr() || MI.isIndirectDebugValue()) |
9586 | return outliner::InstrType::Invisible; |
9587 | |
9588 | // At this point, KILL instructions don't really tell us much so we can go |
9589 | // ahead and skip over them. |
9590 | if (MI.isKill()) |
9591 | return outliner::InstrType::Invisible; |
9592 | |
9593 | // Is this a tail call? If yes, we can outline as a tail call. |
9594 | if (isTailCall(MI)) |
9595 | return outliner::InstrType::Legal; |
9596 | |
9597 | // Is this the terminator of a basic block? |
9598 | if (MI.isTerminator() || MI.isReturn()) { |
9599 | |
9600 | // Does its parent have any successors in its MachineFunction? |
9601 | if (MI.getParent()->succ_empty()) |
9602 | return outliner::InstrType::Legal; |
9603 | |
9604 | // It does, so we can't tail call it. |
9605 | return outliner::InstrType::Illegal; |
9606 | } |
9607 | |
9608 | // Don't outline anything that modifies or reads from the stack pointer. |
9609 | // |
9610 | // FIXME: There are instructions which are being manually built without |
9611 | // explicit uses/defs so we also have to check the MCInstrDesc. We should be |
9612 | // able to remove the extra checks once those are fixed up. For example, |
9613 | // sometimes we might get something like %rax = POP64r 1. This won't be |
9614 | // caught by modifiesRegister or readsRegister even though the instruction |
9615 | // really ought to be formed so that modifiesRegister/readsRegister would |
9616 | // catch it. |
9617 | if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) || |
9618 | MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) || |
9619 | MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP)) |
9620 | return outliner::InstrType::Illegal; |
9621 | |
9622 | // Outlined calls change the instruction pointer, so don't read from it. |
9623 | if (MI.readsRegister(X86::RIP, &RI) || |
9624 | MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) || |
9625 | MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP)) |
9626 | return outliner::InstrType::Illegal; |
9627 | |
9628 | // Position markers (labels, CFI directives, etc.) can't safely be outlined. |
9629 | if (MI.isPosition()) |
9630 | return outliner::InstrType::Illegal; |
9631 | |
9632 | // Make sure none of the operands of this instruction do anything tricky. |
9633 | for (const MachineOperand &MOP : MI.operands()) |
9634 | if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() || |
9635 | MOP.isTargetIndex()) |
9636 | return outliner::InstrType::Illegal; |
9637 | |
9638 | return outliner::InstrType::Legal; |
9639 | } |
9640 | |
9641 | void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB, |
9642 | MachineFunction &MF, |
9643 | const outliner::OutlinedFunction &OF) |
9644 | const { |
9645 | // If we're a tail call, we already have a return, so don't do anything. |
9646 | if (OF.FrameConstructionID == MachineOutlinerTailCall) |
9647 | return; |
9648 | |
9649 | // We're a normal call, so our sequence doesn't have a return instruction. |
9650 | // Add it in. |
9651 | MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RET64)); |
9652 | MBB.insert(MBB.end(), retq); |
9653 | } |
9654 | |
9655 | MachineBasicBlock::iterator |
9656 | X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB, |
9657 | MachineBasicBlock::iterator &It, |
9658 | MachineFunction &MF, |
9659 | outliner::Candidate &C) const { |
9660 | // Is it a tail call? |
9661 | if (C.CallConstructionID == MachineOutlinerTailCall) { |
9662 | // Yes, just insert a JMP. |
9663 | It = MBB.insert(It, |
9664 | BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64)) |
9665 | .addGlobalAddress(M.getNamedValue(MF.getName()))); |
9666 | } else { |
9667 | // No, insert a call. |
9668 | It = MBB.insert(It, |
9669 | BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32)) |
9670 | .addGlobalAddress(M.getNamedValue(MF.getName()))); |
9671 | } |
9672 | |
9673 | return It; |
9674 | } |
9675 | |
9676 | #define GET_INSTRINFO_HELPERS |
9677 | #include "X86GenInstrInfo.inc" |