Line data Source code
1 : //===- FastISel.cpp - Implementation of the FastISel class ----------------===//
2 : //
3 : // The LLVM Compiler Infrastructure
4 : //
5 : // This file is distributed under the University of Illinois Open Source
6 : // License. See LICENSE.TXT for details.
7 : //
8 : //===----------------------------------------------------------------------===//
9 : //
10 : // This file contains the implementation of the FastISel class.
11 : //
12 : // "Fast" instruction selection is designed to emit very poor code quickly.
13 : // Also, it is not designed to be able to do much lowering, so most illegal
14 : // types (e.g. i64 on 32-bit targets) and operations are not supported. It is
15 : // also not intended to be able to do much optimization, except in a few cases
16 : // where doing optimizations reduces overall compile time. For example, folding
17 : // constants into immediate fields is often done, because it's cheap and it
18 : // reduces the number of instructions later phases have to examine.
19 : //
20 : // "Fast" instruction selection is able to fail gracefully and transfer
21 : // control to the SelectionDAG selector for operations that it doesn't
22 : // support. In many cases, this allows us to avoid duplicating a lot of
23 : // the complicated lowering logic that SelectionDAG currently has.
24 : //
25 : // The intended use for "fast" instruction selection is "-O0" mode
26 : // compilation, where the quality of the generated code is irrelevant when
27 : // weighed against the speed at which the code can be generated. Also,
28 : // at -O0, the LLVM optimizers are not running, and this makes the
29 : // compile time of codegen a much higher portion of the overall compile
30 : // time. Despite its limitations, "fast" instruction selection is able to
31 : // handle enough code on its own to provide noticeable overall speedups
32 : // in -O0 compiles.
33 : //
34 : // Basic operations are supported in a target-independent way, by reading
35 : // the same instruction descriptions that the SelectionDAG selector reads,
36 : // and identifying simple arithmetic operations that can be directly selected
37 : // from simple operators. More complicated operations currently require
38 : // target-specific code.
39 : //
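// For illustration only (an assumed shape, not code from this file): a
// target typically plugs into this framework by subclassing FastISel and
// overriding the fastSelectInstruction hook, e.g.:
//
//   class MyTargetFastISel final : public FastISel {
//   public:
//     MyTargetFastISel(FunctionLoweringInfo &FuncInfo,
//                      const TargetLibraryInfo *LibInfo)
//         : FastISel(FuncInfo, LibInfo) {}
//     bool fastSelectInstruction(const Instruction *I) override;
//   };
//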
40 : //===----------------------------------------------------------------------===//
41 :
42 : #include "llvm/CodeGen/FastISel.h"
43 : #include "llvm/ADT/APFloat.h"
44 : #include "llvm/ADT/APSInt.h"
45 : #include "llvm/ADT/DenseMap.h"
46 : #include "llvm/ADT/Optional.h"
47 : #include "llvm/ADT/SmallPtrSet.h"
48 : #include "llvm/ADT/SmallString.h"
49 : #include "llvm/ADT/SmallVector.h"
50 : #include "llvm/ADT/Statistic.h"
51 : #include "llvm/Analysis/BranchProbabilityInfo.h"
52 : #include "llvm/Analysis/TargetLibraryInfo.h"
53 : #include "llvm/CodeGen/Analysis.h"
54 : #include "llvm/CodeGen/FunctionLoweringInfo.h"
55 : #include "llvm/CodeGen/ISDOpcodes.h"
56 : #include "llvm/CodeGen/MachineBasicBlock.h"
57 : #include "llvm/CodeGen/MachineFrameInfo.h"
58 : #include "llvm/CodeGen/MachineInstr.h"
59 : #include "llvm/CodeGen/MachineInstrBuilder.h"
60 : #include "llvm/CodeGen/MachineMemOperand.h"
61 : #include "llvm/CodeGen/MachineModuleInfo.h"
62 : #include "llvm/CodeGen/MachineOperand.h"
63 : #include "llvm/CodeGen/MachineRegisterInfo.h"
64 : #include "llvm/CodeGen/StackMaps.h"
65 : #include "llvm/CodeGen/TargetInstrInfo.h"
66 : #include "llvm/CodeGen/TargetLowering.h"
67 : #include "llvm/CodeGen/TargetSubtargetInfo.h"
68 : #include "llvm/CodeGen/ValueTypes.h"
69 : #include "llvm/IR/Argument.h"
70 : #include "llvm/IR/Attributes.h"
71 : #include "llvm/IR/BasicBlock.h"
72 : #include "llvm/IR/CallSite.h"
73 : #include "llvm/IR/CallingConv.h"
74 : #include "llvm/IR/Constant.h"
75 : #include "llvm/IR/Constants.h"
76 : #include "llvm/IR/DataLayout.h"
77 : #include "llvm/IR/DebugInfo.h"
78 : #include "llvm/IR/DebugLoc.h"
79 : #include "llvm/IR/DerivedTypes.h"
80 : #include "llvm/IR/Function.h"
81 : #include "llvm/IR/GetElementPtrTypeIterator.h"
82 : #include "llvm/IR/GlobalValue.h"
83 : #include "llvm/IR/InlineAsm.h"
84 : #include "llvm/IR/InstrTypes.h"
85 : #include "llvm/IR/Instruction.h"
86 : #include "llvm/IR/Instructions.h"
87 : #include "llvm/IR/IntrinsicInst.h"
88 : #include "llvm/IR/LLVMContext.h"
89 : #include "llvm/IR/Mangler.h"
90 : #include "llvm/IR/Metadata.h"
91 : #include "llvm/IR/Operator.h"
92 : #include "llvm/IR/Type.h"
93 : #include "llvm/IR/User.h"
94 : #include "llvm/IR/Value.h"
95 : #include "llvm/MC/MCContext.h"
96 : #include "llvm/MC/MCInstrDesc.h"
97 : #include "llvm/MC/MCRegisterInfo.h"
98 : #include "llvm/Support/Casting.h"
99 : #include "llvm/Support/Debug.h"
100 : #include "llvm/Support/ErrorHandling.h"
101 : #include "llvm/Support/MachineValueType.h"
102 : #include "llvm/Support/MathExtras.h"
103 : #include "llvm/Support/raw_ostream.h"
104 : #include "llvm/Target/TargetMachine.h"
105 : #include "llvm/Target/TargetOptions.h"
106 : #include <algorithm>
107 : #include <cassert>
108 : #include <cstdint>
109 : #include <iterator>
110 : #include <utility>
111 :
112 : using namespace llvm;
113 :
114 : #define DEBUG_TYPE "isel"
115 :
116 : // FIXME: Remove this after the feature has proven reliable.
117 : static cl::opt<bool> SinkLocalValues("fast-isel-sink-local-values",
118 : cl::init(true), cl::Hidden,
119 : cl::desc("Sink local values in FastISel"));
120 :
121 : STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
122 : "target-independent selector");
123 : STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
124 : "target-specific selector");
125 : STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
126 :
127 : /// Set the current block to which generated machine instructions will be
128 : /// appended.
129 2807184 : void FastISel::startNewBlock() {
130 : assert(LocalValueMap.empty() &&
131 : "local values should be cleared after finishing a BB");
132 :
133 : // Instructions are appended to FuncInfo.MBB. If the basic block already
134 : // contains labels or copies, use the last instruction as the last local
135 : // value.
136 2807184 : EmitStartPt = nullptr;
137 5614368 : if (!FuncInfo.MBB->empty())
138 402016 : EmitStartPt = &FuncInfo.MBB->back();
139 2807184 : LastLocalValue = EmitStartPt;
140 2807184 : }
141 :
142 : /// Flush the local CSE map and sink anything we can.
143 2807181 : void FastISel::finishBasicBlock() { flushLocalValueMap(); }
144 :
145 212887 : bool FastISel::lowerArguments() {
146 212887 : if (!FuncInfo.CanLowerReturn)
147 :     // Fall back to SDISel argument lowering code to deal with sret pointer
148 : // parameter.
149 : return false;
150 :
151 212770 : if (!fastLowerArguments())
152 : return false;
153 :
154 : // Enter arguments into ValueMap for uses in non-entry BBs.
155 194800 : for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
156 194800 : E = FuncInfo.Fn->arg_end();
157 491361 : I != E; ++I) {
158 296561 : DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
159 : assert(VI != LocalValueMap.end() && "Missed an argument?");
160 296561 : FuncInfo.ValueMap[&*I] = VI->second;
161 : }
162 : return true;
163 : }
164 :
165 : /// Return the defined register if this instruction defines exactly one
166 : /// virtual register and uses no other virtual registers. Otherwise return 0.
167 1781363 : static unsigned findSinkableLocalRegDef(MachineInstr &MI) {
168 : unsigned RegDef = 0;
169 8591047 : for (const MachineOperand &MO : MI.operands()) {
170 7011975 : if (!MO.isReg())
171 : continue;
172 3695592 : if (MO.isDef()) {
173 1874900 : if (RegDef)
174 : return 0;
175 1781363 : RegDef = MO.getReg();
176 3641384 : } else if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
177 : // This is another use of a vreg. Don't try to sink it.
178 : return 0;
179 : }
180 : }
181 : return RegDef;
182 : }
183 :
184 3935660 : void FastISel::flushLocalValueMap() {
185 : // Try to sink local values down to their first use so that we can give them a
186 : // better debug location. This has the side effect of shrinking local value
187 : // live ranges, which helps out fast regalloc.
188 3935660 : if (SinkLocalValues && LastLocalValue != EmitStartPt) {
189 : // Sink local value materialization instructions between EmitStartPt and
190 : // LastLocalValue. Visit them bottom-up, starting from LastLocalValue, to
191 : // avoid inserting into the range that we're iterating over.
192 : MachineBasicBlock::reverse_iterator RE =
193 : EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
194 932757 : : FuncInfo.MBB->rend();
195 : MachineBasicBlock::reverse_iterator RI(LastLocalValue);
196 :
197 : InstOrderMap OrderMap;
198 2723229 : for (; RI != RE;) {
199 : MachineInstr &LocalMI = *RI;
200 : ++RI;
201 1790472 : bool Store = true;
202 1790472 : if (!LocalMI.isSafeToMove(nullptr, Store))
203 211400 : continue;
204 1781363 : unsigned DefReg = findSinkableLocalRegDef(LocalMI);
205 1781363 : if (DefReg == 0)
206 : continue;
207 :
208 1579072 : sinkLocalValueMaterialization(LocalMI, DefReg, OrderMap);
209 : }
210 : }
211 :
212 3935660 : LocalValueMap.clear();
213 3935660 : LastLocalValue = EmitStartPt;
214 3935660 : recomputeInsertPt();
215 3935660 : SavedInsertPt = FuncInfo.InsertPt;
216 3935660 : LastFlushPoint = FuncInfo.InsertPt;
217 3935660 : }
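// For illustration only (assumed MIR; the opcodes are hypothetical): the
// sinking above turns
//
//   %0 = MOV32ri 42          ; local value materialized at the block top
//   <unrelated instructions>
//   %1 = ADD32rr %2, %0      ; first use of %0
//
// into
//
//   <unrelated instructions>
//   %0 = MOV32ri 42          ; sunk to its first use, taking its DebugLoc
//   %1 = ADD32rr %2, %0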
218 :
219 : static bool isRegUsedByPhiNodes(unsigned DefReg,
220 : FunctionLoweringInfo &FuncInfo) {
221 1564473 : for (auto &P : FuncInfo.PHINodesToUpdate)
222 44166 : if (P.second == DefReg)
223 : return true;
224 : return false;
225 : }
226 :
227 : /// Build a map of instruction orders. Record the first terminator and its
228 : /// order. Consider EH_LABEL instructions to be terminators as well, since local
229 : /// values for phis after invokes must be materialized before the call.
230 885145 : void FastISel::InstOrderMap::initialize(
231 : MachineBasicBlock *MBB, MachineBasicBlock::iterator LastFlushPoint) {
232 : unsigned Order = 0;
233 13658580 : for (MachineInstr &I : *MBB) {
234 26482050 : if (!FirstTerminator &&
235 12981387 : (I.isTerminator() || (I.isEHLabel() && &I != &MBB->front()))) {
236 255378 : FirstTerminator = &I;
237 255378 : FirstTerminatorOrder = Order;
238 : }
239 13245285 : Orders[&I] = Order++;
240 :
241 : // We don't need to order instructions past the last flush point.
242 26490570 : if (I.getIterator() == LastFlushPoint)
243 : break;
244 : }
245 885145 : }
246 :
247 1579072 : void FastISel::sinkLocalValueMaterialization(MachineInstr &LocalMI,
248 : unsigned DefReg,
249 : InstOrderMap &OrderMap) {
250 : // If this register is used by a register fixup, MRI will not contain all
251 : // the uses until after register fixups, so don't attempt to sink or DCE
252 : // this instruction. Register fixups typically come from no-op cast
253 : // instructions, which replace the cast instruction vreg with the local
254 : // value vreg.
255 1579072 : if (FuncInfo.RegsWithFixups.count(DefReg))
256 55369 : return;
257 :
258 :   // We can DCE this instruction if there are no uses and it wasn't
259 :   // materialized for a successor PHI node.
260 1529465 : bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
261 1529465 : if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
262 5762 : if (EmitStartPt == &LocalMI)
263 0 : EmitStartPt = EmitStartPt->getPrevNode();
264 : LLVM_DEBUG(dbgs() << "removing dead local value materialization "
265 : << LocalMI);
266 5762 : OrderMap.Orders.erase(&LocalMI);
267 5762 : LocalMI.eraseFromParent();
268 5762 : return;
269 : }
270 :
271 : // Number the instructions if we haven't yet so we can efficiently find the
272 : // earliest use.
273 1523703 : if (OrderMap.Orders.empty())
274 885145 : OrderMap.initialize(FuncInfo.MBB, LastFlushPoint);
275 :
276 : // Find the first user in the BB.
277 : MachineInstr *FirstUser = nullptr;
278 : unsigned FirstOrder = std::numeric_limits<unsigned>::max();
279 3081430 : for (MachineInstr &UseInst : MRI.use_nodbg_instructions(DefReg)) {
280 1557727 : auto I = OrderMap.Orders.find(&UseInst);
281 : assert(I != OrderMap.Orders.end() &&
282 : "local value used by instruction outside local region");
283 1557727 : unsigned UseOrder = I->second;
284 1557727 : if (UseOrder < FirstOrder) {
285 : FirstOrder = UseOrder;
286 : FirstUser = &UseInst;
287 : }
288 : }
289 :
290 : // The insertion point will be the first terminator or the first user,
291 : // whichever came first. If there was no terminator, this must be a
292 : // fallthrough block and the insertion point is the end of the block.
293 : MachineBasicBlock::instr_iterator SinkPos;
294 1523703 : if (UsedByPHI && OrderMap.FirstTerminatorOrder < FirstOrder) {
295 : FirstOrder = OrderMap.FirstTerminatorOrder;
296 9138 : SinkPos = OrderMap.FirstTerminator->getIterator();
297 1514565 : } else if (FirstUser) {
298 1514545 : SinkPos = FirstUser->getIterator();
299 : } else {
300 : assert(UsedByPHI && "must be users if not used by a phi");
301 :     assert(UsedByPHI && "must have users if not used by a phi");
302 : }
303 :
304 : // Collect all DBG_VALUEs before the new insertion position so that we can
305 : // sink them.
306 : SmallVector<MachineInstr *, 1> DbgValues;
307 3081441 : for (MachineInstr &DbgVal : MRI.use_instructions(DefReg)) {
308 1557738 : if (!DbgVal.isDebugValue())
309 : continue;
310 11 : unsigned UseOrder = OrderMap.Orders[&DbgVal];
311 11 : if (UseOrder < FirstOrder)
312 11 : DbgValues.push_back(&DbgVal);
313 : }
314 :
315 : // Sink LocalMI before SinkPos and assign it the same DebugLoc.
316 : LLVM_DEBUG(dbgs() << "sinking local value to first use " << LocalMI);
317 1523703 : FuncInfo.MBB->remove(&LocalMI);
318 1523703 : FuncInfo.MBB->insert(SinkPos, &LocalMI);
319 3047406 : if (SinkPos != FuncInfo.MBB->end())
320 1523683 : LocalMI.setDebugLoc(SinkPos->getDebugLoc());
321 :
322 : // Sink any debug values that we've collected.
323 1523714 : for (MachineInstr *DI : DbgValues) {
324 11 : FuncInfo.MBB->remove(DI);
325 11 : FuncInfo.MBB->insert(SinkPos, DI);
326 : }
327 : }
328 :
329 9223805 : bool FastISel::hasTrivialKill(const Value *V) {
330 : // Don't consider constants or arguments to have trivial kills.
331 : const Instruction *I = dyn_cast<Instruction>(V);
332 : if (!I)
333 : return false;
334 :
335 : // No-op casts are trivially coalesced by fast-isel.
336 : if (const auto *Cast = dyn_cast<CastInst>(I))
337 1852342 : if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
338 : return false;
339 :
340 :   // Even if the value has only one use in the LLVM IR, FastISel might fold
341 :   // the use into another instruction, leaving more than one use at the
342 :   // machine instruction level.
343 8129950 : unsigned Reg = lookUpRegForValue(V);
344 8129950 : if (Reg && !MRI.use_empty(Reg))
345 : return false;
346 :
347 : // GEPs with all zero indices are trivially coalesced by fast-isel.
348 : if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
349 1630966 : if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
350 : return false;
351 :
352 : // Only instructions with a single use in the same basic block are considered
353 : // to have trivial kills.
354 6507049 : return I->hasOneUse() &&
355 5958056 : !(I->getOpcode() == Instruction::BitCast ||
356 : I->getOpcode() == Instruction::PtrToInt ||
357 7385758 : I->getOpcode() == Instruction::IntToPtr) &&
358 5853397 : cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
359 : }
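// For illustration only (assumed IR): given
//
//   %a = add i32 %x, 1
//   ret i32 %a
//
// %a has a single use in its own basic block and is not a bitcast,
// ptrtoint, or inttoptr, so hasTrivialKill returns true and the use of
// %a's vreg can be marked as a kill.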
360 :
361 16092397 : unsigned FastISel::getRegForValue(const Value *V) {
362 16092397 : EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
363 : // Don't handle non-simple values in FastISel.
364 16092397 : if (!RealVT.isSimple())
365 : return 0;
366 :
367 : // Ignore illegal types. We must do this before looking up the value
368 : // in ValueMap because Arguments are given virtual registers regardless
369 : // of whether FastISel can handle them.
370 : MVT VT = RealVT.getSimpleVT();
371 16092310 : if (!TLI.isTypeLegal(VT)) {
372 : // Handle integer promotions, though, because they're common and easy.
373 228842 : if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
374 451476 : VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
375 : else
376 : return 0;
377 : }
378 :
379 : // Look up the value to see if we already have a register for it.
380 16089206 : unsigned Reg = lookUpRegForValue(V);
381 16089206 : if (Reg)
382 : return Reg;
383 :
384 : // In bottom-up mode, just create the virtual register which will be used
385 : // to hold the value. It will be materialized later.
386 10468319 : if (isa<Instruction>(V) &&
387 : (!isa<AllocaInst>(V) ||
388 1710788 : !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
389 8298630 : return FuncInfo.InitializeRegForValue(V);
390 :
391 2169689 : SavePoint SaveInsertPt = enterLocalValueArea();
392 :
393 : // Materialize the value in a register. Emit any instructions in the
394 : // local value area.
395 2169689 : Reg = materializeRegForValue(V, VT);
396 :
397 4339378 : leaveLocalValueArea(SaveInsertPt);
398 :
399 : return Reg;
400 : }
401 :
402 1343739 : unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
403 : unsigned Reg = 0;
404 : if (const auto *CI = dyn_cast<ConstantInt>(V)) {
405 121 : if (CI->getValue().getActiveBits() <= 64)
406 242 : Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
407 : } else if (isa<AllocaInst>(V))
408 855379 : Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
409 488239 : else if (isa<ConstantPointerNull>(V))
410 : // Translate this as an integer zero so that it can be
411 : // local-CSE'd with actual integer zeros.
412 13386 : Reg = getRegForValue(
413 13386 : Constant::getNullValue(DL.getIntPtrType(V->getContext())));
414 : else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
415 86 : if (CF->isNullValue())
416 17 : Reg = fastMaterializeFloatZero(CF);
417 : else
418 : // Try to emit the constant directly.
419 69 : Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
420 :
421 86 : if (!Reg) {
422 : // Try to emit the constant by using an integer constant with a cast.
423 : const APFloat &Flt = CF->getValueAPF();
424 82 : EVT IntVT = TLI.getPointerTy(DL);
425 82 : uint32_t IntBitWidth = IntVT.getSizeInBits();
426 82 : APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
427 : bool isExact;
428 82 : (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
429 82 : if (isExact) {
430 : unsigned IntegerReg =
431 17 : getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
432 17 : if (IntegerReg != 0)
433 17 : Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
434 17 : /*Kill=*/false);
435 : }
436 : }
437 : } else if (const auto *Op = dyn_cast<Operator>(V)) {
438 474250 : if (!selectOperator(Op, Op->getOpcode()))
439 277 : if (!isa<Instruction>(Op) ||
440 0 : !fastSelectInstruction(cast<Instruction>(Op)))
441 : return 0;
442 473973 : Reg = lookUpRegForValue(Op);
443 517 : } else if (isa<UndefValue>(V)) {
444 170 : Reg = createResultReg(TLI.getRegClassFor(VT));
445 170 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
446 340 : TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
447 : }
448 : return Reg;
449 : }
450 :
451 : /// Helper for getRegForValue. This function is called when the value isn't
452 : /// already available in a register and must be materialized with new
453 : /// instructions.
454 2169689 : unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
455 : unsigned Reg = 0;
456 : // Give the target-specific code a try first.
457 4339378 : if (isa<Constant>(V))
458 1314310 : Reg = fastMaterializeConstant(cast<Constant>(V));
459 :
460 : // If target-specific code couldn't or didn't want to handle the value, then
461 : // give target-independent code a try.
462 1314310 : if (!Reg)
463 1343739 : Reg = materializeConstant(V, VT);
464 :
465 : // Don't cache constant materializations in the general ValueMap.
466 : // To do so would require tracking what uses they dominate.
467 2169689 : if (Reg) {
468 2168983 : LocalValueMap[V] = Reg;
469 2168983 : LastLocalValue = MRI.getVRegDef(Reg);
470 : }
471 2169689 : return Reg;
472 : }
473 :
474 24693810 : unsigned FastISel::lookUpRegForValue(const Value *V) {
475 : // Look up the value to see if we already have a register for it. We
476 : // cache values defined by Instructions across blocks, and other values
477 : // only locally. This is because Instructions already have the SSA
478 : // def-dominates-use requirement enforced.
479 24693810 : DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
480 49387620 : if (I != FuncInfo.ValueMap.end())
481 11948331 : return I->second;
482 12745479 : return LocalValueMap[V];
483 : }
484 :
485 8990272 : void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
486 17980544 : if (!isa<Instruction>(I)) {
487 770534 : LocalValueMap[I] = Reg;
488 770534 : return;
489 : }
490 :
491 8219738 : unsigned &AssignedReg = FuncInfo.ValueMap[I];
492 8219738 : if (AssignedReg == 0)
493 : // Use the new register.
494 35776 : AssignedReg = Reg;
495 8183962 : else if (Reg != AssignedReg) {
496 : // Arrange for uses of AssignedReg to be replaced by uses of Reg.
497 16373025 : for (unsigned i = 0; i < NumRegs; i++) {
498 8189063 : FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
499 8189063 : FuncInfo.RegsWithFixups.insert(Reg + i);
500 : }
501 :
502 8183962 : AssignedReg = Reg;
503 : }
504 : }
505 :
506 99649 : std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
507 99649 : unsigned IdxN = getRegForValue(Idx);
508 99649 : if (IdxN == 0)
509 : // Unhandled operand. Halt "fast" selection and bail.
510 9 : return std::pair<unsigned, bool>(0, false);
511 :
512 99640 : bool IdxNIsKill = hasTrivialKill(Idx);
513 :
514 : // If the index is smaller or larger than intptr_t, truncate or extend it.
515 99640 : MVT PtrVT = TLI.getPointerTy(DL);
516 99640 : EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
517 99640 : if (IdxVT.bitsLT(PtrVT)) {
518 12 : IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
519 12 : IdxNIsKill);
520 : IdxNIsKill = true;
521 99628 : } else if (IdxVT.bitsGT(PtrVT)) {
522 : IdxN =
523 9 : fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
524 : IdxNIsKill = true;
525 : }
526 99640 : return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
527 : }
528 :
529 38446508 : void FastISel::recomputeInsertPt() {
530 38446508 : if (getLastLocalValue()) {
531 16486866 : FuncInfo.InsertPt = getLastLocalValue();
532 16486866 : FuncInfo.MBB = FuncInfo.InsertPt->getParent();
533 16486866 : ++FuncInfo.InsertPt;
534 : } else
535 21959642 : FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
536 :
537 : // Now skip past any EH_LABELs, which must remain at the beginning.
538 77076306 : while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
539 31195249 : FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
540 : ++FuncInfo.InsertPt;
541 38446508 : }
542 :
543 2223 : void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
544 : MachineBasicBlock::iterator E) {
545 : assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
546 : "Invalid iterator!");
547 5018 : while (I != E) {
548 : MachineInstr *Dead = &*I;
549 : ++I;
550 2795 : Dead->eraseFromParent();
551 : ++NumFastIselDead;
552 : }
553 2223 : recomputeInsertPt();
554 2223 : }
555 :
556 2169761 : FastISel::SavePoint FastISel::enterLocalValueArea() {
557 2169761 : MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
558 : DebugLoc OldDL = DbgLoc;
559 2169761 : recomputeInsertPt();
560 2169761 : DbgLoc = DebugLoc();
561 2169761 : SavePoint SP = {OldInsertPt, OldDL};
562 2169761 : return SP;
563 : }
564 :
565 2169761 : void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
566 4339522 : if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
567 2169565 : LastLocalValue = &*std::prev(FuncInfo.InsertPt);
568 :
569 : // Restore the previous insert position.
570 2169761 : FuncInfo.InsertPt = OldInsertPt.InsertPt;
571 : DbgLoc = OldInsertPt.DL;
572 2169761 : }
573 :
574 432689 : bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
575 432689 : EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
576 432689 : if (VT == MVT::Other || !VT.isSimple())
577 : // Unhandled type. Halt "fast" selection and bail.
578 : return false;
579 :
580 : // We only handle legal types. For example, on x86-32 the instruction
581 : // selector contains all of the 64-bit instructions from x86-64,
582 : // under the assumption that i64 won't be used if the target doesn't
583 : // support it.
584 432689 : if (!TLI.isTypeLegal(VT)) {
585 : // MVT::i1 is special. Allow AND, OR, or XOR because they
586 : // don't require additional zeroing, which makes them easy.
587 22024 : if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
588 : ISDOpcode == ISD::XOR))
589 44034 : VT = TLI.getTypeToTransformTo(I->getContext(), VT);
590 : else
591 : return false;
592 : }
593 :
594 : // Check if the first operand is a constant, and handle it as "ri". At -O0,
595 : // we don't have anything that canonicalizes operand order.
596 : if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
597 28976 : if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
598 12669 : unsigned Op1 = getRegForValue(I->getOperand(1));
599 12669 : if (!Op1)
600 : return false;
601 12669 : bool Op1IsKill = hasTrivialKill(I->getOperand(1));
602 :
603 : unsigned ResultReg =
604 12669 : fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
605 : CI->getZExtValue(), VT.getSimpleVT());
606 12669 : if (!ResultReg)
607 : return false;
608 :
609 : // We successfully emitted code for the given LLVM Instruction.
610 12669 : updateValueMap(I, ResultReg);
611 12669 : return true;
612 : }
613 :
614 419907 : unsigned Op0 = getRegForValue(I->getOperand(0));
615 419907 : if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
616 : return false;
617 419746 : bool Op0IsKill = hasTrivialKill(I->getOperand(0));
618 :
619 : // Check if the second operand is a constant and handle it appropriately.
620 : if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
621 265817 : uint64_t Imm = CI->getSExtValue();
622 :
623 : // Transform "sdiv exact X, 8" -> "sra X, 3".
624 49266 : if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
625 315083 : cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
626 33245 : Imm = Log2_64(Imm);
627 : ISDOpcode = ISD::SRA;
628 : }
629 :
630 : // Transform "urem x, pow2" -> "and x, pow2-1".
631 232572 : if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
632 : isPowerOf2_64(Imm)) {
633 : --Imm;
634 : ISDOpcode = ISD::AND;
635 : }
636 :
637 265817 : unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
638 : Op0IsKill, Imm, VT.getSimpleVT());
639 265817 : if (!ResultReg)
640 : return false;
641 :
642 : // We successfully emitted code for the given LLVM Instruction.
643 245183 : updateValueMap(I, ResultReg);
644 245183 : return true;
645 : }
646 :
647 153929 : unsigned Op1 = getRegForValue(I->getOperand(1));
648 153929 : if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
649 : return false;
650 153870 : bool Op1IsKill = hasTrivialKill(I->getOperand(1));
651 :
652 : // Now we have both operands in registers. Emit the instruction.
653 153870 : unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
654 153870 : ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
655 153870 : if (!ResultReg)
656 : // Target-specific code wasn't able to find a machine opcode for
657 : // the given ISD opcode and type. Halt "fast" selection and bail.
658 : return false;
659 :
660 : // We successfully emitted code for the given LLVM Instruction.
661 139777 : updateValueMap(I, ResultReg);
662 139777 : return true;
663 : }
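// For illustration only (assumed IR): "urem i32 %x, 8" takes the
// constant-RHS path above and is emitted via fastEmit_ri_ as an AND with
// 7, and "sdiv exact i32 %x, 8" becomes an arithmetic shift right by 3.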
664 :
665 1397006 : bool FastISel::selectGetElementPtr(const User *I) {
666 1397006 : unsigned N = getRegForValue(I->getOperand(0));
667 1397006 : if (!N) // Unhandled operand. Halt "fast" selection and bail.
668 : return false;
669 1396946 : bool NIsKill = hasTrivialKill(I->getOperand(0));
670 :
671 : // Keep a running tab of the total offset to coalesce multiple N = N + Offset
672 : // into a single N = N + TotalOffset.
673 : uint64_t TotalOffs = 0;
674 : // FIXME: What's a good SWAG number for MaxOffs?
675 : uint64_t MaxOffs = 2048;
676 1396946 : MVT VT = TLI.getPointerTy(DL);
677 4022265 : for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
678 6647584 : GTI != E; ++GTI) {
679 : const Value *Idx = GTI.getOperand();
680 723463 : if (StructType *StTy = GTI.getStructTypeOrNull()) {
681 : uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
682 723463 : if (Field) {
683 : // N = N + Offset
684 198066 : TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
685 198066 : if (TotalOffs >= MaxOffs) {
686 452 : N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
687 452 : if (!N) // Unhandled operand. Halt "fast" selection and bail.
688 17 : return false;
689 : NIsKill = true;
690 : TotalOffs = 0;
691 : }
692 : }
693 : } else {
694 1901873 : Type *Ty = GTI.getIndexedType();
695 :
696 : // If this is a constant subscript, handle it quickly.
697 : if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
698 1831329 : if (CI->isZero())
699 1831329 : continue;
700 : // N = N + Offset
701 272876 : uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
702 136438 : TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
703 136438 : if (TotalOffs >= MaxOffs) {
704 17001 : N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
705 17001 : if (!N) // Unhandled operand. Halt "fast" selection and bail.
706 17 : return false;
707 : NIsKill = true;
708 : TotalOffs = 0;
709 : }
710 136438 : continue;
711 : }
712 70544 : if (TotalOffs) {
713 0 : N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
714 0 : if (!N) // Unhandled operand. Halt "fast" selection and bail.
715 : return false;
716 : NIsKill = true;
717 : TotalOffs = 0;
718 : }
719 :
720 : // N = N + Idx * ElementSize;
721 70544 : uint64_t ElementSize = DL.getTypeAllocSize(Ty);
722 70544 : std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
723 70544 : unsigned IdxN = Pair.first;
724 70544 : bool IdxNIsKill = Pair.second;
725 70544 : if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
726 : return false;
727 :
728 70529 : if (ElementSize != 1) {
729 50610 : IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
730 50610 : if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
731 : return false;
732 : IdxNIsKill = true;
733 : }
734 70527 : N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
735 70527 : if (!N) // Unhandled operand. Halt "fast" selection and bail.
736 : return false;
737 : }
738 : }
739 1396929 : if (TotalOffs) {
740 316606 : N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
741 316606 : if (!N) // Unhandled operand. Halt "fast" selection and bail.
742 : return false;
743 : }
744 :
745 : // We successfully emitted code for the given LLVM Instruction.
746 1396929 : updateValueMap(I, N);
747 1396929 : return true;
748 : }
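// For illustration only (assumed IR): for
//
//   %p = getelementptr {i32, i32}, {i32, i32}* %s, i64 %i, i32 1
//
// the loop above emits, in the pointer-width VT, roughly:
//
//   IdxN = MUL %i, 8         ; scale the variable index by the element size
//   N    = ADD %s, IdxN
//   N    = ADD N, 4          ; field offset, coalesced through TotalOffs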
749 :
750 65 : bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
751 : const CallInst *CI, unsigned StartIdx) {
752 249 : for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
753 : Value *Val = CI->getArgOperand(i);
754 : // Check for constants and encode them with a StackMaps::ConstantOp prefix.
755 : if (const auto *C = dyn_cast<ConstantInt>(Val)) {
756 36 : Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
757 18 : Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
758 101 : } else if (isa<ConstantPointerNull>(Val)) {
759 0 : Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
760 0 : Ops.push_back(MachineOperand::CreateImm(0));
761 : } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
762 : // Values coming from a stack location also require a special encoding,
763 : // but that is added later on by the target specific frame index
764 : // elimination implementation.
765 8 : auto SI = FuncInfo.StaticAllocaMap.find(AI);
766 16 : if (SI != FuncInfo.StaticAllocaMap.end())
767 16 : Ops.push_back(MachineOperand::CreateFI(SI->second));
768 : else
769 0 : return false;
770 : } else {
771 93 : unsigned Reg = getRegForValue(Val);
772 93 : if (!Reg)
773 : return false;
774 186 : Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
775 : }
776 : }
777 : return true;
778 : }
779 :
780 26 : bool FastISel::selectStackmap(const CallInst *I) {
781 : // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
782 : // [live variables...])
783 : assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
784 : "Stackmap cannot return a value.");
785 :
786 : // The stackmap intrinsic only records the live variables (the arguments
787 :   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
788 : // intrinsic, this won't be lowered to a function call. This means we don't
789 : // have to worry about calling conventions and target-specific lowering code.
790 : // Instead we perform the call lowering right here.
791 : //
792 : // CALLSEQ_START(0, 0...)
793 : // STACKMAP(id, nbytes, ...)
794 : // CALLSEQ_END(0, 0)
795 : //
796 : SmallVector<MachineOperand, 32> Ops;
797 :
798 : // Add the <id> and <numBytes> constants.
799 : assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
800 : "Expected a constant integer.");
801 26 : const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
802 52 : Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
803 :
804 : assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
805 : "Expected a constant integer.");
806 : const auto *NumBytes =
807 : cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
808 52 : Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
809 :
810 : // Push live variables for the stack map (skipping the first two arguments
811 : // <id> and <numBytes>).
812 26 : if (!addStackMapLiveVars(Ops, I, 2))
813 : return false;
814 :
815 : // We are not adding any register mask info here, because the stackmap doesn't
816 : // clobber anything.
817 :
818 : // Add scratch registers as implicit def and early clobber.
819 : CallingConv::ID CC = I->getCallingConv();
820 26 : const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
821 82 : for (unsigned i = 0; ScratchRegs[i]; ++i)
822 56 : Ops.push_back(MachineOperand::CreateReg(
823 : ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
824 56 : /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
825 :
826 : // Issue CALLSEQ_START
827 26 : unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
828 : auto Builder =
829 52 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
830 26 : const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
831 89 : for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
832 : Builder.addImm(0);
833 :
834 : // Issue STACKMAP.
835 26 : MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
836 52 : TII.get(TargetOpcode::STACKMAP));
837 231 : for (auto const &MO : Ops)
838 : MIB.add(MO);
839 :
840 : // Issue CALLSEQ_END
841 26 : unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
842 52 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
843 : .addImm(0)
844 : .addImm(0);
845 :
846 : // Inform the Frame Information that we have a stackmap in this function.
847 26 : FuncInfo.MF->getFrameInfo().setHasStackMap();
848 :
849 26 : return true;
850 : }
851 :
852 : /// Lower an argument list according to the target calling convention.
853 : ///
854 : /// This is a helper for lowering intrinsics that follow a target calling
855 : /// convention or require stack pointer adjustment. Only a subset of the
856 : /// intrinsic's operands need to participate in the calling convention.
857 39 : bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
858 : unsigned NumArgs, const Value *Callee,
859 : bool ForceRetVoidTy, CallLoweringInfo &CLI) {
860 : ArgListTy Args;
861 39 : Args.reserve(NumArgs);
862 :
863 : // Populate the argument list.
864 : ImmutableCallSite CS(CI);
865 154 : for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
866 115 : Value *V = CI->getOperand(ArgI);
867 :
868 : assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
869 :
870 : ArgListEntry Entry;
871 115 : Entry.Val = V;
872 115 : Entry.Ty = V->getType();
873 115 : Entry.setAttributes(&CS, ArgI);
874 115 : Args.push_back(Entry);
875 : }
876 :
877 39 : Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
878 38 : : CI->getType();
879 : CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
880 :
881 39 : return lowerCallTo(CLI);
882 : }
883 :
884 16 : FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
885 : const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
886 : StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
887 : SmallString<32> MangledName;
888 16 : Mangler::getNameWithPrefix(MangledName, Target, DL);
889 16 : MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
890 16 : return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
891 : }
892 :
893 39 : bool FastISel::selectPatchpoint(const CallInst *I) {
894 : // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
895 : // i32 <numBytes>,
896 : // i8* <target>,
897 : // i32 <numArgs>,
898 : // [Args...],
899 : // [live variables...])
900 : CallingConv::ID CC = I->getCallingConv();
901 39 : bool IsAnyRegCC = CC == CallingConv::AnyReg;
902 39 : bool HasDef = !I->getType()->isVoidTy();
903 39 : Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
904 :
905 : // Get the real number of arguments participating in the call <numArgs>
906 : assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
907 : "Expected a constant integer.");
908 : const auto *NumArgsVal =
909 : cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
910 39 : unsigned NumArgs = NumArgsVal->getZExtValue();
911 :
912 : // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
913 : // This includes all meta-operands up to but not including CC.
914 : unsigned NumMetaOpers = PatchPointOpers::CCPos;
915 : assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
916 : "Not enough arguments provided to the patchpoint intrinsic");
917 :
918 : // For AnyRegCC the arguments are lowered later on manually.
919 39 : unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
920 78 : CallLoweringInfo CLI;
921 : CLI.setIsPatchPoint();
922 39 : if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
923 : return false;
924 :
925 : assert(CLI.Call && "No call instruction specified.");
926 :
927 : SmallVector<MachineOperand, 32> Ops;
928 :
929 : // Add an explicit result reg if we use the anyreg calling convention.
930 39 : if (IsAnyRegCC && HasDef) {
931 : assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
932 0 : CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
933 0 : CLI.NumResultRegs = 1;
934 0 : Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
935 : }
936 :
937 : // Add the <id> and <numBytes> constants.
938 : assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
939 : "Expected a constant integer.");
940 : const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
941 78 : Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
942 :
943 : assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
944 : "Expected a constant integer.");
945 : const auto *NumBytes =
946 : cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
947 78 : Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
948 :
949 : // Add the call target.
950 : if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
951 : uint64_t CalleeConstAddr =
952 : cast<ConstantInt>(C->getOperand(0))->getZExtValue();
953 36 : Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
954 : } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
955 4 : if (C->getOpcode() == Instruction::IntToPtr) {
956 : uint64_t CalleeConstAddr =
957 : cast<ConstantInt>(C->getOperand(0))->getZExtValue();
958 8 : Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
959 : } else
960 0 : llvm_unreachable("Unsupported ConstantExpr.");
961 : } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
962 4 : Ops.push_back(MachineOperand::CreateGA(GV, 0));
963 13 : } else if (isa<ConstantPointerNull>(Callee))
964 13 : Ops.push_back(MachineOperand::CreateImm(0));
965 : else
966 0 : llvm_unreachable("Unsupported callee address.");
967 :
968 : // Adjust <numArgs> to account for any arguments that have been passed on
969 : // the stack instead.
970 39 : unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
971 78 : Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
972 :
973 : // Add the calling convention
974 78 : Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
975 :
976 : // Add the arguments we omitted previously. The register allocator should
977 : // place these in any free register.
978 39 : if (IsAnyRegCC) {
979 3 : for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
980 2 : unsigned Reg = getRegForValue(I->getArgOperand(i));
981 2 : if (!Reg)
982 : return false;
983 2 : Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
984 : }
985 : }
986 :
987 : // Push the arguments from the call instruction.
988 90 : for (auto Reg : CLI.OutRegs)
989 51 : Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
990 :
991 : // Push live variables for the stack map.
992 39 : if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
993 : return false;
994 :
995 : // Push the register mask info.
996 39 : Ops.push_back(MachineOperand::CreateRegMask(
997 39 : TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
998 :
999 : // Add scratch registers as implicit def and early clobber.
1000 39 : const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
1001 132 : for (unsigned i = 0; ScratchRegs[i]; ++i)
1002 93 : Ops.push_back(MachineOperand::CreateReg(
1003 : ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
1004 93 : /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
1005 :
1006 : // Add implicit defs (return values).
1007 60 : for (auto Reg : CLI.InRegs)
1008 21 : Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
1009 : /*IsImpl=*/true));
1010 :
1011 : // Insert the patchpoint instruction before the call generated by the target.
1012 78 : MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
1013 78 : TII.get(TargetOpcode::PATCHPOINT));
1014 :
1015 480 : for (auto &MO : Ops)
1016 : MIB.add(MO);
1017 :
1018 78 : MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
1019 :
1020 : // Delete the original call instruction.
1021 39 : CLI.Call->eraseFromParent();
1022 :
1023 : // Inform the Frame Information that we have a patchpoint in this function.
1024 39 : FuncInfo.MF->getFrameInfo().setHasPatchPoint();
1025 :
1026 39 : if (CLI.NumResultRegs)
1027 21 : updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
1028 : return true;
1029 : }
1030 :
1031 0 : bool FastISel::selectXRayCustomEvent(const CallInst *I) {
1032 0 : const auto &Triple = TM.getTargetTriple();
1033 0 : if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
1034 : return true; // don't do anything to this instruction.
1035 : SmallVector<MachineOperand, 8> Ops;
1036 0 : Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
1037 0 : /*IsDef=*/false));
1038 0 : Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
1039 0 : /*IsDef=*/false));
1040 : MachineInstrBuilder MIB =
1041 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1042 0 : TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
1043 0 : for (auto &MO : Ops)
1044 : MIB.add(MO);
1045 :
1046 :   // Insert the Patchable Event Call instruction, which gets lowered properly.
1047 : return true;
1048 : }
1049 :
1050 0 : bool FastISel::selectXRayTypedEvent(const CallInst *I) {
1051 0 : const auto &Triple = TM.getTargetTriple();
1052 0 : if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
1053 : return true; // don't do anything to this instruction.
1054 : SmallVector<MachineOperand, 8> Ops;
1055 0 : Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
1056 0 : /*IsDef=*/false));
1057 0 : Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
1058 0 : /*IsDef=*/false));
1059 0 : Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
1060 0 : /*IsDef=*/false));
1061 : MachineInstrBuilder MIB =
1062 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1063 0 : TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
1064 0 : for (auto &MO : Ops)
1065 : MIB.add(MO);
1066 :
1067 : // Insert the Patchable Typed Event Call instruction, that gets lowered properly.
1068 :   // Insert the Patchable Typed Event Call instruction, which gets lowered properly.
1069 : }
1070 :
1071 : /// Returns an AttributeList representing the attributes applied to the return
1072 : /// value of the given call.
1073 1172395 : static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
1074 : SmallVector<Attribute::AttrKind, 2> Attrs;
1075 1172395 : if (CLI.RetSExt)
1076 2547 : Attrs.push_back(Attribute::SExt);
1077 1172395 : if (CLI.RetZExt)
1078 38850 : Attrs.push_back(Attribute::ZExt);
1079 1172395 : if (CLI.IsInReg)
1080 0 : Attrs.push_back(Attribute::InReg);
1081 :
1082 1172395 : return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
1083 1172395 : Attrs);
1084 : }
1085 :
1086 44101 : bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
1087 : unsigned NumArgs) {
1088 44101 : MCContext &Ctx = MF->getContext();
1089 : SmallString<32> MangledName;
1090 88202 : Mangler::getNameWithPrefix(MangledName, SymName, DL);
1091 44101 : MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
1092 44101 : return lowerCallTo(CI, Sym, NumArgs);
1093 : }
1094 :
1095 44101 : bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
1096 : unsigned NumArgs) {
1097 : ImmutableCallSite CS(CI);
1098 :
1099 : FunctionType *FTy = CS.getFunctionType();
1100 : Type *RetTy = CS.getType();
1101 :
1102 : ArgListTy Args;
1103 44101 : Args.reserve(NumArgs);
1104 :
1105 : // Populate the argument list.
1106 : // Attributes for args start at offset 1, after the return attribute.
1107 176404 : for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
1108 132303 : Value *V = CI->getOperand(ArgI);
1109 :
1110 : assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
1111 :
1112 : ArgListEntry Entry;
1113 132303 : Entry.Val = V;
1114 132303 : Entry.Ty = V->getType();
1115 132303 : Entry.setAttributes(&CS, ArgI);
1116 132303 : Args.push_back(Entry);
1117 : }
1118 88202 : TLI.markLibCallAttributes(MF, CS.getCallingConv(), Args);
1119 :
1120 88202 : CallLoweringInfo CLI;
1121 44101 : CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
1122 :
1123 44101 : return lowerCallTo(CLI);
1124 : }
1125 :
1126 1172395 : bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
1127 : // Handle the incoming return values from the call.
1128 : CLI.clearIns();
1129 : SmallVector<EVT, 4> RetTys;
1130 1172395 : ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
1131 :
1132 : SmallVector<ISD::OutputArg, 4> Outs;
1133 1172395 : GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
1134 :
1135 4689580 : bool CanLowerReturn = TLI.CanLowerReturn(
1136 1172395 : CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
1137 :
1138 : // FIXME: sret demotion isn't supported yet - bail out.
1139 1172395 : if (!CanLowerReturn)
1140 : return false;
1141 :
1142 1489713 : for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
1143 317322 : EVT VT = RetTys[I];
1144 317322 : MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
1145 317322 : unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
1146 634644 : for (unsigned i = 0; i != NumRegs; ++i) {
1147 : ISD::InputArg MyFlags;
1148 317322 : MyFlags.VT = RegisterVT;
1149 317322 : MyFlags.ArgVT = VT;
1150 317322 : MyFlags.Used = CLI.IsReturnValueUsed;
1151 317322 : if (CLI.RetSExt)
1152 : MyFlags.Flags.setSExt();
1153 317322 : if (CLI.RetZExt)
1154 : MyFlags.Flags.setZExt();
1155 317322 : if (CLI.IsInReg)
1156 : MyFlags.Flags.setInReg();
1157 317322 : CLI.Ins.push_back(MyFlags);
1158 : }
1159 : }
1160 :
1161 : // Handle all of the outgoing arguments.
1162 : CLI.clearOuts();
1163 3140897 : for (auto &Arg : CLI.getArgs()) {
1164 1968506 : Type *FinalType = Arg.Ty;
1165 1968506 : if (Arg.IsByVal)
1166 871 : FinalType = cast<PointerType>(Arg.Ty)->getElementType();
1167 3937012 : bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
1168 1968506 : FinalType, CLI.CallConv, CLI.IsVarArg);
1169 :
1170 : ISD::ArgFlagsTy Flags;
1171 1968506 : if (Arg.IsZExt)
1172 : Flags.setZExt();
1173 1968506 : if (Arg.IsSExt)
1174 : Flags.setSExt();
1175 1968506 : if (Arg.IsInReg)
1176 : Flags.setInReg();
1177 1968506 : if (Arg.IsSRet)
1178 : Flags.setSRet();
1179 1968506 : if (Arg.IsSwiftSelf)
1180 : Flags.setSwiftSelf();
1181 1968506 : if (Arg.IsSwiftError)
1182 : Flags.setSwiftError();
1183 1968506 : if (Arg.IsByVal)
1184 : Flags.setByVal();
1185 1968506 : if (Arg.IsInAlloca) {
1186 : Flags.setInAlloca();
1187 : // Set the byval flag for CCAssignFn callbacks that don't know about
1188 : // inalloca. This way we can know how many bytes we should've allocated
1189 : // and how many bytes a callee cleanup function will pop. If we port
1190 : // inalloca to more targets, we'll have to add custom inalloca handling in
1191 : // the various CC lowering callbacks.
1192 : Flags.setByVal();
1193 : }
1194 1968506 : if (Arg.IsByVal || Arg.IsInAlloca) {
1195 877 : PointerType *Ty = cast<PointerType>(Arg.Ty);
1196 877 : Type *ElementTy = Ty->getElementType();
1197 877 : unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
1198 :       // For ByVal, alignment should come from the frontend; the backend will
1199 :       // guess if this info is not there, but there are cases it cannot get right.
1200 877 : unsigned FrameAlign = Arg.Alignment;
1201 877 : if (!FrameAlign)
1202 11 : FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
1203 : Flags.setByValSize(FrameSize);
1204 : Flags.setByValAlign(FrameAlign);
1205 : }
1206 1968506 : if (Arg.IsNest)
1207 : Flags.setNest();
1208 1968506 : if (NeedsRegBlock)
1209 : Flags.setInConsecutiveRegs();
1210 1968506 : unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
1211 : Flags.setOrigAlign(OriginalAlignment);
1212 :
1213 1968506 : CLI.OutVals.push_back(Arg.Val);
1214 1968506 : CLI.OutFlags.push_back(Flags);
1215 : }
1216 :
1217 1172391 : if (!fastLowerCall(CLI))
1218 : return false;
1219 :
1220 : // Set all unused physreg defs as dead.
1221 : assert(CLI.Call && "No call instruction specified.");
1222 2338350 : CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
1223 :
1224 1169175 : if (CLI.NumResultRegs && CLI.CS)
1225 626504 : updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);
1226 :
1227 : return true;
1228 : }
1229 :
1230 1128239 : bool FastISel::lowerCall(const CallInst *CI) {
1231 : ImmutableCallSite CS(CI);
1232 :
1233 : FunctionType *FuncTy = CS.getFunctionType();
1234 : Type *RetTy = CS.getType();
1235 :
1236 : ArgListTy Args;
1237 : ArgListEntry Entry;
1238 1128239 : Args.reserve(CS.arg_size());
1239 :
1240 2964306 : for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1241 2964306 : i != e; ++i) {
1242 1836067 : Value *V = *i;
1243 :
1244 : // Skip empty types
1245 1836067 : if (V->getType()->isEmptyTy())
1246 : continue;
1247 :
1248 1836067 : Entry.Val = V;
1249 1836067 : Entry.Ty = V->getType();
1250 :
1251 : // Skip the first return-type Attribute to get to params.
1252 1836067 : Entry.setAttributes(&CS, i - CS.arg_begin());
1253 1836067 : Args.push_back(Entry);
1254 : }
1255 :
1256 : // Check if target-independent constraints permit a tail call here.
1257 : // Target-dependent constraints are checked within fastLowerCall.
1258 : bool IsTailCall = CI->isTailCall();
1259 1128239 : if (IsTailCall && !isInTailCallPosition(CS, TM))
1260 : IsTailCall = false;
1261 :
1262 2256478 : CallLoweringInfo CLI;
1263 1128239 : CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
1264 : .setTailCall(IsTailCall);
1265 :
1266 1128239 : return lowerCallTo(CLI);
1267 : }
1268 :
1269 1416631 : bool FastISel::selectCall(const User *I) {
1270 : const CallInst *Call = cast<CallInst>(I);
1271 :
1272 : // Handle simple inline asms.
1273 : if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
1274 :     // If the inline asm has side effects, then make sure that no local value
1275 :     // lives across it by flushing the local value map.
1276 255 : if (IA->hasSideEffects())
1277 240 : flushLocalValueMap();
1278 :
1279 : // Don't attempt to handle constraints.
1280 255 : if (!IA->getConstraintString().empty())
1281 : return false;
1282 :
1283 : unsigned ExtraInfo = 0;
1284 10 : if (IA->hasSideEffects())
1285 : ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1286 10 : if (IA->isAlignStack())
1287 0 : ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1288 :
1289 10 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1290 20 : TII.get(TargetOpcode::INLINEASM))
1291 : .addExternalSymbol(IA->getAsmString().c_str())
1292 10 : .addImm(ExtraInfo);
1293 10 : return true;
1294 : }
1295 :
1296 1416376 : MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
1297 1416376 : computeUsesVAFloatArgument(*Call, MMI);
1298 :
1299 : // Handle intrinsic function calls.
1300 : if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1301 288137 : return selectIntrinsicCall(II);
1302 :
1303 : // Usually, it does not make sense to initialize a value,
1304 : // make an unrelated function call and use the value, because
1305 : // it tends to be spilled on the stack. So, we move the pointer
1306 : // to the last local value to the beginning of the block, so that
1307 :   // all the values which have already been materialized
1308 :   // appear after the call. It also makes sense to skip intrinsics
1309 : // since they tend to be inlined.
1310 1128239 : flushLocalValueMap();
1311 :
1312 1128239 : return lowerCall(Call);
1313 : }
1314 :
1315 288137 : bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
1316 288137 : switch (II->getIntrinsicID()) {
1317 : default:
1318 : break;
1319 : // At -O0 we don't care about the lifetime intrinsics.
1320 : case Intrinsic::lifetime_start:
1321 : case Intrinsic::lifetime_end:
1322 : // The donothing intrinsic does, well, nothing.
1323 : case Intrinsic::donothing:
1324 : // Neither does the sideeffect intrinsic.
1325 : case Intrinsic::sideeffect:
1326 : // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1327 : case Intrinsic::assume:
1328 : return true;
1329 : case Intrinsic::dbg_declare: {
1330 : const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1331 : assert(DI->getVariable() && "Missing variable");
1332 411 : if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1333 : LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1334 : return true;
1335 : }
1336 :
1337 : const Value *Address = DI->getAddress();
1338 411 : if (!Address || isa<UndefValue>(Address)) {
1339 : LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1340 : return true;
1341 : }
1342 :
1343 : // Byval arguments with frame indices were already handled after argument
1344 : // lowering and before isel.
1345 : const auto *Arg =
1346 407 : dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
1347 26 : if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
1348 : return true;
1349 :
1350 : Optional<MachineOperand> Op;
1351 397 : if (unsigned Reg = lookUpRegForValue(Address))
1352 : Op = MachineOperand::CreateReg(Reg, false);
1353 :
1354 : // If we have a VLA that has a "use" in a metadata node that's then used
1355 : // here but it has no other uses, then we have a problem. E.g.,
1356 : //
1357 : // int foo (const int *x) {
1358 : // char a[*x];
1359 : // return 0;
1360 : // }
1361 : //
1362 : // If we assign 'a' a vreg and fast isel later on has to use the selection
1363 : // DAG isel, it will want to copy the value to the vreg. However, there are
1364 : // no uses, which goes counter to what selection DAG isel expects.
1365 397 : if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1366 : (!isa<AllocaInst>(Address) ||
1367 664 : !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1368 1 : Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
1369 1 : false);
1370 :
1371 397 : if (Op) {
1372 : assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1373 : "Expected inlined-at fields to agree");
1374 : // A dbg.declare describes the address of a source variable, so lower it
1375 : // into an indirect DBG_VALUE.
1376 37 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1377 37 : TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
1378 74 : *Op, DI->getVariable(), DI->getExpression());
1379 : } else {
1380 : // We can't yet handle anything else here because it would require
1381 : // generating code, thus altering codegen because of debug info.
1382 : LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1383 : }
1384 : return true;
1385 : }
1386 : case Intrinsic::dbg_value: {
1387 : // This form of DBG_VALUE is target-independent.
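 : // A typical input here (illustrative) is:
 : //
 : //   call void @llvm.dbg.value(metadata i32 %x,
 : //                             metadata !11, metadata !DIExpression())
 : //
 : // i.e. it describes the value of a variable rather than its address.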
1388 : const DbgValueInst *DI = cast<DbgValueInst>(II);
1389 72 : const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1390 : const Value *V = DI->getValue();
1391 : assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1392 : "Expected inlined-at fields to agree");
1393 72 : if (!V) {
1394 : // Currently the optimizer can produce this; insert an undef to
1395 : // help debugging. Probably the optimizer should not do this.
1396 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
1397 0 : DI->getVariable(), DI->getExpression());
1398 : } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1399 16 : if (CI->getBitWidth() > 64)
1400 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1401 : .addCImm(CI)
1402 : .addImm(0U)
1403 : .addMetadata(DI->getVariable())
1404 : .addMetadata(DI->getExpression());
1405 : else
1406 16 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1407 16 : .addImm(CI->getZExtValue())
1408 : .addImm(0U)
1409 : .addMetadata(DI->getVariable())
1410 : .addMetadata(DI->getExpression());
1411 : } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1412 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1413 : .addFPImm(CF)
1414 : .addImm(0U)
1415 : .addMetadata(DI->getVariable())
1416 : .addMetadata(DI->getExpression());
1417 56 : } else if (unsigned Reg = lookUpRegForValue(V)) {
1418 : // FIXME: This does not handle register-indirect values at offset 0.
1419 : bool IsIndirect = false;
1420 41 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1421 41 : DI->getVariable(), DI->getExpression());
1422 : } else {
1423 : // We can't yet handle anything else here because it would require
1424 : // generating code, thus altering codegen because of debug info.
1425 : LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1426 : }
1427 : return true;
1428 : }
1429 : case Intrinsic::dbg_label: {
1430 : const DbgLabelInst *DI = cast<DbgLabelInst>(II);
1431 : assert(DI->getLabel() && "Missing label");
1432 5 : if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1433 : LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1434 : return true;
1435 : }
1436 :
1437 10 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1438 10 : TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
1439 5 : return true;
1440 : }
1441 3 : case Intrinsic::objectsize: {
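 : // No object-size analysis runs at -O0, so fold to the conservative
 : // answer encoded by the 'min' argument checked below: -1 (unknown)
 : // when it is false, 0 when it is true.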
1442 3 : ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
1443 3 : unsigned long long Res = CI->isZero() ? -1ULL : 0;
1444 3 : Constant *ResCI = ConstantInt::get(II->getType(), Res);
1445 3 : unsigned ResultReg = getRegForValue(ResCI);
1446 3 : if (!ResultReg)
1447 : return false;
1448 3 : updateValueMap(II, ResultReg);
1449 3 : return true;
1450 : }
1451 8 : case Intrinsic::launder_invariant_group:
1452 : case Intrinsic::strip_invariant_group:
1453 : case Intrinsic::expect: {
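 : // Each of these intrinsics returns its first argument unchanged, so
 : // the lowering just forwards that operand's register; e.g.
 : // '%y = call i64 @llvm.expect.i64(i64 %x, i64 1)' simply reuses %x.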
1454 8 : unsigned ResultReg = getRegForValue(II->getArgOperand(0));
1455 8 : if (!ResultReg)
1456 : return false;
1457 8 : updateValueMap(II, ResultReg);
1458 8 : return true;
1459 : }
1460 26 : case Intrinsic::experimental_stackmap:
1461 26 : return selectStackmap(II);
1462 39 : case Intrinsic::experimental_patchpoint_void:
1463 : case Intrinsic::experimental_patchpoint_i64:
1464 39 : return selectPatchpoint(II);
1465 :
1466 0 : case Intrinsic::xray_customevent:
1467 0 : return selectXRayCustomEvent(II);
1468 0 : case Intrinsic::xray_typedevent:
1469 0 : return selectXRayTypedEvent(II);
1470 : }
1471 :
1472 287559 : return fastLowerIntrinsicCall(II);
1473 : }
1474 :
1475 202173 : bool FastISel::selectCast(const User *I, unsigned Opcode) {
1476 404346 : EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1477 202173 : EVT DstVT = TLI.getValueType(DL, I->getType());
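 : // Illustrative example: for '%t = zext i8 %c to i32', SrcVT is i8,
 : // DstVT is i32, and the emission below goes through fastEmit_r with
 : // ISD::ZERO_EXTEND.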
1478 :
1479 404344 : if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1480 : !DstVT.isSimple())
1481 : // Unhandled type. Halt "fast" selection and bail.
1482 2 : return false;
1483 :
1484 : // Check if the destination type is legal.
1485 202171 : if (!TLI.isTypeLegal(DstVT))
1486 : return false;
1487 :
1488 : // Check if the source operand is legal.
1489 : if (!TLI.isTypeLegal(SrcVT))
1490 : return false;
1491 :
1492 161699 : unsigned InputReg = getRegForValue(I->getOperand(0));
1493 161699 : if (!InputReg)
1494 : // Unhandled operand. Halt "fast" selection and bail.
1495 : return false;
1496 :
1497 161694 : bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
1498 :
1499 161694 : unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1500 161694 : Opcode, InputReg, InputRegIsKill);
1501 161694 : if (!ResultReg)
1502 : return false;
1503 :
1504 89197 : updateValueMap(I, ResultReg);
1505 89197 : return true;
1506 : }
1507 :
1508 873633 : bool FastISel::selectBitCast(const User *I) {
1509 : // If the bitcast doesn't change the type, just use the operand value.
1510 1747266 : if (I->getType() == I->getOperand(0)->getType()) {
1511 3 : unsigned Reg = getRegForValue(I->getOperand(0));
1512 3 : if (!Reg)
1513 : return false;
1514 3 : updateValueMap(I, Reg);
1515 3 : return true;
1516 : }
1517 :
1518 : // Bitcasts of other values become reg-reg copies or BITCAST operators.
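 : // Illustrative example: '%d = bitcast i64 %x to double' preserves the
 : // bit pattern but changes the value type, so it takes the ISD::BITCAST
 : // path below, while a bitcast between identical value types degenerates
 : // into a plain register-to-register COPY.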
1519 873630 : EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1520 873630 : EVT DstEVT = TLI.getValueType(DL, I->getType());
1521 : if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1522 873630 : !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1523 : // Unhandled type. Halt "fast" selection and bail.
1524 39 : return false;
1525 :
1526 : MVT SrcVT = SrcEVT.getSimpleVT();
1527 : MVT DstVT = DstEVT.getSimpleVT();
1528 873591 : unsigned Op0 = getRegForValue(I->getOperand(0));
1529 873591 : if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1530 : return false;
1531 873577 : bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1532 :
1533 : // First, try to perform the bitcast by inserting a reg-reg copy.
1534 : unsigned ResultReg = 0;
1535 873577 : if (SrcVT == DstVT) {
1536 869563 : const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1537 869563 : const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1538 : // Don't attempt a cross-class copy. It will likely fail.
1539 869563 : if (SrcClass == DstClass) {
1540 869563 : ResultReg = createResultReg(DstClass);
1541 1739126 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1542 1739126 : TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1543 : }
1544 : }
1545 :
1546 : // If the reg-reg copy failed, select a BITCAST opcode.
1547 869563 : if (!ResultReg)
1548 4014 : ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1549 :
1550 873577 : if (!ResultReg)
1551 : return false;
1552 :
1553 869592 : updateValueMap(I, ResultReg);
1554 869592 : return true;
1555 : }
1556 :
1557 : // Remove local value instructions starting from the instruction after
1558 : // SavedLastLocalValue to the current function insert point.
1559 482545 : void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1560 : {
1561 482545 : MachineInstr *CurLastLocalValue = getLastLocalValue();
1562 482545 : if (CurLastLocalValue != SavedLastLocalValue) {
1563 : // Find the first local value instruction to be deleted.
1564 : // This is the instruction after SavedLastLocalValue if it is non-NULL.
1565 : // Otherwise it's the first instruction in the block.
1566 : MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1567 121 : if (SavedLastLocalValue)
1568 : ++FirstDeadInst;
1569 : else
1570 70 : FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1571 : setLastLocalValue(SavedLastLocalValue);
1572 121 : removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1573 : }
1574 482545 : }
1575 :
1576 17327093 : bool FastISel::selectInstruction(const Instruction *I) {
1577 17327093 : MachineInstr *SavedLastLocalValue = getLastLocalValue();
1578 : // Just before the terminator instruction, insert instructions to
1579 : // feed PHI nodes in successor blocks.
1580 17327093 : if (I->isTerminator()) {
1581 2807182 : if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1582 : // PHI node handling may have generated local value instructions,
1583 : // even though it failed to handle all PHI nodes.
1584 : // We remove these instructions because SelectionDAGISel will generate
1585 : // them again.
1586 13832 : removeDeadLocalValueCode(SavedLastLocalValue);
1587 13832 : return false;
1588 : }
1589 : }
1590 :
1591 : // FastISel does not handle any operand bundles except OB_funclet.
1592 17313261 : if (ImmutableCallSite CS = ImmutableCallSite(I))
1593 1871935 : for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
1594 0 : if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1595 0 : return false;
1596 :
1597 : DbgLoc = I->getDebugLoc();
1598 :
1599 17313261 : SavedInsertPt = FuncInfo.InsertPt;
1600 :
1601 : if (const auto *Call = dyn_cast<CallInst>(I)) {
1602 : const Function *F = Call->getCalledFunction();
1603 : LibFunc Func;
1604 :
1605 : // As a special case, don't handle calls to builtin library functions that
1606 : // may be translated directly to target instructions.
1607 1395661 : if (F && !F->hasLocalLinkage() && F->hasName() &&
1608 1395661 : LibInfo->getLibFunc(F->getName(), Func) &&
1609 61506 : LibInfo->hasOptimizedCodeGen(Func))
1610 1761 : return false;
1611 :
1612 : // Don't handle Intrinsic::trap if a trap function is specified.
1613 1416615 : if (F && F->getIntrinsicID() == Intrinsic::trap &&
1614 197 : Call->hasFnAttr("trap-func-name"))
1615 : return false;
1616 : }
1617 :
1618 : // First, try doing target-independent selection.
1619 17311500 : if (!SkipTargetIndependentISel) {
1620 17306632 : if (selectOperator(I, I->getOpcode())) {
1621 : ++NumFastIselSuccessIndependent;
1622 5909544 : DbgLoc = DebugLoc();
1623 5909544 : return true;
1624 : }
1625 : // Remove dead code.
1626 11397088 : recomputeInsertPt();
1627 22794176 : if (SavedInsertPt != FuncInfo.InsertPt)
1628 419 : removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1629 11397088 : SavedInsertPt = FuncInfo.InsertPt;
1630 : }
1631 : // Next, try calling the target to attempt to handle the instruction.
1632 11401956 : if (fastSelectInstruction(I)) {
1633 : ++NumFastIselSuccessTarget;
1634 10594454 : DbgLoc = DebugLoc();
1635 10594454 : return true;
1636 : }
1637 : // Remove dead code.
1638 807502 : recomputeInsertPt();
1639 1615004 : if (SavedInsertPt != FuncInfo.InsertPt)
1640 15 : removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1641 :
1642 1615004 : DbgLoc = DebugLoc();
1643 : // Undo phi node updates, because they will be added again by SelectionDAG.
1644 807502 : if (I->isTerminator()) {
1645 : // PHI node handling may have generated local value instructions.
1646 : // We remove them because SelectionDAGISel will generate them again.
1647 468713 : removeDeadLocalValueCode(SavedLastLocalValue);
1648 468713 : FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
1649 : }
1650 : return false;
1651 : }
1652 :
1653 : /// Emit an unconditional branch to the given block, unless it is the immediate
1654 : /// (fall-through) successor, and update the CFG.
1655 1887534 : void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
1656 : const DebugLoc &DbgLoc) {
1657 5270217 : if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
1658 1495149 : FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1659 : // For more accurate line information, if this is the only instruction
1660 : // in the block then emit it; otherwise we have the unconditional
1661 : // fall-through case, which needs no instructions.
1662 : } else {
1663 : // The unconditional branch case.
1664 923158 : TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1665 1846316 : SmallVector<MachineOperand, 0>(), DbgLoc);
1666 : }
1667 1887534 : if (FuncInfo.BPI) {
1668 : auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
1669 247 : FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
1670 247 : FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
1671 : } else
1672 1887287 : FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
1673 1887534 : }
1674 :
1675 534393 : void FastISel::finishCondBranch(const BasicBlock *BranchBB,
1676 : MachineBasicBlock *TrueMBB,
1677 : MachineBasicBlock *FalseMBB) {
1678 : // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
1679 : // happen in degenerate IR, and MachineIR forbids having a block twice in
1680 : // the successor/predecessor lists.
1681 534393 : if (TrueMBB != FalseMBB) {
1682 534390 : if (FuncInfo.BPI) {
1683 : auto BranchProbability =
1684 164 : FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1685 164 : FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
1686 : } else
1687 534226 : FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
1688 : }
1689 :
1690 534393 : fastEmitBranch(FalseMBB, DbgLoc);
1691 534393 : }
1692 :
1693 : /// Emit an FNeg operation.
1694 193 : bool FastISel::selectFNeg(const User *I) {
1695 193 : unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
1696 193 : if (!OpReg)
1697 : return false;
1698 193 : bool OpRegIsKill = hasTrivialKill(I);
1699 :
1700 : // If the target has ISD::FNEG, use it.
1701 193 : EVT VT = TLI.getValueType(DL, I->getType());
1702 193 : unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1703 193 : OpReg, OpRegIsKill);
1704 193 : if (ResultReg) {
1705 7 : updateValueMap(I, ResultReg);
1706 7 : return true;
1707 : }
1708 :
1709 : // Bitcast the value to integer, twiddle the sign bit with xor,
1710 : // and then bitcast it back to floating-point.
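 : // E.g. for f64 (illustrative): x ^ 0x8000000000000000 flips only the
 : // IEEE-754 sign bit, which is exactly negation for every input,
 : // including infinities and NaNs.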
1711 186 : if (VT.getSizeInBits() > 64)
1712 : return false;
1713 146 : EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1714 146 : if (!TLI.isTypeLegal(IntVT))
1715 : return false;
1716 :
1717 146 : unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1718 146 : ISD::BITCAST, OpReg, OpRegIsKill);
1719 146 : if (!IntReg)
1720 : return false;
1721 :
1722 146 : unsigned IntResultReg = fastEmit_ri_(
1723 : IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1724 146 : UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1725 146 : if (!IntResultReg)
1726 : return false;
1727 :
1728 146 : ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1729 146 : IntResultReg, /*IsKill=*/true);
1730 146 : if (!ResultReg)
1731 : return false;
1732 :
1733 146 : updateValueMap(I, ResultReg);
1734 146 : return true;
1735 : }
1736 :
1737 541897 : bool FastISel::selectExtractValue(const User *U) {
1738 : const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1739 : if (!EVI)
1740 : return false;
1741 :
1742 : // Make sure we only try to handle extracts with a legal result. But also
1743 : // allow i1 because it's easy.
1744 541897 : EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1745 541897 : if (!RealVT.isSimple())
1746 : return false;
1747 : MVT VT = RealVT.getSimpleVT();
1748 541897 : if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1749 5 : return false;
1750 :
1751 : const Value *Op0 = EVI->getOperand(0);
1752 541892 : Type *AggTy = Op0->getType();
1753 :
1754 : // Get the base result register.
1755 : unsigned ResultReg;
1756 541892 : DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
1757 1083784 : if (I != FuncInfo.ValueMap.end())
1758 246788 : ResultReg = I->second;
1759 295104 : else if (isa<Instruction>(Op0))
1760 295101 : ResultReg = FuncInfo.InitializeRegForValue(Op0);
1761 : else
1762 : return false; // fast-isel can't handle aggregate constants at the moment
1763 :
1764 : // Get the actual result register, which is an offset from the base register.
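 : // Illustrative example: for 'extractvalue {i64, i32} %agg, 1' on a
 : // 32-bit target, the leading i64 field occupies two registers, so the
 : // i32 result lives at the base register plus two.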
1765 : unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1766 :
1767 : SmallVector<EVT, 4> AggValueVTs;
1768 541889 : ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1769 :
1770 781445 : for (unsigned i = 0; i < VTIndex; i++)
1771 479112 : ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1772 :
1773 541889 : updateValueMap(EVI, ResultReg);
1774 : return true;
1775 : }
1776 :
1777 17781797 : bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1778 17781797 : switch (Opcode) {
1779 70227 : case Instruction::Add:
1780 70227 : return selectBinaryOp(I, ISD::ADD);
1781 2038 : case Instruction::FAdd:
1782 2038 : return selectBinaryOp(I, ISD::FADD);
1783 105348 : case Instruction::Sub:
1784 105348 : return selectBinaryOp(I, ISD::SUB);
1785 2226 : case Instruction::FSub:
1786 : // FNeg is currently represented in LLVM IR as a special case of FSub.
1787 2226 : if (BinaryOperator::isFNeg(I))
1788 193 : return selectFNeg(I);
1789 2033 : return selectBinaryOp(I, ISD::FSUB);
1790 24550 : case Instruction::Mul:
1791 24550 : return selectBinaryOp(I, ISD::MUL);
1792 3545 : case Instruction::FMul:
1793 3545 : return selectBinaryOp(I, ISD::FMUL);
1794 49439 : case Instruction::SDiv:
1795 49439 : return selectBinaryOp(I, ISD::SDIV);
1796 6335 : case Instruction::UDiv:
1797 6335 : return selectBinaryOp(I, ISD::UDIV);
1798 2316 : case Instruction::FDiv:
1799 2316 : return selectBinaryOp(I, ISD::FDIV);
1800 1948 : case Instruction::SRem:
1801 1948 : return selectBinaryOp(I, ISD::SREM);
1802 10166 : case Instruction::URem:
1803 10166 : return selectBinaryOp(I, ISD::UREM);
1804 1 : case Instruction::FRem:
1805 1 : return selectBinaryOp(I, ISD::FREM);
1806 6737 : case Instruction::Shl:
1807 6737 : return selectBinaryOp(I, ISD::SHL);
1808 4328 : case Instruction::LShr:
1809 4328 : return selectBinaryOp(I, ISD::SRL);
1810 18878 : case Instruction::AShr:
1811 18878 : return selectBinaryOp(I, ISD::SRA);
1812 83723 : case Instruction::And:
1813 83723 : return selectBinaryOp(I, ISD::AND);
1814 10914 : case Instruction::Or:
1815 10914 : return selectBinaryOp(I, ISD::OR);
1816 30147 : case Instruction::Xor:
1817 30147 : return selectBinaryOp(I, ISD::XOR);
1818 :
1819 1397006 : case Instruction::GetElementPtr:
1820 1397006 : return selectGetElementPtr(I);
1821 :
1822 : case Instruction::Br: {
1823 : const BranchInst *BI = cast<BranchInst>(I);
1824 :
1825 1888252 : if (BI->isUnconditional()) {
1826 1352903 : const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1827 1352903 : MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1828 1352903 : fastEmitBranch(MSucc, BI->getDebugLoc());
1829 : return true;
1830 : }
1831 :
1832 : // Conditional branches are not handled yet.
1833 : // Halt "fast" selection and bail.
1834 : return false;
1835 : }
1836 :
1837 230730 : case Instruction::Unreachable:
1838 230730 : if (TM.Options.TrapUnreachable)
1839 108 : return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1840 : else
1841 : return true;
1842 :
1843 17 : case Instruction::Alloca:
1844 : // FunctionLowering has the static-sized case covered.
1845 34 : if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1846 0 : return true;
1847 :
1848 : // Dynamic-sized alloca is not handled yet.
1849 : return false;
1850 :
1851 1416582 : case Instruction::Call:
1852 1416582 : return selectCall(I);
1853 :
1854 873581 : case Instruction::BitCast:
1855 873581 : return selectBitCast(I);
1856 :
1857 150 : case Instruction::FPToSI:
1858 150 : return selectCast(I, ISD::FP_TO_SINT);
1859 126879 : case Instruction::ZExt:
1860 126879 : return selectCast(I, ISD::ZERO_EXTEND);
1861 48979 : case Instruction::SExt:
1862 48979 : return selectCast(I, ISD::SIGN_EXTEND);
1863 24952 : case Instruction::Trunc:
1864 24952 : return selectCast(I, ISD::TRUNCATE);
1865 1145 : case Instruction::SIToFP:
1866 1145 : return selectCast(I, ISD::SINT_TO_FP);
1867 :
1868 111887 : case Instruction::IntToPtr: // Deliberate fall-through.
1869 : case Instruction::PtrToInt: {
1870 223774 : EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1871 111887 : EVT DstVT = TLI.getValueType(DL, I->getType());
1872 111887 : if (DstVT.bitsGT(SrcVT))
1873 3 : return selectCast(I, ISD::ZERO_EXTEND);
1874 111884 : if (DstVT.bitsLT(SrcVT))
1875 18 : return selectCast(I, ISD::TRUNCATE);
1876 111866 : unsigned Reg = getRegForValue(I->getOperand(0));
1877 111866 : if (!Reg)
1878 : return false;
1879 111794 : updateValueMap(I, Reg);
1880 111794 : return true;
1881 : }
1882 :
1883 541897 : case Instruction::ExtractValue:
1884 541897 : return selectExtractValue(I);
1885 :
1886 : case Instruction::PHI:
1887 : llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1888 :
1889 : default:
1890 : // Unhandled instruction. Halt "fast" selection and bail.
1891 : return false;
1892 : }
1893 : }
1894 :
1895 212887 : FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1896 : const TargetLibraryInfo *LibInfo,
1897 212887 : bool SkipTargetIndependentISel)
1898 425774 : : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1899 425774 : MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1900 425774 : TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1901 212887 : TII(*MF->getSubtarget().getInstrInfo()),
1902 212887 : TLI(*MF->getSubtarget().getTargetLowering()),
1903 212887 : TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1904 212887 : SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1905 :
1906 : FastISel::~FastISel() = default;
1907 :
1908 0 : bool FastISel::fastLowerArguments() { return false; }
1909 :
1910 594 : bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1911 :
1912 163 : bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1913 163 : return false;
1914 : }
1915 :
1916 45 : unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1917 :
1918 0 : unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
1919 : bool /*Op0IsKill*/) {
1920 0 : return 0;
1921 : }
1922 :
1923 0 : unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1924 : bool /*Op0IsKill*/, unsigned /*Op1*/,
1925 : bool /*Op1IsKill*/) {
1926 0 : return 0;
1927 : }
1928 :
1929 0 : unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1930 0 : return 0;
1931 : }
1932 :
1933 65 : unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1934 : const ConstantFP * /*FPImm*/) {
1935 65 : return 0;
1936 : }
1937 :
1938 11 : unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1939 : bool /*Op0IsKill*/, uint64_t /*Imm*/) {
1940 11 : return 0;
1941 : }
1942 :
1943 : /// This method is a wrapper around fastEmit_ri. It first tries to emit an
1944 : /// instruction with an immediate operand using fastEmit_ri.
1945 : /// If that fails, it materializes the immediate into a register and tries
1946 : /// fastEmit_rr instead.
1947 663705 : unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1948 : bool Op0IsKill, uint64_t Imm, MVT ImmType) {
1949 : // If this is a multiply by a power of two, emit this as a shift left.
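 : // E.g. mul x, 8 -> shl x, 3.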
1950 663705 : if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1951 : Opcode = ISD::SHL;
1952 56564 : Imm = Log2_64(Imm);
1953 607141 : } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1954 : // div x, 8 -> srl x, 3
1955 : Opcode = ISD::SRL;
1956 4924 : Imm = Log2_64(Imm);
1957 : }
1958 :
1959 : // Horrible hack (to be removed): check that shift amounts are
1960 : // in range.
1961 663705 : if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1962 117802 : Imm >= VT.getSizeInBits())
1963 : return 0;
1964 :
1965 : // First check if immediate type is legal. If not, we can't use the ri form.
1966 663701 : unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1967 663701 : if (ResultReg)
1968 : return ResultReg;
1969 22367 : unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1970 : bool IsImmKill = true;
1971 22367 : if (!MaterialReg) {
1972 : // This is a bit ugly/slow, but failing here means falling out of
1973 : // fast-isel, which would be very slow.
1974 : IntegerType *ITy =
1975 22339 : IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1976 22339 : MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1977 22339 : if (!MaterialReg)
1978 : return 0;
1979 : // FIXME: If the materialized register here has no uses yet then this
1980 : // will be the first use and we should be able to mark it as killed.
1981 : // However, the local value area for materialising constant expressions
1982 : // grows down, not up, which means that any constant expressions we generate
1983 : // later which also use 'Imm' could be after this instruction and therefore
1984 : // after this kill.
1985 : IsImmKill = false;
1986 : }
1987 22367 : return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
1988 : }
1989 :
1990 9197500 : unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
1991 18395000 : return MRI.createVirtualRegister(RC);
1992 : }
1993 :
1994 18178371 : unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
1995 : unsigned OpNum) {
1996 18178371 : if (TargetRegisterInfo::isVirtualRegister(Op)) {
1997 : const TargetRegisterClass *RegClass =
1998 6132150 : TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1999 6132150 : if (!MRI.constrainRegClass(Op, RegClass)) {
2000 : // If it's not legal to COPY between the register classes, something
2001 : // has gone very wrong before we got here.
2002 0 : unsigned NewOp = createResultReg(RegClass);
2003 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2004 0 : TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
2005 0 : return NewOp;
2006 : }
2007 : }
2008 : return Op;
2009 : }
2010 :
2011 93595 : unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
2012 : const TargetRegisterClass *RC) {
2013 93595 : unsigned ResultReg = createResultReg(RC);
2014 93595 : const MCInstrDesc &II = TII.get(MachineInstOpcode);
2015 :
2016 93595 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
2017 93595 : return ResultReg;
2018 : }
2019 :
2020 115395 : unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
2021 : const TargetRegisterClass *RC, unsigned Op0,
2022 : bool Op0IsKill) {
2023 115395 : const MCInstrDesc &II = TII.get(MachineInstOpcode);
2024 :
2025 115395 : unsigned ResultReg = createResultReg(RC);
2026 230790 : Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2027 :
2028 115395 : if (II.getNumDefs() >= 1)
2029 227432 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2030 113716 : .addReg(Op0, getKillRegState(Op0IsKill));
2031 : else {
2032 1679 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2033 1679 : .addReg(Op0, getKillRegState(Op0IsKill));
2034 1679 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2035 3358 : TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2036 : }
2037 :
2038 115395 : return ResultReg;
2039 : }
2040 :
2041 214625 : unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2042 : const TargetRegisterClass *RC, unsigned Op0,
2043 : bool Op0IsKill, unsigned Op1,
2044 : bool Op1IsKill) {
2045 214625 : const MCInstrDesc &II = TII.get(MachineInstOpcode);
2046 :
2047 214625 : unsigned ResultReg = createResultReg(RC);
2048 429250 : Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2049 429250 : Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2050 :
2051 214625 : if (II.getNumDefs() >= 1)
2052 429250 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2053 214625 : .addReg(Op0, getKillRegState(Op0IsKill))
2054 214625 : .addReg(Op1, getKillRegState(Op1IsKill));
2055 : else {
2056 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2057 0 : .addReg(Op0, getKillRegState(Op0IsKill))
2058 0 : .addReg(Op1, getKillRegState(Op1IsKill));
2059 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2060 0 : TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2061 : }
2062 214625 : return ResultReg;
2063 : }
2064 :
2065 58 : unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
2066 : const TargetRegisterClass *RC, unsigned Op0,
2067 : bool Op0IsKill, unsigned Op1,
2068 : bool Op1IsKill, unsigned Op2,
2069 : bool Op2IsKill) {
2070 58 : const MCInstrDesc &II = TII.get(MachineInstOpcode);
2071 :
2072 58 : unsigned ResultReg = createResultReg(RC);
2073 116 : Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2074 116 : Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2075 116 : Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
2076 :
2077 58 : if (II.getNumDefs() >= 1)
2078 116 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2079 58 : .addReg(Op0, getKillRegState(Op0IsKill))
2080 58 : .addReg(Op1, getKillRegState(Op1IsKill))
2081 58 : .addReg(Op2, getKillRegState(Op2IsKill));
2082 : else {
2083 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2084 0 : .addReg(Op0, getKillRegState(Op0IsKill))
2085 0 : .addReg(Op1, getKillRegState(Op1IsKill))
2086 0 : .addReg(Op2, getKillRegState(Op2IsKill));
2087 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2088 0 : TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2089 : }
2090 58 : return ResultReg;
2091 : }
2092 :
2093 682628 : unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
2094 : const TargetRegisterClass *RC, unsigned Op0,
2095 : bool Op0IsKill, uint64_t Imm) {
2096 682628 : const MCInstrDesc &II = TII.get(MachineInstOpcode);
2097 :
2098 682628 : unsigned ResultReg = createResultReg(RC);
2099 1365256 : Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2100 :
2101 682628 : if (II.getNumDefs() >= 1)
2102 1365256 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2103 682628 : .addReg(Op0, getKillRegState(Op0IsKill))
2104 682628 : .addImm(Imm);
2105 : else {
2106 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2107 0 : .addReg(Op0, getKillRegState(Op0IsKill))
2108 0 : .addImm(Imm);
2109 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2110 0 : TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2111 : }
2112 682628 : return ResultReg;
2113 : }
2114 :
2115 361 : unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2116 : const TargetRegisterClass *RC, unsigned Op0,
2117 : bool Op0IsKill, uint64_t Imm1,
2118 : uint64_t Imm2) {
2119 361 : const MCInstrDesc &II = TII.get(MachineInstOpcode);
2120 :
2121 361 : unsigned ResultReg = createResultReg(RC);
2122 722 : Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2123 :
2124 361 : if (II.getNumDefs() >= 1)
2125 722 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2126 361 : .addReg(Op0, getKillRegState(Op0IsKill))
2127 361 : .addImm(Imm1)
2128 361 : .addImm(Imm2);
2129 : else {
2130 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2131 0 : .addReg(Op0, getKillRegState(Op0IsKill))
2132 0 : .addImm(Imm1)
2133 0 : .addImm(Imm2);
2134 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2135 0 : TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2136 : }
2137 361 : return ResultReg;
2138 : }
2139 :
2140 4 : unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2141 : const TargetRegisterClass *RC,
2142 : const ConstantFP *FPImm) {
2143 4 : const MCInstrDesc &II = TII.get(MachineInstOpcode);
2144 :
2145 4 : unsigned ResultReg = createResultReg(RC);
2146 :
2147 4 : if (II.getNumDefs() >= 1)
2148 8 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2149 : .addFPImm(FPImm);
2150 : else {
2151 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2152 : .addFPImm(FPImm);
2153 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2154 0 : TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2155 : }
2156 4 : return ResultReg;
2157 : }
2158 :
2159 320 : unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2160 : const TargetRegisterClass *RC, unsigned Op0,
2161 : bool Op0IsKill, unsigned Op1,
2162 : bool Op1IsKill, uint64_t Imm) {
2163 320 : const MCInstrDesc &II = TII.get(MachineInstOpcode);
2164 :
2165 320 : unsigned ResultReg = createResultReg(RC);
2166 640 : Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2167 640 : Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2168 :
2169 320 : if (II.getNumDefs() >= 1)
2170 640 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2171 320 : .addReg(Op0, getKillRegState(Op0IsKill))
2172 320 : .addReg(Op1, getKillRegState(Op1IsKill))
2173 320 : .addImm(Imm);
2174 : else {
2175 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2176 0 : .addReg(Op0, getKillRegState(Op0IsKill))
2177 0 : .addReg(Op1, getKillRegState(Op1IsKill))
2178 0 : .addImm(Imm);
2179 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2180 0 : TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2181 : }
2182 320 : return ResultReg;
2183 : }
2184 :
2185 245252 : unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2186 : const TargetRegisterClass *RC, uint64_t Imm) {
2187 245252 : unsigned ResultReg = createResultReg(RC);
2188 245252 : const MCInstrDesc &II = TII.get(MachineInstOpcode);
2189 :
2190 245252 : if (II.getNumDefs() >= 1)
2191 490504 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2192 245252 : .addImm(Imm);
2193 : else {
2194 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
2195 0 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2196 0 : TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2197 : }
2198 245252 : return ResultReg;
2199 : }
2200 :
2201 26456 : unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
2202 : bool Op0IsKill, uint32_t Idx) {
2203 26456 : unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2204 : assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
2205 : "Cannot yet extract from physregs");
2206 26456 : const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2207 26456 : MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2208 79368 : BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
2209 79368 : ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
2210 26456 : return ResultReg;
2211 : }
2212 :
2213 : /// Emit MachineInstrs to compute the value of Op with all but the least
2214 : /// significant bit set to zero.
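 : /// Concretely, this is just 'and Op0, 1'.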
2215 41129 : unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
2216 41129 : return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
2217 : }
2218 : /// Handle PHI nodes in successor blocks.
2219 : /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2220 : /// Emit code to ensure constants are copied into registers when needed.
2221 : /// Remember the virtual registers that need to be added to the Machine PHI
2222 : /// nodes as input. We cannot just directly add them, because expansion
2223 : /// might result in multiple MBB's for one BB. As such, the start of the
2224 : /// BB might correspond to a different MBB than the end.
2225 2807182 : bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2226 2807182 : const Instruction *TI = LLVMBB->getTerminator();
2227 :
2228 : SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2229 2807182 : FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2230 :
2231 : // Check successor nodes' PHI nodes that expect a constant to be available
2232 : // from this block.
2233 6190964 : for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2234 3397614 : const BasicBlock *SuccBB = TI->getSuccessor(succ);
2235 3397614 : if (!isa<PHINode>(SuccBB->begin()))
2236 3190592 : continue;
2237 207023 : MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2238 :
2239 : // If this terminator has multiple identical successors (common for
2240 : // switches), only handle each succ once.
2241 207023 : if (!SuccsHandled.insert(SuccMBB).second)
2242 : continue;
2243 :
2244 : MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2245 :
2246 : // At this point we know that there is a 1-1 correspondence between LLVM PHI
2247 : // nodes and Machine PHI nodes, but the incoming operands have not been
2248 : // emitted yet.
2249 607257 : for (const PHINode &PN : SuccBB->phis()) {
2250 : // Ignore dead PHIs.
2251 207045 : if (PN.use_empty())
2252 16 : continue;
2253 :
2254 : // Only handle legal types. Two interesting things to note here. First,
2255 : // by bailing out early, we may leave behind some dead instructions,
2256 : // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2257 : // own moves. Second, this check is necessary because FastISel doesn't
2258 : // use CreateRegs to create registers, so it always creates
2259 : // exactly one register for each non-void instruction.
2260 207029 : EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2261 193262 : if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2262 : // Handle integer promotions, though, because they're common and easy.
2263 : if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2264 13824 : FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2265 13832 : return false;
2266 : }
2267 : }
2268 :
2269 193205 : const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2270 :
2271 : // Set the DebugLoc for the copy. Prefer the location of the operand
2272 : // if there is one; use the location of the PHI otherwise.
2273 : DbgLoc = PN.getDebugLoc();
2274 : if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2275 : DbgLoc = Inst->getDebugLoc();
2276 :
2277 193205 : unsigned Reg = getRegForValue(PHIOp);
2278 193205 : if (!Reg) {
2279 8 : FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2280 8 : return false;
2281 : }
2282 386394 : FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2283 386394 : DbgLoc = DebugLoc();
2284 : }
2285 : }
2286 :
2287 : return true;
2288 : }
2289 :
2290 4593294 : bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2291 : assert(LI->hasOneUse() &&
2292 : "tryToFoldLoad expected a LoadInst with a single use");
2293 : // We know that the load has a single use, but don't know what it is. If it
2294 : // isn't one of the folded instructions, then we can't succeed here. Handle
2295 : // this by scanning the single-use users of the load until we get to FoldInst.
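 : // Illustrative chain:
 : //
 : //   %v = load i32, i32* %p
 : //   %e = sext i32 %v to i64
 : //   %c = icmp eq i64 %e, 0   ; FoldInst
 : //
 : // The walk starts at %e, the load's single user, and follows single-use
 : // links until it reaches %c.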
2296 : unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2297 :
2298 : const Instruction *TheUser = LI->user_back();
2299 1703424 : while (TheUser != FoldInst && // Scan up until we find FoldInst.
2300 : // Stay in the right block.
2301 6296718 : TheUser->getParent() == FoldInst->getParent() &&
2302 : --MaxUsers) { // Don't scan too far.
2303 : // If there are multiple or no uses of this instruction, then bail out.
2304 1720450 : if (!TheUser->hasOneUse())
2305 : return false;
2306 :
2307 : TheUser = TheUser->user_back();
2308 : }
2309 :
2310 : // If we didn't find the fold instruction, then we failed to collapse the
2311 : // sequence.
2312 3551423 : if (TheUser != FoldInst)
2313 : return false;
2314 :
2315 : // Don't try to fold volatile loads. Target has to deal with alignment
2316 : // constraints.
2317 3324004 : if (LI->isVolatile())
2318 : return false;
2319 :
2320 : // Figure out which vreg this is going into. If there is no assigned vreg yet
2321 : // then there actually was no reference to it. Perhaps the load is referenced
2322 : // by a dead instruction.
2323 3321448 : unsigned LoadReg = getRegForValue(LI);
2324 3321448 : if (!LoadReg)
2325 : return false;
2326 :
2327 : // We can't fold if this vreg has no uses or more than one use. Multiple uses
2328 : // may mean that the instruction got lowered to multiple MIs, or the use of
2329 : // the loaded value ended up being multiple operands of the result.
2330 3321390 : if (!MRI.hasOneUse(LoadReg))
2331 : return false;
2332 :
2333 2958859 : MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2334 2958859 : MachineInstr *User = RI->getParent();
2335 :
2336 : // Set the insertion point properly. Folding the load can cause generation of
2337 : // other random instructions (like sign extends) for addressing modes; make
2338 : // sure they get inserted in a logical place before the new instruction.
2339 2958859 : FuncInfo.InsertPt = User;
2340 2958859 : FuncInfo.MBB = User->getParent();
2341 :
2342 : // Ask the target to try folding the load.
2343 5917718 : return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2344 : }
2345 :
2346 31121 : bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
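 : // Illustrative fold target: for 'getelementptr i8, i8* %p, i64 %sum'
 : // with '%sum = add i64 %i, 16', the constant 16 can be folded into the
 : // addressing mode instead of being computed by a separate add.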
2347 : // Must be an add.
2348 : if (!isa<AddOperator>(Add))
2349 : return false;
2350 : // Type size needs to match.
2351 114 : if (DL.getTypeSizeInBits(GEP->getType()) !=
2352 57 : DL.getTypeSizeInBits(Add->getType()))
2353 : return false;
2354 : // Must be in the same basic block.
2355 52 : if (isa<Instruction>(Add) &&
2356 52 : FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2357 1 : return false;
2358 : // Must have a constant operand.
2359 51 : return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2360 : }
2361 :
2362 : MachineMemOperand *
2363 10170690 : FastISel::createMachineMemOperandFor(const Instruction *I) const {
2364 : const Value *Ptr;
2365 : Type *ValTy;
2366 : unsigned Alignment;
2367 : MachineMemOperand::Flags Flags;
2368 : bool IsVolatile;
2369 :
2370 : if (const auto *LI = dyn_cast<LoadInst>(I)) {
2371 : Alignment = LI->getAlignment();
2372 : IsVolatile = LI->isVolatile();
2373 : Flags = MachineMemOperand::MOLoad;
2374 : Ptr = LI->getPointerOperand();
2375 5290385 : ValTy = LI->getType();
2376 : } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2377 : Alignment = SI->getAlignment();
2378 : IsVolatile = SI->isVolatile();
2379 : Flags = MachineMemOperand::MOStore;
2380 : Ptr = SI->getPointerOperand();
2381 4880305 : ValTy = SI->getValueOperand()->getType();
2382 : } else
2383 : return nullptr;
2384 :
2385 : bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2386 : bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
2387 : bool IsDereferenceable =
2388 : I->getMetadata(LLVMContext::MD_dereferenceable) != nullptr;
2389 : const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2390 :
2391 : AAMDNodes AAInfo;
2392 10170690 : I->getAAMetadata(AAInfo);
2393 :
2394 10170690 : if (Alignment == 0) // Ensure that codegen never sees alignment 0.
2395 1223 : Alignment = DL.getABITypeAlignment(ValTy);
2396 :
2397 10170690 : unsigned Size = DL.getTypeStoreSize(ValTy);
2398 :
2399 10170690 : if (IsVolatile)
2400 : Flags |= MachineMemOperand::MOVolatile;
2401 10170690 : if (IsNonTemporal)
2402 : Flags |= MachineMemOperand::MONonTemporal;
2403 10170690 : if (IsDereferenceable)
2404 : Flags |= MachineMemOperand::MODereferenceable;
2405 10170690 : if (IsInvariant)
2406 : Flags |= MachineMemOperand::MOInvariant;
2407 :
2408 20341380 : return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2409 : Alignment, AAInfo, Ranges);
2410 : }
2411 :
2412 461105 : CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2413 : // If both operands are the same, then try to optimize or fold the cmp.
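 : // E.g. (illustrative): 'fcmp oeq x, x' is true exactly when x is not
 : // NaN, so it folds to FCMP_ORD, and 'icmp eq x, x' is always true, so
 : // it folds to the always-true predicate (spelled FCMP_TRUE below).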
2414 : CmpInst::Predicate Predicate = CI->getPredicate();
2415 461105 : if (CI->getOperand(0) != CI->getOperand(1))
2416 : return Predicate;
2417 :
2418 582 : switch (Predicate) {
2419 0 : default: llvm_unreachable("Invalid predicate!");
2420 : case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2421 14 : case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2422 : case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2423 4 : case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2424 : case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2425 4 : case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2426 : case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2427 4 : case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2428 392 : case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2429 6 : case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2430 4 : case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2431 4 : case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2432 4 : case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2433 4 : case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2434 4 : case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2435 0 : case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2436 :
2437 68 : case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2438 : case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2439 : case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2440 4 : case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2441 : case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2442 4 : case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2443 : case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2444 4 : case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2445 : case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2446 4 : case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2447 : }
2448 :
2449 : return Predicate;
2450 : }
|