LLVM 19.0.0git
InstCombinePHI.cpp
Go to the documentation of this file.
1//===- InstCombinePHI.cpp -------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visitPHINode function.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/Statistic.h"
23#include <optional>
24
25using namespace llvm;
26using namespace llvm::PatternMatch;
27
28#define DEBUG_TYPE "instcombine"
29
// Cap on how many PHIs in a block foldIntegerTypedPHI() will scan while
// searching for a matching pointer-typed PHI; defaults to 512.
// NOTE(review): the declarator line (doxygen line 30, presumably
// "static cl::opt<unsigned>") was dropped by the listing extraction.
31MaxNumPhis("instcombine-max-num-phis", cl::init(512),
32 cl::desc("Maximum number phis to handle in intptr/ptrint folding"));
33
// Transformation counters reported under -stats; bumped by the folds below.
34STATISTIC(NumPHIsOfInsertValues,
35 "Number of phi-of-insertvalue turned into insertvalue-of-phis");
36STATISTIC(NumPHIsOfExtractValues,
37 "Number of phi-of-extractvalue turned into extractvalue-of-phi");
38STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
39
40/// The PHI arguments will be folded into a single operation with a PHI node
41/// as input. The debug location of the single operation will be the merged
42/// locations of the original PHI node arguments.
// NOTE(review): the function signature line (doxygen line 43, presumably
// "void InstCombinerImpl::PHIArgMergedDebugLoc(Instruction *Inst,
// PHINode &PN) {") was dropped by the listing extraction.
// Seed Inst's debug location from the first incoming instruction...
44 auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
45 Inst->setDebugLoc(FirstInst->getDebugLoc());
46 // We do not expect a CallInst here, otherwise, N-way merging of DebugLoc
47 // will be inefficient.
48 assert(!isa<CallInst>(Inst));
49
// ...then fold in the locations of every remaining incoming value, one
// merge per operand.
50 for (Value *V : drop_begin(PN.incoming_values())) {
51 auto *I = cast<Instruction>(V);
52 Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
53 }
54}
55
56// Replace Integer typed PHI PN if the PHI's value is used as a pointer value.
57// If there is an existing pointer typed PHI that produces the same value as PN,
58// replace PN and the IntToPtr operation with it. Otherwise, synthesize a new
59// PHI node:
60//
61// Case-1:
62// bb1:
63// int_init = PtrToInt(ptr_init)
64// br label %bb2
65// bb2:
66// int_val = PHI([int_init, %bb1], [int_val_inc, %bb2]
67// ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
68// ptr_val2 = IntToPtr(int_val)
69// ...
70// use(ptr_val2)
71// ptr_val_inc = ...
72// inc_val_inc = PtrToInt(ptr_val_inc)
73//
74// ==>
75// bb1:
76// br label %bb2
77// bb2:
78// ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
79// ...
80// use(ptr_val)
81// ptr_val_inc = ...
82//
83// Case-2:
84// bb1:
85// int_ptr = BitCast(ptr_ptr)
86// int_init = Load(int_ptr)
87// br label %bb2
88// bb2:
89// int_val = PHI([int_init, %bb1], [int_val_inc, %bb2]
90// ptr_val2 = IntToPtr(int_val)
91// ...
92// use(ptr_val2)
93// ptr_val_inc = ...
94// inc_val_inc = PtrToInt(ptr_val_inc)
95// ==>
96// bb1:
97// ptr_init = Load(ptr_ptr)
98// br label %bb2
99// bb2:
100// ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
101// ...
102// use(ptr_val)
103// ptr_val_inc = ...
104// ...
105//
// NOTE(review): the signature line (doxygen line 106, presumably
// "bool InstCombinerImpl::foldIntegerTypedPHI(PHINode &PN) {") was dropped
// by the listing extraction; a few interior statement lines are also
// missing and are flagged below.
// Candidates are integer-typed PHIs whose single user is an inttoptr.
107 if (!PN.getType()->isIntegerTy())
108 return false;
109 if (!PN.hasOneUse())
110 return false;
111
112 auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
113 if (!IntToPtr)
114 return false;
115
116 // Check if the pointer is actually used as pointer:
// The inttoptr result must feed a load/store/GEP *as the pointer operand*;
// other uses (e.g. being a stored value) do not justify the rewrite.
117 auto HasPointerUse = [](Instruction *IIP) {
118 for (User *U : IIP->users()) {
119 Value *Ptr = nullptr;
120 if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
121 Ptr = LoadI->getPointerOperand();
122 } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
123 Ptr = SI->getPointerOperand();
124 } else if (GetElementPtrInst *GI = dyn_cast<GetElementPtrInst>(U)) {
125 Ptr = GI->getPointerOperand();
126 }
127
128 if (Ptr && Ptr == IIP)
129 return true;
130 }
131 return false;
132 };
133
134 if (!HasPointerUse(IntToPtr))
135 return false;
136
// The cast must be bit-width preserving: pointer width in the target
// address space must equal the width of the integer being cast.
137 if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
138 DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
139 return false;
140
// For every incoming (block, value) pair, find a pointer-typed value that
// can stand in for the integer one; bail out if any operand has none.
141 SmallVector<Value *, 4> AvailablePtrVals;
142 for (auto Incoming : zip(PN.blocks(), PN.incoming_values())) {
143 BasicBlock *BB = std::get<0>(Incoming);
144 Value *Arg = std::get<1>(Incoming);
145
146 // First look backward:
147 if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
148 AvailablePtrVals.emplace_back(PI->getOperand(0));
149 continue;
150 }
151
152 // Next look forward:
// Accept an existing inttoptr of Arg, but only if it is available at the
// edge: it dominates the incoming block or lives in it.
153 Value *ArgIntToPtr = nullptr;
154 for (User *U : Arg->users()) {
155 if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
156 (DT.dominates(cast<Instruction>(U), BB) ||
157 cast<Instruction>(U)->getParent() == BB)) {
158 ArgIntToPtr = U;
159 break;
160 }
161 }
162
163 if (ArgIntToPtr) {
164 AvailablePtrVals.emplace_back(ArgIntToPtr);
165 continue;
166 }
167
168 // If Arg is defined by a PHI, allow it. This will also create
169 // more opportunities iteratively.
170 if (isa<PHINode>(Arg)) {
171 AvailablePtrVals.emplace_back(Arg);
172 continue;
173 }
174
175 // For a single use integer load:
176 auto *LoadI = dyn_cast<LoadInst>(Arg);
177 if (!LoadI)
178 return false;
179
180 if (!LoadI->hasOneUse())
181 return false;
182
183 // Push the integer typed Load instruction into the available
184 // value set, and fix it up later when the pointer typed PHI
185 // is synthesized.
186 AvailablePtrVals.emplace_back(LoadI);
187 }
188
189 // Now search for a matching PHI
// Case-1 of the header comment: an existing pointer-typed PHI in the same
// block whose incoming values exactly match AvailablePtrVals per block.
190 auto *BB = PN.getParent();
191 assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
192 "Not enough available ptr typed incoming values");
193 PHINode *MatchingPtrPHI = nullptr;
194 unsigned NumPhis = 0;
195 for (PHINode &PtrPHI : BB->phis()) {
196 // FIXME: consider handling this in AggressiveInstCombine
// MaxNumPhis caps this scan to bound compile time on pathological blocks.
197 if (NumPhis++ > MaxNumPhis)
198 return false;
199 if (&PtrPHI == &PN || PtrPHI.getType() != IntToPtr->getType())
200 continue;
201 if (any_of(zip(PN.blocks(), AvailablePtrVals),
202 [&](const auto &BlockAndValue) {
203 BasicBlock *BB = std::get<0>(BlockAndValue);
204 Value *V = std::get<1>(BlockAndValue);
205 return PtrPHI.getIncomingValueForBlock(BB) != V;
206 }))
207 continue;
208 MatchingPtrPHI = &PtrPHI;
209 break;
210 }
211
212 if (MatchingPtrPHI) {
213 assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
214 "Phi's Type does not match with IntToPtr");
215 // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
216 // to make sure another transform can't undo it in the meantime.
217 replaceInstUsesWith(*IntToPtr, MatchingPtrPHI);
218 eraseInstFromFunction(*IntToPtr);
// NOTE(review): one statement line (doxygen line 219) is missing here in
// the listing — presumably "eraseInstFromFunction(PN);"; verify against
// the upstream source.
220 return true;
221 }
222
223 // If it requires a conversion for every PHI operand, do not do it.
224 if (all_of(AvailablePtrVals, [&](Value *V) {
225 return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
226 }))
227 return false;
228
229 // If any of the operand that requires casting is a terminator
230 // instruction, do not do it. Similarly, do not do the transform if the value
231 // is PHI in a block with no insertion point, for example, a catchswitch
232 // block, since we will not be able to insert a cast after the PHI.
233 if (any_of(AvailablePtrVals, [&](Value *V) {
234 if (V->getType() == IntToPtr->getType())
235 return false;
236 auto *Inst = dyn_cast<Instruction>(V);
237 if (!Inst)
238 return false;
239 if (Inst->isTerminator())
240 return true;
241 auto *BB = Inst->getParent();
242 if (isa<PHINode>(Inst) && BB->getFirstInsertionPt() == BB->end())
243 return true;
244 return false;
245 }))
246 return false;
247
// Synthesize a fresh pointer-typed PHI (Case-2 of the header comment).
248 PHINode *NewPtrPHI = PHINode::Create(
249 IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");
250
251 InsertNewInstBefore(NewPtrPHI, PN.getIterator());
// NOTE(review): one line (doxygen line 252) is missing here — presumably
// the declaration of the "Casts" cache used below, e.g.
// "SmallDenseMap<Value *, Instruction *> Casts;". Verify upstream.
253 for (auto Incoming : zip(PN.blocks(), AvailablePtrVals)) {
254 auto *IncomingBB = std::get<0>(Incoming);
255 auto *IncomingVal = std::get<1>(Incoming);
256
257 if (IncomingVal->getType() == IntToPtr->getType()) {
258 NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
259 continue;
260 }
261
262#ifndef NDEBUG
263 LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
264 assert((isa<PHINode>(IncomingVal) ||
265 IncomingVal->getType()->isPointerTy() ||
266 (LoadI && LoadI->hasOneUse())) &&
267 "Can not replace LoadInst with multiple uses");
268#endif
269 // Need to insert a BitCast.
270 // For an integer Load instruction with a single use, the load + IntToPtr
271 // cast will be simplified into a pointer load:
272 // %v = load i64, i64* %a.ip, align 8
273 // %v.cast = inttoptr i64 %v to float **
274 // ==>
275 // %v.ptrp = bitcast i64 * %a.ip to float **
276 // %v.cast = load float *, float ** %v.ptrp, align 8
// Cache one cast per distinct incoming value so repeated values share it.
277 Instruction *&CI = Casts[IncomingVal];
278 if (!CI) {
279 CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
280 IncomingVal->getName() + ".ptr");
281 if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
// Insert immediately after the defining instruction; for a PHI, drop to
// the block's first insertion point (casts cannot sit among PHIs).
282 BasicBlock::iterator InsertPos(IncomingI);
283 InsertPos++;
284 BasicBlock *BB = IncomingI->getParent();
285 if (isa<PHINode>(IncomingI))
286 InsertPos = BB->getFirstInsertionPt();
287 assert(InsertPos != BB->end() && "should have checked above");
288 InsertNewInstBefore(CI, InsertPos);
289 } else {
// Non-instruction values (e.g. arguments) get their cast in the entry block.
290 auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
291 InsertNewInstBefore(CI, InsertBB->getFirstInsertionPt());
292 }
293 }
294 NewPtrPHI->addIncoming(CI, IncomingBB);
295 }
296
297 // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
298 // to make sure another transform can't undo it in the meantime.
299 replaceInstUsesWith(*IntToPtr, NewPtrPHI);
300 eraseInstFromFunction(*IntToPtr);
// NOTE(review): one statement line (doxygen line 301) is missing here —
// presumably "eraseInstFromFunction(PN);", mirroring the branch above.
302 return true;
303}
304
305// Remove RoundTrip IntToPtr/PtrToInt Cast on PHI-Operand and
306// fold Phi-operand to bitcast.
// NOTE(review): the signature line (doxygen line 307, presumably
// "Instruction *InstCombinerImpl::foldPHIArgIntToPtrToPHI(PHINode &PN) {")
// was dropped by the listing extraction.
308 // convert ptr2int ( phi[ int2ptr(ptr2int(x))] ) --> ptr2int ( phi [ x ] )
309 // Make sure all uses of phi are ptr2int.
310 if (!all_of(PN.users(), [](User *U) { return isa<PtrToIntInst>(U); }))
311 return nullptr;
312
313 // Iterating over all operands to check presence of target pointers for
314 // optimization.
// Strip each operand's inttoptr(ptrtoint(x)) round-trip in place; return
// the (modified) PHI only if at least one operand changed, else nullptr.
315 bool OperandWithRoundTripCast = false;
316 for (unsigned OpNum = 0; OpNum != PN.getNumIncomingValues(); ++OpNum) {
317 if (auto *NewOp =
318 simplifyIntToPtrRoundTripCast(PN.getIncomingValue(OpNum))) {
319 replaceOperand(PN, OpNum, NewOp);
320 OperandWithRoundTripCast = true;
321 }
322 }
323 if (!OperandWithRoundTripCast)
324 return nullptr;
325 return &PN;
326}
327
328/// If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)],
329/// turn this into a phi[a,c] and phi[b,d] and a single insertvalue.
// NOTE(review): the signature lines (doxygen lines 330-331, presumably
// "Instruction *InstCombinerImpl::foldPHIArgInsertValueInstructionIntoPHI(
//      PHINode &PN) {") were dropped by the listing extraction.
332 auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));
333
334 // Scan to see if all operands are `insertvalue`'s with the same indices,
335 // and all have a single use.
336 for (Value *V : drop_begin(PN.incoming_values())) {
337 auto *I = dyn_cast<InsertValueInst>(V);
338 if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
339 return nullptr;
340 }
341
342 // For each operand of an `insertvalue`
// insertvalue has two value operands (aggregate, inserted element); build
// one PHI per operand position.
343 std::array<PHINode *, 2> NewOperands;
344 for (int OpIdx : {0, 1}) {
345 auto *&NewOperand = NewOperands[OpIdx];
346 // Create a new PHI node to receive the values the operand has in each
347 // incoming basic block.
348 NewOperand = PHINode::Create(
349 FirstIVI->getOperand(OpIdx)->getType(), PN.getNumIncomingValues(),
350 FirstIVI->getOperand(OpIdx)->getName() + ".pn");
351 // And populate each operand's PHI with said values.
352 for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
353 NewOperand->addIncoming(
354 cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
355 std::get<0>(Incoming));
356 InsertNewInstBefore(NewOperand, PN.getIterator());
357 }
358
359 // And finally, create `insertvalue` over the newly-formed PHI nodes.
360 auto *NewIVI = InsertValueInst::Create(NewOperands[0], NewOperands[1],
361 FirstIVI->getIndices(), PN.getName());
362
363 PHIArgMergedDebugLoc(NewIVI, PN);
364 ++NumPHIsOfInsertValues;
365 return NewIVI;
366}
367
368/// If we have something like phi [extractvalue(a,0), extractvalue(b,0)],
369/// turn this into a phi[a,b] and a single extractvalue.
// NOTE(review): the signature lines (doxygen lines 370-371, presumably
// "Instruction *InstCombinerImpl::foldPHIArgExtractValueInstructionIntoPHI(
//      PHINode &PN) {") were dropped by the listing extraction.
372 auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));
373
374 // Scan to see if all operands are `extractvalue`'s with the same indices,
375 // and all have a single use.
// Unlike the insertvalue fold, the aggregate types must also match, since
// the new PHI is typed by the aggregate operand.
376 for (Value *V : drop_begin(PN.incoming_values())) {
377 auto *I = dyn_cast<ExtractValueInst>(V);
378 if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
379 I->getAggregateOperand()->getType() !=
380 FirstEVI->getAggregateOperand()->getType())
381 return nullptr;
382 }
383
384 // Create a new PHI node to receive the values the aggregate operand has
385 // in each incoming basic block.
386 auto *NewAggregateOperand = PHINode::Create(
387 FirstEVI->getAggregateOperand()->getType(), PN.getNumIncomingValues(),
388 FirstEVI->getAggregateOperand()->getName() + ".pn");
389 // And populate the PHI with said values.
390 for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
391 NewAggregateOperand->addIncoming(
392 cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
393 std::get<0>(Incoming));
394 InsertNewInstBefore(NewAggregateOperand, PN.getIterator());
395
396 // And finally, create `extractvalue` over the newly-formed PHI nodes.
397 auto *NewEVI = ExtractValueInst::Create(NewAggregateOperand,
398 FirstEVI->getIndices(), PN.getName());
399
400 PHIArgMergedDebugLoc(NewEVI, PN);
401 ++NumPHIsOfExtractValues;
402 return NewEVI;
403}
404
405/// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
406/// adds all have a single user, turn this into a phi and a single binop.
// NOTE(review): the signature line (doxygen line 407, presumably
// "Instruction *InstCombinerImpl::foldPHIArgBinOpIntoPHI(PHINode &PN) {")
// was dropped by the listing extraction.
408 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
409 assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
410 unsigned Opc = FirstInst->getOpcode();
// LHSVal/RHSVal track the common operand on each side; nulled out the
// moment any incoming instruction disagrees (then that side needs a PHI).
411 Value *LHSVal = FirstInst->getOperand(0);
412 Value *RHSVal = FirstInst->getOperand(1);
413
414 Type *LHSType = LHSVal->getType();
415 Type *RHSType = RHSVal->getType();
416
417 // Scan to see if all operands are the same opcode, and all have one user.
418 for (Value *V : drop_begin(PN.incoming_values())) {
419 Instruction *I = dyn_cast<Instruction>(V);
420 if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
421 // Verify type of the LHS matches so we don't fold cmp's of different
422 // types.
423 I->getOperand(0)->getType() != LHSType ||
424 I->getOperand(1)->getType() != RHSType)
425 return nullptr;
426
427 // If they are CmpInst instructions, check their predicates
428 if (CmpInst *CI = dyn_cast<CmpInst>(I))
429 if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
430 return nullptr;
431
432 // Keep track of which operand needs a phi node.
433 if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
434 if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
435 }
436
437 // If both LHS and RHS would need a PHI, don't do this transformation,
438 // because it would increase the number of PHIs entering the block,
439 // which leads to higher register pressure. This is especially
440 // bad when the PHIs are in the header of a loop.
441 if (!LHSVal && !RHSVal)
442 return nullptr;
443
444 // Otherwise, this is safe to transform!
445
446 Value *InLHS = FirstInst->getOperand(0);
447 Value *InRHS = FirstInst->getOperand(1);
448 PHINode *NewLHS = nullptr, *NewRHS = nullptr;
// Build a PHI only for the side whose operand varies across predecessors;
// seed it with the first incoming value.
449 if (!LHSVal) {
450 NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
451 FirstInst->getOperand(0)->getName() + ".pn");
452 NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
453 InsertNewInstBefore(NewLHS, PN.getIterator());
454 LHSVal = NewLHS;
455 }
456
457 if (!RHSVal) {
458 NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
459 FirstInst->getOperand(1)->getName() + ".pn");
460 NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
461 InsertNewInstBefore(NewRHS, PN.getIterator());
462 RHSVal = NewRHS;
463 }
464
465 // Add all operands to the new PHIs.
466 if (NewLHS || NewRHS) {
467 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
468 BasicBlock *InBB = std::get<0>(Incoming);
469 Value *InVal = std::get<1>(Incoming);
470 Instruction *InInst = cast<Instruction>(InVal);
471 if (NewLHS) {
472 Value *NewInLHS = InInst->getOperand(0);
473 NewLHS->addIncoming(NewInLHS, InBB);
474 }
475 if (NewRHS) {
476 Value *NewInRHS = InInst->getOperand(1);
477 NewRHS->addIncoming(NewInRHS, InBB);
478 }
479 }
480 }
481
482 if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
483 CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
484 LHSVal, RHSVal);
485 PHIArgMergedDebugLoc(NewCI, PN);
486 return NewCI;
487 }
488
489 BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
490 BinaryOperator *NewBinOp =
491 BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
492
// The merged binop may only keep flags (nsw/nuw/exact/FMF) common to ALL
// incoming instructions: copy from the first, then intersect with the rest.
493 NewBinOp->copyIRFlags(PN.getIncomingValue(0));
494
495 for (Value *V : drop_begin(PN.incoming_values()))
496 NewBinOp->andIRFlags(V);
497
498 PHIArgMergedDebugLoc(NewBinOp, PN);
499 return NewBinOp;
500}
501
// Fold phi-of-GEPs into a single GEP whose varying operands are PHIs.
// NOTE(review): the signature line (doxygen line 502, presumably
// "Instruction *InstCombinerImpl::foldPHIArgGEPIntoPHI(PHINode &PN) {")
// was dropped by the listing extraction.
503 GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));
504
// FixedOperands starts as a copy of the first GEP's operands; entries that
// differ across incoming GEPs are nulled out and later replaced by PHIs.
505 SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
506 FirstInst->op_end());
507 // This is true if all GEP bases are allocas and if all indices into them are
508 // constants.
509 bool AllBasePointersAreAllocas = true;
510
511 // We don't want to replace this phi if the replacement would require
512 // more than one phi, which leads to higher register pressure. This is
513 // especially bad when the PHIs are in the header of a loop.
514 bool NeededPhi = false;
515
516 bool AllInBounds = true;
517
518 // Scan to see if all operands are the same opcode, and all have one user.
519 for (Value *V : drop_begin(PN.incoming_values())) {
520 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V);
521 if (!GEP || !GEP->hasOneUser() ||
522 GEP->getSourceElementType() != FirstInst->getSourceElementType() ||
523 GEP->getNumOperands() != FirstInst->getNumOperands())
524 return nullptr;
525
// inbounds survives on the merged GEP only if every incoming GEP has it.
526 AllInBounds &= GEP->isInBounds();
527
528 // Keep track of whether or not all GEPs are of alloca pointers.
529 if (AllBasePointersAreAllocas &&
530 (!isa<AllocaInst>(GEP->getOperand(0)) ||
531 !GEP->hasAllConstantIndices()))
532 AllBasePointersAreAllocas = false;
533
534 // Compare the operand lists.
535 for (unsigned Op = 0, E = FirstInst->getNumOperands(); Op != E; ++Op) {
536 if (FirstInst->getOperand(Op) == GEP->getOperand(Op))
537 continue;
538
539 // Don't merge two GEPs when two operands differ (introducing phi nodes)
540 // if one of the PHIs has a constant for the index. The index may be
541 // substantially cheaper to compute for the constants, so making it a
542 // variable index could pessimize the path. This also handles the case
543 // for struct indices, which must always be constant.
544 if (isa<ConstantInt>(FirstInst->getOperand(Op)) ||
545 isa<ConstantInt>(GEP->getOperand(Op)))
546 return nullptr;
547
548 if (FirstInst->getOperand(Op)->getType() !=
549 GEP->getOperand(Op)->getType())
550 return nullptr;
551
552 // If we already needed a PHI for an earlier operand, and another operand
553 // also requires a PHI, we'd be introducing more PHIs than we're
554 // eliminating, which increases register pressure on entry to the PHI's
555 // block.
556 if (NeededPhi)
557 return nullptr;
558
559 FixedOperands[Op] = nullptr; // Needs a PHI.
560 NeededPhi = true;
561 }
562 }
563
564 // If all of the base pointers of the PHI'd GEPs are from allocas, don't
565 // bother doing this transformation. At best, this will just save a bit of
566 // offset calculation, but all the predecessors will have to materialize the
567 // stack address into a register anyway. We'd actually rather *clone* the
568 // load up into the predecessors so that we have a load of a gep of an alloca,
569 // which can usually all be folded into the load.
570 if (AllBasePointersAreAllocas)
571 return nullptr;
572
573 // Otherwise, this is safe to transform. Insert PHI nodes for each operand
574 // that is variable.
575 SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
576
577 bool HasAnyPHIs = false;
578 for (unsigned I = 0, E = FixedOperands.size(); I != E; ++I) {
579 if (FixedOperands[I])
580 continue; // operand doesn't need a phi.
581 Value *FirstOp = FirstInst->getOperand(I);
582 PHINode *NewPN =
583 PHINode::Create(FirstOp->getType(), E, FirstOp->getName() + ".pn");
584 InsertNewInstBefore(NewPN, PN.getIterator());
585
586 NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
587 OperandPhis[I] = NewPN;
588 FixedOperands[I] = NewPN;
589 HasAnyPHIs = true;
590 }
591
592 // Add all operands to the new PHIs.
593 if (HasAnyPHIs) {
594 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
595 BasicBlock *InBB = std::get<0>(Incoming);
596 Value *InVal = std::get<1>(Incoming);
597 GetElementPtrInst *InGEP = cast<GetElementPtrInst>(InVal);
598
599 for (unsigned Op = 0, E = OperandPhis.size(); Op != E; ++Op)
600 if (PHINode *OpPhi = OperandPhis[Op])
601 OpPhi->addIncoming(InGEP->getOperand(Op), InBB);
602 }
603 }
604
605 Value *Base = FixedOperands[0];
606 GetElementPtrInst *NewGEP =
// NOTE(review): one line (doxygen line 607) is missing here — presumably
// "GetElementPtrInst::Create(FirstInst->getSourceElementType(), Base,"
// feeding the slice(1) index list below. Verify against upstream source.
608 ArrayRef(FixedOperands).slice(1));
609 if (AllInBounds) NewGEP->setIsInBounds();
610 PHIArgMergedDebugLoc(NewGEP, PN);
611 return NewGEP;
612}
613
614/// Return true if we know that it is safe to sink the load out of the block
615/// that defines it. This means that it must be obvious the value of the load is
616/// not changed from the point of the load to the end of the block it is in.
617///
618/// Finally, it is safe, but not profitable, to sink a load targeting a
619/// non-address-taken alloca. Doing so will cause us to not promote the alloca
620/// to a register.
// NOTE(review): the signature line (doxygen line 621, presumably
// "static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {") was dropped
// by the listing extraction.
622 BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();
623
// Scan from the load to the end of its block: any possible memory write
// (other than an inaccessible-memory-only call) makes sinking unsafe.
624 for (++BBI; BBI != E; ++BBI)
625 if (BBI->mayWriteToMemory()) {
626 // Calls that only access inaccessible memory do not block sinking the
627 // load.
628 if (auto *CB = dyn_cast<CallBase>(BBI))
629 if (CB->onlyAccessesInaccessibleMemory())
630 continue;
631 return false;
632 }
633
634 // Check for non-address taken alloca. If not address-taken already, it isn't
635 // profitable to do this xform.
636 if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
637 bool IsAddressTaken = false;
638 for (User *U : AI->users()) {
639 if (isa<LoadInst>(U)) continue;
640 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
641 // If storing TO the alloca, then the address isn't taken.
642 if (SI->getOperand(1) == AI) continue;
643 }
644 IsAddressTaken = true;
645 break;
646 }
647
// Sinking would block mem2reg promotion of this promotable alloca.
648 if (!IsAddressTaken && AI->isStaticAlloca())
649 return false;
650 }
651
652 // If this load is a load from a GEP with a constant offset from an alloca,
653 // then we don't want to sink it. In its present form, it will be
654 // load [constant stack offset]. Sinking it will cause us to have to
655 // materialize the stack addresses in each predecessor in a register only to
656 // do a shared load from register in the successor.
657 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
658 if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
659 if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
660 return false;
661
662 return true;
663}
664
// Fold phi-of-loads into a PHI of the pointers plus a single load.
// NOTE(review): the signature line (doxygen line 665, presumably
// "Instruction *InstCombinerImpl::foldPHIArgLoadIntoPHI(PHINode &PN) {")
// was dropped by the listing extraction.
666 LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
667
668 // Can't forward swifterror through a phi.
669 if (FirstLI->getOperand(0)->isSwiftError())
670 return nullptr;
671
672 // FIXME: This is overconservative; this transform is allowed in some cases
673 // for atomic operations.
674 if (FirstLI->isAtomic())
675 return nullptr;
676
677 // When processing loads, we need to propagate two bits of information to the
678 // sunk load: whether it is volatile, and what its alignment is.
679 bool IsVolatile = FirstLI->isVolatile();
680 Align LoadAlignment = FirstLI->getAlign();
681 const unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
682
683 // We can't sink the load if the loaded value could be modified between the
684 // load and the PHI.
685 if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
// NOTE(review): one line (doxygen line 686) is missing here — presumably
// "!isSafeAndProfitableToSinkLoad(FirstLI))" completing this condition.
687 return nullptr;
688
689 // If the PHI is of volatile loads and the load block has multiple
690 // successors, sinking it would remove a load of the volatile value from
691 // the path through the other successor.
692 if (IsVolatile &&
693 FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
694 return nullptr;
695
// Validate every remaining incoming load against the first one.
696 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
697 BasicBlock *InBB = std::get<0>(Incoming);
698 Value *InVal = std::get<1>(Incoming);
699 LoadInst *LI = dyn_cast<LoadInst>(InVal);
700 if (!LI || !LI->hasOneUser() || LI->isAtomic())
701 return nullptr;
702
703 // Make sure all arguments are the same type of operation.
704 if (LI->isVolatile() != IsVolatile ||
705 LI->getPointerAddressSpace() != LoadAddrSpace)
706 return nullptr;
707
708 // Can't forward swifterror through a phi.
709 if (LI->getOperand(0)->isSwiftError())
710 return nullptr;
711
712 // We can't sink the load if the loaded value could be modified between
713 // the load and the PHI.
714 if (LI->getParent() != InBB || !isSafeAndProfitableToSinkLoad(LI))
715 return nullptr;
716
// The merged load may only claim the weakest alignment of all inputs.
717 LoadAlignment = std::min(LoadAlignment, LI->getAlign());
718
719 // If the PHI is of volatile loads and the load block has multiple
720 // successors, sinking it would remove a load of the volatile value from
721 // the path through the other successor.
722 if (IsVolatile && LI->getParent()->getTerminator()->getNumSuccessors() != 1)
723 return nullptr;
724 }
725
726 // Okay, they are all the same operation. Create a new PHI node of the
727 // correct type, and PHI together all of the LHS's of the instructions.
728 PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
// NOTE(review): one line (doxygen line 729) is missing here — presumably
// "PN.getNumIncomingValues()," (the reserved-operand count argument).
730 PN.getName()+".in");
731
732 Value *InVal = FirstLI->getOperand(0);
733 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
734 LoadInst *NewLI =
735 new LoadInst(FirstLI->getType(), NewPN, "", IsVolatile, LoadAlignment);
736
// Metadata kinds that may be carried over / intersected onto the new load.
737 unsigned KnownIDs[] = {
738 LLVMContext::MD_tbaa,
739 LLVMContext::MD_range,
740 LLVMContext::MD_invariant_load,
741 LLVMContext::MD_alias_scope,
742 LLVMContext::MD_noalias,
743 LLVMContext::MD_nonnull,
744 LLVMContext::MD_align,
745 LLVMContext::MD_dereferenceable,
746 LLVMContext::MD_dereferenceable_or_null,
747 LLVMContext::MD_access_group,
748 LLVMContext::MD_noundef,
749 };
750
751 for (unsigned ID : KnownIDs)
752 NewLI->setMetadata(ID, FirstLI->getMetadata(ID));
753
754 // Add all operands to the new PHI and combine TBAA metadata.
755 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
756 BasicBlock *BB = std::get<0>(Incoming);
757 Value *V = std::get<1>(Incoming);
758 LoadInst *LI = cast<LoadInst>(V);
759 combineMetadata(NewLI, LI, KnownIDs, true);
760 Value *NewInVal = LI->getOperand(0);
761 if (NewInVal != InVal)
762 InVal = nullptr;
763 NewPN->addIncoming(NewInVal, BB);
764 }
765
766 if (InVal) {
767 // The new PHI unions all of the same values together. This is really
768 // common, so we handle it intelligently here for compile-time speed.
769 NewLI->setOperand(0, InVal);
770 delete NewPN;
771 } else {
772 InsertNewInstBefore(NewPN, PN.getIterator());
773 }
774
775 // If this was a volatile load that we are merging, make sure to loop through
776 // and mark all the input loads as non-volatile. If we don't do this, we will
777 // insert a new volatile load and the old ones will not be deletable.
778 if (IsVolatile)
779 for (Value *IncValue : PN.incoming_values())
780 cast<LoadInst>(IncValue)->setVolatile(false);
781
782 PHIArgMergedDebugLoc(NewLI, PN);
783 return NewLI;
784}
785
786/// TODO: This function could handle other cast types, but then it might
787/// require special-casing a cast from the 'i1' type. See the comment in
788/// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
// NOTE(review): the signature line (doxygen line 789, presumably
// "Instruction *InstCombinerImpl::foldPHIArgZextsIntoPHI(PHINode &Phi) {")
// was dropped by the listing extraction.
790 // We cannot create a new instruction after the PHI if the terminator is an
791 // EHPad because there is no valid insertion point.
792 if (Instruction *TI = Phi.getParent()->getTerminator())
793 if (TI->isEHPad())
794 return nullptr;
795
796 // Early exit for the common case of a phi with two operands. These are
797 // handled elsewhere. See the comment below where we check the count of zexts
798 // and constants for more details.
799 unsigned NumIncomingValues = Phi.getNumIncomingValues();
800 if (NumIncomingValues < 3)
801 return nullptr;
802
803 // Find the narrower type specified by the first zext.
804 Type *NarrowType = nullptr;
805 for (Value *V : Phi.incoming_values()) {
806 if (auto *Zext = dyn_cast<ZExtInst>(V)) {
807 NarrowType = Zext->getSrcTy();
808 break;
809 }
810 }
811 if (!NarrowType)
812 return nullptr;
813
814 // Walk the phi operands checking that we only have zexts or constants that
815 // we can shrink for free. Store the new operands for the new phi.
816 SmallVector<Value *, 4> NewIncoming;
817 unsigned NumZexts = 0;
818 unsigned NumConsts = 0;
819 for (Value *V : Phi.incoming_values()) {
820 if (auto *Zext = dyn_cast<ZExtInst>(V)) {
821 // All zexts must be identical and have one user.
822 if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
823 return nullptr;
824 NewIncoming.push_back(Zext->getOperand(0));
825 NumZexts++;
826 } else if (auto *C = dyn_cast<Constant>(V)) {
827 // Make sure that constants can fit in the new type.
// A constant is acceptable only if zext(trunc(C)) == C, i.e. the
// truncation is lossless for unsigned interpretation.
828 Constant *Trunc = getLosslessUnsignedTrunc(C, NarrowType);
829 if (!Trunc)
830 return nullptr;
831 NewIncoming.push_back(Trunc);
832 NumConsts++;
833 } else {
834 // If it's not a cast or a constant, bail out.
835 return nullptr;
836 }
837 }
838
839 // The more common cases of a phi with no constant operands or just one
840 // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
841 // respectively. foldOpIntoPhi() wants to do the opposite transform that is
842 // performed here. It tries to replicate a cast in the phi operand's basic
843 // block to expose other folding opportunities. Thus, InstCombine will
844 // infinite loop without this check.
845 if (NumConsts == 0 || NumZexts < 2)
846 return nullptr;
847
848 // All incoming values are zexts or constants that are safe to truncate.
849 // Create a new phi node of the narrow type, phi together all of the new
850 // operands, and zext the result back to the original type.
851 PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
852 Phi.getName() + ".shrunk");
853 for (unsigned I = 0; I != NumIncomingValues; ++I)
854 NewPhi->addIncoming(NewIncoming[I], Phi.getIncomingBlock(I));
855
856 InsertNewInstBefore(NewPhi, Phi.getIterator());
857 return CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
858}
859
860/// If all operands to a PHI node are the same "unary" operator and they all are
861/// only used by the PHI, PHI together their inputs, and do the operation once,
862/// to the result of the PHI.
// NOTE(review): the signature line (doxygen line 863, presumably
// "Instruction *InstCombinerImpl::foldPHIArgOpIntoPHI(PHINode &PN) {")
// was dropped by the listing extraction; the listing is also TRUNCATED
// before this function's end (see note at the bottom).
864 // We cannot create a new instruction after the PHI if the terminator is an
865 // EHPad because there is no valid insertion point.
866 if (Instruction *TI = PN.getParent()->getTerminator())
867 if (TI->isEHPad())
868 return nullptr;
869
870 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
871
// Dispatch to the specialized folds based on the first incoming value's
// opcode kind.
872 if (isa<GetElementPtrInst>(FirstInst))
873 return foldPHIArgGEPIntoPHI(PN);
874 if (isa<LoadInst>(FirstInst))
875 return foldPHIArgLoadIntoPHI(PN);
876 if (isa<InsertValueInst>(FirstInst))
// NOTE(review): one line (doxygen line 877) is missing here — presumably
// "return foldPHIArgInsertValueInstructionIntoPHI(PN);".
878 if (isa<ExtractValueInst>(FirstInst))
// NOTE(review): one line (doxygen line 879) is missing here — presumably
// "return foldPHIArgExtractValueInstructionIntoPHI(PN);".
881 // Scan the instruction, looking for input operations that can be folded away.
882 // If all input operands to the phi are the same instruction (e.g. a cast from
883 // the same type or "+42") we can pull the operation through the PHI, reducing
884 // code size and simplifying code.
885 Constant *ConstantOp = nullptr;
886 Type *CastSrcTy = nullptr;
887
888 if (isa<CastInst>(FirstInst)) {
889 CastSrcTy = FirstInst->getOperand(0)->getType();
890
891 // Be careful about transforming integer PHIs. We don't want to pessimize
892 // the code by turning an i32 into an i1293.
893 if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
894 if (!shouldChangeType(PN.getType(), CastSrcTy))
895 return nullptr;
896 }
897 } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
898 // Can fold binop, compare or shift here if the RHS is a constant,
899 // otherwise call FoldPHIArgBinOpIntoPHI.
900 ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
901 if (!ConstantOp)
902 return foldPHIArgBinOpIntoPHI(PN);
903 } else {
904 return nullptr; // Cannot fold this operation.
905 }
906
907 // Check to see if all arguments are the same operation.
908 for (Value *V : drop_begin(PN.incoming_values())) {
909 Instruction *I = dyn_cast<Instruction>(V);
910 if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
911 return nullptr;
912 if (CastSrcTy) {
913 if (I->getOperand(0)->getType() != CastSrcTy)
914 return nullptr; // Cast operation must match.
915 } else if (I->getOperand(1) != ConstantOp) {
916 return nullptr;
917 }
918 }
919
920 // Okay, they are all the same operation. Create a new PHI node of the
921 // correct type, and PHI together all of the LHS's of the instructions.
922 PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
// NOTE(review): one line (doxygen line 923) is missing here — presumably
// "PN.getNumIncomingValues()," (the reserved-operand count argument).
924 PN.getName()+".in");
925
926 Value *InVal = FirstInst->getOperand(0);
927 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
928
929 // Add all operands to the new PHI.
930 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
931 BasicBlock *BB = std::get<0>(Incoming);
932 Value *V = std::get<1>(Incoming);
933 Value *NewInVal = cast<Instruction>(V)->getOperand(0);
934 if (NewInVal != InVal)
935 InVal = nullptr;
936 NewPN->addIncoming(NewInVal, BB);
937 }
938
939 Value *PhiVal;
940 if (InVal) {
941 // The new PHI unions all of the same values together. This is really
942 // common, so we handle it intelligently here for compile-time speed.
943 PhiVal = InVal;
944 delete NewPN;
945 } else {
946 InsertNewInstBefore(NewPN, PN.getIterator());
// NOTE(review): the listing is truncated here, mid-function; the remainder
// of foldPHIArgOpIntoPHI (from doxygen line 947 onward) is not visible.
947 PhiVal = NewPN;
948 }
949
950 // Insert and return the new operation.
951 if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
952 CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
953 PN.getType());
954 PHIArgMergedDebugLoc(NewCI, PN);
955 return NewCI;
956 }
957
958 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
959 BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
960 BinOp->copyIRFlags(PN.getIncomingValue(0));
961
962 for (Value *V : drop_begin(PN.incoming_values()))
963 BinOp->andIRFlags(V);
964
965 PHIArgMergedDebugLoc(BinOp, PN);
966 return BinOp;
967 }
968
969 CmpInst *CIOp = cast<CmpInst>(FirstInst);
970 CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
971 PhiVal, ConstantOp);
972 PHIArgMergedDebugLoc(NewCI, PN);
973 return NewCI;
974}
975
976/// Return true if this PHI node is only used by a PHI node cycle that is dead.
977static bool isDeadPHICycle(PHINode *PN,
978 SmallPtrSetImpl<PHINode *> &PotentiallyDeadPHIs) {
979 if (PN->use_empty()) return true;
980 if (!PN->hasOneUse()) return false;
981
982 // Remember this node, and if we find the cycle, return.
983 if (!PotentiallyDeadPHIs.insert(PN).second)
984 return true;
985
986 // Don't scan crazily complex things.
987 if (PotentiallyDeadPHIs.size() == 16)
988 return false;
989
990 if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
991 return isDeadPHICycle(PU, PotentiallyDeadPHIs);
992
993 return false;
994}
995
996/// Return true if this phi node is always equal to NonPhiInVal.
997/// This happens with mutually cyclic phi nodes like:
998/// z = some value; x = phi (y, z); y = phi (x, z)
999static bool PHIsEqualValue(PHINode *PN, Value *&NonPhiInVal,
1000 SmallPtrSetImpl<PHINode *> &ValueEqualPHIs) {
1001 // See if we already saw this PHI node.
1002 if (!ValueEqualPHIs.insert(PN).second)
1003 return true;
1004
1005 // Don't scan crazily complex things.
1006 if (ValueEqualPHIs.size() == 16)
1007 return false;
1008
1009 // Scan the operands to see if they are either phi nodes or are equal to
1010 // the value.
1011 for (Value *Op : PN->incoming_values()) {
1012 if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
1013 if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs)) {
1014 if (NonPhiInVal)
1015 return false;
1016 NonPhiInVal = OpPN;
1017 }
1018 } else if (Op != NonPhiInVal)
1019 return false;
1020 }
1021
1022 return true;
1023}
1024
1025/// Return an existing non-zero constant if this phi node has one, otherwise
1026/// return constant 1.
1028 assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
1029 for (Value *V : PN.operands())
1030 if (auto *ConstVA = dyn_cast<ConstantInt>(V))
1031 if (!ConstVA->isZero())
1032 return ConstVA;
1033 return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
1034}
1035
1036namespace {
1037struct PHIUsageRecord {
1038 unsigned PHIId; // The ID # of the PHI (something determinstic to sort on)
1039 unsigned Shift; // The amount shifted.
1040 Instruction *Inst; // The trunc instruction.
1041
1042 PHIUsageRecord(unsigned Pn, unsigned Sh, Instruction *User)
1043 : PHIId(Pn), Shift(Sh), Inst(User) {}
1044
1045 bool operator<(const PHIUsageRecord &RHS) const {
1046 if (PHIId < RHS.PHIId) return true;
1047 if (PHIId > RHS.PHIId) return false;
1048 if (Shift < RHS.Shift) return true;
1049 if (Shift > RHS.Shift) return false;
1050 return Inst->getType()->getPrimitiveSizeInBits() <
1052 }
1053};
1054
1055struct LoweredPHIRecord {
1056 PHINode *PN; // The PHI that was lowered.
1057 unsigned Shift; // The amount shifted.
1058 unsigned Width; // The width extracted.
1059
1060 LoweredPHIRecord(PHINode *Phi, unsigned Sh, Type *Ty)
1061 : PN(Phi), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
1062
1063 // Ctor form used by DenseMap.
1064 LoweredPHIRecord(PHINode *Phi, unsigned Sh) : PN(Phi), Shift(Sh), Width(0) {}
1065};
1066} // namespace
1067
1068namespace llvm {
1069 template<>
1070 struct DenseMapInfo<LoweredPHIRecord> {
1071 static inline LoweredPHIRecord getEmptyKey() {
1072 return LoweredPHIRecord(nullptr, 0);
1073 }
1074 static inline LoweredPHIRecord getTombstoneKey() {
1075 return LoweredPHIRecord(nullptr, 1);
1076 }
1077 static unsigned getHashValue(const LoweredPHIRecord &Val) {
1078 return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
1079 (Val.Width>>3);
1080 }
1081 static bool isEqual(const LoweredPHIRecord &LHS,
1082 const LoweredPHIRecord &RHS) {
1083 return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
1084 LHS.Width == RHS.Width;
1085 }
1086 };
1087} // namespace llvm
1088
1089
1090/// This is an integer PHI and we know that it has an illegal type: see if it is
1091/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
1092/// the various pieces being extracted. This sort of thing is introduced when
1093/// SROA promotes an aggregate to large integer values.
1094///
1095/// TODO: The user of the trunc may be an bitcast to float/double/vector or an
1096/// inttoptr. We should produce new PHIs in the right type.
1097///
1099 // PHIUsers - Keep track of all of the truncated values extracted from a set
1100 // of PHIs, along with their offset. These are the things we want to rewrite.
1102
1103 // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
1104 // nodes which are extracted from. PHIsToSlice is a set we use to avoid
1105 // revisiting PHIs, PHIsInspected is a ordered list of PHIs that we need to
1106 // check the uses of (to ensure they are all extracts).
1107 SmallVector<PHINode*, 8> PHIsToSlice;
1108 SmallPtrSet<PHINode*, 8> PHIsInspected;
1109
1110 PHIsToSlice.push_back(&FirstPhi);
1111 PHIsInspected.insert(&FirstPhi);
1112
1113 for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
1114 PHINode *PN = PHIsToSlice[PHIId];
1115
1116 // Scan the input list of the PHI. If any input is an invoke, and if the
1117 // input is defined in the predecessor, then we won't be split the critical
1118 // edge which is required to insert a truncate. Because of this, we have to
1119 // bail out.
1120 for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
1121 BasicBlock *BB = std::get<0>(Incoming);
1122 Value *V = std::get<1>(Incoming);
1123 InvokeInst *II = dyn_cast<InvokeInst>(V);
1124 if (!II)
1125 continue;
1126 if (II->getParent() != BB)
1127 continue;
1128
1129 // If we have a phi, and if it's directly in the predecessor, then we have
1130 // a critical edge where we need to put the truncate. Since we can't
1131 // split the edge in instcombine, we have to bail out.
1132 return nullptr;
1133 }
1134
1135 // If the incoming value is a PHI node before a catchswitch, we cannot
1136 // extract the value within that BB because we cannot insert any non-PHI
1137 // instructions in the BB.
1138 for (auto *Pred : PN->blocks())
1139 if (Pred->getFirstInsertionPt() == Pred->end())
1140 return nullptr;
1141
1142 for (User *U : PN->users()) {
1143 Instruction *UserI = cast<Instruction>(U);
1144
1145 // If the user is a PHI, inspect its uses recursively.
1146 if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
1147 if (PHIsInspected.insert(UserPN).second)
1148 PHIsToSlice.push_back(UserPN);
1149 continue;
1150 }
1151
1152 // Truncates are always ok.
1153 if (isa<TruncInst>(UserI)) {
1154 PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
1155 continue;
1156 }
1157
1158 // Otherwise it must be a lshr which can only be used by one trunc.
1159 if (UserI->getOpcode() != Instruction::LShr ||
1160 !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
1161 !isa<ConstantInt>(UserI->getOperand(1)))
1162 return nullptr;
1163
1164 // Bail on out of range shifts.
1165 unsigned SizeInBits = UserI->getType()->getScalarSizeInBits();
1166 if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
1167 return nullptr;
1168
1169 unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
1170 PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
1171 }
1172 }
1173
1174 // If we have no users, they must be all self uses, just nuke the PHI.
1175 if (PHIUsers.empty())
1176 return replaceInstUsesWith(FirstPhi, PoisonValue::get(FirstPhi.getType()));
1177
1178 // If this phi node is transformable, create new PHIs for all the pieces
1179 // extracted out of it. First, sort the users by their offset and size.
1180 array_pod_sort(PHIUsers.begin(), PHIUsers.end());
1181
1182 LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
1183 for (unsigned I = 1; I != PHIsToSlice.size(); ++I) dbgs()
1184 << "AND USER PHI #" << I << ": " << *PHIsToSlice[I] << '\n');
1185
1186 // PredValues - This is a temporary used when rewriting PHI nodes. It is
1187 // hoisted out here to avoid construction/destruction thrashing.
1189
1190 // ExtractedVals - Each new PHI we introduce is saved here so we don't
1191 // introduce redundant PHIs.
1193
1194 for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
1195 unsigned PHIId = PHIUsers[UserI].PHIId;
1196 PHINode *PN = PHIsToSlice[PHIId];
1197 unsigned Offset = PHIUsers[UserI].Shift;
1198 Type *Ty = PHIUsers[UserI].Inst->getType();
1199
1200 PHINode *EltPHI;
1201
1202 // If we've already lowered a user like this, reuse the previously lowered
1203 // value.
1204 if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
1205
1206 // Otherwise, Create the new PHI node for this user.
1207 EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
1208 PN->getName()+".off"+Twine(Offset), PN);
1209 assert(EltPHI->getType() != PN->getType() &&
1210 "Truncate didn't shrink phi?");
1211
1212 for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
1213 BasicBlock *Pred = std::get<0>(Incoming);
1214 Value *InVal = std::get<1>(Incoming);
1215 Value *&PredVal = PredValues[Pred];
1216
1217 // If we already have a value for this predecessor, reuse it.
1218 if (PredVal) {
1219 EltPHI->addIncoming(PredVal, Pred);
1220 continue;
1221 }
1222
1223 // Handle the PHI self-reuse case.
1224 if (InVal == PN) {
1225 PredVal = EltPHI;
1226 EltPHI->addIncoming(PredVal, Pred);
1227 continue;
1228 }
1229
1230 if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
1231 // If the incoming value was a PHI, and if it was one of the PHIs we
1232 // already rewrote it, just use the lowered value.
1233 if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
1234 PredVal = Res;
1235 EltPHI->addIncoming(PredVal, Pred);
1236 continue;
1237 }
1238 }
1239
1240 // Otherwise, do an extract in the predecessor.
1242 Value *Res = InVal;
1243 if (Offset)
1244 Res = Builder.CreateLShr(
1245 Res, ConstantInt::get(InVal->getType(), Offset), "extract");
1246 Res = Builder.CreateTrunc(Res, Ty, "extract.t");
1247 PredVal = Res;
1248 EltPHI->addIncoming(Res, Pred);
1249
1250 // If the incoming value was a PHI, and if it was one of the PHIs we are
1251 // rewriting, we will ultimately delete the code we inserted. This
1252 // means we need to revisit that PHI to make sure we extract out the
1253 // needed piece.
1254 if (PHINode *OldInVal = dyn_cast<PHINode>(InVal))
1255 if (PHIsInspected.count(OldInVal)) {
1256 unsigned RefPHIId =
1257 find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
1258 PHIUsers.push_back(
1259 PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res)));
1260 ++UserE;
1261 }
1262 }
1263 PredValues.clear();
1264
1265 LLVM_DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": "
1266 << *EltPHI << '\n');
1267 ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
1268 }
1269
1270 // Replace the use of this piece with the PHI node.
1271 replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
1272 }
1273
1274 // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
1275 // with poison.
1276 Value *Poison = PoisonValue::get(FirstPhi.getType());
1277 for (PHINode *PHI : drop_begin(PHIsToSlice))
1278 replaceInstUsesWith(*PHI, Poison);
1279 return replaceInstUsesWith(FirstPhi, Poison);
1280}
1281
1283 const DominatorTree &DT) {
1284 // Simplify the following patterns:
1285 // if (cond)
1286 // / \
1287 // ... ...
1288 // \ /
1289 // phi [true] [false]
1290 // and
1291 // switch (cond)
1292 // case v1: / \ case v2:
1293 // ... ...
1294 // \ /
1295 // phi [v1] [v2]
1296 // Make sure all inputs are constants.
1297 if (!all_of(PN.operands(), [](Value *V) { return isa<ConstantInt>(V); }))
1298 return nullptr;
1299
1300 BasicBlock *BB = PN.getParent();
1301 // Do not bother with unreachable instructions.
1302 if (!DT.isReachableFromEntry(BB))
1303 return nullptr;
1304
1305 // Determine which value the condition of the idom has for which successor.
1307 auto *IDom = DT.getNode(BB)->getIDom()->getBlock();
1308 Value *Cond;
1311 auto AddSucc = [&](ConstantInt *C, BasicBlock *Succ) {
1312 SuccForValue[C] = Succ;
1313 ++SuccCount[Succ];
1314 };
1315 if (auto *BI = dyn_cast<BranchInst>(IDom->getTerminator())) {
1316 if (BI->isUnconditional())
1317 return nullptr;
1318
1319 Cond = BI->getCondition();
1320 AddSucc(ConstantInt::getTrue(Context), BI->getSuccessor(0));
1321 AddSucc(ConstantInt::getFalse(Context), BI->getSuccessor(1));
1322 } else if (auto *SI = dyn_cast<SwitchInst>(IDom->getTerminator())) {
1323 Cond = SI->getCondition();
1324 ++SuccCount[SI->getDefaultDest()];
1325 for (auto Case : SI->cases())
1326 AddSucc(Case.getCaseValue(), Case.getCaseSuccessor());
1327 } else {
1328 return nullptr;
1329 }
1330
1331 if (Cond->getType() != PN.getType())
1332 return nullptr;
1333
1334 // Check that edges outgoing from the idom's terminators dominate respective
1335 // inputs of the Phi.
1336 std::optional<bool> Invert;
1337 for (auto Pair : zip(PN.incoming_values(), PN.blocks())) {
1338 auto *Input = cast<ConstantInt>(std::get<0>(Pair));
1339 BasicBlock *Pred = std::get<1>(Pair);
1340 auto IsCorrectInput = [&](ConstantInt *Input) {
1341 // The input needs to be dominated by the corresponding edge of the idom.
1342 // This edge cannot be a multi-edge, as that would imply that multiple
1343 // different condition values follow the same edge.
1344 auto It = SuccForValue.find(Input);
1345 return It != SuccForValue.end() && SuccCount[It->second] == 1 &&
1346 DT.dominates(BasicBlockEdge(IDom, It->second),
1347 BasicBlockEdge(Pred, BB));
1348 };
1349
1350 // Depending on the constant, the condition may need to be inverted.
1351 bool NeedsInvert;
1352 if (IsCorrectInput(Input))
1353 NeedsInvert = false;
1354 else if (IsCorrectInput(cast<ConstantInt>(ConstantExpr::getNot(Input))))
1355 NeedsInvert = true;
1356 else
1357 return nullptr;
1358
1359 // Make sure the inversion requirement is always the same.
1360 if (Invert && *Invert != NeedsInvert)
1361 return nullptr;
1362
1363 Invert = NeedsInvert;
1364 }
1365
1366 if (!*Invert)
1367 return Cond;
1368
1369 // This Phi is actually opposite to branching condition of IDom. We invert
1370 // the condition that will potentially open up some opportunities for
1371 // sinking.
1372 auto InsertPt = BB->getFirstInsertionPt();
1373 if (InsertPt != BB->end()) {
1374 Self.Builder.SetInsertPoint(&*BB, InsertPt);
1375 return Self.Builder.CreateNot(Cond);
1376 }
1377
1378 return nullptr;
1379}
1380
1381// Fold iv = phi(start, iv.next = iv2.next op start)
1382// where iv2 = phi(iv2.start, iv2.next = iv2 + iv2.step)
1383// and iv2.start op start = start
1384// to iv = iv2 op start
1386 BasicBlock *BB = PN.getParent();
1387 if (PN.getNumIncomingValues() != 2)
1388 return nullptr;
1389
1390 Value *Start;
1391 Instruction *IvNext;
1392 BinaryOperator *Iv2Next;
1393 auto MatchOuterIV = [&](Value *V1, Value *V2) {
1394 if (match(V2, m_c_BinOp(m_Specific(V1), m_BinOp(Iv2Next))) ||
1395 match(V2, m_GEP(m_Specific(V1), m_BinOp(Iv2Next)))) {
1396 Start = V1;
1397 IvNext = cast<Instruction>(V2);
1398 return true;
1399 }
1400 return false;
1401 };
1402
1403 if (!MatchOuterIV(PN.getIncomingValue(0), PN.getIncomingValue(1)) &&
1404 !MatchOuterIV(PN.getIncomingValue(1), PN.getIncomingValue(0)))
1405 return nullptr;
1406
1407 PHINode *Iv2;
1408 Value *Iv2Start, *Iv2Step;
1409 if (!matchSimpleRecurrence(Iv2Next, Iv2, Iv2Start, Iv2Step) ||
1410 Iv2->getParent() != BB)
1411 return nullptr;
1412
1413 auto *BO = dyn_cast<BinaryOperator>(IvNext);
1414 Constant *Identity =
1415 BO ? ConstantExpr::getBinOpIdentity(BO->getOpcode(), Iv2Start->getType())
1416 : Constant::getNullValue(Iv2Start->getType());
1417 if (Iv2Start != Identity)
1418 return nullptr;
1419
1420 Builder.SetInsertPoint(&*BB, BB->getFirstInsertionPt());
1421 if (!BO) {
1422 auto *GEP = cast<GEPOperator>(IvNext);
1423 return Builder.CreateGEP(GEP->getSourceElementType(), Start, Iv2, "",
1424 cast<GEPOperator>(IvNext)->isInBounds());
1425 }
1426
1427 assert(BO->isCommutative() && "Must be commutative");
1428 Value *Res = Builder.CreateBinOp(BO->getOpcode(), Iv2, Start);
1429 cast<Instruction>(Res)->copyIRFlags(BO);
1430 return Res;
1431}
1432
1433// PHINode simplification
1434//
1436 if (Value *V = simplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
1437 return replaceInstUsesWith(PN, V);
1438
1439 if (Instruction *Result = foldPHIArgZextsIntoPHI(PN))
1440 return Result;
1441
1442 if (Instruction *Result = foldPHIArgIntToPtrToPHI(PN))
1443 return Result;
1444
1445 // If all PHI operands are the same operation, pull them through the PHI,
1446 // reducing code size.
1447 auto *Inst0 = dyn_cast<Instruction>(PN.getIncomingValue(0));
1448 auto *Inst1 = dyn_cast<Instruction>(PN.getIncomingValue(1));
1449 if (Inst0 && Inst1 && Inst0->getOpcode() == Inst1->getOpcode() &&
1450 Inst0->hasOneUser())
1451 if (Instruction *Result = foldPHIArgOpIntoPHI(PN))
1452 return Result;
1453
1454 // If the incoming values are pointer casts of the same original value,
1455 // replace the phi with a single cast iff we can insert a non-PHI instruction.
1456 if (PN.getType()->isPointerTy() &&
1457 PN.getParent()->getFirstInsertionPt() != PN.getParent()->end()) {
1458 Value *IV0 = PN.getIncomingValue(0);
1459 Value *IV0Stripped = IV0->stripPointerCasts();
1460 // Set to keep track of values known to be equal to IV0Stripped after
1461 // stripping pointer casts.
1462 SmallPtrSet<Value *, 4> CheckedIVs;
1463 CheckedIVs.insert(IV0);
1464 if (IV0 != IV0Stripped &&
1465 all_of(PN.incoming_values(), [&CheckedIVs, IV0Stripped](Value *IV) {
1466 return !CheckedIVs.insert(IV).second ||
1467 IV0Stripped == IV->stripPointerCasts();
1468 })) {
1469 return CastInst::CreatePointerCast(IV0Stripped, PN.getType());
1470 }
1471 }
1472
1473 // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
1474 // this PHI only has a single use (a PHI), and if that PHI only has one use (a
1475 // PHI)... break the cycle.
1476 if (PN.hasOneUse()) {
1477 if (foldIntegerTypedPHI(PN))
1478 return nullptr;
1479
1480 Instruction *PHIUser = cast<Instruction>(PN.user_back());
1481 if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
1482 SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
1483 PotentiallyDeadPHIs.insert(&PN);
1484 if (isDeadPHICycle(PU, PotentiallyDeadPHIs))
1486 }
1487
1488 // If this phi has a single use, and if that use just computes a value for
1489 // the next iteration of a loop, delete the phi. This occurs with unused
1490 // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
1491 // common case here is good because the only other things that catch this
1492 // are induction variable analysis (sometimes) and ADCE, which is only run
1493 // late.
1494 if (PHIUser->hasOneUse() &&
1495 (isa<BinaryOperator>(PHIUser) || isa<UnaryOperator>(PHIUser) ||
1496 isa<GetElementPtrInst>(PHIUser)) &&
1497 PHIUser->user_back() == &PN) {
1499 }
1500 }
1501
1502 // When a PHI is used only to be compared with zero, it is safe to replace
1503 // an incoming value proved as known nonzero with any non-zero constant.
1504 // For example, in the code below, the incoming value %v can be replaced
1505 // with any non-zero constant based on the fact that the PHI is only used to
1506 // be compared with zero and %v is a known non-zero value:
1507 // %v = select %cond, 1, 2
1508 // %p = phi [%v, BB] ...
1509 // icmp eq, %p, 0
1510 // FIXME: To be simple, handle only integer type for now.
1511 // This handles a small number of uses to keep the complexity down, and an
1512 // icmp(or(phi)) can equally be replaced with any non-zero constant as the
1513 // "or" will only add bits.
1514 if (!PN.hasNUsesOrMore(3)) {
1515 SmallVector<Instruction *> DropPoisonFlags;
1516 bool AllUsesOfPhiEndsInCmp = all_of(PN.users(), [&](User *U) {
1517 auto *CmpInst = dyn_cast<ICmpInst>(U);
1518 if (!CmpInst) {
1519 // This is always correct as OR only add bits and we are checking
1520 // against 0.
1521 if (U->hasOneUse() && match(U, m_c_Or(m_Specific(&PN), m_Value()))) {
1522 DropPoisonFlags.push_back(cast<Instruction>(U));
1523 CmpInst = dyn_cast<ICmpInst>(U->user_back());
1524 }
1525 }
1526 if (!CmpInst || !isa<IntegerType>(PN.getType()) ||
1527 !CmpInst->isEquality() || !match(CmpInst->getOperand(1), m_Zero())) {
1528 return false;
1529 }
1530 return true;
1531 });
1532 // All uses of PHI results in a compare with zero.
1533 if (AllUsesOfPhiEndsInCmp) {
1534 ConstantInt *NonZeroConst = nullptr;
1535 bool MadeChange = false;
1536 for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
1538 Value *VA = PN.getIncomingValue(I);
1539 if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
1540 if (!NonZeroConst)
1541 NonZeroConst = getAnyNonZeroConstInt(PN);
1542 if (NonZeroConst != VA) {
1543 replaceOperand(PN, I, NonZeroConst);
1544 // The "disjoint" flag may no longer hold after the transform.
1545 for (Instruction *I : DropPoisonFlags)
1546 I->dropPoisonGeneratingFlags();
1547 MadeChange = true;
1548 }
1549 }
1550 }
1551 if (MadeChange)
1552 return &PN;
1553 }
1554 }
1555
1556 // We sometimes end up with phi cycles that non-obviously end up being the
1557 // same value, for example:
1558 // z = some value; x = phi (y, z); y = phi (x, z)
1559 // where the phi nodes don't necessarily need to be in the same block. Do a
1560 // quick check to see if the PHI node only contains a single non-phi value, if
1561 // so, scan to see if the phi cycle is actually equal to that value. If the
1562 // phi has no non-phi values then allow the "NonPhiInVal" to be set later if
1563 // one of the phis itself does not have a single input.
1564 {
1565 unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
1566 // Scan for the first non-phi operand.
1567 while (InValNo != NumIncomingVals &&
1568 isa<PHINode>(PN.getIncomingValue(InValNo)))
1569 ++InValNo;
1570
1571 Value *NonPhiInVal =
1572 InValNo != NumIncomingVals ? PN.getIncomingValue(InValNo) : nullptr;
1573
1574 // Scan the rest of the operands to see if there are any conflicts, if so
1575 // there is no need to recursively scan other phis.
1576 if (NonPhiInVal)
1577 for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
1578 Value *OpVal = PN.getIncomingValue(InValNo);
1579 if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
1580 break;
1581 }
1582
1583 // If we scanned over all operands, then we have one unique value plus
1584 // phi values. Scan PHI nodes to see if they all merge in each other or
1585 // the value.
1586 if (InValNo == NumIncomingVals) {
1587 SmallPtrSet<PHINode *, 16> ValueEqualPHIs;
1588 if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
1589 return replaceInstUsesWith(PN, NonPhiInVal);
1590 }
1591 }
1592
1593 // If there are multiple PHIs, sort their operands so that they all list
1594 // the blocks in the same order. This will help identical PHIs be eliminated
1595 // by other passes. Other passes shouldn't depend on this for correctness
1596 // however.
1597 auto Res = PredOrder.try_emplace(PN.getParent());
1598 if (!Res.second) {
1599 const auto &Preds = Res.first->second;
1600 for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
1601 BasicBlock *BBA = PN.getIncomingBlock(I);
1602 BasicBlock *BBB = Preds[I];
1603 if (BBA != BBB) {
1604 Value *VA = PN.getIncomingValue(I);
1605 unsigned J = PN.getBasicBlockIndex(BBB);
1606 Value *VB = PN.getIncomingValue(J);
1607 PN.setIncomingBlock(I, BBB);
1608 PN.setIncomingValue(I, VB);
1609 PN.setIncomingBlock(J, BBA);
1610 PN.setIncomingValue(J, VA);
1611 // NOTE: Instcombine normally would want us to "return &PN" if we
1612 // modified any of the operands of an instruction. However, since we
1613 // aren't adding or removing uses (just rearranging them) we don't do
1614 // this in this case.
1615 }
1616 }
1617 } else {
1618 // Remember the block order of the first encountered phi node.
1619 append_range(Res.first->second, PN.blocks());
1620 }
1621
1622 // Is there an identical PHI node in this basic block?
1623 for (PHINode &IdenticalPN : PN.getParent()->phis()) {
1624 // Ignore the PHI node itself.
1625 if (&IdenticalPN == &PN)
1626 continue;
1627 // Note that even though we've just canonicalized this PHI, due to the
1628 // worklist visitation order, there are no guarantess that *every* PHI
1629 // has been canonicalized, so we can't just compare operands ranges.
1630 if (!PN.isIdenticalToWhenDefined(&IdenticalPN))
1631 continue;
1632 // Just use that PHI instead then.
1633 ++NumPHICSEs;
1634 return replaceInstUsesWith(PN, &IdenticalPN);
1635 }
1636
1637 // If this is an integer PHI and we know that it has an illegal type, see if
1638 // it is only used by trunc or trunc(lshr) operations. If so, we split the
1639 // PHI into the various pieces being extracted. This sort of thing is
1640 // introduced when SROA promotes an aggregate to a single large integer type.
1641 if (PN.getType()->isIntegerTy() &&
1642 !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
1643 if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
1644 return Res;
1645
1646 // Ultimately, try to replace this Phi with a dominating condition.
1647 if (auto *V = simplifyUsingControlFlow(*this, PN, DT))
1648 return replaceInstUsesWith(PN, V);
1649
1650 if (Value *Res = foldDependentIVs(PN, Builder))
1651 return replaceInstUsesWith(PN, Res);
1652
1653 return nullptr;
1654}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Rewrite undef for PHI
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_DEBUG(X)
Definition: Debug.h:101
Hexagon Common GEP
This file provides internal interfaces used to implement the InstCombine.
static ConstantInt * getAnyNonZeroConstInt(PHINode &PN)
Return an existing non-zero constant if this phi node has one, otherwise return constant 1.
static Value * foldDependentIVs(PHINode &PN, IRBuilderBase &Builder)
static bool isDeadPHICycle(PHINode *PN, SmallPtrSetImpl< PHINode * > &PotentiallyDeadPHIs)
Return true if this PHI node is only used by a PHI node cycle that is dead.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L)
Return true if we know that it is safe to sink the load out of the block that defines it.
static Value * simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN, const DominatorTree &DT)
static bool PHIsEqualValue(PHINode *PN, Value *&NonPhiInVal, SmallPtrSetImpl< PHINode * > &ValueEqualPHIs)
Return true if this phi node is always equal to NonPhiInVal.
static cl::opt< unsigned > MaxNumPhis("instcombine-max-num-phis", cl::init(512), cl::desc("Maximum number phis to handle in intptr/ptrint folding"))
This file provides the interface for the instcombine pass implementation.
#define I(x, y, z)
Definition: MD5.cpp:58
LLVMContext & Context
if(VerifyEach)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition: blake3_impl.h:78
an instruction to allocate memory on the stack
Definition: Instructions.h:59
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
iterator end()
Definition: BasicBlock.h:442
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:498
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:389
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:205
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:164
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:220
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a binary instruction, given the opcode and the two operands.
BinaryOps getOpcode() const
Definition: InstrTypes.h:491
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:579
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast AddrSpaceCast, or a PtrToInt cast instruction.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:955
static bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
static CmpInst * Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:1066
OtherOps getOpcode() const
Get the opcode casted to the right type.
Definition: InstrTypes.h:1061
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2531
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
Definition: Constants.cpp:2598
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:849
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:856
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This class represents an Operation in the Expression.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:410
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:672
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr, BasicBlock::iterator InsertBefore)
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr, BasicBlock::iterator InsertBefore)
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
Type * getSourceElementType() const
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:94
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2006
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1431
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1748
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1660
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:180
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", bool IsInBounds=false)
Definition: IRBuilder.h:1865
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Instruction * foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN)
If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)], turn this into a phi[a,...
Instruction * foldPHIArgBinOpIntoPHI(PHINode &PN)
If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the adds all have a single user,...
Constant * getLosslessUnsignedTrunc(Constant *C, Type *TruncTy)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitPHINode(PHINode &PN)
Instruction * foldPHIArgOpIntoPHI(PHINode &PN)
Try to rotate an operation below a PHI node, using PHI nodes for its operands.
Instruction * foldPHIArgZextsIntoPHI(PHINode &PN)
TODO: This function could handle other cast types, but then it might require special-casing a cast fr...
Instruction * foldPHIArgLoadIntoPHI(PHINode &PN)
bool foldIntegerTypedPHI(PHINode &PN)
If an integer typed PHI has only one use which is an IntToPtr operation, replace the PHI with an exis...
Instruction * foldPHIArgIntToPtrToPHI(PHINode &PN)
Instruction * SliceUpIllegalIntegerPHI(PHINode &PN)
This is an integer PHI and we know that it has an illegal type: see if it is only used by trunc or tr...
Instruction * foldPHIArgGEPIntoPHI(PHINode &PN)
void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN)
Helper function for FoldPHIArgXIntoPHI() to set debug location for the folded operation.
Instruction * foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN)
If we have something like phi [extractvalue(a,0), extractvalue(b,0)], turn this into a phi[a,...
The core instruction combiner logic.
Definition: InstCombiner.h:47
SimplifyQuery SQ
Definition: InstCombiner.h:76
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Definition: InstCombiner.h:365
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Definition: InstCombiner.h:385
const DataLayout & DL
Definition: InstCombiner.h:75
AssumptionCache & AC
Definition: InstCombiner.h:72
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
Definition: InstCombiner.h:409
DominatorTree & DT
Definition: InstCombiner.h:74
BuilderTy & Builder
Definition: InstCombiner.h:60
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:452
void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction.
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
const BasicBlock * getParent() const
Definition: Instruction.h:150
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:147
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:357
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1633
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:250
void applyMergedLocation(DILocation *LocA, DILocation *LocB)
Merge 2 debug locations and apply it to the Instruction.
Definition: DebugInfo.cpp:929
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:449
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:184
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:286
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:230
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:236
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1827
size_type size() const
Definition: SmallPtrSet.h:94
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:321
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
op_range operands()
Definition: User.h:242
op_iterator op_begin()
Definition: User.h:234
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
op_iterator op_end()
Definition: User.h:236
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
iterator_range< user_iterator > users()
Definition: Value.h:421
bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
Definition: Value.cpp:153
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:693
bool isSwiftError() const
Return true if this value is a swifterror value.
Definition: Value.cpp:1095
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
self_iterator getIterator()
Definition: ilist_node.h:109
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:84
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:780
auto m_GEP(const OperandTypes &...Ops)
Matches GetElementPtrInst.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:545
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:456
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition: STLExtras.h:862
bool operator<(int64_t V1, const APSInt &V2)
Definition: APSInt.h:361
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1751
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1731
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2053
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1738
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef< unsigned > KnownIDs, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:3199
DWARFExpression::Operation Op
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
Definition: STLExtras.h:1616
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
static bool isEqual(const LoweredPHIRecord &LHS, const LoweredPHIRecord &RHS)
static unsigned getHashValue(const LoweredPHIRecord &Val)
static LoweredPHIRecord getTombstoneKey()
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:50
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
SimplifyQuery getWithInstruction(const Instruction *I) const
Definition: SimplifyQuery.h:96