LLVM 20.0.0git
InstCombinePHI.cpp
Go to the documentation of this file.
1//===- InstCombinePHI.cpp -------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visitPHINode function.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/Statistic.h"
23#include <optional>
24
25using namespace llvm;
26using namespace llvm::PatternMatch;
27
28#define DEBUG_TYPE "instcombine"
29
31MaxNumPhis("instcombine-max-num-phis", cl::init(512),
32 cl::desc("Maximum number phis to handle in intptr/ptrint folding"));
33
34STATISTIC(NumPHIsOfInsertValues,
35 "Number of phi-of-insertvalue turned into insertvalue-of-phis");
36STATISTIC(NumPHIsOfExtractValues,
37 "Number of phi-of-extractvalue turned into extractvalue-of-phi");
38STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
39
40/// The PHI arguments will be folded into a single operation with a PHI node
41/// as input. The debug location of the single operation will be the merged
42/// locations of the original PHI node arguments.
44 auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
45 Inst->setDebugLoc(FirstInst->getDebugLoc());
46 // We do not expect a CallInst here, otherwise, N-way merging of DebugLoc
47 // will be inefficient.
48 assert(!isa<CallInst>(Inst));
49
50 for (Value *V : drop_begin(PN.incoming_values())) {
51 auto *I = cast<Instruction>(V);
52 Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
53 }
54}
55
56// Replace Integer typed PHI PN if the PHI's value is used as a pointer value.
57// If there is an existing pointer typed PHI that produces the same value as PN,
58// replace PN and the IntToPtr operation with it. Otherwise, synthesize a new
59// PHI node:
60//
61// Case-1:
62// bb1:
63// int_init = PtrToInt(ptr_init)
64// br label %bb2
65// bb2:
66// int_val = PHI([int_init, %bb1], [int_val_inc, %bb2]
67// ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
68// ptr_val2 = IntToPtr(int_val)
69// ...
70// use(ptr_val2)
71// ptr_val_inc = ...
72// inc_val_inc = PtrToInt(ptr_val_inc)
73//
74// ==>
75// bb1:
76// br label %bb2
77// bb2:
78// ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
79// ...
80// use(ptr_val)
81// ptr_val_inc = ...
82//
83// Case-2:
84// bb1:
85// int_ptr = BitCast(ptr_ptr)
86// int_init = Load(int_ptr)
87// br label %bb2
88// bb2:
89// int_val = PHI([int_init, %bb1], [int_val_inc, %bb2]
90// ptr_val2 = IntToPtr(int_val)
91// ...
92// use(ptr_val2)
93// ptr_val_inc = ...
94// inc_val_inc = PtrToInt(ptr_val_inc)
95// ==>
96// bb1:
97// ptr_init = Load(ptr_ptr)
98// br label %bb2
99// bb2:
100// ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
101// ...
102// use(ptr_val)
103// ptr_val_inc = ...
104// ...
105//
107 if (!PN.getType()->isIntegerTy())
108 return false;
109 if (!PN.hasOneUse())
110 return false;
111
112 auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
113 if (!IntToPtr)
114 return false;
115
116 // Check if the pointer is actually used as pointer:
117 auto HasPointerUse = [](Instruction *IIP) {
118 for (User *U : IIP->users()) {
119 Value *Ptr = nullptr;
120 if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
121 Ptr = LoadI->getPointerOperand();
122 } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
123 Ptr = SI->getPointerOperand();
124 } else if (GetElementPtrInst *GI = dyn_cast<GetElementPtrInst>(U)) {
125 Ptr = GI->getPointerOperand();
126 }
127
128 if (Ptr && Ptr == IIP)
129 return true;
130 }
131 return false;
132 };
133
134 if (!HasPointerUse(IntToPtr))
135 return false;
136
137 if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
138 DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
139 return false;
140
141 SmallVector<Value *, 4> AvailablePtrVals;
142 for (auto Incoming : zip(PN.blocks(), PN.incoming_values())) {
143 BasicBlock *BB = std::get<0>(Incoming);
144 Value *Arg = std::get<1>(Incoming);
145
146 // Arg could be a constant, constant expr, etc., which we don't cover here.
147 if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
148 return false;
149
150 // First look backward:
151 if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
152 AvailablePtrVals.emplace_back(PI->getOperand(0));
153 continue;
154 }
155
156 // Next look forward:
157 Value *ArgIntToPtr = nullptr;
158 for (User *U : Arg->users()) {
159 if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
160 (DT.dominates(cast<Instruction>(U), BB) ||
161 cast<Instruction>(U)->getParent() == BB)) {
162 ArgIntToPtr = U;
163 break;
164 }
165 }
166
167 if (ArgIntToPtr) {
168 AvailablePtrVals.emplace_back(ArgIntToPtr);
169 continue;
170 }
171
172 // If Arg is defined by a PHI, allow it. This will also create
173 // more opportunities iteratively.
174 if (isa<PHINode>(Arg)) {
175 AvailablePtrVals.emplace_back(Arg);
176 continue;
177 }
178
179 // For a single use integer load:
180 auto *LoadI = dyn_cast<LoadInst>(Arg);
181 if (!LoadI)
182 return false;
183
184 if (!LoadI->hasOneUse())
185 return false;
186
187 // Push the integer typed Load instruction into the available
188 // value set, and fix it up later when the pointer typed PHI
189 // is synthesized.
190 AvailablePtrVals.emplace_back(LoadI);
191 }
192
193 // Now search for a matching PHI
194 auto *BB = PN.getParent();
195 assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
196 "Not enough available ptr typed incoming values");
197 PHINode *MatchingPtrPHI = nullptr;
198 unsigned NumPhis = 0;
199 for (PHINode &PtrPHI : BB->phis()) {
200 // FIXME: consider handling this in AggressiveInstCombine
201 if (NumPhis++ > MaxNumPhis)
202 return false;
203 if (&PtrPHI == &PN || PtrPHI.getType() != IntToPtr->getType())
204 continue;
205 if (any_of(zip(PN.blocks(), AvailablePtrVals),
206 [&](const auto &BlockAndValue) {
207 BasicBlock *BB = std::get<0>(BlockAndValue);
208 Value *V = std::get<1>(BlockAndValue);
209 return PtrPHI.getIncomingValueForBlock(BB) != V;
210 }))
211 continue;
212 MatchingPtrPHI = &PtrPHI;
213 break;
214 }
215
216 if (MatchingPtrPHI) {
217 assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
218 "Phi's Type does not match with IntToPtr");
219 // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
220 // to make sure another transform can't undo it in the meantime.
221 replaceInstUsesWith(*IntToPtr, MatchingPtrPHI);
222 eraseInstFromFunction(*IntToPtr);
224 return true;
225 }
226
227 // If it requires a conversion for every PHI operand, do not do it.
228 if (all_of(AvailablePtrVals, [&](Value *V) {
229 return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
230 }))
231 return false;
232
233 // If any of the operand that requires casting is a terminator
234 // instruction, do not do it. Similarly, do not do the transform if the value
235 // is PHI in a block with no insertion point, for example, a catchswitch
236 // block, since we will not be able to insert a cast after the PHI.
237 if (any_of(AvailablePtrVals, [&](Value *V) {
238 if (V->getType() == IntToPtr->getType())
239 return false;
240 auto *Inst = dyn_cast<Instruction>(V);
241 if (!Inst)
242 return false;
243 if (Inst->isTerminator())
244 return true;
245 auto *BB = Inst->getParent();
246 if (isa<PHINode>(Inst) && BB->getFirstInsertionPt() == BB->end())
247 return true;
248 return false;
249 }))
250 return false;
251
252 PHINode *NewPtrPHI = PHINode::Create(
253 IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");
254
255 InsertNewInstBefore(NewPtrPHI, PN.getIterator());
257 for (auto Incoming : zip(PN.blocks(), AvailablePtrVals)) {
258 auto *IncomingBB = std::get<0>(Incoming);
259 auto *IncomingVal = std::get<1>(Incoming);
260
261 if (IncomingVal->getType() == IntToPtr->getType()) {
262 NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
263 continue;
264 }
265
266#ifndef NDEBUG
267 LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
268 assert((isa<PHINode>(IncomingVal) ||
269 IncomingVal->getType()->isPointerTy() ||
270 (LoadI && LoadI->hasOneUse())) &&
271 "Can not replace LoadInst with multiple uses");
272#endif
273 // Need to insert a BitCast.
274 // For an integer Load instruction with a single use, the load + IntToPtr
275 // cast will be simplified into a pointer load:
276 // %v = load i64, i64* %a.ip, align 8
277 // %v.cast = inttoptr i64 %v to float **
278 // ==>
279 // %v.ptrp = bitcast i64 * %a.ip to float **
280 // %v.cast = load float *, float ** %v.ptrp, align 8
281 Instruction *&CI = Casts[IncomingVal];
282 if (!CI) {
283 CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
284 IncomingVal->getName() + ".ptr");
285 if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
286 BasicBlock::iterator InsertPos(IncomingI);
287 InsertPos++;
288 BasicBlock *BB = IncomingI->getParent();
289 if (isa<PHINode>(IncomingI))
290 InsertPos = BB->getFirstInsertionPt();
291 assert(InsertPos != BB->end() && "should have checked above");
292 InsertNewInstBefore(CI, InsertPos);
293 } else {
294 auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
295 InsertNewInstBefore(CI, InsertBB->getFirstInsertionPt());
296 }
297 }
298 NewPtrPHI->addIncoming(CI, IncomingBB);
299 }
300
301 // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
302 // to make sure another transform can't undo it in the meantime.
303 replaceInstUsesWith(*IntToPtr, NewPtrPHI);
304 eraseInstFromFunction(*IntToPtr);
306 return true;
307}
308
309// Remove RoundTrip IntToPtr/PtrToInt Cast on PHI-Operand and
310// fold Phi-operand to bitcast.
312 // convert ptr2int ( phi[ int2ptr(ptr2int(x))] ) --> ptr2int ( phi [ x ] )
313 // Make sure all uses of phi are ptr2int.
314 if (!all_of(PN.users(), [](User *U) { return isa<PtrToIntInst>(U); }))
315 return nullptr;
316
317 // Iterating over all operands to check presence of target pointers for
318 // optimization.
319 bool OperandWithRoundTripCast = false;
320 for (unsigned OpNum = 0; OpNum != PN.getNumIncomingValues(); ++OpNum) {
321 if (auto *NewOp =
322 simplifyIntToPtrRoundTripCast(PN.getIncomingValue(OpNum))) {
323 replaceOperand(PN, OpNum, NewOp);
324 OperandWithRoundTripCast = true;
325 }
326 }
327 if (!OperandWithRoundTripCast)
328 return nullptr;
329 return &PN;
330}
331
332/// If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)],
333/// turn this into a phi[a,c] and phi[b,d] and a single insertvalue.
336 auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));
337
338 // Scan to see if all operands are `insertvalue`'s with the same indices,
339 // and all have a single use.
340 for (Value *V : drop_begin(PN.incoming_values())) {
341 auto *I = dyn_cast<InsertValueInst>(V);
342 if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
343 return nullptr;
344 }
345
346 // For each operand of an `insertvalue`
347 std::array<PHINode *, 2> NewOperands;
348 for (int OpIdx : {0, 1}) {
349 auto *&NewOperand = NewOperands[OpIdx];
350 // Create a new PHI node to receive the values the operand has in each
351 // incoming basic block.
352 NewOperand = PHINode::Create(
353 FirstIVI->getOperand(OpIdx)->getType(), PN.getNumIncomingValues(),
354 FirstIVI->getOperand(OpIdx)->getName() + ".pn");
355 // And populate each operand's PHI with said values.
356 for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
357 NewOperand->addIncoming(
358 cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
359 std::get<0>(Incoming));
360 InsertNewInstBefore(NewOperand, PN.getIterator());
361 }
362
363 // And finally, create `insertvalue` over the newly-formed PHI nodes.
364 auto *NewIVI = InsertValueInst::Create(NewOperands[0], NewOperands[1],
365 FirstIVI->getIndices(), PN.getName());
366
367 PHIArgMergedDebugLoc(NewIVI, PN);
368 ++NumPHIsOfInsertValues;
369 return NewIVI;
370}
371
372/// If we have something like phi [extractvalue(a,0), extractvalue(b,0)],
373/// turn this into a phi[a,b] and a single extractvalue.
376 auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));
377
378 // Scan to see if all operands are `extractvalue`'s with the same indices,
379 // and all have a single use.
380 for (Value *V : drop_begin(PN.incoming_values())) {
381 auto *I = dyn_cast<ExtractValueInst>(V);
382 if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
383 I->getAggregateOperand()->getType() !=
384 FirstEVI->getAggregateOperand()->getType())
385 return nullptr;
386 }
387
388 // Create a new PHI node to receive the values the aggregate operand has
389 // in each incoming basic block.
390 auto *NewAggregateOperand = PHINode::Create(
391 FirstEVI->getAggregateOperand()->getType(), PN.getNumIncomingValues(),
392 FirstEVI->getAggregateOperand()->getName() + ".pn");
393 // And populate the PHI with said values.
394 for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
395 NewAggregateOperand->addIncoming(
396 cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
397 std::get<0>(Incoming));
398 InsertNewInstBefore(NewAggregateOperand, PN.getIterator());
399
400 // And finally, create `extractvalue` over the newly-formed PHI nodes.
401 auto *NewEVI = ExtractValueInst::Create(NewAggregateOperand,
402 FirstEVI->getIndices(), PN.getName());
403
404 PHIArgMergedDebugLoc(NewEVI, PN);
405 ++NumPHIsOfExtractValues;
406 return NewEVI;
407}
408
409/// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
410/// adds all have a single user, turn this into a phi and a single binop.
412 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
413 assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
414 unsigned Opc = FirstInst->getOpcode();
415 Value *LHSVal = FirstInst->getOperand(0);
416 Value *RHSVal = FirstInst->getOperand(1);
417
418 Type *LHSType = LHSVal->getType();
419 Type *RHSType = RHSVal->getType();
420
421 // Scan to see if all operands are the same opcode, and all have one user.
422 for (Value *V : drop_begin(PN.incoming_values())) {
423 Instruction *I = dyn_cast<Instruction>(V);
424 if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
425 // Verify type of the LHS matches so we don't fold cmp's of different
426 // types.
427 I->getOperand(0)->getType() != LHSType ||
428 I->getOperand(1)->getType() != RHSType)
429 return nullptr;
430
431 // If they are CmpInst instructions, check their predicates
432 if (CmpInst *CI = dyn_cast<CmpInst>(I))
433 if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
434 return nullptr;
435
436 // Keep track of which operand needs a phi node.
437 if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
438 if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
439 }
440
441 // If both LHS and RHS would need a PHI, don't do this transformation,
442 // because it would increase the number of PHIs entering the block,
443 // which leads to higher register pressure. This is especially
444 // bad when the PHIs are in the header of a loop.
445 if (!LHSVal && !RHSVal)
446 return nullptr;
447
448 // Otherwise, this is safe to transform!
449
450 Value *InLHS = FirstInst->getOperand(0);
451 Value *InRHS = FirstInst->getOperand(1);
452 PHINode *NewLHS = nullptr, *NewRHS = nullptr;
453 if (!LHSVal) {
454 NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
455 FirstInst->getOperand(0)->getName() + ".pn");
456 NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
457 InsertNewInstBefore(NewLHS, PN.getIterator());
458 LHSVal = NewLHS;
459 }
460
461 if (!RHSVal) {
462 NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
463 FirstInst->getOperand(1)->getName() + ".pn");
464 NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
465 InsertNewInstBefore(NewRHS, PN.getIterator());
466 RHSVal = NewRHS;
467 }
468
469 // Add all operands to the new PHIs.
470 if (NewLHS || NewRHS) {
471 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
472 BasicBlock *InBB = std::get<0>(Incoming);
473 Value *InVal = std::get<1>(Incoming);
474 Instruction *InInst = cast<Instruction>(InVal);
475 if (NewLHS) {
476 Value *NewInLHS = InInst->getOperand(0);
477 NewLHS->addIncoming(NewInLHS, InBB);
478 }
479 if (NewRHS) {
480 Value *NewInRHS = InInst->getOperand(1);
481 NewRHS->addIncoming(NewInRHS, InBB);
482 }
483 }
484 }
485
486 if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
487 CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
488 LHSVal, RHSVal);
489 PHIArgMergedDebugLoc(NewCI, PN);
490 return NewCI;
491 }
492
493 BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
494 BinaryOperator *NewBinOp =
495 BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
496
497 NewBinOp->copyIRFlags(PN.getIncomingValue(0));
498
499 for (Value *V : drop_begin(PN.incoming_values()))
500 NewBinOp->andIRFlags(V);
501
502 PHIArgMergedDebugLoc(NewBinOp, PN);
503 return NewBinOp;
504}
505
507 GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));
508
509 SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
510 FirstInst->op_end());
511 // This is true if all GEP bases are allocas and if all indices into them are
512 // constants.
513 bool AllBasePointersAreAllocas = true;
514
515 // We don't want to replace this phi if the replacement would require
516 // more than one phi, which leads to higher register pressure. This is
517 // especially bad when the PHIs are in the header of a loop.
518 bool NeededPhi = false;
519
521
522 // Scan to see if all operands are the same opcode, and all have one user.
523 for (Value *V : drop_begin(PN.incoming_values())) {
524 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V);
525 if (!GEP || !GEP->hasOneUser() ||
526 GEP->getSourceElementType() != FirstInst->getSourceElementType() ||
527 GEP->getNumOperands() != FirstInst->getNumOperands())
528 return nullptr;
529
530 NW &= GEP->getNoWrapFlags();
531
532 // Keep track of whether or not all GEPs are of alloca pointers.
533 if (AllBasePointersAreAllocas &&
534 (!isa<AllocaInst>(GEP->getOperand(0)) ||
535 !GEP->hasAllConstantIndices()))
536 AllBasePointersAreAllocas = false;
537
538 // Compare the operand lists.
539 for (unsigned Op = 0, E = FirstInst->getNumOperands(); Op != E; ++Op) {
540 if (FirstInst->getOperand(Op) == GEP->getOperand(Op))
541 continue;
542
543 // Don't merge two GEPs when two operands differ (introducing phi nodes)
544 // if one of the PHIs has a constant for the index. The index may be
545 // substantially cheaper to compute for the constants, so making it a
546 // variable index could pessimize the path. This also handles the case
547 // for struct indices, which must always be constant.
548 if (isa<ConstantInt>(FirstInst->getOperand(Op)) ||
549 isa<ConstantInt>(GEP->getOperand(Op)))
550 return nullptr;
551
552 if (FirstInst->getOperand(Op)->getType() !=
553 GEP->getOperand(Op)->getType())
554 return nullptr;
555
556 // If we already needed a PHI for an earlier operand, and another operand
557 // also requires a PHI, we'd be introducing more PHIs than we're
558 // eliminating, which increases register pressure on entry to the PHI's
559 // block.
560 if (NeededPhi)
561 return nullptr;
562
563 FixedOperands[Op] = nullptr; // Needs a PHI.
564 NeededPhi = true;
565 }
566 }
567
568 // If all of the base pointers of the PHI'd GEPs are from allocas, don't
569 // bother doing this transformation. At best, this will just save a bit of
570 // offset calculation, but all the predecessors will have to materialize the
571 // stack address into a register anyway. We'd actually rather *clone* the
572 // load up into the predecessors so that we have a load of a gep of an alloca,
573 // which can usually all be folded into the load.
574 if (AllBasePointersAreAllocas)
575 return nullptr;
576
577 // Otherwise, this is safe to transform. Insert PHI nodes for each operand
578 // that is variable.
579 SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
580
581 bool HasAnyPHIs = false;
582 for (unsigned I = 0, E = FixedOperands.size(); I != E; ++I) {
583 if (FixedOperands[I])
584 continue; // operand doesn't need a phi.
585 Value *FirstOp = FirstInst->getOperand(I);
586 PHINode *NewPN =
587 PHINode::Create(FirstOp->getType(), E, FirstOp->getName() + ".pn");
588 InsertNewInstBefore(NewPN, PN.getIterator());
589
590 NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
591 OperandPhis[I] = NewPN;
592 FixedOperands[I] = NewPN;
593 HasAnyPHIs = true;
594 }
595
596 // Add all operands to the new PHIs.
597 if (HasAnyPHIs) {
598 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
599 BasicBlock *InBB = std::get<0>(Incoming);
600 Value *InVal = std::get<1>(Incoming);
601 GetElementPtrInst *InGEP = cast<GetElementPtrInst>(InVal);
602
603 for (unsigned Op = 0, E = OperandPhis.size(); Op != E; ++Op)
604 if (PHINode *OpPhi = OperandPhis[Op])
605 OpPhi->addIncoming(InGEP->getOperand(Op), InBB);
606 }
607 }
608
609 Value *Base = FixedOperands[0];
610 GetElementPtrInst *NewGEP =
612 ArrayRef(FixedOperands).slice(1), NW);
613 PHIArgMergedDebugLoc(NewGEP, PN);
614 return NewGEP;
615}
616
617/// Return true if we know that it is safe to sink the load out of the block
618/// that defines it. This means that it must be obvious the value of the load is
619/// not changed from the point of the load to the end of the block it is in.
620///
621/// Finally, it is safe, but not profitable, to sink a load targeting a
622/// non-address-taken alloca. Doing so will cause us to not promote the alloca
623/// to a register.
625 BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();
626
627 for (++BBI; BBI != E; ++BBI)
628 if (BBI->mayWriteToMemory()) {
629 // Calls that only access inaccessible memory do not block sinking the
630 // load.
631 if (auto *CB = dyn_cast<CallBase>(BBI))
632 if (CB->onlyAccessesInaccessibleMemory())
633 continue;
634 return false;
635 }
636
637 // Check for non-address taken alloca. If not address-taken already, it isn't
638 // profitable to do this xform.
639 if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
640 bool IsAddressTaken = false;
641 for (User *U : AI->users()) {
642 if (isa<LoadInst>(U)) continue;
643 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
644 // If storing TO the alloca, then the address isn't taken.
645 if (SI->getOperand(1) == AI) continue;
646 }
647 IsAddressTaken = true;
648 break;
649 }
650
651 if (!IsAddressTaken && AI->isStaticAlloca())
652 return false;
653 }
654
655 // If this load is a load from a GEP with a constant offset from an alloca,
656 // then we don't want to sink it. In its present form, it will be
657 // load [constant stack offset]. Sinking it will cause us to have to
658 // materialize the stack addresses in each predecessor in a register only to
659 // do a shared load from register in the successor.
660 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
661 if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
662 if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
663 return false;
664
665 return true;
666}
667
669 LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
670
671 // Can't forward swifterror through a phi.
672 if (FirstLI->getOperand(0)->isSwiftError())
673 return nullptr;
674
675 // FIXME: This is overconservative; this transform is allowed in some cases
676 // for atomic operations.
677 if (FirstLI->isAtomic())
678 return nullptr;
679
680 // When processing loads, we need to propagate two bits of information to the
681 // sunk load: whether it is volatile, and what its alignment is.
682 bool IsVolatile = FirstLI->isVolatile();
683 Align LoadAlignment = FirstLI->getAlign();
684 const unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
685
686 // We can't sink the load if the loaded value could be modified between the
687 // load and the PHI.
688 if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
690 return nullptr;
691
692 // If the PHI is of volatile loads and the load block has multiple
693 // successors, sinking it would remove a load of the volatile value from
694 // the path through the other successor.
695 if (IsVolatile &&
696 FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
697 return nullptr;
698
699 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
700 BasicBlock *InBB = std::get<0>(Incoming);
701 Value *InVal = std::get<1>(Incoming);
702 LoadInst *LI = dyn_cast<LoadInst>(InVal);
703 if (!LI || !LI->hasOneUser() || LI->isAtomic())
704 return nullptr;
705
706 // Make sure all arguments are the same type of operation.
707 if (LI->isVolatile() != IsVolatile ||
708 LI->getPointerAddressSpace() != LoadAddrSpace)
709 return nullptr;
710
711 // Can't forward swifterror through a phi.
712 if (LI->getOperand(0)->isSwiftError())
713 return nullptr;
714
715 // We can't sink the load if the loaded value could be modified between
716 // the load and the PHI.
717 if (LI->getParent() != InBB || !isSafeAndProfitableToSinkLoad(LI))
718 return nullptr;
719
720 LoadAlignment = std::min(LoadAlignment, LI->getAlign());
721
722 // If the PHI is of volatile loads and the load block has multiple
723 // successors, sinking it would remove a load of the volatile value from
724 // the path through the other successor.
725 if (IsVolatile && LI->getParent()->getTerminator()->getNumSuccessors() != 1)
726 return nullptr;
727 }
728
729 // Okay, they are all the same operation. Create a new PHI node of the
730 // correct type, and PHI together all of the LHS's of the instructions.
731 PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
733 PN.getName()+".in");
734
735 Value *InVal = FirstLI->getOperand(0);
736 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
737 LoadInst *NewLI =
738 new LoadInst(FirstLI->getType(), NewPN, "", IsVolatile, LoadAlignment);
739
740 unsigned KnownIDs[] = {
741 LLVMContext::MD_tbaa,
742 LLVMContext::MD_range,
743 LLVMContext::MD_invariant_load,
744 LLVMContext::MD_alias_scope,
745 LLVMContext::MD_noalias,
746 LLVMContext::MD_nonnull,
747 LLVMContext::MD_align,
748 LLVMContext::MD_dereferenceable,
749 LLVMContext::MD_dereferenceable_or_null,
750 LLVMContext::MD_access_group,
751 LLVMContext::MD_noundef,
752 };
753
754 for (unsigned ID : KnownIDs)
755 NewLI->setMetadata(ID, FirstLI->getMetadata(ID));
756
757 // Add all operands to the new PHI and combine TBAA metadata.
758 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
759 BasicBlock *BB = std::get<0>(Incoming);
760 Value *V = std::get<1>(Incoming);
761 LoadInst *LI = cast<LoadInst>(V);
762 combineMetadata(NewLI, LI, KnownIDs, true);
763 Value *NewInVal = LI->getOperand(0);
764 if (NewInVal != InVal)
765 InVal = nullptr;
766 NewPN->addIncoming(NewInVal, BB);
767 }
768
769 if (InVal) {
770 // The new PHI unions all of the same values together. This is really
771 // common, so we handle it intelligently here for compile-time speed.
772 NewLI->setOperand(0, InVal);
773 delete NewPN;
774 } else {
775 InsertNewInstBefore(NewPN, PN.getIterator());
776 }
777
778 // If this was a volatile load that we are merging, make sure to loop through
779 // and mark all the input loads as non-volatile. If we don't do this, we will
780 // insert a new volatile load and the old ones will not be deletable.
781 if (IsVolatile)
782 for (Value *IncValue : PN.incoming_values())
783 cast<LoadInst>(IncValue)->setVolatile(false);
784
785 PHIArgMergedDebugLoc(NewLI, PN);
786 return NewLI;
787}
788
789/// TODO: This function could handle other cast types, but then it might
790/// require special-casing a cast from the 'i1' type. See the comment in
791/// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
793 // We cannot create a new instruction after the PHI if the terminator is an
794 // EHPad because there is no valid insertion point.
795 if (Instruction *TI = Phi.getParent()->getTerminator())
796 if (TI->isEHPad())
797 return nullptr;
798
799 // Early exit for the common case of a phi with two operands. These are
800 // handled elsewhere. See the comment below where we check the count of zexts
801 // and constants for more details.
802 unsigned NumIncomingValues = Phi.getNumIncomingValues();
803 if (NumIncomingValues < 3)
804 return nullptr;
805
806 // Find the narrower type specified by the first zext.
807 Type *NarrowType = nullptr;
808 for (Value *V : Phi.incoming_values()) {
809 if (auto *Zext = dyn_cast<ZExtInst>(V)) {
810 NarrowType = Zext->getSrcTy();
811 break;
812 }
813 }
814 if (!NarrowType)
815 return nullptr;
816
817 // Walk the phi operands checking that we only have zexts or constants that
818 // we can shrink for free. Store the new operands for the new phi.
819 SmallVector<Value *, 4> NewIncoming;
820 unsigned NumZexts = 0;
821 unsigned NumConsts = 0;
822 for (Value *V : Phi.incoming_values()) {
823 if (auto *Zext = dyn_cast<ZExtInst>(V)) {
824 // All zexts must be identical and have one user.
825 if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
826 return nullptr;
827 NewIncoming.push_back(Zext->getOperand(0));
828 NumZexts++;
829 } else if (auto *C = dyn_cast<Constant>(V)) {
830 // Make sure that constants can fit in the new type.
831 Constant *Trunc = getLosslessUnsignedTrunc(C, NarrowType);
832 if (!Trunc)
833 return nullptr;
834 NewIncoming.push_back(Trunc);
835 NumConsts++;
836 } else {
837 // If it's not a cast or a constant, bail out.
838 return nullptr;
839 }
840 }
841
842 // The more common cases of a phi with no constant operands or just one
843 // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
844 // respectively. foldOpIntoPhi() wants to do the opposite transform that is
845 // performed here. It tries to replicate a cast in the phi operand's basic
846 // block to expose other folding opportunities. Thus, InstCombine will
847 // infinite loop without this check.
848 if (NumConsts == 0 || NumZexts < 2)
849 return nullptr;
850
851 // All incoming values are zexts or constants that are safe to truncate.
852 // Create a new phi node of the narrow type, phi together all of the new
853 // operands, and zext the result back to the original type.
854 PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
855 Phi.getName() + ".shrunk");
856 for (unsigned I = 0; I != NumIncomingValues; ++I)
857 NewPhi->addIncoming(NewIncoming[I], Phi.getIncomingBlock(I));
858
859 InsertNewInstBefore(NewPhi, Phi.getIterator());
860 return CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
861}
862
863/// If all operands to a PHI node are the same "unary" operator and they all are
864/// only used by the PHI, PHI together their inputs, and do the operation once,
865/// to the result of the PHI.
867 // We cannot create a new instruction after the PHI if the terminator is an
868 // EHPad because there is no valid insertion point.
869 if (Instruction *TI = PN.getParent()->getTerminator())
870 if (TI->isEHPad())
871 return nullptr;
872
873 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
874
875 if (isa<GetElementPtrInst>(FirstInst))
876 return foldPHIArgGEPIntoPHI(PN);
877 if (isa<LoadInst>(FirstInst))
878 return foldPHIArgLoadIntoPHI(PN);
879 if (isa<InsertValueInst>(FirstInst))
881 if (isa<ExtractValueInst>(FirstInst))
883
884 // Scan the instruction, looking for input operations that can be folded away.
885 // If all input operands to the phi are the same instruction (e.g. a cast from
886 // the same type or "+42") we can pull the operation through the PHI, reducing
887 // code size and simplifying code.
888 Constant *ConstantOp = nullptr;
889 Type *CastSrcTy = nullptr;
890
891 if (isa<CastInst>(FirstInst)) {
892 CastSrcTy = FirstInst->getOperand(0)->getType();
893
894 // Be careful about transforming integer PHIs. We don't want to pessimize
895 // the code by turning an i32 into an i1293.
896 if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
897 if (!shouldChangeType(PN.getType(), CastSrcTy))
898 return nullptr;
899 }
900 } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
901 // Can fold binop, compare or shift here if the RHS is a constant,
902 // otherwise call FoldPHIArgBinOpIntoPHI.
903 ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
904 if (!ConstantOp)
905 return foldPHIArgBinOpIntoPHI(PN);
906 } else {
907 return nullptr; // Cannot fold this operation.
908 }
909
910 // Check to see if all arguments are the same operation.
911 for (Value *V : drop_begin(PN.incoming_values())) {
912 Instruction *I = dyn_cast<Instruction>(V);
913 if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
914 return nullptr;
915 if (CastSrcTy) {
916 if (I->getOperand(0)->getType() != CastSrcTy)
917 return nullptr; // Cast operation must match.
918 } else if (I->getOperand(1) != ConstantOp) {
919 return nullptr;
920 }
921 }
922
923 // Okay, they are all the same operation. Create a new PHI node of the
924 // correct type, and PHI together all of the LHS's of the instructions.
925 PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
927 PN.getName()+".in");
928
929 Value *InVal = FirstInst->getOperand(0);
930 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
931
932 // Add all operands to the new PHI.
933 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
934 BasicBlock *BB = std::get<0>(Incoming);
935 Value *V = std::get<1>(Incoming);
936 Value *NewInVal = cast<Instruction>(V)->getOperand(0);
937 if (NewInVal != InVal)
938 InVal = nullptr;
939 NewPN->addIncoming(NewInVal, BB);
940 }
941
942 Value *PhiVal;
943 if (InVal) {
944 // The new PHI unions all of the same values together. This is really
945 // common, so we handle it intelligently here for compile-time speed.
946 PhiVal = InVal;
947 delete NewPN;
948 } else {
949 InsertNewInstBefore(NewPN, PN.getIterator());
950 PhiVal = NewPN;
951 }
952
953 // Insert and return the new operation.
954 if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
955 CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
956 PN.getType());
957 PHIArgMergedDebugLoc(NewCI, PN);
958 return NewCI;
959 }
960
961 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
962 BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
963 BinOp->copyIRFlags(PN.getIncomingValue(0));
964
965 for (Value *V : drop_begin(PN.incoming_values()))
966 BinOp->andIRFlags(V);
967
968 PHIArgMergedDebugLoc(BinOp, PN);
969 return BinOp;
970 }
971
972 CmpInst *CIOp = cast<CmpInst>(FirstInst);
973 CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
974 PhiVal, ConstantOp);
975 PHIArgMergedDebugLoc(NewCI, PN);
976 return NewCI;
977}
978
979/// Return true if this PHI node is only used by a PHI node cycle that is dead.
980static bool isDeadPHICycle(PHINode *PN,
981 SmallPtrSetImpl<PHINode *> &PotentiallyDeadPHIs) {
982 if (PN->use_empty()) return true;
983 if (!PN->hasOneUse()) return false;
984
985 // Remember this node, and if we find the cycle, return.
986 if (!PotentiallyDeadPHIs.insert(PN).second)
987 return true;
988
989 // Don't scan crazily complex things.
990 if (PotentiallyDeadPHIs.size() == 16)
991 return false;
992
993 if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
994 return isDeadPHICycle(PU, PotentiallyDeadPHIs);
995
996 return false;
997}
998
/// Return true if this phi node is always equal to NonPhiInVal.
/// This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
///
/// \p NonPhiInVal is an in/out parameter shared across the recursion: it may
/// be null on entry, in which case the walk is allowed to adopt a candidate
/// value (see the assignment below) for the caller to use.
static bool PHIsEqualValue(PHINode *PN, Value *&NonPhiInVal,
                           SmallPtrSetImpl<PHINode *> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (Value *Op : PN->incoming_values()) {
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      // Recurse through phi operands.  A failing sub-scan is tolerated only
      // when no candidate value has been fixed yet; that sub-phi then becomes
      // the candidate the rest of the cycle must agree with.
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs)) {
        if (NonPhiInVal)
          return false;
        NonPhiInVal = OpPN;
      }
    } else if (Op != NonPhiInVal) // Non-phi operands must match exactly.
      return false;
  }

  return true;
}
1027
1028/// Return an existing non-zero constant if this phi node has one, otherwise
1029/// return constant 1.
1031 assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
1032 for (Value *V : PN.operands())
1033 if (auto *ConstVA = dyn_cast<ConstantInt>(V))
1034 if (!ConstVA->isZero())
1035 return ConstVA;
1036 return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
1037}
1038
1039namespace {
1040struct PHIUsageRecord {
1041 unsigned PHIId; // The ID # of the PHI (something determinstic to sort on)
1042 unsigned Shift; // The amount shifted.
1043 Instruction *Inst; // The trunc instruction.
1044
1045 PHIUsageRecord(unsigned Pn, unsigned Sh, Instruction *User)
1046 : PHIId(Pn), Shift(Sh), Inst(User) {}
1047
1048 bool operator<(const PHIUsageRecord &RHS) const {
1049 if (PHIId < RHS.PHIId) return true;
1050 if (PHIId > RHS.PHIId) return false;
1051 if (Shift < RHS.Shift) return true;
1052 if (Shift > RHS.Shift) return false;
1053 return Inst->getType()->getPrimitiveSizeInBits() <
1055 }
1056};
1057
1058struct LoweredPHIRecord {
1059 PHINode *PN; // The PHI that was lowered.
1060 unsigned Shift; // The amount shifted.
1061 unsigned Width; // The width extracted.
1062
1063 LoweredPHIRecord(PHINode *Phi, unsigned Sh, Type *Ty)
1064 : PN(Phi), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
1065
1066 // Ctor form used by DenseMap.
1067 LoweredPHIRecord(PHINode *Phi, unsigned Sh) : PN(Phi), Shift(Sh), Width(0) {}
1068};
1069} // namespace
1070
1071namespace llvm {
1072 template<>
1073 struct DenseMapInfo<LoweredPHIRecord> {
1074 static inline LoweredPHIRecord getEmptyKey() {
1075 return LoweredPHIRecord(nullptr, 0);
1076 }
1077 static inline LoweredPHIRecord getTombstoneKey() {
1078 return LoweredPHIRecord(nullptr, 1);
1079 }
1080 static unsigned getHashValue(const LoweredPHIRecord &Val) {
1081 return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
1082 (Val.Width>>3);
1083 }
1084 static bool isEqual(const LoweredPHIRecord &LHS,
1085 const LoweredPHIRecord &RHS) {
1086 return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
1087 LHS.Width == RHS.Width;
1088 }
1089 };
1090} // namespace llvm
1091
1092
1093/// This is an integer PHI and we know that it has an illegal type: see if it is
1094/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
1095/// the various pieces being extracted. This sort of thing is introduced when
1096/// SROA promotes an aggregate to large integer values.
1097///
1098/// TODO: The user of the trunc may be an bitcast to float/double/vector or an
1099/// inttoptr. We should produce new PHIs in the right type.
1100///
1102 // PHIUsers - Keep track of all of the truncated values extracted from a set
1103 // of PHIs, along with their offset. These are the things we want to rewrite.
1105
1106 // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
1107 // nodes which are extracted from. PHIsToSlice is a set we use to avoid
1108 // revisiting PHIs, PHIsInspected is a ordered list of PHIs that we need to
1109 // check the uses of (to ensure they are all extracts).
1110 SmallVector<PHINode*, 8> PHIsToSlice;
1111 SmallPtrSet<PHINode*, 8> PHIsInspected;
1112
1113 PHIsToSlice.push_back(&FirstPhi);
1114 PHIsInspected.insert(&FirstPhi);
1115
1116 for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
1117 PHINode *PN = PHIsToSlice[PHIId];
1118
1119 // Scan the input list of the PHI. If any input is an invoke, and if the
1120 // input is defined in the predecessor, then we won't be split the critical
1121 // edge which is required to insert a truncate. Because of this, we have to
1122 // bail out.
1123 for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
1124 BasicBlock *BB = std::get<0>(Incoming);
1125 Value *V = std::get<1>(Incoming);
1126 InvokeInst *II = dyn_cast<InvokeInst>(V);
1127 if (!II)
1128 continue;
1129 if (II->getParent() != BB)
1130 continue;
1131
1132 // If we have a phi, and if it's directly in the predecessor, then we have
1133 // a critical edge where we need to put the truncate. Since we can't
1134 // split the edge in instcombine, we have to bail out.
1135 return nullptr;
1136 }
1137
1138 // If the incoming value is a PHI node before a catchswitch, we cannot
1139 // extract the value within that BB because we cannot insert any non-PHI
1140 // instructions in the BB.
1141 for (auto *Pred : PN->blocks())
1142 if (Pred->getFirstInsertionPt() == Pred->end())
1143 return nullptr;
1144
1145 for (User *U : PN->users()) {
1146 Instruction *UserI = cast<Instruction>(U);
1147
1148 // If the user is a PHI, inspect its uses recursively.
1149 if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
1150 if (PHIsInspected.insert(UserPN).second)
1151 PHIsToSlice.push_back(UserPN);
1152 continue;
1153 }
1154
1155 // Truncates are always ok.
1156 if (isa<TruncInst>(UserI)) {
1157 PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
1158 continue;
1159 }
1160
1161 // Otherwise it must be a lshr which can only be used by one trunc.
1162 if (UserI->getOpcode() != Instruction::LShr ||
1163 !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
1164 !isa<ConstantInt>(UserI->getOperand(1)))
1165 return nullptr;
1166
1167 // Bail on out of range shifts.
1168 unsigned SizeInBits = UserI->getType()->getScalarSizeInBits();
1169 if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
1170 return nullptr;
1171
1172 unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
1173 PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
1174 }
1175 }
1176
1177 // If we have no users, they must be all self uses, just nuke the PHI.
1178 if (PHIUsers.empty())
1179 return replaceInstUsesWith(FirstPhi, PoisonValue::get(FirstPhi.getType()));
1180
1181 // If this phi node is transformable, create new PHIs for all the pieces
1182 // extracted out of it. First, sort the users by their offset and size.
1183 array_pod_sort(PHIUsers.begin(), PHIUsers.end());
1184
1185 LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
1186 for (unsigned I = 1; I != PHIsToSlice.size(); ++I) dbgs()
1187 << "AND USER PHI #" << I << ": " << *PHIsToSlice[I] << '\n');
1188
1189 // PredValues - This is a temporary used when rewriting PHI nodes. It is
1190 // hoisted out here to avoid construction/destruction thrashing.
1192
1193 // ExtractedVals - Each new PHI we introduce is saved here so we don't
1194 // introduce redundant PHIs.
1196
1197 for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
1198 unsigned PHIId = PHIUsers[UserI].PHIId;
1199 PHINode *PN = PHIsToSlice[PHIId];
1200 unsigned Offset = PHIUsers[UserI].Shift;
1201 Type *Ty = PHIUsers[UserI].Inst->getType();
1202
1203 PHINode *EltPHI;
1204
1205 // If we've already lowered a user like this, reuse the previously lowered
1206 // value.
1207 if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
1208
1209 // Otherwise, Create the new PHI node for this user.
1210 EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
1211 PN->getName() + ".off" + Twine(Offset),
1212 PN->getIterator());
1213 assert(EltPHI->getType() != PN->getType() &&
1214 "Truncate didn't shrink phi?");
1215
1216 for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
1217 BasicBlock *Pred = std::get<0>(Incoming);
1218 Value *InVal = std::get<1>(Incoming);
1219 Value *&PredVal = PredValues[Pred];
1220
1221 // If we already have a value for this predecessor, reuse it.
1222 if (PredVal) {
1223 EltPHI->addIncoming(PredVal, Pred);
1224 continue;
1225 }
1226
1227 // Handle the PHI self-reuse case.
1228 if (InVal == PN) {
1229 PredVal = EltPHI;
1230 EltPHI->addIncoming(PredVal, Pred);
1231 continue;
1232 }
1233
1234 if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
1235 // If the incoming value was a PHI, and if it was one of the PHIs we
1236 // already rewrote it, just use the lowered value.
1237 if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
1238 PredVal = Res;
1239 EltPHI->addIncoming(PredVal, Pred);
1240 continue;
1241 }
1242 }
1243
1244 // Otherwise, do an extract in the predecessor.
1246 Value *Res = InVal;
1247 if (Offset)
1248 Res = Builder.CreateLShr(
1249 Res, ConstantInt::get(InVal->getType(), Offset), "extract");
1250 Res = Builder.CreateTrunc(Res, Ty, "extract.t");
1251 PredVal = Res;
1252 EltPHI->addIncoming(Res, Pred);
1253
1254 // If the incoming value was a PHI, and if it was one of the PHIs we are
1255 // rewriting, we will ultimately delete the code we inserted. This
1256 // means we need to revisit that PHI to make sure we extract out the
1257 // needed piece.
1258 if (PHINode *OldInVal = dyn_cast<PHINode>(InVal))
1259 if (PHIsInspected.count(OldInVal)) {
1260 unsigned RefPHIId =
1261 find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
1262 PHIUsers.push_back(
1263 PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res)));
1264 ++UserE;
1265 }
1266 }
1267 PredValues.clear();
1268
1269 LLVM_DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": "
1270 << *EltPHI << '\n');
1271 ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
1272 }
1273
1274 // Replace the use of this piece with the PHI node.
1275 replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
1276 }
1277
1278 // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
1279 // with poison.
1280 Value *Poison = PoisonValue::get(FirstPhi.getType());
1281 for (PHINode *PHI : drop_begin(PHIsToSlice))
1283 return replaceInstUsesWith(FirstPhi, Poison);
1284}
1285
1287 const DominatorTree &DT) {
1288 // Simplify the following patterns:
1289 // if (cond)
1290 // / \
1291 // ... ...
1292 // \ /
1293 // phi [true] [false]
1294 // and
1295 // switch (cond)
1296 // case v1: / \ case v2:
1297 // ... ...
1298 // \ /
1299 // phi [v1] [v2]
1300 // Make sure all inputs are constants.
1301 if (!all_of(PN.operands(), [](Value *V) { return isa<ConstantInt>(V); }))
1302 return nullptr;
1303
1304 BasicBlock *BB = PN.getParent();
1305 // Do not bother with unreachable instructions.
1306 if (!DT.isReachableFromEntry(BB))
1307 return nullptr;
1308
1309 // Determine which value the condition of the idom has for which successor.
1310 LLVMContext &Context = PN.getContext();
1311 auto *IDom = DT.getNode(BB)->getIDom()->getBlock();
1312 Value *Cond;
1315 auto AddSucc = [&](ConstantInt *C, BasicBlock *Succ) {
1316 SuccForValue[C] = Succ;
1317 ++SuccCount[Succ];
1318 };
1319 if (auto *BI = dyn_cast<BranchInst>(IDom->getTerminator())) {
1320 if (BI->isUnconditional())
1321 return nullptr;
1322
1323 Cond = BI->getCondition();
1324 AddSucc(ConstantInt::getTrue(Context), BI->getSuccessor(0));
1325 AddSucc(ConstantInt::getFalse(Context), BI->getSuccessor(1));
1326 } else if (auto *SI = dyn_cast<SwitchInst>(IDom->getTerminator())) {
1327 Cond = SI->getCondition();
1328 ++SuccCount[SI->getDefaultDest()];
1329 for (auto Case : SI->cases())
1330 AddSucc(Case.getCaseValue(), Case.getCaseSuccessor());
1331 } else {
1332 return nullptr;
1333 }
1334
1335 if (Cond->getType() != PN.getType())
1336 return nullptr;
1337
1338 // Check that edges outgoing from the idom's terminators dominate respective
1339 // inputs of the Phi.
1340 std::optional<bool> Invert;
1341 for (auto Pair : zip(PN.incoming_values(), PN.blocks())) {
1342 auto *Input = cast<ConstantInt>(std::get<0>(Pair));
1343 BasicBlock *Pred = std::get<1>(Pair);
1344 auto IsCorrectInput = [&](ConstantInt *Input) {
1345 // The input needs to be dominated by the corresponding edge of the idom.
1346 // This edge cannot be a multi-edge, as that would imply that multiple
1347 // different condition values follow the same edge.
1348 auto It = SuccForValue.find(Input);
1349 return It != SuccForValue.end() && SuccCount[It->second] == 1 &&
1350 DT.dominates(BasicBlockEdge(IDom, It->second),
1351 BasicBlockEdge(Pred, BB));
1352 };
1353
1354 // Depending on the constant, the condition may need to be inverted.
1355 bool NeedsInvert;
1356 if (IsCorrectInput(Input))
1357 NeedsInvert = false;
1358 else if (IsCorrectInput(cast<ConstantInt>(ConstantExpr::getNot(Input))))
1359 NeedsInvert = true;
1360 else
1361 return nullptr;
1362
1363 // Make sure the inversion requirement is always the same.
1364 if (Invert && *Invert != NeedsInvert)
1365 return nullptr;
1366
1367 Invert = NeedsInvert;
1368 }
1369
1370 if (!*Invert)
1371 return Cond;
1372
1373 // This Phi is actually opposite to branching condition of IDom. We invert
1374 // the condition that will potentially open up some opportunities for
1375 // sinking.
1376 auto InsertPt = BB->getFirstInsertionPt();
1377 if (InsertPt != BB->end()) {
1378 Self.Builder.SetInsertPoint(&*BB, InsertPt);
1379 return Self.Builder.CreateNot(Cond);
1380 }
1381
1382 return nullptr;
1383}
1384
1385// Fold iv = phi(start, iv.next = iv2.next op start)
1386// where iv2 = phi(iv2.start, iv2.next = iv2 + iv2.step)
1387// and iv2.start op start = start
1388// to iv = iv2 op start
1390 BasicBlock *BB = PN.getParent();
1391 if (PN.getNumIncomingValues() != 2)
1392 return nullptr;
1393
1394 Value *Start;
1395 Instruction *IvNext;
1396 BinaryOperator *Iv2Next;
1397 auto MatchOuterIV = [&](Value *V1, Value *V2) {
1398 if (match(V2, m_c_BinOp(m_Specific(V1), m_BinOp(Iv2Next))) ||
1399 match(V2, m_GEP(m_Specific(V1), m_BinOp(Iv2Next)))) {
1400 Start = V1;
1401 IvNext = cast<Instruction>(V2);
1402 return true;
1403 }
1404 return false;
1405 };
1406
1407 if (!MatchOuterIV(PN.getIncomingValue(0), PN.getIncomingValue(1)) &&
1408 !MatchOuterIV(PN.getIncomingValue(1), PN.getIncomingValue(0)))
1409 return nullptr;
1410
1411 PHINode *Iv2;
1412 Value *Iv2Start, *Iv2Step;
1413 if (!matchSimpleRecurrence(Iv2Next, Iv2, Iv2Start, Iv2Step) ||
1414 Iv2->getParent() != BB)
1415 return nullptr;
1416
1417 auto *BO = dyn_cast<BinaryOperator>(IvNext);
1418 Constant *Identity =
1419 BO ? ConstantExpr::getBinOpIdentity(BO->getOpcode(), Iv2Start->getType())
1420 : Constant::getNullValue(Iv2Start->getType());
1421 if (Iv2Start != Identity)
1422 return nullptr;
1423
1424 Builder.SetInsertPoint(&*BB, BB->getFirstInsertionPt());
1425 if (!BO) {
1426 auto *GEP = cast<GEPOperator>(IvNext);
1427 return Builder.CreateGEP(GEP->getSourceElementType(), Start, Iv2, "",
1428 cast<GEPOperator>(IvNext)->getNoWrapFlags());
1429 }
1430
1431 assert(BO->isCommutative() && "Must be commutative");
1432 Value *Res = Builder.CreateBinOp(BO->getOpcode(), Iv2, Start);
1433 cast<Instruction>(Res)->copyIRFlags(BO);
1434 return Res;
1435}
1436
1437// PHINode simplification
1438//
1440 if (Value *V = simplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
1441 return replaceInstUsesWith(PN, V);
1442
1443 if (Instruction *Result = foldPHIArgZextsIntoPHI(PN))
1444 return Result;
1445
1446 if (Instruction *Result = foldPHIArgIntToPtrToPHI(PN))
1447 return Result;
1448
1449 // If all PHI operands are the same operation, pull them through the PHI,
1450 // reducing code size.
1451 auto *Inst0 = dyn_cast<Instruction>(PN.getIncomingValue(0));
1452 auto *Inst1 = dyn_cast<Instruction>(PN.getIncomingValue(1));
1453 if (Inst0 && Inst1 && Inst0->getOpcode() == Inst1->getOpcode() &&
1454 Inst0->hasOneUser())
1455 if (Instruction *Result = foldPHIArgOpIntoPHI(PN))
1456 return Result;
1457
1458 // If the incoming values are pointer casts of the same original value,
1459 // replace the phi with a single cast iff we can insert a non-PHI instruction.
1460 if (PN.getType()->isPointerTy() &&
1461 PN.getParent()->getFirstInsertionPt() != PN.getParent()->end()) {
1462 Value *IV0 = PN.getIncomingValue(0);
1463 Value *IV0Stripped = IV0->stripPointerCasts();
1464 // Set to keep track of values known to be equal to IV0Stripped after
1465 // stripping pointer casts.
1466 SmallPtrSet<Value *, 4> CheckedIVs;
1467 CheckedIVs.insert(IV0);
1468 if (IV0 != IV0Stripped &&
1469 all_of(PN.incoming_values(), [&CheckedIVs, IV0Stripped](Value *IV) {
1470 return !CheckedIVs.insert(IV).second ||
1471 IV0Stripped == IV->stripPointerCasts();
1472 })) {
1473 return CastInst::CreatePointerCast(IV0Stripped, PN.getType());
1474 }
1475 }
1476
1477 // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
1478 // this PHI only has a single use (a PHI), and if that PHI only has one use (a
1479 // PHI)... break the cycle.
1480 if (PN.hasOneUse()) {
1481 if (foldIntegerTypedPHI(PN))
1482 return nullptr;
1483
1484 Instruction *PHIUser = cast<Instruction>(PN.user_back());
1485 if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
1486 SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
1487 PotentiallyDeadPHIs.insert(&PN);
1488 if (isDeadPHICycle(PU, PotentiallyDeadPHIs))
1490 }
1491
1492 // If this phi has a single use, and if that use just computes a value for
1493 // the next iteration of a loop, delete the phi. This occurs with unused
1494 // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
1495 // common case here is good because the only other things that catch this
1496 // are induction variable analysis (sometimes) and ADCE, which is only run
1497 // late.
1498 if (PHIUser->hasOneUse() &&
1499 (isa<BinaryOperator>(PHIUser) || isa<UnaryOperator>(PHIUser) ||
1500 isa<GetElementPtrInst>(PHIUser)) &&
1501 PHIUser->user_back() == &PN) {
1503 }
1504 }
1505
1506 // When a PHI is used only to be compared with zero, it is safe to replace
1507 // an incoming value proved as known nonzero with any non-zero constant.
1508 // For example, in the code below, the incoming value %v can be replaced
1509 // with any non-zero constant based on the fact that the PHI is only used to
1510 // be compared with zero and %v is a known non-zero value:
1511 // %v = select %cond, 1, 2
1512 // %p = phi [%v, BB] ...
1513 // icmp eq, %p, 0
1514 // FIXME: To be simple, handle only integer type for now.
1515 // This handles a small number of uses to keep the complexity down, and an
1516 // icmp(or(phi)) can equally be replaced with any non-zero constant as the
1517 // "or" will only add bits.
1518 if (!PN.hasNUsesOrMore(3)) {
1519 SmallVector<Instruction *> DropPoisonFlags;
1520 bool AllUsesOfPhiEndsInCmp = all_of(PN.users(), [&](User *U) {
1521 auto *CmpInst = dyn_cast<ICmpInst>(U);
1522 if (!CmpInst) {
1523 // This is always correct as OR only add bits and we are checking
1524 // against 0.
1525 if (U->hasOneUse() && match(U, m_c_Or(m_Specific(&PN), m_Value()))) {
1526 DropPoisonFlags.push_back(cast<Instruction>(U));
1527 CmpInst = dyn_cast<ICmpInst>(U->user_back());
1528 }
1529 }
1530 if (!CmpInst || !isa<IntegerType>(PN.getType()) ||
1531 !CmpInst->isEquality() || !match(CmpInst->getOperand(1), m_Zero())) {
1532 return false;
1533 }
1534 return true;
1535 });
1536 // All uses of PHI results in a compare with zero.
1537 if (AllUsesOfPhiEndsInCmp) {
1538 ConstantInt *NonZeroConst = nullptr;
1539 bool MadeChange = false;
1540 for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
1542 Value *VA = PN.getIncomingValue(I);
1543 if (isKnownNonZero(VA, getSimplifyQuery().getWithInstruction(CtxI))) {
1544 if (!NonZeroConst)
1545 NonZeroConst = getAnyNonZeroConstInt(PN);
1546 if (NonZeroConst != VA) {
1547 replaceOperand(PN, I, NonZeroConst);
1548 // The "disjoint" flag may no longer hold after the transform.
1549 for (Instruction *I : DropPoisonFlags)
1550 I->dropPoisonGeneratingFlags();
1551 MadeChange = true;
1552 }
1553 }
1554 }
1555 if (MadeChange)
1556 return &PN;
1557 }
1558 }
1559
1560 // We sometimes end up with phi cycles that non-obviously end up being the
1561 // same value, for example:
1562 // z = some value; x = phi (y, z); y = phi (x, z)
1563 // where the phi nodes don't necessarily need to be in the same block. Do a
1564 // quick check to see if the PHI node only contains a single non-phi value, if
1565 // so, scan to see if the phi cycle is actually equal to that value. If the
1566 // phi has no non-phi values then allow the "NonPhiInVal" to be set later if
1567 // one of the phis itself does not have a single input.
1568 {
1569 unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
1570 // Scan for the first non-phi operand.
1571 while (InValNo != NumIncomingVals &&
1572 isa<PHINode>(PN.getIncomingValue(InValNo)))
1573 ++InValNo;
1574
1575 Value *NonPhiInVal =
1576 InValNo != NumIncomingVals ? PN.getIncomingValue(InValNo) : nullptr;
1577
1578 // Scan the rest of the operands to see if there are any conflicts, if so
1579 // there is no need to recursively scan other phis.
1580 if (NonPhiInVal)
1581 for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
1582 Value *OpVal = PN.getIncomingValue(InValNo);
1583 if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
1584 break;
1585 }
1586
1587 // If we scanned over all operands, then we have one unique value plus
1588 // phi values. Scan PHI nodes to see if they all merge in each other or
1589 // the value.
1590 if (InValNo == NumIncomingVals) {
1591 SmallPtrSet<PHINode *, 16> ValueEqualPHIs;
1592 if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
1593 return replaceInstUsesWith(PN, NonPhiInVal);
1594 }
1595 }
1596
1597 // If there are multiple PHIs, sort their operands so that they all list
1598 // the blocks in the same order. This will help identical PHIs be eliminated
1599 // by other passes. Other passes shouldn't depend on this for correctness
1600 // however.
1601 auto Res = PredOrder.try_emplace(PN.getParent());
1602 if (!Res.second) {
1603 const auto &Preds = Res.first->second;
1604 for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
1605 BasicBlock *BBA = PN.getIncomingBlock(I);
1606 BasicBlock *BBB = Preds[I];
1607 if (BBA != BBB) {
1608 Value *VA = PN.getIncomingValue(I);
1609 unsigned J = PN.getBasicBlockIndex(BBB);
1610 Value *VB = PN.getIncomingValue(J);
1611 PN.setIncomingBlock(I, BBB);
1612 PN.setIncomingValue(I, VB);
1613 PN.setIncomingBlock(J, BBA);
1614 PN.setIncomingValue(J, VA);
1615 // NOTE: Instcombine normally would want us to "return &PN" if we
1616 // modified any of the operands of an instruction. However, since we
1617 // aren't adding or removing uses (just rearranging them) we don't do
1618 // this in this case.
1619 }
1620 }
1621 } else {
1622 // Remember the block order of the first encountered phi node.
1623 append_range(Res.first->second, PN.blocks());
1624 }
1625
1626 // Is there an identical PHI node in this basic block?
1627 for (PHINode &IdenticalPN : PN.getParent()->phis()) {
1628 // Ignore the PHI node itself.
1629 if (&IdenticalPN == &PN)
1630 continue;
1631 // Note that even though we've just canonicalized this PHI, due to the
1632 // worklist visitation order, there are no guarantess that *every* PHI
1633 // has been canonicalized, so we can't just compare operands ranges.
1634 if (!PN.isIdenticalToWhenDefined(&IdenticalPN))
1635 continue;
1636 // Just use that PHI instead then.
1637 ++NumPHICSEs;
1638 return replaceInstUsesWith(PN, &IdenticalPN);
1639 }
1640
1641 // If this is an integer PHI and we know that it has an illegal type, see if
1642 // it is only used by trunc or trunc(lshr) operations. If so, we split the
1643 // PHI into the various pieces being extracted. This sort of thing is
1644 // introduced when SROA promotes an aggregate to a single large integer type.
1645 if (PN.getType()->isIntegerTy() &&
1646 !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
1647 if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
1648 return Res;
1649
1650 // Ultimately, try to replace this Phi with a dominating condition.
1651 if (auto *V = simplifyUsingControlFlow(*this, PN, DT))
1652 return replaceInstUsesWith(PN, V);
1653
1654 if (Value *Res = foldDependentIVs(PN, Builder))
1655 return replaceInstUsesWith(PN, Res);
1656
1657 return nullptr;
1658}
@ Poison
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define LLVM_DEBUG(X)
Definition: Debug.h:101
Hexagon Common GEP
This file provides internal interfaces used to implement the InstCombine.
static ConstantInt * getAnyNonZeroConstInt(PHINode &PN)
Return an existing non-zero constant if this phi node has one, otherwise return constant 1.
static Value * foldDependentIVs(PHINode &PN, IRBuilderBase &Builder)
static bool isDeadPHICycle(PHINode *PN, SmallPtrSetImpl< PHINode * > &PotentiallyDeadPHIs)
Return true if this PHI node is only used by a PHI node cycle that is dead.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L)
Return true if we know that it is safe to sink the load out of the block that defines it.
static Value * simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN, const DominatorTree &DT)
static bool PHIsEqualValue(PHINode *PN, Value *&NonPhiInVal, SmallPtrSetImpl< PHINode * > &ValueEqualPHIs)
Return true if this phi node is always equal to NonPhiInVal.
static cl::opt< unsigned > MaxNumPhis("instcombine-max-num-phis", cl::init(512), cl::desc("Maximum number phis to handle in intptr/ptrint folding"))
This file provides the interface for the instcombine pass implementation.
#define I(x, y, z)
Definition: MD5.cpp:58
uint64_t IntrinsicInst * II
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition: blake3_impl.h:78
an instruction to allocate memory on the stack
Definition: Instructions.h:61
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator end()
Definition: BasicBlock.h:461
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
BinaryOps getOpcode() const
Definition: InstrTypes.h:442
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:530
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:747
static bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
static CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:847
OtherOps getOpcode() const
Get the opcode casted to the right type.
Definition: InstrTypes.h:842
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2605
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
Definition: Constants.cpp:2665
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:850
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:857
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This class represents an Operation in the Expression.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:367
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:621
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags all()
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:915
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:938
Type * getSourceElementType() const
Definition: Instructions.h:971
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1454
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1883
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1766
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2027
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1683
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Instruction * foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN)
If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)], turn this into a phi[a,...
Instruction * foldPHIArgBinOpIntoPHI(PHINode &PN)
If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the adds all have a single user,...
Constant * getLosslessUnsignedTrunc(Constant *C, Type *TruncTy)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitPHINode(PHINode &PN)
Instruction * foldPHIArgOpIntoPHI(PHINode &PN)
Try to rotate an operation below a PHI node, using PHI nodes for its operands.
Instruction * foldPHIArgZextsIntoPHI(PHINode &PN)
TODO: This function could handle other cast types, but then it might require special-casing a cast fr...
Instruction * foldPHIArgLoadIntoPHI(PHINode &PN)
bool foldIntegerTypedPHI(PHINode &PN)
If an integer typed PHI has only one use which is an IntToPtr operation, replace the PHI with an exis...
Instruction * foldPHIArgIntToPtrToPHI(PHINode &PN)
Instruction * SliceUpIllegalIntegerPHI(PHINode &PN)
This is an integer PHI and we know that it has an illegal type: see if it is only used by trunc or tr...
Instruction * foldPHIArgGEPIntoPHI(PHINode &PN)
void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN)
Helper function for FoldPHIArgXIntoPHI() to set debug location for the folded operation.
Instruction * foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN)
If we have something like phi [extractvalue(a,0), extractvalue(b,0)], turn this into a phi[a,...
The core instruction combiner logic.
Definition: InstCombiner.h:47
SimplifyQuery SQ
Definition: InstCombiner.h:76
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Definition: InstCombiner.h:363
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Definition: InstCombiner.h:383
const DataLayout & DL
Definition: InstCombiner.h:75
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
Definition: InstCombiner.h:407
DominatorTree & DT
Definition: InstCombiner.h:74
BuilderTy & Builder
Definition: InstCombiner.h:60
const SimplifyQuery & getSimplifyQuery() const
Definition: InstCombiner.h:339
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction.
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:169
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:381
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1642
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
void applyMergedLocation(DILocation *LocA, DILocation *LocB)
Merge 2 debug locations and apply it to the Instruction.
Definition: DebugInfo.cpp:932
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:174
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:259
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:203
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:209
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1852
size_type size() const
Definition: SmallPtrSet.h:95
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:346
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:435
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:367
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:502
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:290
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:251
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:224
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
op_range operands()
Definition: User.h:242
op_iterator op_begin()
Definition: User.h:234
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
op_iterator op_end()
Definition: User.h:236
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
iterator_range< user_iterator > users()
Definition: Value.h:421
bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
Definition: Value.cpp:153
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
bool isSwiftError() const
Return true if this value is a swifterror value.
Definition: Value.cpp:1096
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:100
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:875
auto m_GEP(const OperandTypes &...Ops)
Matches GetElementPtrInst.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition: STLExtras.h:853
bool operator<(int64_t V1, const APSInt &V2)
Definition: APSInt.h:361
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1742
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2098
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef< unsigned > KnownIDs, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:3239
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
DWARFExpression::Operation Op
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
Definition: STLExtras.h:1607
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
static bool isEqual(const LoweredPHIRecord &LHS, const LoweredPHIRecord &RHS)
static unsigned getHashValue(const LoweredPHIRecord &Val)
static LoweredPHIRecord getTombstoneKey()
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:52
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
SimplifyQuery getWithInstruction(const Instruction *I) const