LLVM API Documentation

Reassociate.cpp
00001 //===- Reassociate.cpp - Reassociate binary expressions -------------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This pass reassociates commutative expressions in an order that is designed
00011 // to promote better constant propagation, GCSE, LICM, PRE, etc.
00012 //
00013 // For example: 4 + (x + 5) -> x + (4 + 5)
00014 //
00015 // In the implementation of this algorithm, constants are assigned rank = 0,
00016 // function arguments are rank = 1, and other values are assigned ranks
00017 // corresponding to the reverse post order traversal of current function
00018 // (starting at 2), which effectively gives values in deep loops higher rank
00019 // than values not in loops.
00020 //
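// For instance (hypothetical values): in "(i * x) * y", where x and y are
// defined outside a loop and i is the loop induction variable, x and y get
// lower ranks than i, so the product is regrouped as "(x * y) * i" and the
// loop-invariant "x * y" can then be hoisted by LICM.
//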
00021 //===----------------------------------------------------------------------===//
00022 
00023 #include "llvm/Transforms/Scalar.h"
00024 #include "llvm/ADT/DenseMap.h"
00025 #include "llvm/ADT/PostOrderIterator.h"
00026 #include "llvm/ADT/STLExtras.h"
00027 #include "llvm/ADT/SetVector.h"
00028 #include "llvm/ADT/Statistic.h"
00029 #include "llvm/IR/CFG.h"
00030 #include "llvm/IR/Constants.h"
00031 #include "llvm/IR/DerivedTypes.h"
00032 #include "llvm/IR/Function.h"
00033 #include "llvm/IR/IRBuilder.h"
00034 #include "llvm/IR/Instructions.h"
00035 #include "llvm/IR/IntrinsicInst.h"
00036 #include "llvm/IR/ValueHandle.h"
00037 #include "llvm/Pass.h"
00038 #include "llvm/Support/Debug.h"
00039 #include "llvm/Support/raw_ostream.h"
00040 #include "llvm/Transforms/Utils/Local.h"
00041 #include <algorithm>
00042 using namespace llvm;
00043 
00044 #define DEBUG_TYPE "reassociate"
00045 
00046 STATISTIC(NumChanged, "Number of insts reassociated");
00047 STATISTIC(NumAnnihil, "Number of expr tree annihilated");
00048 STATISTIC(NumFactor , "Number of multiplies factored");
00049 
00050 namespace {
00051   struct ValueEntry {
00052     unsigned Rank;
00053     Value *Op;
00054     ValueEntry(unsigned R, Value *O) : Rank(R), Op(O) {}
00055   };
00056   inline bool operator<(const ValueEntry &LHS, const ValueEntry &RHS) {
00057     return LHS.Rank > RHS.Rank;   // Sort so that highest rank goes to start.
00058   }
00059 }
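// Illustrative example (hypothetical values): sorting the ValueEntry list
// { (rank 0, i32 4), (rank 5, %x), (rank 1, %arg) } with the operator< above
// yields { %x, %arg, 4 }, i.e. descending rank, so constants (rank 0) cluster
// at the end of the operand list where they are easy to fold together.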
00060 
00061 #ifndef NDEBUG
00062 /// PrintOps - Print out the expression identified in the Ops list.
00063 ///
00064 static void PrintOps(Instruction *I, const SmallVectorImpl<ValueEntry> &Ops) {
00065   Module *M = I->getParent()->getParent()->getParent();
00066   dbgs() << Instruction::getOpcodeName(I->getOpcode()) << " "
00067        << *Ops[0].Op->getType() << '\t';
00068   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
00069     dbgs() << "[ ";
00070     Ops[i].Op->printAsOperand(dbgs(), false, M);
00071     dbgs() << ", #" << Ops[i].Rank << "] ";
00072   }
00073 }
00074 #endif
00075 
00076 namespace {
00077   /// \brief Utility class representing a base and exponent pair which form one
00078   /// factor of some product.
00079   struct Factor {
00080     Value *Base;
00081     unsigned Power;
00082 
00083     Factor(Value *Base, unsigned Power) : Base(Base), Power(Power) {}
00084 
00085     /// \brief Sort factors by their Base.
00086     struct BaseSorter {
00087       bool operator()(const Factor &LHS, const Factor &RHS) {
00088         return LHS.Base < RHS.Base;
00089       }
00090     };
00091 
00092     /// \brief Compare factors for equal bases.
00093     struct BaseEqual {
00094       bool operator()(const Factor &LHS, const Factor &RHS) {
00095         return LHS.Base == RHS.Base;
00096       }
00097     };
00098 
00099     /// \brief Sort factors in descending order by their power.
00100     struct PowerDescendingSorter {
00101       bool operator()(const Factor &LHS, const Factor &RHS) {
00102         return LHS.Power > RHS.Power;
00103       }
00104     };
00105 
00106     /// \brief Compare factors for equal powers.
00107     struct PowerEqual {
00108       bool operator()(const Factor &LHS, const Factor &RHS) {
00109         return LHS.Power == RHS.Power;
00110       }
00111     };
00112   };
00113   
00114   /// Utility class representing a non-constant Xor-operand. We classify
00115   /// non-constant Xor-Operands into two categories:
00116   ///  C1) The operand is in the form "X & C", where C is a constant and C != ~0
00117   ///  C2)
00118   ///    C2.1) The operand is in the form of "X | C", where C is a non-zero
00119   ///          constant.
00120   ///    C2.2) Any operand E which doesn't fall into C1 or C2.1; we view such an
00121   ///          operand as "E | 0".
00122   class XorOpnd {
00123   public:
00124     XorOpnd(Value *V);
00125 
00126     bool isInvalid() const { return SymbolicPart == nullptr; }
00127     bool isOrExpr() const { return isOr; }
00128     Value *getValue() const { return OrigVal; }
00129     Value *getSymbolicPart() const { return SymbolicPart; }
00130     unsigned getSymbolicRank() const { return SymbolicRank; }
00131     const APInt &getConstPart() const { return ConstPart; }
00132 
00133     void Invalidate() { SymbolicPart = OrigVal = nullptr; }
00134     void setSymbolicRank(unsigned R) { SymbolicRank = R; }
00135 
00136     // Sort the XorOpnd-Pointer in ascending order of symbolic-value-rank.
00137     // The purpose is twofold:
00138     // 1) Cluster together the operands sharing the same symbolic-value.
00139     // 2) Operands having a smaller symbolic-value-rank are permuted earlier,
00140     //   which could potentially shorten the critical path and expose more
00141     //   loop invariants.  Note that values' ranks are basically defined in
00142     //   RPO order (FIXME).  So, if Rank(X) < Rank(Y) < Rank(Z), X is defined
00143     //   earlier than Y, which is defined earlier than Z.  Permuting "X | 1",
00144     //   "Y & 2", "Z" in the order X-Y-Z is better than any other order.
00145     struct PtrSortFunctor {
00146       bool operator()(XorOpnd * const &LHS, XorOpnd * const &RHS) {
00147         return LHS->getSymbolicRank() < RHS->getSymbolicRank();
00148       }
00149     };
00150   private:
00151     Value *OrigVal;
00152     Value *SymbolicPart;
00153     APInt ConstPart;
00154     unsigned SymbolicRank;
00155     bool isOr;
00156   };
00157 }
00158 
00159 namespace {
00160   class Reassociate : public FunctionPass {
00161     DenseMap<BasicBlock*, unsigned> RankMap;
00162     DenseMap<AssertingVH<Value>, unsigned> ValueRankMap;
00163     SetVector<AssertingVH<Instruction> > RedoInsts;
00164     bool MadeChange;
00165   public:
00166     static char ID; // Pass identification, replacement for typeid
00167     Reassociate() : FunctionPass(ID) {
00168       initializeReassociatePass(*PassRegistry::getPassRegistry());
00169     }
00170 
00171     bool runOnFunction(Function &F) override;
00172 
00173     void getAnalysisUsage(AnalysisUsage &AU) const override {
00174       AU.setPreservesCFG();
00175     }
00176   private:
00177     void BuildRankMap(Function &F);
00178     unsigned getRank(Value *V);
00179     void ReassociateExpression(BinaryOperator *I);
00180     void RewriteExprTree(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops);
00181     Value *OptimizeExpression(BinaryOperator *I,
00182                               SmallVectorImpl<ValueEntry> &Ops);
00183     Value *OptimizeAdd(Instruction *I, SmallVectorImpl<ValueEntry> &Ops);
00184     Value *OptimizeXor(Instruction *I, SmallVectorImpl<ValueEntry> &Ops);
00185     bool CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, APInt &ConstOpnd,
00186                         Value *&Res);
00187     bool CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, XorOpnd *Opnd2,
00188                         APInt &ConstOpnd, Value *&Res);
00189     bool collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops,
00190                                 SmallVectorImpl<Factor> &Factors);
00191     Value *buildMinimalMultiplyDAG(IRBuilder<> &Builder,
00192                                    SmallVectorImpl<Factor> &Factors);
00193     Value *OptimizeMul(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops);
00194     Value *RemoveFactorFromExpression(Value *V, Value *Factor);
00195     void EraseInst(Instruction *I);
00196     void OptimizeInst(Instruction *I);
00197   };
00198 }
00199 
00200 XorOpnd::XorOpnd(Value *V) {
00201   assert(!isa<ConstantInt>(V) && "No ConstantInt");
00202   OrigVal = V;
00203   Instruction *I = dyn_cast<Instruction>(V);
00204   SymbolicRank = 0;
00205 
00206   if (I && (I->getOpcode() == Instruction::Or ||
00207             I->getOpcode() == Instruction::And)) {
00208     Value *V0 = I->getOperand(0);
00209     Value *V1 = I->getOperand(1);
00210     if (isa<ConstantInt>(V0))
00211       std::swap(V0, V1);
00212 
00213     if (ConstantInt *C = dyn_cast<ConstantInt>(V1)) {
00214       ConstPart = C->getValue();
00215       SymbolicPart = V0;
00216       isOr = (I->getOpcode() == Instruction::Or);
00217       return;
00218     }
00219   }
00220 
00221   // view the operand as "V | 0"
00222   SymbolicPart = V;
00223   ConstPart = APInt::getNullValue(V->getType()->getIntegerBitWidth());
00224   isOr = true;
00225 }
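// For illustration (hypothetical i32 operands), the constructor above
// classifies:
//   "%a = or i32 %x, 12"   as  SymbolicPart = %x, ConstPart = 12, isOr = true
//   "%b = and i32 %x, 12"  as  SymbolicPart = %x, ConstPart = 12, isOr = false
//   any other value %v     as  SymbolicPart = %v, ConstPart = 0,  isOr = true
// i.e. category C2.2 above: %v is viewed as "%v | 0".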
00226 
00227 char Reassociate::ID = 0;
00228 INITIALIZE_PASS(Reassociate, "reassociate",
00229                 "Reassociate expressions", false, false)
00230 
00231 // Public interface to the Reassociate pass
00232 FunctionPass *llvm::createReassociatePass() { return new Reassociate(); }
00233 
00234 /// isReassociableOp - Return V as a BinaryOperator if it is an instruction of
00235 /// the specified opcode and only has one use, otherwise return null.
00236 static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
00237   if (V->hasOneUse() && isa<Instruction>(V) &&
00238       cast<Instruction>(V)->getOpcode() == Opcode)
00239     return cast<BinaryOperator>(V);
00240   return nullptr;
00241 }
00242 
00243 static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode1,
00244                                         unsigned Opcode2) {
00245   if (V->hasOneUse() && isa<Instruction>(V) &&
00246       (cast<Instruction>(V)->getOpcode() == Opcode1 ||
00247        cast<Instruction>(V)->getOpcode() == Opcode2))
00248     return cast<BinaryOperator>(V);
00249   return nullptr;
00250 }
00251 
00252 static bool isUnmovableInstruction(Instruction *I) {
00253   switch (I->getOpcode()) {
00254   case Instruction::PHI:
00255   case Instruction::LandingPad:
00256   case Instruction::Alloca:
00257   case Instruction::Load:
00258   case Instruction::Invoke:
00259   case Instruction::UDiv:
00260   case Instruction::SDiv:
00261   case Instruction::FDiv:
00262   case Instruction::URem:
00263   case Instruction::SRem:
00264   case Instruction::FRem:
00265     return true;
00266   case Instruction::Call:
00267     return !isa<DbgInfoIntrinsic>(I);
00268   default:
00269     return false;
00270   }
00271 }
00272 
00273 void Reassociate::BuildRankMap(Function &F) {
00274   unsigned i = 2;
00275 
00276   // Assign distinct ranks to function arguments
00277   for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
00278     ValueRankMap[&*I] = ++i;
00279 
00280   ReversePostOrderTraversal<Function*> RPOT(&F);
00281   for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(),
00282          E = RPOT.end(); I != E; ++I) {
00283     BasicBlock *BB = *I;
00284     unsigned BBRank = RankMap[BB] = ++i << 16;
00285 
00286     // Walk the basic block, adding precomputed ranks for any instructions that
00287     // we cannot move.  This ensures that the ranks for these instructions are
00288     // all different in the block.
00289     for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
00290       if (isUnmovableInstruction(I))
00291         ValueRankMap[&*I] = ++BBRank;
00292   }
00293 }
00294 
00295 unsigned Reassociate::getRank(Value *V) {
00296   Instruction *I = dyn_cast<Instruction>(V);
00297   if (!I) {
00298     if (isa<Argument>(V)) return ValueRankMap[V];   // Function argument.
00299     return 0;  // Otherwise it's a global or constant, rank 0.
00300   }
00301 
00302   if (unsigned Rank = ValueRankMap[I])
00303     return Rank;    // Rank already known?
00304 
00305   // If this is an expression, return the 1+MAX(rank(LHS), rank(RHS)) so that
00306   // we can reassociate expressions for code motion!  Since we do not recurse
00307   // for PHI nodes, we cannot have infinite recursion here, because there
00308   // cannot be loops in the value graph that do not go through PHI nodes.
00309   unsigned Rank = 0, MaxRank = RankMap[I->getParent()];
00310   for (unsigned i = 0, e = I->getNumOperands();
00311        i != e && Rank != MaxRank; ++i)
00312     Rank = std::max(Rank, getRank(I->getOperand(i)));
00313 
00314   // If this is a not or neg instruction, do not count it for rank.  This
00315   // assures us that X and ~X will have the same rank.
00316   Type *Ty = V->getType();
00317   if ((!Ty->isIntegerTy() && !Ty->isFloatingPointTy()) ||
00318       (!BinaryOperator::isNot(I) && !BinaryOperator::isNeg(I) &&
00319        !BinaryOperator::isFNeg(I)))
00320     ++Rank;
00321 
00322   //DEBUG(dbgs() << "Calculated Rank[" << V->getName() << "] = "
00323   //     << Rank << "\n");
00324 
00325   return ValueRankMap[I] = Rank;
00326 }
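// A small worked example (hypothetical IR): for "%t = add i32 %a, %b" where
// %a and %b are function arguments, getRank(%t) = 1 + max(rank(%a), rank(%b)),
// while for the negation "%n = sub i32 0, %t" the +1 is skipped, so %n and %t
// get the same rank and the FindInOperandList scan below can pair x with -x.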
00327 
00328 static BinaryOperator *CreateAdd(Value *S1, Value *S2, const Twine &Name,
00329                                  Instruction *InsertBefore, Value *FlagsOp) {
00330   if (S1->getType()->isIntegerTy())
00331     return BinaryOperator::CreateAdd(S1, S2, Name, InsertBefore);
00332   else {
00333     BinaryOperator *Res =
00334         BinaryOperator::CreateFAdd(S1, S2, Name, InsertBefore);
00335     Res->setFastMathFlags(cast<FPMathOperator>(FlagsOp)->getFastMathFlags());
00336     return Res;
00337   }
00338 }
00339 
00340 static BinaryOperator *CreateMul(Value *S1, Value *S2, const Twine &Name,
00341                                  Instruction *InsertBefore, Value *FlagsOp) {
00342   if (S1->getType()->isIntegerTy())
00343     return BinaryOperator::CreateMul(S1, S2, Name, InsertBefore);
00344   else {
00345     BinaryOperator *Res =
00346       BinaryOperator::CreateFMul(S1, S2, Name, InsertBefore);
00347     Res->setFastMathFlags(cast<FPMathOperator>(FlagsOp)->getFastMathFlags());
00348     return Res;
00349   }
00350 }
00351 
00352 static BinaryOperator *CreateNeg(Value *S1, const Twine &Name,
00353                                  Instruction *InsertBefore, Value *FlagsOp) {
00354   if (S1->getType()->isIntegerTy())
00355     return BinaryOperator::CreateNeg(S1, Name, InsertBefore);
00356   else {
00357     BinaryOperator *Res = BinaryOperator::CreateFNeg(S1, Name, InsertBefore);
00358     Res->setFastMathFlags(cast<FPMathOperator>(FlagsOp)->getFastMathFlags());
00359     return Res;
00360   }
00361 }
00362 
00363 /// LowerNegateToMultiply - Replace 0-X with X*-1.
00364 ///
00365 static BinaryOperator *LowerNegateToMultiply(Instruction *Neg) {
00366   Type *Ty = Neg->getType();
00367   Constant *NegOne = Ty->isIntegerTy() ? ConstantInt::getAllOnesValue(Ty)
00368                                        : ConstantFP::get(Ty, -1.0);
00369 
00370   BinaryOperator *Res = CreateMul(Neg->getOperand(1), NegOne, "", Neg, Neg);
00371   Neg->setOperand(1, Constant::getNullValue(Ty)); // Drop use of op.
00372   Res->takeName(Neg);
00373   Neg->replaceAllUsesWith(Res);
00374   Res->setDebugLoc(Neg->getDebugLoc());
00375   return Res;
00376 }
00377 
00378 /// CarmichaelShift - Returns k such that lambda(2^Bitwidth) = 2^k, where lambda
00379 /// is the Carmichael function. This means that x^(2^k) === 1 mod 2^Bitwidth for
00380 /// every odd x, i.e. x^(2^k) = 1 for every odd x in Bitwidth-bit arithmetic.
00381 /// Note that 0 <= k < Bitwidth, and if Bitwidth > 3 then x^(2^k) = 0 for every
00382 /// even x in Bitwidth-bit arithmetic.
00383 static unsigned CarmichaelShift(unsigned Bitwidth) {
00384   if (Bitwidth < 3)
00385     return Bitwidth - 1;
00386   return Bitwidth - 2;
00387 }
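// For example, CarmichaelShift(8) returns 6: lambda(2^8) = 2^6 = 64, so
// x^64 == 1 (mod 256) for every odd 8-bit x, and x^64 == 0 for every even
// 8-bit x (since 64 >= 8).  For Bitwidth = 2 it returns 1: x^2 == 1 (mod 4)
// for every odd x.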
00388 
00389 /// IncorporateWeight - Add the extra weight 'RHS' to the existing weight 'LHS',
00390 /// reducing the combined weight using any special properties of the operation.
00391 /// The existing weight LHS represents the computation X op X op ... op X where
00392 /// X occurs LHS times.  The combined weight represents X op X op ... op X with
00393 /// X occurring LHS + RHS times.  If op is "Xor" for example then the combined
00394 /// operation is equivalent to X if LHS + RHS is odd, or 0 if LHS + RHS is even;
00395 /// the routine returns 1 in LHS in the first case, and 0 in LHS in the second.
00396 static void IncorporateWeight(APInt &LHS, const APInt &RHS, unsigned Opcode) {
00397   // If we were working with infinite precision arithmetic then the combined
00398   // weight would be LHS + RHS.  But we are using finite precision arithmetic,
00399   // and the APInt sum LHS + RHS may not be correct if it wraps (it is correct
00400   // for nilpotent operations and addition, but not for idempotent operations
00401   // and multiplication), so it is important to correctly reduce the combined
00402   // weight back into range if wrapping would be wrong.
00403 
00404   // If RHS is zero then the weight didn't change.
00405   if (RHS.isMinValue())
00406     return;
00407   // If LHS is zero then the combined weight is RHS.
00408   if (LHS.isMinValue()) {
00409     LHS = RHS;
00410     return;
00411   }
00412   // From this point on we know that neither LHS nor RHS is zero.
00413 
00414   if (Instruction::isIdempotent(Opcode)) {
00415     // Idempotent means X op X === X, so any non-zero weight is equivalent to a
00416     // weight of 1.  Keeping weights at zero or one also means that wrapping is
00417     // not a problem.
00418     assert(LHS == 1 && RHS == 1 && "Weights not reduced!");
00419     return; // Return a weight of 1.
00420   }
00421   if (Instruction::isNilpotent(Opcode)) {
00422     // Nilpotent means X op X === 0, so reduce weights modulo 2.
00423     assert(LHS == 1 && RHS == 1 && "Weights not reduced!");
00424     LHS = 0; // 1 + 1 === 0 modulo 2.
00425     return;
00426   }
00427   if (Opcode == Instruction::Add || Opcode == Instruction::FAdd) {
00428     // TODO: Reduce the weight by exploiting nsw/nuw?
00429     LHS += RHS;
00430     return;
00431   }
00432 
00433   assert((Opcode == Instruction::Mul || Opcode == Instruction::FMul) &&
00434          "Unknown associative operation!");
00435   unsigned Bitwidth = LHS.getBitWidth();
00436   // If CM is the Carmichael number then a weight W satisfying W >= CM+Bitwidth
00437   // can be replaced with W-CM.  That's because x^W=x^(W-CM) for every Bitwidth
00438   // bit number x, since either x is odd in which case x^CM = 1, or x is even in
00439   // which case both x^W and x^(W - CM) are zero.  By subtracting off multiples
00440   // of CM like this weights can always be reduced to the range [0, CM+Bitwidth)
00441   // which by a happy accident means that they can always be represented using
00442   // Bitwidth bits.
00443   // TODO: Reduce the weight by exploiting nsw/nuw?  (Could do much better than
00444   // the Carmichael number).
00445   if (Bitwidth > 3) {
00446     /// CM - The value of Carmichael's lambda function.
00447     APInt CM = APInt::getOneBitSet(Bitwidth, CarmichaelShift(Bitwidth));
00448     // Any weight W >= Threshold can be replaced with W - CM.
00449     APInt Threshold = CM + Bitwidth;
00450     assert(LHS.ult(Threshold) && RHS.ult(Threshold) && "Weights not reduced!");
00451     // For Bitwidth 4 or more the following sum does not overflow.
00452     LHS += RHS;
00453     while (LHS.uge(Threshold))
00454       LHS -= CM;
00455   } else {
00456     // To avoid problems with overflow do everything the same as above but using
00457     // a larger type.
00458     unsigned CM = 1U << CarmichaelShift(Bitwidth);
00459     unsigned Threshold = CM + Bitwidth;
00460     assert(LHS.getZExtValue() < Threshold && RHS.getZExtValue() < Threshold &&
00461            "Weights not reduced!");
00462     unsigned Total = LHS.getZExtValue() + RHS.getZExtValue();
00463     while (Total >= Threshold)
00464       Total -= CM;
00465     LHS = Total;
00466   }
00467 }
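// Two worked instances of the reduction above (hypothetical weights):
//  - Xor (nilpotent), LHS = 1, RHS = 1: result LHS = 0, since X ^ X == 0.
//  - Mul with Bitwidth = 8: CM = 64, Threshold = 72.  LHS = 70, RHS = 10 gives
//    80, which is reduced to 80 - 64 = 16, because x^80 == x^16 for every
//    8-bit x.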
00468 
00469 typedef std::pair<Value*, APInt> RepeatedValue;
00470 
00471 /// LinearizeExprTree - Given an associative binary expression, return the leaf
00472 /// nodes in Ops along with their weights (how many times the leaf occurs).  The
00473 /// original expression is the same as
00474 ///   (Ops[0].first op Ops[0].first op ... Ops[0].first)  <- Ops[0].second times
00475 /// op
00476 ///   (Ops[1].first op Ops[1].first op ... Ops[1].first)  <- Ops[1].second times
00477 /// op
00478 ///   ...
00479 /// op
00480 ///   (Ops[N].first op Ops[N].first op ... Ops[N].first)  <- Ops[N].second times
00481 ///
00482 /// Note that the values Ops[0].first, ..., Ops[N].first are all distinct.
00483 ///
00484 /// This routine may modify the function, in which case it returns 'true'.  The
00485 /// changes it makes may well be destructive, changing the value computed by 'I'
00486 /// to something completely different.  Thus if the routine returns 'true' then
00487 /// you MUST either replace I with a new expression computed from the Ops array,
00488 /// or use RewriteExprTree to put the values back in.
00489 ///
00490 /// A leaf node is either not a binary operation of the same kind as the root
00491 /// node 'I' (i.e. is not a binary operator at all, or is, but with a different
00492 /// opcode), or is the same kind of binary operator but has a use which either
00493 /// does not belong to the expression, or does belong to the expression but is
00494 /// a leaf node.  Every leaf node has at least one use that is a non-leaf node
00495 /// of the expression, while for non-leaf nodes (except for the root 'I') every
00496 /// use is a non-leaf node of the expression.
00497 ///
00498 /// For example:
00499 ///           expression graph        node names
00500 ///
00501 ///                     +        |        I
00502 ///                    / \       |
00503 ///                   +   +      |      A,  B
00504 ///                  / \ / \     |
00505 ///                 *   +   *    |    C,  D,  E
00506 ///                / \ / \ / \   |
00507 ///                   +   *      |      F,  G
00508 ///
00509 /// The leaf nodes are C, E, F and G.  The Ops array will contain (maybe not in
00510 /// that order) (C, 1), (E, 1), (F, 2), (G, 2).
00511 ///
00512 /// The expression is maximal: if some instruction is a binary operator of the
00513 /// same kind as 'I', and all of its uses are non-leaf nodes of the expression,
00514 /// then the instruction also belongs to the expression, is not a leaf node of
00515 /// it, and its operands also belong to the expression (but may be leaf nodes).
00516 ///
00517 /// NOTE: This routine will set operands of non-leaf non-root nodes to undef in
00518 /// order to ensure that every non-root node in the expression has *exactly one*
00519 /// use by a non-leaf node of the expression.  This destruction means that the
00520 /// caller MUST either replace 'I' with a new expression or use something like
00521 /// RewriteExprTree to put the values back in if the routine indicates that it
00522 /// made a change by returning 'true'.
00523 ///
00524 /// In the above example either the right operand of A or the left operand of B
00525 /// will be replaced by undef.  If it is B's operand then this gives:
00526 ///
00527 ///                     +        |        I
00528 ///                    / \       |
00529 ///                   +   +      |      A,  B - operand of B replaced with undef
00530 ///                  / \   \     |
00531 ///                 *   +   *    |    C,  D,  E
00532 ///                / \ / \ / \   |
00533 ///                   +   *      |      F,  G
00534 ///
00535 /// Note that such undef operands can only be reached by passing through 'I'.
00536 /// For example, if you visit operands recursively starting from a leaf node
00537 /// then you will never see such an undef operand unless you get back to 'I',
00538 /// which requires passing through a phi node.
00539 ///
00540 /// Note that this routine may also mutate binary operators of the wrong type
00541 /// that have all uses inside the expression (i.e. only used by non-leaf nodes
00542 /// of the expression) if it can turn them into binary operators of the right
00543 /// type and thus make the expression bigger.
00544 
00545 static bool LinearizeExprTree(BinaryOperator *I,
00546                               SmallVectorImpl<RepeatedValue> &Ops) {
00547   DEBUG(dbgs() << "LINEARIZE: " << *I << '\n');
00548   unsigned Bitwidth = I->getType()->getScalarType()->getPrimitiveSizeInBits();
00549   unsigned Opcode = I->getOpcode();
00550   assert(I->isAssociative() && I->isCommutative() &&
00551          "Expected an associative and commutative operation!");
00552 
00553   // Visit all operands of the expression, keeping track of their weight (the
00554   // number of paths from the expression root to the operand, or if you like
00555   // the number of times that operand occurs in the linearized expression).
00556   // For example, if I = X + A, where X = A + B, then I, X and B have weight 1
00557   // while A has weight two.
00558 
00559   // Worklist of non-leaf nodes (their operands are in the expression too) along
00560   // with their weights, representing a certain number of paths to the operator.
00561   // If an operator occurs in the worklist multiple times then we found multiple
00562   // ways to get to it.
00563   SmallVector<std::pair<BinaryOperator*, APInt>, 8> Worklist; // (Op, Weight)
00564   Worklist.push_back(std::make_pair(I, APInt(Bitwidth, 1)));
00565   bool MadeChange = false;
00566 
00567   // Leaves of the expression are values that either aren't the right kind of
00568   // operation (eg: a constant, or a multiply in an add tree), or are, but have
00569   // some uses that are not inside the expression.  For example, in I = X + X,
00570   // X = A + B, the value X has two uses (by I) that are in the expression.  If
00571   // X has any other uses, for example in a return instruction, then we consider
00572   // X to be a leaf, and won't analyze it further.  When we first visit a value,
00573   // if it has more than one use then at first we conservatively consider it to
00574   // be a leaf.  Later, as the expression is explored, we may discover some more
00575   // uses of the value from inside the expression.  If all uses turn out to be
00576   // from within the expression (and the value is a binary operator of the right
00577   // kind) then the value is no longer considered to be a leaf, and its operands
00578   // are explored.
00579 
00580   // Leaves - Keeps track of the set of putative leaves as well as the number of
00581   // paths to each leaf seen so far.
00582   typedef DenseMap<Value*, APInt> LeafMap;
00583   LeafMap Leaves; // Leaf -> Total weight so far.
00584   SmallVector<Value*, 8> LeafOrder; // Ensure deterministic leaf output order.
00585 
00586 #ifndef NDEBUG
00587   SmallPtrSet<Value*, 8> Visited; // For sanity checking the iteration scheme.
00588 #endif
00589   while (!Worklist.empty()) {
00590     std::pair<BinaryOperator*, APInt> P = Worklist.pop_back_val();
00591     I = P.first; // We examine the operands of this binary operator.
00592 
00593     for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx) { // Visit operands.
00594       Value *Op = I->getOperand(OpIdx);
00595       APInt Weight = P.second; // Number of paths to this operand.
00596       DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n");
00597       assert(!Op->use_empty() && "No uses, so how did we get to it?!");
00598 
00599       // If this is a binary operation of the right kind with only one use then
00600       // add its operands to the expression.
00601       if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
00602         assert(Visited.insert(Op) && "Not first visit!");
00603         DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n");
00604         Worklist.push_back(std::make_pair(BO, Weight));
00605         continue;
00606       }
00607 
00608       // Appears to be a leaf.  Is the operand already in the set of leaves?
00609       LeafMap::iterator It = Leaves.find(Op);
00610       if (It == Leaves.end()) {
00611         // Not in the leaf map.  Must be the first time we saw this operand.
00612         assert(Visited.insert(Op) && "Not first visit!");
00613         if (!Op->hasOneUse()) {
00614           // This value has uses not accounted for by the expression, so it is
00615           // not safe to modify.  Mark it as being a leaf.
00616           DEBUG(dbgs() << "ADD USES LEAF: " << *Op << " (" << Weight << ")\n");
00617           LeafOrder.push_back(Op);
00618           Leaves[Op] = Weight;
00619           continue;
00620         }
00621         // No uses outside the expression, try morphing it.
00622       } else if (It != Leaves.end()) {
00623         // Already in the leaf map.
00624         assert(Visited.count(Op) && "In leaf map but not visited!");
00625 
00626         // Update the number of paths to the leaf.
00627         IncorporateWeight(It->second, Weight, Opcode);
00628 
00629 #if 0   // TODO: Re-enable once PR13021 is fixed.
00630         // The leaf already has one use from inside the expression.  As we want
00631         // exactly one such use, drop this new use of the leaf.
00632         assert(!Op->hasOneUse() && "Only one use, but we got here twice!");
00633         I->setOperand(OpIdx, UndefValue::get(I->getType()));
00634         MadeChange = true;
00635 
00636         // If the leaf is a binary operation of the right kind and we now see
00637         // that its multiple original uses were in fact all by nodes belonging
00638         // to the expression, then no longer consider it to be a leaf and add
00639         // its operands to the expression.
00640         if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
00641           DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n");
00642           Worklist.push_back(std::make_pair(BO, It->second));
00643           Leaves.erase(It);
00644           continue;
00645         }
00646 #endif
00647 
00648         // If we still have uses that are not accounted for by the expression
00649         // then it is not safe to modify the value.
00650         if (!Op->hasOneUse())
00651           continue;
00652 
00653         // No uses outside the expression, try morphing it.
00654         Weight = It->second;
00655         Leaves.erase(It); // Since the value may be morphed below.
00656       }
00657 
00658       // At this point we have a value which, first of all, is not a binary
00659       // expression of the right kind, and secondly, is only used inside the
00660       // expression.  This means that it can safely be modified.  See if we
00661       // can usefully morph it into an expression of the right kind.
00662       assert((!isa<Instruction>(Op) ||
00663               cast<Instruction>(Op)->getOpcode() != Opcode) &&
00664              "Should have been handled above!");
00665       assert(Op->hasOneUse() && "Has uses outside the expression tree!");
00666 
00667       // If this is a multiply expression, turn any internal negations into
00668       // multiplies by -1 so they can be reassociated.
00669       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op))
00670         if ((Opcode == Instruction::Mul && BinaryOperator::isNeg(BO)) ||
00671             (Opcode == Instruction::FMul && BinaryOperator::isFNeg(BO))) {
00672           DEBUG(dbgs() << "MORPH LEAF: " << *Op << " (" << Weight << ") TO ");
00673           BO = LowerNegateToMultiply(BO);
00674           DEBUG(dbgs() << *BO << '\n');
00675           Worklist.push_back(std::make_pair(BO, Weight));
00676           MadeChange = true;
00677           continue;
00678         }
00679 
00680       // Failed to morph into an expression of the right type.  This really is
00681       // a leaf.
00682       DEBUG(dbgs() << "ADD LEAF: " << *Op << " (" << Weight << ")\n");
00683       assert(!isReassociableOp(Op, Opcode) && "Value was morphed?");
00684       LeafOrder.push_back(Op);
00685       Leaves[Op] = Weight;
00686     }
00687   }
00688 
00689   // The leaves, repeated according to their weights, represent the linearized
00690   // form of the expression.
00691   for (unsigned i = 0, e = LeafOrder.size(); i != e; ++i) {
00692     Value *V = LeafOrder[i];
00693     LeafMap::iterator It = Leaves.find(V);
00694     if (It == Leaves.end())
00695       // Node initially thought to be a leaf wasn't.
00696       continue;
00697     assert(!isReassociableOp(V, Opcode) && "Shouldn't be a leaf!");
00698     APInt Weight = It->second;
00699     if (Weight.isMinValue())
00700       // Leaf already output or weight reduction eliminated it.
00701       continue;
00702     // Ensure the leaf is only output once.
00703     It->second = 0;
00704     Ops.push_back(std::make_pair(V, Weight));
00705   }
00706 
00707   // For nilpotent operations or addition there may be no operands, for example
00708   // because the expression was "X xor X" or consisted of 2^Bitwidth additions:
00709   // in both cases the weight reduces to 0 causing the value to be skipped.
00710   if (Ops.empty()) {
00711     Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType());
00712     assert(Identity && "Associative operation without identity!");
00713     Ops.push_back(std::make_pair(Identity, APInt(Bitwidth, 1)));
00714   }
00715 
00716   return MadeChange;
00717 }
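// A short example of the linearization (hypothetical IR): for
//   %x = add i32 %a, %b        ; single use, by %i
//   %y = add i32 %a, %c        ; single use, by %i
//   %i = add i32 %x, %y
// LinearizeExprTree(%i, Ops) fills Ops (in some order) with
// {(%a, 2), (%b, 1), (%c, 1)}, since %a is reachable from %i along two paths;
// %x and %y are the inner (non-leaf) nodes and do not appear in Ops.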
00718 
00719 // RewriteExprTree - Now that the operands for this expression tree are
00720 // linearized and optimized, emit them in-order.
00721 void Reassociate::RewriteExprTree(BinaryOperator *I,
00722                                   SmallVectorImpl<ValueEntry> &Ops) {
00723   assert(Ops.size() > 1 && "Single values should be used directly!");
00724 
00725   // Since our optimizations should never increase the number of operations, the
00726   // new expression can usually be written reusing the existing binary operators
00727   // from the original expression tree, without creating any new instructions,
00728   // though the rewritten expression may have a completely different topology.
00729   // We take care to not change anything if the new expression will be the same
00730   // as the original.  If more than trivial changes (like commuting operands)
00731   // were made then we are obliged to clear out any optional subclass data like
00732   // nsw flags.
00733 
00734   /// NodesToRewrite - Nodes from the original expression available for writing
00735   /// the new expression into.
00736   SmallVector<BinaryOperator*, 8> NodesToRewrite;
00737   unsigned Opcode = I->getOpcode();
00738   BinaryOperator *Op = I;
00739 
00740   /// NotRewritable - The operands being written will be the leaves of the new
00741   /// expression and must not be used as inner nodes (via NodesToRewrite) by
00742   /// mistake.  Inner nodes are always reassociable, and usually leaves are not
00743   /// (if they were they would have been incorporated into the expression and so
00744   /// would not be leaves), so most of the time there is no danger of this.  But
00745   /// in rare cases a leaf may become reassociable if an optimization kills uses
00746   /// of it, or it may momentarily become reassociable during rewriting (below)
00747   /// due to it being removed as an operand of one of its uses.  Ensure that misuse
00748   /// of leaf nodes as inner nodes cannot occur by remembering all of the future
00749   /// leaves and refusing to reuse any of them as inner nodes.
00750   SmallPtrSet<Value*, 8> NotRewritable;
00751   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
00752     NotRewritable.insert(Ops[i].Op);
00753 
00754   // ExpressionChanged - Non-null if the rewritten expression differs from the
00755   // original in some non-trivial way, requiring the clearing of optional flags.
00756   // Flags are cleared from the operator in ExpressionChanged up to I inclusive.
00757   BinaryOperator *ExpressionChanged = nullptr;
00758   for (unsigned i = 0; ; ++i) {
00759     // The last operation (which comes earliest in the IR) is special as both
00760     // operands will come from Ops, rather than just one with the other being
00761     // a subexpression.
00762     if (i+2 == Ops.size()) {
00763       Value *NewLHS = Ops[i].Op;
00764       Value *NewRHS = Ops[i+1].Op;
00765       Value *OldLHS = Op->getOperand(0);
00766       Value *OldRHS = Op->getOperand(1);
00767 
00768       if (NewLHS == OldLHS && NewRHS == OldRHS)
00769         // Nothing changed, leave it alone.
00770         break;
00771 
00772       if (NewLHS == OldRHS && NewRHS == OldLHS) {
00773         // The order of the operands was reversed.  Swap them.
00774         DEBUG(dbgs() << "RA: " << *Op << '\n');
00775         Op->swapOperands();
00776         DEBUG(dbgs() << "TO: " << *Op << '\n');
00777         MadeChange = true;
00778         ++NumChanged;
00779         break;
00780       }
00781 
00782       // The new operation differs non-trivially from the original. Overwrite
00783       // the old operands with the new ones.
00784       DEBUG(dbgs() << "RA: " << *Op << '\n');
00785       if (NewLHS != OldLHS) {
00786         BinaryOperator *BO = isReassociableOp(OldLHS, Opcode);
00787         if (BO && !NotRewritable.count(BO))
00788           NodesToRewrite.push_back(BO);
00789         Op->setOperand(0, NewLHS);
00790       }
00791       if (NewRHS != OldRHS) {
00792         BinaryOperator *BO = isReassociableOp(OldRHS, Opcode);
00793         if (BO && !NotRewritable.count(BO))
00794           NodesToRewrite.push_back(BO);
00795         Op->setOperand(1, NewRHS);
00796       }
00797       DEBUG(dbgs() << "TO: " << *Op << '\n');
00798 
00799       ExpressionChanged = Op;
00800       MadeChange = true;
00801       ++NumChanged;
00802 
00803       break;
00804     }
00805 
00806     // Not the last operation.  The left-hand side will be a sub-expression
00807     // while the right-hand side will be the current element of Ops.
00808     Value *NewRHS = Ops[i].Op;
00809     if (NewRHS != Op->getOperand(1)) {
00810       DEBUG(dbgs() << "RA: " << *Op << '\n');
00811       if (NewRHS == Op->getOperand(0)) {
00812         // The new right-hand side was already present as the left operand.  If
00813         // we are lucky then swapping the operands will sort out both of them.
00814         Op->swapOperands();
00815       } else {
00816         // Overwrite with the new right-hand side.
00817         BinaryOperator *BO = isReassociableOp(Op->getOperand(1), Opcode);
00818         if (BO && !NotRewritable.count(BO))
00819           NodesToRewrite.push_back(BO);
00820         Op->setOperand(1, NewRHS);
00821         ExpressionChanged = Op;
00822       }
00823       DEBUG(dbgs() << "TO: " << *Op << '\n');
00824       MadeChange = true;
00825       ++NumChanged;
00826     }
00827 
00828     // Now deal with the left-hand side.  If this is already an operation node
00829     // from the original expression then just rewrite the rest of the expression
00830     // into it.
00831     BinaryOperator *BO = isReassociableOp(Op->getOperand(0), Opcode);
00832     if (BO && !NotRewritable.count(BO)) {
00833       Op = BO;
00834       continue;
00835     }
00836 
00837     // Otherwise, grab a spare node from the original expression and use that as
00838     // the left-hand side.  If there are no nodes left then the optimizers made
00839     // an expression with more nodes than the original!  This usually means that
00840     // they did something stupid but it might mean that the problem was just too
00841     // hard (finding the minimal number of multiplications needed to realize a
00842     // multiplication expression is NP-complete).  Whatever the reason, smart or
00843     // stupid, create a new node if there are none left.
00844     BinaryOperator *NewOp;
00845     if (NodesToRewrite.empty()) {
00846       Constant *Undef = UndefValue::get(I->getType());
00847       NewOp = BinaryOperator::Create(Instruction::BinaryOps(Opcode),
00848                                      Undef, Undef, "", I);
00849       if (NewOp->getType()->isFloatingPointTy())
00850         NewOp->setFastMathFlags(I->getFastMathFlags());
00851     } else {
00852       NewOp = NodesToRewrite.pop_back_val();
00853     }
00854 
00855     DEBUG(dbgs() << "RA: " << *Op << '\n');
00856     Op->setOperand(0, NewOp);
00857     DEBUG(dbgs() << "TO: " << *Op << '\n');
00858     ExpressionChanged = Op;
00859     MadeChange = true;
00860     ++NumChanged;
00861     Op = NewOp;
00862   }
00863 
00864   // If the expression changed non-trivially then clear out all subclass data
00865   // starting from the operator specified in ExpressionChanged, and compactify
00866   // the operators to just before the expression root to guarantee that the
00867   // expression tree is dominated by all of Ops.
00868   if (ExpressionChanged)
00869     do {
00870       // Preserve FastMathFlags.
00871       if (isa<FPMathOperator>(I)) {
00872         FastMathFlags Flags = I->getFastMathFlags();
00873         ExpressionChanged->clearSubclassOptionalData();
00874         ExpressionChanged->setFastMathFlags(Flags);
00875       } else
00876         ExpressionChanged->clearSubclassOptionalData();
00877 
00878       if (ExpressionChanged == I)
00879         break;
00880       ExpressionChanged->moveBefore(I);
00881       ExpressionChanged = cast<BinaryOperator>(*ExpressionChanged->user_begin());
00882     } while (1);
00883 
00884   // Throw away any left over nodes from the original expression.
00885   for (unsigned i = 0, e = NodesToRewrite.size(); i != e; ++i)
00886     RedoInsts.insert(NodesToRewrite[i]);
00887 }
00888 
00889 /// NegateValue - Insert instructions before the instruction pointed to by BI
00890 /// that compute the negative version of the specified value.  The negative
00891 /// version of the value is returned, and BI is left pointing at the instruction
00892 /// that should be processed next by the reassociation pass.
00893 static Value *NegateValue(Value *V, Instruction *BI) {
00894   if (ConstantFP *C = dyn_cast<ConstantFP>(V))
00895     return ConstantExpr::getFNeg(C);
00896   if (Constant *C = dyn_cast<Constant>(V))
00897     return ConstantExpr::getNeg(C);
00898 
00899   // We are trying to expose opportunity for reassociation.  One of the things
00900   // that we want to do to achieve this is to push a negation as deep into an
00901   // expression chain as possible, to expose the add instructions.  In practice,
00902   // this means that we turn this:
00903   //   X = -(A+12+C+D)   into    X = -A + -12 + -C + -D = -12 + -A + -C + -D
00904   // so that a later expression such as Y = 12+X can get reassociated with the -12 to eliminate
00905   // the constants.  We assume that instcombine will clean up the mess later if
00906   // we introduce tons of unnecessary negation instructions.
00907   //
00908   if (BinaryOperator *I =
00909           isReassociableOp(V, Instruction::Add, Instruction::FAdd)) {
00910     // Push the negates through the add.
00911     I->setOperand(0, NegateValue(I->getOperand(0), BI));
00912     I->setOperand(1, NegateValue(I->getOperand(1), BI));
00913 
00914     // We must move the add instruction here, because the neg instructions do
00915     // not dominate the old add instruction in general.  By moving it, we are
00916     // assured that the neg instructions we just inserted dominate the
00917     // instruction we are about to insert after them.
00918     //
00919     I->moveBefore(BI);
00920     I->setName(I->getName()+".neg");
00921     return I;
00922   }
00923 
00924   // Okay, we need to materialize a negated version of V with an instruction.
00925   // Scan the use lists of V to see if we have one already.
00926   for (User *U : V->users()) {
00927     if (!BinaryOperator::isNeg(U) && !BinaryOperator::isFNeg(U))
00928       continue;
00929 
00930     // We found one!  Now we have to make sure that the definition dominates
00931     // this use.  We do this by moving it to the entry block (if it is a
00932     // non-instruction value) or right after the definition.  These negates will
00933     // be zapped by reassociate later, so we don't need much finesse here.
00934     BinaryOperator *TheNeg = cast<BinaryOperator>(U);
00935 
00936     // Verify that the negate is in this function, V might be a constant expr.
00937     if (TheNeg->getParent()->getParent() != BI->getParent()->getParent())
00938       continue;
00939 
00940     BasicBlock::iterator InsertPt;
00941     if (Instruction *InstInput = dyn_cast<Instruction>(V)) {
00942       if (InvokeInst *II = dyn_cast<InvokeInst>(InstInput)) {
00943         InsertPt = II->getNormalDest()->begin();
00944       } else {
00945         InsertPt = InstInput;
00946         ++InsertPt;
00947       }
00948       while (isa<PHINode>(InsertPt)) ++InsertPt;
00949     } else {
00950       InsertPt = TheNeg->getParent()->getParent()->getEntryBlock().begin();
00951     }
00952     TheNeg->moveBefore(InsertPt);
00953     return TheNeg;
00954   }
00955 
00956   // Insert a 'neg' instruction that subtracts the value from zero to get the
00957   // negation.
00958   return CreateNeg(V, V->getName() + ".neg", BI, BI);
00959 }
00960 
00961 /// ShouldBreakUpSubtract - Return true if we should break up this subtract of
00962 /// X-Y into (X + -Y).
00963 static bool ShouldBreakUpSubtract(Instruction *Sub) {
00964   // If this is a negation, we can't split it up!
00965   if (BinaryOperator::isNeg(Sub) || BinaryOperator::isFNeg(Sub))
00966     return false;
00967 
00968   // Don't bother to break this up unless either operand is an associable add
00969   // or subtract, or this subtract's only user is one.
00970   Value *V0 = Sub->getOperand(0);
00971   if (isReassociableOp(V0, Instruction::Add, Instruction::FAdd) ||
00972       isReassociableOp(V0, Instruction::Sub, Instruction::FSub))
00973     return true;
00974   Value *V1 = Sub->getOperand(1);
00975   if (isReassociableOp(V1, Instruction::Add, Instruction::FAdd) ||
00976       isReassociableOp(V1, Instruction::Sub, Instruction::FSub))
00977     return true;
00978   Value *VB = Sub->user_back();
00979   if (Sub->hasOneUse() &&
00980       (isReassociableOp(VB, Instruction::Add, Instruction::FAdd) ||
00981        isReassociableOp(VB, Instruction::Sub, Instruction::FSub)))
00982     return true;
00983 
00984   return false;
00985 }
00986 
00987 /// BreakUpSubtract - If we have (X-Y), and if either X is an add, or if this is
00988 /// only used by an add, transform this into (X+(0-Y)) to promote better
00989 /// reassociation.
00990 static BinaryOperator *BreakUpSubtract(Instruction *Sub) {
00991   // Convert a subtract into an add and a neg instruction. This allows sub
00992   // instructions to be commuted with other add instructions.
00993   //
00994   // Calculate the negative value of Operand 1 of the sub instruction,
00995   // and set it as the RHS of the add instruction we just made.
00996   //
00997   Value *NegVal = NegateValue(Sub->getOperand(1), Sub);
00998   BinaryOperator *New = CreateAdd(Sub->getOperand(0), NegVal, "", Sub, Sub);
00999   Sub->setOperand(0, Constant::getNullValue(Sub->getType())); // Drop use of op.
01000   Sub->setOperand(1, Constant::getNullValue(Sub->getType())); // Drop use of op.
01001   New->takeName(Sub);
01002 
01003   // Everyone now refers to the add instruction.
01004   Sub->replaceAllUsesWith(New);
01005   New->setDebugLoc(Sub->getDebugLoc());
01006 
01007   DEBUG(dbgs() << "Negated: " << *New << '\n');
01008   return New;
01009 }
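// For example (hypothetical IR), "%t = sub i32 %x, %y", where %y has no
// reassociable definition, becomes
//   %y.neg = sub i32 0, %y
//   %t     = add i32 %x, %y.neg
// so %t can then be commuted and merged with neighbouring adds.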
01010 
01011 /// ConvertShiftToMul - If this is a shift of a reassociable multiply or is used
01012 /// by one, change this into a multiply by a constant to assist with further
01013 /// reassociation.
01014 static BinaryOperator *ConvertShiftToMul(Instruction *Shl) {
01015   Constant *MulCst = ConstantInt::get(Shl->getType(), 1);
01016   MulCst = ConstantExpr::getShl(MulCst, cast<Constant>(Shl->getOperand(1)));
01017 
01018   BinaryOperator *Mul =
01019     BinaryOperator::CreateMul(Shl->getOperand(0), MulCst, "", Shl);
01020   Shl->setOperand(0, UndefValue::get(Shl->getType())); // Drop use of op.
01021   Mul->takeName(Shl);
01022   Shl->replaceAllUsesWith(Mul);
01023   Mul->setDebugLoc(Shl->getDebugLoc());
01024   return Mul;
01025 }
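// For example, "%t = shl i32 %x, 3" is rewritten above as "%t = mul i32 %x, 8"
// (MulCst = 1 << 3), which can then take part in multiply reassociation.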
01026 
01027 /// FindInOperandList - Scan backwards and forwards among values with the same
01028 /// rank as element i to see if X exists.  If X does not exist, return i.  This
01029 /// is useful when scanning for 'x' when we see '-x' because they both get the
01030 /// same rank.
01031 static unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i,
01032                                   Value *X) {
01033   unsigned XRank = Ops[i].Rank;
01034   unsigned e = Ops.size();
01035   for (unsigned j = i+1; j != e && Ops[j].Rank == XRank; ++j)
01036     if (Ops[j].Op == X)
01037       return j;
01038   // Scan backwards.
01039   for (unsigned j = i-1; j != ~0U && Ops[j].Rank == XRank; --j)
01040     if (Ops[j].Op == X)
01041       return j;
01042   return i;
01043 }
01044 
01045 /// EmitAddTreeOfValues - Emit a tree of add instructions, summing Ops together
01046 /// and returning the result.  Insert the tree before I.
01047 static Value *EmitAddTreeOfValues(Instruction *I,
01048                                   SmallVectorImpl<WeakVH> &Ops){
01049   if (Ops.size() == 1) return Ops.back();
01050 
01051   Value *V1 = Ops.back();
01052   Ops.pop_back();
01053   Value *V2 = EmitAddTreeOfValues(I, Ops);
01054   return CreateAdd(V2, V1, "tmp", I, I);
01055 }
01056 
01057 /// RemoveFactorFromExpression - If V is an expression tree that is a
01058 /// multiplication sequence, and if this sequence contains a multiply by Factor,
01059 /// remove Factor from the tree and return the new tree.
01060 Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
01061   BinaryOperator *BO = isReassociableOp(V, Instruction::Mul, Instruction::FMul);
01062   if (!BO)
01063     return nullptr;
01064 
01065   SmallVector<RepeatedValue, 8> Tree;
01066   MadeChange |= LinearizeExprTree(BO, Tree);
01067   SmallVector<ValueEntry, 8> Factors;
01068   Factors.reserve(Tree.size());
01069   for (unsigned i = 0, e = Tree.size(); i != e; ++i) {
01070     RepeatedValue E = Tree[i];
01071     Factors.append(E.second.getZExtValue(),
01072                    ValueEntry(getRank(E.first), E.first));
01073   }
01074 
01075   bool FoundFactor = false;
01076   bool NeedsNegate = false;
01077   for (unsigned i = 0, e = Factors.size(); i != e; ++i) {
01078     if (Factors[i].Op == Factor) {
01079       FoundFactor = true;
01080       Factors.erase(Factors.begin()+i);
01081       break;
01082     }
01083 
01084     // If this is a negative version of this factor, remove it.
01085     if (ConstantInt *FC1 = dyn_cast<ConstantInt>(Factor)) {
01086       if (ConstantInt *FC2 = dyn_cast<ConstantInt>(Factors[i].Op))
01087         if (FC1->getValue() == -FC2->getValue()) {
01088           FoundFactor = NeedsNegate = true;
01089           Factors.erase(Factors.begin()+i);
01090           break;
01091         }
01092     } else if (ConstantFP *FC1 = dyn_cast<ConstantFP>(Factor)) {
01093       if (ConstantFP *FC2 = dyn_cast<ConstantFP>(Factors[i].Op)) {
01094         APFloat F1(FC1->getValueAPF());
01095         APFloat F2(FC2->getValueAPF());
01096         F2.changeSign();
01097         if (F1.compare(F2) == APFloat::cmpEqual) {
01098           FoundFactor = NeedsNegate = true;
01099           Factors.erase(Factors.begin() + i);
01100           break;
01101         }
01102       }
01103     }
01104   }
01105 
01106   if (!FoundFactor) {
01107     // Make sure to restore the operands to the expression tree.
01108     RewriteExprTree(BO, Factors);
01109     return nullptr;
01110   }
01111 
01112   BasicBlock::iterator InsertPt = BO; ++InsertPt;
01113 
01114   // If this was just a single multiply, remove the multiply and return the only
01115   // remaining operand.
01116   if (Factors.size() == 1) {
01117     RedoInsts.insert(BO);
01118     V = Factors[0].Op;
01119   } else {
01120     RewriteExprTree(BO, Factors);
01121     V = BO;
01122   }
01123 
01124   if (NeedsNegate)
01125     V = CreateNeg(V, "neg", InsertPt, BO);
01126 
01127   return V;
01128 }
01129 
01130 /// FindSingleUseMultiplyFactors - If V is a single-use multiply, recursively
01131 /// add its operands as factors, otherwise add V to the list of factors.
01132 ///
01133 /// Ops is the top-level list of add operands we're trying to factor.
01134 static void FindSingleUseMultiplyFactors(Value *V,
01135                                          SmallVectorImpl<Value*> &Factors,
01136                                        const SmallVectorImpl<ValueEntry> &Ops) {
01137   BinaryOperator *BO = isReassociableOp(V, Instruction::Mul, Instruction::FMul);
01138   if (!BO) {
01139     Factors.push_back(V);
01140     return;
01141   }
01142 
01143   // Otherwise, add the LHS and RHS to the list of factors.
01144   FindSingleUseMultiplyFactors(BO->getOperand(1), Factors, Ops);
01145   FindSingleUseMultiplyFactors(BO->getOperand(0), Factors, Ops);
01146 }
01147 
01148 /// OptimizeAndOrXor - Optimize a series of operands to an 'and', 'or', or 'xor'
01149 /// instruction.  This optimizes based on identities.  If it can be reduced to
01150 /// a single Value, it is returned, otherwise the Ops list is mutated as
01151 /// necessary.
01152 static Value *OptimizeAndOrXor(unsigned Opcode,
01153                                SmallVectorImpl<ValueEntry> &Ops) {
01154   // Scan the operand lists looking for X and ~X pairs, along with X,X pairs.
01155   // If we find any, we can simplify the expression. X&~X == 0, X|~X == -1.
01156   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
01157     // First, check for X and ~X in the operand list.
01158     assert(i < Ops.size());
01159     if (BinaryOperator::isNot(Ops[i].Op)) {    // Cannot occur for ^.
01160       Value *X = BinaryOperator::getNotArgument(Ops[i].Op);
01161       unsigned FoundX = FindInOperandList(Ops, i, X);
01162       if (FoundX != i) {
01163         if (Opcode == Instruction::And)   // ...&X&~X = 0
01164           return Constant::getNullValue(X->getType());
01165 
01166         if (Opcode == Instruction::Or)    // ...|X|~X = -1
01167           return Constant::getAllOnesValue(X->getType());
01168       }
01169     }
01170 
01171     // Next, check for duplicate pairs of values, which we assume are next to
01172     // each other, due to our sorting criteria.
01173     assert(i < Ops.size());
01174     if (i+1 != Ops.size() && Ops[i+1].Op == Ops[i].Op) {
01175       if (Opcode == Instruction::And || Opcode == Instruction::Or) {
01176         // Drop duplicate values for And and Or.
01177         Ops.erase(Ops.begin()+i);
01178         --i; --e;
01179         ++NumAnnihil;
01180         continue;
01181       }
01182 
01183       // Drop pairs of values for Xor.
01184       assert(Opcode == Instruction::Xor);
01185       if (e == 2)
01186         return Constant::getNullValue(Ops[0].Op->getType());
01187 
01188       // Y ^ X^X -> Y
01189       Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
01190       i -= 1; e -= 2;
01191       ++NumAnnihil;
01192     }
01193   }
01194   return nullptr;
01195 }
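// Two small instances of the identities used above (hypothetical, already
// sorted operand lists):
//   and: Ops = [%x, ~%x]     -> the function returns the constant 0.
//   xor: Ops = [%x, %x, %y]  -> the %x pair cancels, leaving Ops = [%y].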
01196 
01197 /// Helper function of CombineXorOpnd(). It creates a bitwise-and
01198 /// instruction with the given two operands, and returns the resulting
01199 /// instruction. There are two special cases: 1) if the constant operand is 0,
01200 /// it will return NULL. 2) if the constant is ~0, the symbolic operand will
01201 /// be returned.
01202 static Value *createAndInstr(Instruction *InsertBefore, Value *Opnd, 
01203                              const APInt &ConstOpnd) {
01204   if (ConstOpnd != 0) {
01205     if (!ConstOpnd.isAllOnesValue()) {
01206       LLVMContext &Ctx = Opnd->getType()->getContext();
01207       Instruction *I;
01208       I = BinaryOperator::CreateAnd(Opnd, ConstantInt::get(Ctx, ConstOpnd),
01209                                     "and.ra", InsertBefore);
01210       I->setDebugLoc(InsertBefore->getDebugLoc());
01211       return I;
01212     }
01213     return Opnd;
01214   }
01215   return nullptr;
01216 }
01217 
01218 // Helper function of OptimizeXor(). It tries to simplify "Opnd1 ^ ConstOpnd"
01219 // into "R ^ C", where C would be 0, and R is a symbolic value.
01220 //
01221 // If it was successful, true is returned, and "R" and "C" are returned
01222 // via "Res" and "ConstOpnd", respectively; otherwise, false is returned,
01223 // and both "Res" and "ConstOpnd" remain unchanged.
01224 //  
01225 bool Reassociate::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1,
01226                                  APInt &ConstOpnd, Value *&Res) {
01227   // Xor-Rule 1: (x | c1) ^ c2 = (x | c1) ^ (c1 ^ c1) ^ c2 
01228   //                       = ((x | c1) ^ c1) ^ (c1 ^ c2)
01229   //                       = (x & ~c1) ^ (c1 ^ c2)
01230   // It is useful only when c1 == c2.
01231   if (Opnd1->isOrExpr() && Opnd1->getConstPart() != 0) {
01232     if (!Opnd1->getValue()->hasOneUse())
01233       return false;
01234 
01235     const APInt &C1 = Opnd1->getConstPart();
01236     if (C1 != ConstOpnd)
01237       return false;
01238 
01239     Value *X = Opnd1->getSymbolicPart();
01240     Res = createAndInstr(I, X, ~C1);
01241     // ConstOpnd was C2, now C1 ^ C2.
01242     ConstOpnd ^= C1;
01243 
01244     if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue()))
01245       RedoInsts.insert(T);
01246     return true;
01247   }
01248   return false;
01249 }
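
// Editorial example (not part of the original source): Xor-Rule 1 restated on
// plain unsigned values.  With c1 == c2, "(x | c1) ^ c2" collapses to
// "x & ~c1" and the constant part cancels to 0.  The helper is hypothetical
// and assumes only standard C++.
static inline bool verifyXorRule1(unsigned X, unsigned C1) {
  return ((X | C1) ^ C1) == (X & ~C1);
}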
01250 
01251
01252 // Helper function of OptimizeXor(). It tries to simplify
01253 // "Opnd1 ^ Opnd2 ^ ConstOpnd" into "R ^ C", where C would be 0, and R is a
01254 // symbolic value. 
01255 // 
01256 // If it was successful, true is returned, and "R" and "C" are returned
01257 // via "Res" and "ConstOpnd", respectively (if the entire expression
01258 // evaluates to a constant, Res is set to NULL); otherwise, false is
01259 // returned, and both "Res" and "ConstOpnd" remain unchanged.
01260 bool Reassociate::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, XorOpnd *Opnd2,
01261                                  APInt &ConstOpnd, Value *&Res) {
01262   Value *X = Opnd1->getSymbolicPart();
01263   if (X != Opnd2->getSymbolicPart())
01264     return false;
01265 
01266   // This many instructions become dead. (At least "Opnd1 ^ Opnd2" will die.)
01267   int DeadInstNum = 1;
01268   if (Opnd1->getValue()->hasOneUse())
01269     DeadInstNum++;
01270   if (Opnd2->getValue()->hasOneUse())
01271     DeadInstNum++;
01272 
01273   // Xor-Rule 2:
01274   //  (x | c1) ^ (x & c2)
01275   //   = (x|c1) ^ (x&c2) ^ (c1 ^ c1) = ((x|c1) ^ c1) ^ (x & c2) ^ c1
01276   //   = (x & ~c1) ^ (x & c2) ^ c1               // Xor-Rule 1
01277   //   = (x & c3) ^ c1, where c3 = ~c1 ^ c2      // Xor-Rule 4
01278   //
01279   if (Opnd1->isOrExpr() != Opnd2->isOrExpr()) {
01280     if (Opnd2->isOrExpr())
01281       std::swap(Opnd1, Opnd2);
01282 
01283     const APInt &C1 = Opnd1->getConstPart();
01284     const APInt &C2 = Opnd2->getConstPart();
01285     APInt C3((~C1) ^ C2);
01286 
01287     // Do not increase code size!
01288     if (C3 != 0 && !C3.isAllOnesValue()) {
01289       int NewInstNum = ConstOpnd != 0 ? 1 : 2;
01290       if (NewInstNum > DeadInstNum)
01291         return false;
01292     }
01293 
01294     Res = createAndInstr(I, X, C3);
01295     ConstOpnd ^= C1;
01296 
01297   } else if (Opnd1->isOrExpr()) {
01298     // Xor-Rule 3: (x | c1) ^ (x | c2) = (x & c3) ^ c3 where c3 = c1 ^ c2
01299     //
01300     const APInt &C1 = Opnd1->getConstPart();
01301     const APInt &C2 = Opnd2->getConstPart();
01302     APInt C3 = C1 ^ C2;
01303     
01304     // Do not increase code size
01305     if (C3 != 0 && !C3.isAllOnesValue()) {
01306       int NewInstNum = ConstOpnd != 0 ? 1 : 2;
01307       if (NewInstNum > DeadInstNum)
01308         return false;
01309     }
01310 
01311     Res = createAndInstr(I, X, C3);
01312     ConstOpnd ^= C3;
01313   } else {
01314     // Xor-Rule 4: (x & c1) ^ (x & c2) = (x & (c1^c2))
01315     //
01316     const APInt &C1 = Opnd1->getConstPart();
01317     const APInt &C2 = Opnd2->getConstPart();
01318     APInt C3 = C1 ^ C2;
01319     Res = createAndInstr(I, X, C3);
01320   }
01321 
01322   // Put the original operands in the Redo list; hope they will be deleted
01323   // as dead code.
01324   if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue()))
01325     RedoInsts.insert(T);
01326   if (Instruction *T = dyn_cast<Instruction>(Opnd2->getValue()))
01327     RedoInsts.insert(T);
01328 
01329   return true;
01330 }
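
// Editorial example (not part of the original source): Xor-Rules 2, 3 and 4
// restated on plain unsigned values.  The helper is hypothetical and assumes
// only standard C++; each identity holds bitwise, so it holds for any inputs.
static inline bool verifyXorRules234(unsigned X, unsigned C1, unsigned C2) {
  bool Rule2 = ((X | C1) ^ (X & C2)) == ((X & (~C1 ^ C2)) ^ C1);
  bool Rule3 = ((X | C1) ^ (X | C2)) == ((X & (C1 ^ C2)) ^ (C1 ^ C2));
  bool Rule4 = ((X & C1) ^ (X & C2)) == (X & (C1 ^ C2));
  return Rule2 && Rule3 && Rule4;
}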
01331 
01332 /// Optimize a series of operands to an 'xor' instruction. If it can be reduced
01333 /// to a single Value, it is returned, otherwise the Ops list is mutated as
01334 /// necessary.
01335 Value *Reassociate::OptimizeXor(Instruction *I,
01336                                 SmallVectorImpl<ValueEntry> &Ops) {
01337   if (Value *V = OptimizeAndOrXor(Instruction::Xor, Ops))
01338     return V;
01339       
01340   if (Ops.size() == 1)
01341     return nullptr;
01342 
01343   SmallVector<XorOpnd, 8> Opnds;
01344   SmallVector<XorOpnd*, 8> OpndPtrs;
01345   Type *Ty = Ops[0].Op->getType();
01346   APInt ConstOpnd(Ty->getIntegerBitWidth(), 0);
01347 
01348   // Step 1: Convert ValueEntry to XorOpnd
01349   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
01350     Value *V = Ops[i].Op;
01351     if (!isa<ConstantInt>(V)) {
01352       XorOpnd O(V);
01353       O.setSymbolicRank(getRank(O.getSymbolicPart()));
01354       Opnds.push_back(O);
01355     } else
01356       ConstOpnd ^= cast<ConstantInt>(V)->getValue();
01357   }
01358 
01359   // NOTE: From this point on, do *NOT* add/delete elements to/from "Opnds".
01360   //  Doing so would invalidate iterators into "Opnds", and hence invalidate
01361   //  "OpndPtrs" as well.  For the same reason, do not fuse this loop with
01362   //  the previous one --- iterators into "Opnds" may be invalidated when
01363   //  new elements are added to the vector.
01364   for (unsigned i = 0, e = Opnds.size(); i != e; ++i)
01365     OpndPtrs.push_back(&Opnds[i]);
01366 
01367   // Step 2: Sort the Xor-Operands in a way such that the operands containing
01368   //  the same symbolic value cluster together. For instance, the input operand
01369   //  sequence ("x | 123", "y & 456", "x & 789") will be sorted into:
01370   //  ("x | 123", "x & 789", "y & 456").
01371   std::stable_sort(OpndPtrs.begin(), OpndPtrs.end(), XorOpnd::PtrSortFunctor());
01372 
01373   // Step 3: Combine adjacent operands
01374   XorOpnd *PrevOpnd = nullptr;
01375   bool Changed = false;
01376   for (unsigned i = 0, e = Opnds.size(); i < e; i++) {
01377     XorOpnd *CurrOpnd = OpndPtrs[i];
01378     // The combined value
01379     Value *CV;
01380 
01381     // Step 3.1: Try simplifying "CurrOpnd ^ ConstOpnd"
01382     if (ConstOpnd != 0 && CombineXorOpnd(I, CurrOpnd, ConstOpnd, CV)) {
01383       Changed = true;
01384       if (CV)
01385         *CurrOpnd = XorOpnd(CV);
01386       else {
01387         CurrOpnd->Invalidate();
01388         continue;
01389       }
01390     }
01391 
01392     if (!PrevOpnd || CurrOpnd->getSymbolicPart() != PrevOpnd->getSymbolicPart()) {
01393       PrevOpnd = CurrOpnd;
01394       continue;
01395     }
01396 
01397     // Step 3.2: When previous and current operands share the same symbolic
01398     //  value, try to simplify "PrevOpnd ^ CurrOpnd ^ ConstOpnd".
01399     //
01400     if (CombineXorOpnd(I, CurrOpnd, PrevOpnd, ConstOpnd, CV)) {
01401       // Remove previous operand
01402       PrevOpnd->Invalidate();
01403       if (CV) {
01404         *CurrOpnd = XorOpnd(CV);
01405         PrevOpnd = CurrOpnd;
01406       } else {
01407         CurrOpnd->Invalidate();
01408         PrevOpnd = nullptr;
01409       }
01410       Changed = true;
01411     }
01412   }
01413 
01414   // Step 4: Reassemble the Ops
01415   if (Changed) {
01416     Ops.clear();
01417     for (unsigned int i = 0, e = Opnds.size(); i < e; i++) {
01418       XorOpnd &O = Opnds[i];
01419       if (O.isInvalid())
01420         continue;
01421       ValueEntry VE(getRank(O.getValue()), O.getValue());
01422       Ops.push_back(VE);
01423     }
01424     if (ConstOpnd != 0) {
01425       Value *C = ConstantInt::get(Ty->getContext(), ConstOpnd);
01426       ValueEntry VE(getRank(C), C);
01427       Ops.push_back(VE);
01428     }
01429     int Sz = Ops.size();
01430     if (Sz == 1)
01431       return Ops.back().Op;
01432     else if (Sz == 0) {
01433       assert(ConstOpnd == 0);
01434       return ConstantInt::get(Ty->getContext(), ConstOpnd);
01435     }
01436   }
01437 
01438   return nullptr;
01439 }
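
// Editorial example (not part of the original source): a concrete case the
// steps above collapse to a constant.  For any x and c, Step 1 folds c into
// ConstOpnd and Steps 3.1/3.2 cancel the remaining symbolic parts, so
//   (x | c) ^ (x & ~c) ^ c  -->  0.
// A hypothetical standard-C++ check of the underlying algebra:
static inline bool verifyOptimizeXorExample(unsigned X, unsigned C) {
  return ((X | C) ^ (X & ~C) ^ C) == 0u;
}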
01440 
01441 /// OptimizeAdd - Optimize a series of operands to an 'add' instruction.  This
01442 /// optimizes based on identities.  If it can be reduced to a single Value, it
01443 /// is returned, otherwise the Ops list is mutated as necessary.
01444 Value *Reassociate::OptimizeAdd(Instruction *I,
01445                                 SmallVectorImpl<ValueEntry> &Ops) {
01446   // Scan the operand lists looking for X and -X pairs.  If we find any, we
01447   // can simplify expressions like X+-X == 0 and X+~X == -1.  While we're
01448   // at it, scan for any duplicates.  We want to canonicalize
01449   // Y+Y+Y+Z -> 3*Y+Z.
01450 
01451   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
01452     Value *TheOp = Ops[i].Op;
01453     // Check to see if we've seen this operand before.  If so, we factor all
01454     // instances of the operand together.  Due to our sorting criteria, we know
01455     // that these need to be next to each other in the vector.
01456     if (i+1 != Ops.size() && Ops[i+1].Op == TheOp) {
01457       // Rescan the list, remove all instances of this operand from the expr.
01458       unsigned NumFound = 0;
01459       do {
01460         Ops.erase(Ops.begin()+i);
01461         ++NumFound;
01462       } while (i != Ops.size() && Ops[i].Op == TheOp);
01463 
01464       DEBUG(errs() << "\nFACTORING [" << NumFound << "]: " << *TheOp << '\n');
01465       ++NumFactor;
01466 
01467       // Insert a new multiply.
01468       Type *Ty = TheOp->getType();
01469       Constant *C = Ty->isIntegerTy() ? ConstantInt::get(Ty, NumFound)
01470                                       : ConstantFP::get(Ty, NumFound);
01471       Instruction *Mul = CreateMul(TheOp, C, "factor", I, I);
01472 
01473       // Now that we have inserted a multiply, optimize it. This allows us to
01474       // handle cases that require multiple factoring steps, such as this:
01475       // (X*2) + (X*2) + (X*2) -> (X*2)*3 -> X*6
01476       RedoInsts.insert(Mul);
01477 
01478       // If every add operand was a duplicate, return the multiply.
01479       if (Ops.empty())
01480         return Mul;
01481 
01482       // Otherwise, we had some input that didn't have the dupe, such as
01483       // "A + A + B" -> "A*2 + B".  Add the new multiply to the list of
01484       // things being added by this operation.
01485       Ops.insert(Ops.begin(), ValueEntry(getRank(Mul), Mul));
01486 
01487       --i;
01488       e = Ops.size();
01489       continue;
01490     }
01491 
01492     // Check for X and -X or X and ~X in the operand list.
01493     if (!BinaryOperator::isNeg(TheOp) && !BinaryOperator::isFNeg(TheOp) &&
01494         !BinaryOperator::isNot(TheOp))
01495       continue;
01496 
01497     Value *X = nullptr;
01498     if (BinaryOperator::isNeg(TheOp) || BinaryOperator::isFNeg(TheOp))
01499       X = BinaryOperator::getNegArgument(TheOp);
01500     else if (BinaryOperator::isNot(TheOp))
01501       X = BinaryOperator::getNotArgument(TheOp);
01502 
01503     unsigned FoundX = FindInOperandList(Ops, i, X);
01504     if (FoundX == i)
01505       continue;
01506 
01507     // Remove X and -X from the operand list.
01508     if (Ops.size() == 2 &&
01509         (BinaryOperator::isNeg(TheOp) || BinaryOperator::isFNeg(TheOp)))
01510       return Constant::getNullValue(X->getType());
01511 
01512     // Remove X and ~X from the operand list.
01513     if (Ops.size() == 2 && BinaryOperator::isNot(TheOp))
01514       return Constant::getAllOnesValue(X->getType());
01515 
01516     Ops.erase(Ops.begin()+i);
01517     if (i < FoundX)
01518       --FoundX;
01519     else
01520       --i;   // Need to back up an extra one.
01521     Ops.erase(Ops.begin()+FoundX);
01522     ++NumAnnihil;
01523     --i;     // Revisit element.
01524     e -= 2;  // Removed two elements.
01525 
01526     // If the pair was X and ~X, append -1 to the operand list.
01527     if (BinaryOperator::isNot(TheOp)) {
01528       Value *V = Constant::getAllOnesValue(X->getType());
01529       Ops.insert(Ops.end(), ValueEntry(getRank(V), V));
01530       e += 1;
01531     }
01532   }
01533 
01534   // Scan the operand list, checking to see if there are any common factors
01535   // between operands.  Consider something like A*A+A*B*C+D.  We would like to
01536   // reassociate this to A*(A+B*C)+D, which reduces the number of multiplies.
01537   // To efficiently find this, we count the number of times a factor occurs
01538   // for any ADD operands that are MULs.
01539   DenseMap<Value*, unsigned> FactorOccurrences;
01540 
01541   // Keep track of each multiply we see, to avoid triggering on (X*4)+(X*4)
01542   // where they are actually the same multiply.
01543   unsigned MaxOcc = 0;
01544   Value *MaxOccVal = nullptr;
01545   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
01546     BinaryOperator *BOp =
01547         isReassociableOp(Ops[i].Op, Instruction::Mul, Instruction::FMul);
01548     if (!BOp)
01549       continue;
01550 
01551     // Compute all of the factors of this added value.
01552     SmallVector<Value*, 8> Factors;
01553     FindSingleUseMultiplyFactors(BOp, Factors, Ops);
01554     assert(Factors.size() > 1 && "Bad linearize!");
01555 
01556     // Add one to FactorOccurrences for each unique factor in this op.
01557     SmallPtrSet<Value*, 8> Duplicates;
01558     for (unsigned i = 0, e = Factors.size(); i != e; ++i) {
01559       Value *Factor = Factors[i];
01560       if (!Duplicates.insert(Factor))
01561         continue;
01562 
01563       unsigned Occ = ++FactorOccurrences[Factor];
01564       if (Occ > MaxOcc) {
01565         MaxOcc = Occ;
01566         MaxOccVal = Factor;
01567       }
01568 
01569       // If Factor is a negative constant, add the negated value as a factor
01570       // because we can percolate the negate out.  Watch for minint, which
01571       // cannot be positivified.
01572       if (ConstantInt *CI = dyn_cast<ConstantInt>(Factor)) {
01573         if (CI->isNegative() && !CI->isMinValue(true)) {
01574           Factor = ConstantInt::get(CI->getContext(), -CI->getValue());
01575           assert(!Duplicates.count(Factor) &&
01576                  "Shouldn't have two constant factors, missed a canonicalize");
01577           unsigned Occ = ++FactorOccurrences[Factor];
01578           if (Occ > MaxOcc) {
01579             MaxOcc = Occ;
01580             MaxOccVal = Factor;
01581           }
01582         }
01583       } else if (ConstantFP *CF = dyn_cast<ConstantFP>(Factor)) {
01584         if (CF->isNegative()) {
01585           APFloat F(CF->getValueAPF());
01586           F.changeSign();
01587           Factor = ConstantFP::get(CF->getContext(), F);
01588           assert(!Duplicates.count(Factor) &&
01589                  "Shouldn't have two constant factors, missed a canonicalize");
01590           unsigned Occ = ++FactorOccurrences[Factor];
01591           if (Occ > MaxOcc) {
01592             MaxOcc = Occ;
01593             MaxOccVal = Factor;
01594           }
01595         }
01596       }
01597     }
01598   }
01599 
01600   // If any factor occurred more than one time, we can pull it out.
01601   if (MaxOcc > 1) {
01602     DEBUG(errs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal << '\n');
01603     ++NumFactor;
01604 
01605     // Create a new instruction that uses the MaxOccVal twice.  If we don't do
01606     // this, we could otherwise run into situations where removing a factor
01607     // from an expression will drop a use of maxocc, and this can cause
01608     // RemoveFactorFromExpression on successive values to behave differently.
01609     Instruction *DummyInst =
01610         I->getType()->isIntegerTy()
01611             ? BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal)
01612             : BinaryOperator::CreateFAdd(MaxOccVal, MaxOccVal);
01613 
01614     SmallVector<WeakVH, 4> NewMulOps;
01615     for (unsigned i = 0; i != Ops.size(); ++i) {
01616       // Only try to remove factors from expressions we're allowed to.
01617       BinaryOperator *BOp =
01618           isReassociableOp(Ops[i].Op, Instruction::Mul, Instruction::FMul);
01619       if (!BOp)
01620         continue;
01621 
01622       if (Value *V = RemoveFactorFromExpression(Ops[i].Op, MaxOccVal)) {
01623         // The factorized operand may occur several times.  Convert them all in
01624         // one fell swoop.
01625         for (unsigned j = Ops.size(); j != i;) {
01626           --j;
01627           if (Ops[j].Op == Ops[i].Op) {
01628             NewMulOps.push_back(V);
01629             Ops.erase(Ops.begin()+j);
01630           }
01631         }
01632         --i;
01633       }
01634     }
01635 
01636     // No need for extra uses anymore.
01637     delete DummyInst;
01638 
01639     unsigned NumAddedValues = NewMulOps.size();
01640     Value *V = EmitAddTreeOfValues(I, NewMulOps);
01641 
01642     // Now that we have inserted the add tree, optimize it. This allows us to
01643     // handle cases that require multiple factoring steps, such as this:
01644     // A*A*B + A*A*C   -->   A*(A*B+A*C)   -->   A*(A*(B+C))
01645     assert(NumAddedValues > 1 && "Each occurrence should contribute a value");
01646     (void)NumAddedValues;
01647     if (Instruction *VI = dyn_cast<Instruction>(V))
01648       RedoInsts.insert(VI);
01649 
01650     // Create the multiply.
01651     Instruction *V2 = CreateMul(V, MaxOccVal, "tmp", I, I);
01652 
01653     // Rerun associate on the multiply in case the inner expression turned into
01654     // a multiply.  We want to make sure that we keep things in canonical form.
01655     RedoInsts.insert(V2);
01656 
01657     // If every add operand included the factor (e.g. "A*B + A*C"), then the
01658     // entire result expression is just the multiply "A*(B+C)".
01659     if (Ops.empty())
01660       return V2;
01661 
01662     // Otherwise, we had some input that didn't have the factor, such as
01663     // "A*B + A*C + D" -> "A*(B+C) + D".  Add the new multiply to the list of
01664     // things being added by this operation.
01665     Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2));
01666   }
01667 
01668   return nullptr;
01669 }
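
// Editorial example (not part of the original source): the two rewrites
// performed above, restated with plain unsigned arithmetic (unsigned is used
// so wrap-around stays well defined).  Names are hypothetical.
static inline bool verifyOptimizeAddRewrites(unsigned A, unsigned B, unsigned C,
                                             unsigned D, unsigned Y, unsigned Z) {
  bool Duplicates = (Y + Y + Y + Z) == (3 * Y + Z);            // Y+Y+Y+Z -> 3*Y+Z
  bool Factoring  = (A*A + A*B*C + D) == (A * (A + B*C) + D);  // pull out the common 'A'
  return Duplicates && Factoring;
}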
01670 
01671 /// \brief Build up a vector of value/power pairs factoring a product.
01672 ///
01673 /// Given a series of multiplication operands, build a vector of factors and
01674 /// the powers each is raised to when forming the final product. Sort them in
01675 /// the order of descending power.
01676 ///
01677 ///      (x*x)          -> [(x, 2)]
01678 ///     ((x*x)*x)       -> [(x, 3)]
01679 ///   ((((x*y)*x)*y)*x) -> [(x, 3), (y, 2)]
01680 ///
01681 /// \returns Whether any factors have a power greater than one.
01682 bool Reassociate::collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops,
01683                                          SmallVectorImpl<Factor> &Factors) {
01684   // FIXME: Have Ops be (ValueEntry, Multiplicity) pairs, simplifying this.
01685   // Compute the sum of powers of simplifiable factors.
01686   unsigned FactorPowerSum = 0;
01687   for (unsigned Idx = 1, Size = Ops.size(); Idx < Size; ++Idx) {
01688     Value *Op = Ops[Idx-1].Op;
01689 
01690     // Count the number of occurrences of this value.
01691     unsigned Count = 1;
01692     for (; Idx < Size && Ops[Idx].Op == Op; ++Idx)
01693       ++Count;
01694     // Track for simplification all factors which occur 2 or more times.
01695     if (Count > 1)
01696       FactorPowerSum += Count;
01697   }
01698 
01699   // We can only simplify factors if the sum of the powers of our simplifiable
01700   // factors is 4 or higher. When that is the case, we will *always* have
01701   // a simplification. This is an important invariant to prevent cyclically
01702   // trying to simplify already minimal formations.
01703   if (FactorPowerSum < 4)
01704     return false;
01705 
01706   // Now gather the simplifiable factors, removing them from Ops.
01707   FactorPowerSum = 0;
01708   for (unsigned Idx = 1; Idx < Ops.size(); ++Idx) {
01709     Value *Op = Ops[Idx-1].Op;
01710 
01711     // Count the number of occurrences of this value.
01712     unsigned Count = 1;
01713     for (; Idx < Ops.size() && Ops[Idx].Op == Op; ++Idx)
01714       ++Count;
01715     if (Count == 1)
01716       continue;
01717     // Move an even number of occurrences to Factors.
01718     Count &= ~1U;
01719     Idx -= Count;
01720     FactorPowerSum += Count;
01721     Factors.push_back(Factor(Op, Count));
01722     Ops.erase(Ops.begin()+Idx, Ops.begin()+Idx+Count);
01723   }
01724 
01725   // None of the adjustments above should have reduced the sum of factor powers
01726   // below our minimum of '4'.
01727   assert(FactorPowerSum >= 4);
01728 
01729   std::stable_sort(Factors.begin(), Factors.end(), Factor::PowerDescendingSorter());
01730   return true;
01731 }
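
// Editorial example (not part of the original source): how the power-sum
// threshold behaves on a few already-sorted operand lists (hypothetical
// values, no new functionality):
//   [x, x, y, z]    -> FactorPowerSum = 2, below 4: return false.
//   [x, x, y, y]    -> FactorPowerSum = 4: Factors = [(x,2), (y,2)], Ops empty.
//   [x, x, x, y, y] -> FactorPowerSum = 5; only even counts move, so
//                      Factors = [(x,2), (y,2)] and a single 'x' stays in Ops.
// A standard-C++ sketch of the same run-length count (helper name invented):
static inline unsigned factorPowerSum(const int *SortedOps, unsigned Size) {
  unsigned Sum = 0;
  for (unsigned I = 0; I != Size;) {
    unsigned J = I + 1;
    while (J != Size && SortedOps[J] == SortedOps[I])
      ++J;
    if (J - I > 1)      // only factors occurring two or more times count
      Sum += J - I;
    I = J;
  }
  return Sum;
}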
01732 
01733 /// \brief Build a tree of multiplies, computing the product of Ops.
01734 static Value *buildMultiplyTree(IRBuilder<> &Builder,
01735                                 SmallVectorImpl<Value*> &Ops) {
01736   if (Ops.size() == 1)
01737     return Ops.back();
01738 
01739   Value *LHS = Ops.pop_back_val();
01740   do {
01741     if (LHS->getType()->isIntegerTy())
01742       LHS = Builder.CreateMul(LHS, Ops.pop_back_val());
01743     else
01744       LHS = Builder.CreateFMul(LHS, Ops.pop_back_val());
01745   } while (!Ops.empty());
01746 
01747   return LHS;
01748 }
01749 
01750 /// \brief Build a minimal multiplication DAG for (a^x)*(b^y)*(c^z)*...
01751 ///
01752 /// Given a vector of values raised to various powers, where no two values are
01753 /// equal and the powers are sorted in decreasing order, compute the minimal
01754 /// DAG of multiplies to compute the final product, and return that product
01755 /// value.
01756 Value *Reassociate::buildMinimalMultiplyDAG(IRBuilder<> &Builder,
01757                                             SmallVectorImpl<Factor> &Factors) {
01758   assert(Factors[0].Power);
01759   SmallVector<Value *, 4> OuterProduct;
01760   for (unsigned LastIdx = 0, Idx = 1, Size = Factors.size();
01761        Idx < Size && Factors[Idx].Power > 0; ++Idx) {
01762     if (Factors[Idx].Power != Factors[LastIdx].Power) {
01763       LastIdx = Idx;
01764       continue;
01765     }
01766 
01767     // We want to multiply across all the factors with the same power so that
01768     // we can raise them to that power as a single entity. Build a mini tree
01769     // for that.
01770     SmallVector<Value *, 4> InnerProduct;
01771     InnerProduct.push_back(Factors[LastIdx].Base);
01772     do {
01773       InnerProduct.push_back(Factors[Idx].Base);
01774       ++Idx;
01775     } while (Idx < Size && Factors[Idx].Power == Factors[LastIdx].Power);
01776 
01777     // Reset the base value of the first factor to the new expression tree.
01778     // We'll remove all the factors with the same power in a second pass.
01779     Value *M = Factors[LastIdx].Base = buildMultiplyTree(Builder, InnerProduct);
01780     if (Instruction *MI = dyn_cast<Instruction>(M))
01781       RedoInsts.insert(MI);
01782 
01783     LastIdx = Idx;
01784   }
01785   // Unique factors with equal powers -- we've folded them into the first one's
01786   // base.
01787   Factors.erase(std::unique(Factors.begin(), Factors.end(),
01788                             Factor::PowerEqual()),
01789                 Factors.end());
01790 
01791   // Iteratively collect the base of each factor with an odd power into the
01792   // outer product, and halve each power in preparation for squaring the
01793   // expression.
01794   for (unsigned Idx = 0, Size = Factors.size(); Idx != Size; ++Idx) {
01795     if (Factors[Idx].Power & 1)
01796       OuterProduct.push_back(Factors[Idx].Base);
01797     Factors[Idx].Power >>= 1;
01798   }
01799   if (Factors[0].Power) {
01800     Value *SquareRoot = buildMinimalMultiplyDAG(Builder, Factors);
01801     OuterProduct.push_back(SquareRoot);
01802     OuterProduct.push_back(SquareRoot);
01803   }
01804   if (OuterProduct.size() == 1)
01805     return OuterProduct.front();
01806 
01807   Value *V = buildMultiplyTree(Builder, OuterProduct);
01808   return V;
01809 }
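
// Editorial example (not part of the original source): for Factors =
// [(x, 3), (y, 2)] the recursion above effectively builds
//   t = x * y;              // after halving, x and y have equal powers and merge
//   result = x * (t * t);   // the odd power of x joins the outer product
// i.e. x*x*x*y*y in three multiplies instead of four.  A hypothetical
// standard-C++ check of the algebra (unsigned, so wrap-around is consistent):
static inline bool verifyMinimalMultiplyDAGExample(unsigned X, unsigned Y) {
  unsigned T = X * Y;
  return X * T * T == X * X * X * Y * Y;
}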
01810 
01811 Value *Reassociate::OptimizeMul(BinaryOperator *I,
01812                                 SmallVectorImpl<ValueEntry> &Ops) {
01813   // We can only optimize the multiplies when there is a chain of more than
01814   // three, such that a balanced tree might require fewer total multiplies.
01815   if (Ops.size() < 4)
01816     return nullptr;
01817 
01818   // Try to turn linear trees of multiplies without other uses of the
01819   // intermediate stages into minimal multiply DAGs with perfect sub-expression
01820   // re-use.
01821   SmallVector<Factor, 4> Factors;
01822   if (!collectMultiplyFactors(Ops, Factors))
01823     return nullptr; // All distinct factors, so nothing left for us to do.
01824 
01825   IRBuilder<> Builder(I);
01826   Value *V = buildMinimalMultiplyDAG(Builder, Factors);
01827   if (Ops.empty())
01828     return V;
01829 
01830   ValueEntry NewEntry = ValueEntry(getRank(V), V);
01831   Ops.insert(std::lower_bound(Ops.begin(), Ops.end(), NewEntry), NewEntry);
01832   return nullptr;
01833 }
01834 
01835 Value *Reassociate::OptimizeExpression(BinaryOperator *I,
01836                                        SmallVectorImpl<ValueEntry> &Ops) {
01837   // Now that we have the linearized expression tree, try to optimize it.
01838   // Start by folding any constants that we found.
01839   Constant *Cst = nullptr;
01840   unsigned Opcode = I->getOpcode();
01841   while (!Ops.empty() && isa<Constant>(Ops.back().Op)) {
01842     Constant *C = cast<Constant>(Ops.pop_back_val().Op);
01843     Cst = Cst ? ConstantExpr::get(Opcode, C, Cst) : C;
01844   }
01845   // If there was nothing but constants then we are done.
01846   if (Ops.empty())
01847     return Cst;
01848 
01849   // Put the combined constant back at the end of the operand list, except if
01850   // there is no point.  For example, an add of 0 gets dropped here, while a
01851   // multiplication by zero turns the whole expression into zero.
01852   if (Cst && Cst != ConstantExpr::getBinOpIdentity(Opcode, I->getType())) {
01853     if (Cst == ConstantExpr::getBinOpAbsorber(Opcode, I->getType()))
01854       return Cst;
01855     Ops.push_back(ValueEntry(0, Cst));
01856   }
01857 
01858   if (Ops.size() == 1) return Ops[0].Op;
01859 
01860   // Handle destructive annihilation due to identities between elements in the
01861   // argument list here.
01862   unsigned NumOps = Ops.size();
01863   switch (Opcode) {
01864   default: break;
01865   case Instruction::And:
01866   case Instruction::Or:
01867     if (Value *Result = OptimizeAndOrXor(Opcode, Ops))
01868       return Result;
01869     break;
01870 
01871   case Instruction::Xor:
01872     if (Value *Result = OptimizeXor(I, Ops))
01873       return Result;
01874     break;
01875 
01876   case Instruction::Add:
01877   case Instruction::FAdd:
01878     if (Value *Result = OptimizeAdd(I, Ops))
01879       return Result;
01880     break;
01881 
01882   case Instruction::Mul:
01883   case Instruction::FMul:
01884     if (Value *Result = OptimizeMul(I, Ops))
01885       return Result;
01886     break;
01887   }
01888 
01889   if (Ops.size() != NumOps)
01890     return OptimizeExpression(I, Ops);
01891   return nullptr;
01892 }
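
// Editorial example (not part of the original source): the identity and
// absorber handling above, restated on plain unsigned values.  The helper is
// hypothetical; only standard C++ is assumed.
static inline bool verifyIdentityAndAbsorber(unsigned X) {
  return (X + 0u) == X &&   // add of the identity 0 is simply dropped
         (X * 1u) == X &&   // multiply by the identity 1 is simply dropped
         (X * 0u) == 0u;    // multiply by the absorber 0 folds the whole expr
}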
01893 
01894 /// EraseInst - Zap the given instruction, adding interesting operands to the
01895 /// work list.
01896 void Reassociate::EraseInst(Instruction *I) {
01897   assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!");
01898   SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
01899   // Erase the dead instruction.
01900   ValueRankMap.erase(I);
01901   RedoInsts.remove(I);
01902   I->eraseFromParent();
01903   // Optimize its operands.
01904   SmallPtrSet<Instruction *, 8> Visited; // Detect self-referential nodes.
01905   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
01906     if (Instruction *Op = dyn_cast<Instruction>(Ops[i])) {
01907       // If this is a node in an expression tree, climb to the expression root
01908       // and add that since that's where optimization actually happens.
01909       unsigned Opcode = Op->getOpcode();
01910       while (Op->hasOneUse() && Op->user_back()->getOpcode() == Opcode &&
01911              Visited.insert(Op))
01912         Op = Op->user_back();
01913       RedoInsts.insert(Op);
01914     }
01915 }
01916 
01917 /// OptimizeInst - Inspect and optimize the given instruction. Note that erasing
01918 /// instructions is not allowed.
01919 void Reassociate::OptimizeInst(Instruction *I) {
01920   // Only consider operations that we understand.
01921   if (!isa<BinaryOperator>(I))
01922     return;
01923 
01924   if (I->getOpcode() == Instruction::Shl && isa<ConstantInt>(I->getOperand(1)))
01925     // If an operand of this shift is a reassociable multiply, or if the shift
01926     // is used by a reassociable multiply or add, turn into a multiply.
01927     if (isReassociableOp(I->getOperand(0), Instruction::Mul) ||
01928         (I->hasOneUse() &&
01929          (isReassociableOp(I->user_back(), Instruction::Mul) ||
01930           isReassociableOp(I->user_back(), Instruction::Add)))) {
01931       Instruction *NI = ConvertShiftToMul(I);
01932       RedoInsts.insert(I);
01933       MadeChange = true;
01934       I = NI;
01935     }
01936 
01937   // Commute floating point binary operators, to canonicalize the order of their
01938   // operands.  This can potentially expose more CSE opportunities, and makes
01939   // writing other transformations simpler.
01940   if (I->getType()->isFloatingPointTy() || I->getType()->isVectorTy()) {
01941 
01942     // FAdd and FMul can be commuted.
01943     if (I->getOpcode() == Instruction::FMul ||
01944         I->getOpcode() == Instruction::FAdd) {
01945       Value *LHS = I->getOperand(0);
01946       Value *RHS = I->getOperand(1);
01947       unsigned LHSRank = getRank(LHS);
01948       unsigned RHSRank = getRank(RHS);
01949 
01950       // Sort the operands by rank.
01951       if (RHSRank < LHSRank) {
01952         I->setOperand(0, RHS);
01953         I->setOperand(1, LHS);
01954       }
01955     }
01956 
01957     // FIXME: We should commute vector instructions as well.  However, this 
01958     // requires further analysis to determine the effect on later passes.
01959 
01960     // Don't try to optimize vector instructions or anything that doesn't have
01961     // unsafe algebra.
01962     if (I->getType()->isVectorTy() || !I->hasUnsafeAlgebra())
01963       return;
01964   }
01965 
01966   // Do not reassociate boolean (i1) expressions.  We want to preserve the
01967   // original order of evaluation for short-circuited comparisons that
01968   // SimplifyCFG has folded to AND/OR expressions.  If the expression
01969   // is not further optimized, it is likely to be transformed back to a
01970   // short-circuited form for code gen, and the source order may have been
01971   // optimized for the most likely conditions.
01972   if (I->getType()->isIntegerTy(1))
01973     return;
01974 
01975   // If this is a subtract instruction which is not already in negate form,
01976   // see if we can convert it to X+-Y.
01977   if (I->getOpcode() == Instruction::Sub) {
01978     if (ShouldBreakUpSubtract(I)) {
01979       Instruction *NI = BreakUpSubtract(I);
01980       RedoInsts.insert(I);
01981       MadeChange = true;
01982       I = NI;
01983     } else if (BinaryOperator::isNeg(I)) {
01984       // Otherwise, this is a negation.  See if the operand is a multiply tree
01985       // and if this is not an inner node of a multiply tree.
01986       if (isReassociableOp(I->getOperand(1), Instruction::Mul) &&
01987           (!I->hasOneUse() ||
01988            !isReassociableOp(I->user_back(), Instruction::Mul))) {
01989         Instruction *NI = LowerNegateToMultiply(I);
01990         RedoInsts.insert(I);
01991         MadeChange = true;
01992         I = NI;
01993       }
01994     }
01995   } else if (I->getOpcode() == Instruction::FSub) {
01996     if (ShouldBreakUpSubtract(I)) {
01997       Instruction *NI = BreakUpSubtract(I);
01998       RedoInsts.insert(I);
01999       MadeChange = true;
02000       I = NI;
02001     } else if (BinaryOperator::isFNeg(I)) {
02002       // Otherwise, this is a negation.  See if the operand is a multiply tree
02003       // and if this is not an inner node of a multiply tree.
02004       if (isReassociableOp(I->getOperand(1), Instruction::FMul) &&
02005           (!I->hasOneUse() ||
02006            !isReassociableOp(I->user_back(), Instruction::FMul))) {
02007         Instruction *NI = LowerNegateToMultiply(I);
02008         RedoInsts.insert(I);
02009         MadeChange = true;
02010         I = NI;
02011       }
02012     }
02013   }
02014 
02015   // If this instruction is an associative binary operator, process it.
02016   if (!I->isAssociative()) return;
02017   BinaryOperator *BO = cast<BinaryOperator>(I);
02018 
02019   // If this is an interior node of a reassociable tree, ignore it until we
02020   // get to the root of the tree, to avoid N^2 analysis.
02021   unsigned Opcode = BO->getOpcode();
02022   if (BO->hasOneUse() && BO->user_back()->getOpcode() == Opcode)
02023     return;
02024 
02025   // If this is an add tree that is used by a sub instruction, ignore it
02026   // until we process the subtract.
02027   if (BO->hasOneUse() && BO->getOpcode() == Instruction::Add &&
02028       cast<Instruction>(BO->user_back())->getOpcode() == Instruction::Sub)
02029     return;
02030   if (BO->hasOneUse() && BO->getOpcode() == Instruction::FAdd &&
02031       cast<Instruction>(BO->user_back())->getOpcode() == Instruction::FSub)
02032     return;
02033 
02034   ReassociateExpression(BO);
02035 }
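
// Editorial example (not part of the original source): the shift-to-multiply
// canonicalization above rests on the identity below (shown for a constant
// shift of 3; the helper is hypothetical and assumes only standard C++).
static inline bool verifyShlToMul(unsigned X) {
  return (X << 3) == X * 8u;   // shl X, 3  ==  mul X, 8
}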
02036 
02037 void Reassociate::ReassociateExpression(BinaryOperator *I) {
02038   assert(!I->getType()->isVectorTy() &&
02039          "Reassociation of vector instructions is not supported.");
02040 
02041   // First, walk the expression tree, linearizing the tree, collecting the
02042   // operand information.
02043   SmallVector<RepeatedValue, 8> Tree;
02044   MadeChange |= LinearizeExprTree(I, Tree);
02045   SmallVector<ValueEntry, 8> Ops;
02046   Ops.reserve(Tree.size());
02047   for (unsigned i = 0, e = Tree.size(); i != e; ++i) {
02048     RepeatedValue E = Tree[i];
02049     Ops.append(E.second.getZExtValue(),
02050                ValueEntry(getRank(E.first), E.first));
02051   }
02052 
02053   DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n');
02054 
02055   // Now that we have linearized the tree to a list and have gathered all of
02056   // the operands and their ranks, sort the operands by their rank.  Use a
02057   // stable_sort so that values with equal ranks will have their relative
02058   // positions maintained (and so the compiler is deterministic).  Note that
02059   // this sorts so that the highest ranking values end up at the beginning of
02060   // the vector.
02061   std::stable_sort(Ops.begin(), Ops.end());
02062 
02063   // OptimizeExpression - Now that we have the expression tree in a convenient
02064   // sorted form, optimize it globally if possible.
02065   if (Value *V = OptimizeExpression(I, Ops)) {
02066     if (V == I)
02067       // Self-referential expression in unreachable code.
02068       return;
02069     // This expression tree simplified to something that isn't a tree,
02070     // eliminate it.
02071     DEBUG(dbgs() << "Reassoc to scalar: " << *V << '\n');
02072     I->replaceAllUsesWith(V);
02073     if (Instruction *VI = dyn_cast<Instruction>(V))
02074       VI->setDebugLoc(I->getDebugLoc());
02075     RedoInsts.insert(I);
02076     ++NumAnnihil;
02077     return;
02078   }
02079 
02080   // We want to sink immediates as deeply as possible except in the case where
02081   // this is a multiply tree used only by an add, and the immediate is a -1.
02082   // In this case we reassociate to put the negation on the outside so that we
02083   // can fold the negation into the add: (-X)*Y + Z -> Z-X*Y
02084   if (I->hasOneUse()) {
02085     if (I->getOpcode() == Instruction::Mul &&
02086         cast<Instruction>(I->user_back())->getOpcode() == Instruction::Add &&
02087         isa<ConstantInt>(Ops.back().Op) &&
02088         cast<ConstantInt>(Ops.back().Op)->isAllOnesValue()) {
02089       ValueEntry Tmp = Ops.pop_back_val();
02090       Ops.insert(Ops.begin(), Tmp);
02091     } else if (I->getOpcode() == Instruction::FMul &&
02092                cast<Instruction>(I->user_back())->getOpcode() ==
02093                    Instruction::FAdd &&
02094                isa<ConstantFP>(Ops.back().Op) &&
02095                cast<ConstantFP>(Ops.back().Op)->isExactlyValue(-1.0)) {
02096       ValueEntry Tmp = Ops.pop_back_val();
02097       Ops.insert(Ops.begin(), Tmp);
02098     }
02099   }
02100 
02101   DEBUG(dbgs() << "RAOut:\t"; PrintOps(I, Ops); dbgs() << '\n');
02102 
02103   if (Ops.size() == 1) {
02104     if (Ops[0].Op == I)
02105       // Self-referential expression in unreachable code.
02106       return;
02107 
02108     // This expression tree simplified to something that isn't a tree,
02109     // eliminate it.
02110     I->replaceAllUsesWith(Ops[0].Op);
02111     if (Instruction *OI = dyn_cast<Instruction>(Ops[0].Op))
02112       OI->setDebugLoc(I->getDebugLoc());
02113     RedoInsts.insert(I);
02114     return;
02115   }
02116 
02117   // Now that we ordered and optimized the expressions, splat them back into
02118   // the expression tree, removing any unneeded nodes.
02119   RewriteExprTree(I, Ops);
02120 }
02121 
02122 bool Reassociate::runOnFunction(Function &F) {
02123   if (skipOptnoneFunction(F))
02124     return false;
02125 
02126   // Calculate the rank map for F
02127   BuildRankMap(F);
02128 
02129   MadeChange = false;
02130   for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
02131     // Optimize every instruction in the basic block.
02132     for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE; )
02133       if (isInstructionTriviallyDead(II)) {
02134         EraseInst(II++);
02135       } else {
02136         OptimizeInst(II);
02137         assert(II->getParent() == BI && "Moved to a different block!");
02138         ++II;
02139       }
02140 
02141     // If this produced extra instructions to optimize, handle them now.
02142     while (!RedoInsts.empty()) {
02143       Instruction *I = RedoInsts.pop_back_val();
02144       if (isInstructionTriviallyDead(I))
02145         EraseInst(I);
02146       else
02147         OptimizeInst(I);
02148     }
02149   }
02150 
02151   // We are done with the rank map.
02152   RankMap.clear();
02153   ValueRankMap.clear();
02154 
02155   return MadeChange;
02156 }