Bug Summary

File: lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
Warning: line 1674, column 7
Value stored to 'MadeChange' is never read
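
The checker flags a dead store: a value is written to 'MadeChange', but no later code reads it before the variable's lifetime ends. A minimal, self-contained C++ sketch of the pattern (illustrative names only, not the LLVM code itself):

    bool simplify(int *Op) {
      bool MadeChange = false;
      if (Op != nullptr) {
        *Op = 0;
        MadeChange = true; // value stored to 'MadeChange' is never read
      }
      return Op != nullptr; // the result never consults 'MadeChange'
    }

The store reported at line 1674 is presumably on a path of this shape inside SimplifyDemandedVectorElts (the function the listing below ends in), where the function returns before anything consults the flag.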

Annotated Source Code

1//===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains logic for simplifying instructions based on information
11// about how they are used.
12//
13//===----------------------------------------------------------------------===//
14
15#include "InstCombineInternal.h"
16#include "llvm/Analysis/ValueTracking.h"
17#include "llvm/IR/IntrinsicInst.h"
18#include "llvm/IR/PatternMatch.h"
19#include "llvm/Support/KnownBits.h"
20
21using namespace llvm;
22using namespace llvm::PatternMatch;
23
24#define DEBUG_TYPE "instcombine"
25
26/// Check to see if the specified operand of the specified instruction is a
27/// constant integer. If so, check to see if there are any bits set in the
28/// constant that are not demanded. If so, shrink the constant and return true.
29static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
30 const APInt &Demanded) {
31 assert(I && "No instruction?");
32 assert(OpNo < I->getNumOperands() && "Operand index too large");
33
34 // The operand must be a constant integer or splat integer.
35 Value *Op = I->getOperand(OpNo);
36 const APInt *C;
37 if (!match(Op, m_APInt(C)))
38 return false;
39
40 // If there are no bits set that aren't demanded, nothing to do.
41 if (C->isSubsetOf(Demanded))
42 return false;
43
44 // This instruction is producing bits that are not demanded. Shrink the RHS.
45 I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));
46
47 return true;
48}
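
To make the shrinking step concrete, here is a hedged model in plain C++, with uint32_t standing in for APInt and an illustrative helper name; C->isSubsetOf(Demanded) becomes a simple mask test:

    #include <cassert>
    #include <cstdint>

    // Clears constant bits that no user will ever read; returns true if the
    // constant actually changed, mirroring ShrinkDemandedConstant above.
    static bool shrinkDemandedConstant(uint32_t &C, uint32_t Demanded) {
      if ((C & ~Demanded) == 0) // every set bit of C is demanded
        return false;
      C &= Demanded;
      return true;
    }

    int main() {
      uint32_t Mask = 0x00FF00FFu; // e.g. the RHS of an 'and'
      assert(shrinkDemandedConstant(Mask, 0x000000FFu));
      assert(Mask == 0x000000FFu); // the high set bits were not demanded
    }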
49
50
51
52/// Inst is an integer instruction that SimplifyDemandedBits knows about. See if
53/// the instruction has any properties that allow us to simplify its operands.
54bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
55 unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
56 KnownBits Known(BitWidth);
57 APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
58
59 Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
60 0, &Inst);
61 if (!V) return false;
62 if (V == &Inst) return true;
63 replaceInstUsesWith(Inst, V);
64 return true;
65}
66
67/// This form of SimplifyDemandedBits simplifies the specified instruction
68/// operand if possible, updating it in place. It returns true if it made any
69/// change and false otherwise.
70bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
71 const APInt &DemandedMask,
72 KnownBits &Known,
73 unsigned Depth) {
74 Use &U = I->getOperandUse(OpNo);
75 Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, Known,
76 Depth, I);
77 if (!NewVal) return false;
78 U = NewVal;
79 return true;
80}
81
82
83/// This function attempts to replace V with a simpler value based on the
84/// demanded bits. When this function is called, it is known that only the bits
85/// set in DemandedMask of the result of V are ever used downstream.
86/// Consequently, depending on the mask and V, it may be possible to replace V
87/// with a constant or one of its operands. In such cases, this function does
88/// the replacement and returns true. In all other cases, it returns false after
89/// analyzing the expression and setting Known.One to the bits known to be one in the
90/// expression. Known.Zero contains all the bits that are known to be zero in
91/// the expression. These are provided to potentially allow the caller (which
92/// might recursively be SimplifyDemandedBits itself) to simplify the
93/// expression.
94/// Known.One and Known.Zero always follow the invariant that:
95/// Known.One & Known.Zero == 0.
96/// That is, a bit can't be both 1 and 0. Note that the bits in Known.One and
97/// Known.Zero may only be accurate for those bits set in DemandedMask. Note
98/// also that the bitwidth of V, DemandedMask, Known.Zero and Known.One must all
99/// be the same.
100///
101/// This returns null if it did not change anything and it permits no
102/// simplification. This returns V itself if it did some simplification of V's
103/// operands based on the information about what bits are demanded. This returns
104/// some other non-null value if it found out that V is equal to another value
105/// in the context where the specified bits are demanded, but not for all users.
106Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
107 KnownBits &Known, unsigned Depth,
108 Instruction *CxtI) {
109 assert(V != nullptr && "Null pointer of Value???");
110 assert(Depth <= 6 && "Limit Search Depth");
111 uint32_t BitWidth = DemandedMask.getBitWidth();
112 Type *VTy = V->getType();
113 assert(
114 (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
115 Known.getBitWidth() == BitWidth &&
116 "Value *V, DemandedMask and Known must have same BitWidth")
117
118 if (isa<Constant>(V)) {
119 computeKnownBits(V, Known, Depth, CxtI);
120 return nullptr;
121 }
122
123 Known.resetAll();
124 if (DemandedMask.isNullValue()) // Not demanding any bits from V.
125 return UndefValue::get(VTy);
126
127 if (Depth == 6) // Limit search depth.
128 return nullptr;
129
130 Instruction *I = dyn_cast<Instruction>(V);
131 if (!I) {
132 computeKnownBits(V, Known, Depth, CxtI);
133 return nullptr; // Only analyze instructions.
134 }
135
136 // If there are multiple uses of this value and we aren't at the root, then
137 // we can't do any simplifications of the operands, because DemandedMask
138 // only reflects the bits demanded by *one* of the users.
139 if (Depth != 0 && !I->hasOneUse())
140 return SimplifyMultipleUseDemandedBits(I, DemandedMask, Known, Depth, CxtI);
141
142 KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);
143
144 // If this is the root being simplified, allow it to have multiple uses,
145 // just set the DemandedMask to all bits so that we can try to simplify the
146 // operands. This allows visitTruncInst (for example) to simplify the
147 // operand of a trunc without duplicating all the logic below.
148 if (Depth == 0 && !V->hasOneUse())
149 DemandedMask.setAllBits();
150
151 switch (I->getOpcode()) {
152 default:
153 computeKnownBits(I, Known, Depth, CxtI);
154 break;
155 case Instruction::And: {
156 // If either the LHS or the RHS are Zero, the result is zero.
157 if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
158 SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
159 Depth + 1))
160 return I;
161 assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
162 assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
163
164 // Output known-0 are known to be clear if zero in either the LHS | RHS.
165 APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
166 // Output known-1 bits are only known if set in both the LHS & RHS.
167 APInt IKnownOne = RHSKnown.One & LHSKnown.One;
168
169 // If the client is only demanding bits that we know, return the known
170 // constant.
171 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
172 return Constant::getIntegerValue(VTy, IKnownOne);
173
174 // If all of the demanded bits are known 1 on one side, return the other.
175 // These bits cannot contribute to the result of the 'and'.
176 if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
177 return I->getOperand(0);
178 if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
179 return I->getOperand(1);
180
181 // If the RHS is a constant, see if we can simplify it.
182 if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnown.Zero))
183 return I;
184
185 Known.Zero = std::move(IKnownZero);
186 Known.One = std::move(IKnownOne);
187 break;
188 }
189 case Instruction::Or: {
190 // If either the LHS or the RHS are One, the result is One.
191 if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
192 SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
193 Depth + 1))
194 return I;
195 assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
196 assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
197
198 // Output known-0 bits are only known if clear in both the LHS & RHS.
199 APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
200 // Output known-1 are known to be set if set in either the LHS | RHS.
201 APInt IKnownOne = RHSKnown.One | LHSKnown.One;
202
203 // If the client is only demanding bits that we know, return the known
204 // constant.
205 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
206 return Constant::getIntegerValue(VTy, IKnownOne);
207
208 // If all of the demanded bits are known zero on one side, return the other.
209 // These bits cannot contribute to the result of the 'or'.
210 if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
211 return I->getOperand(0);
212 if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
213 return I->getOperand(1);
214
215 // If the RHS is a constant, see if we can simplify it.
216 if (ShrinkDemandedConstant(I, 1, DemandedMask))
217 return I;
218
219 Known.Zero = std::move(IKnownZero);
220 Known.One = std::move(IKnownOne);
221 break;
222 }
223 case Instruction::Xor: {
224 if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
225 SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1))
226 return I;
227 assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
228 assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
229
230 // Output known-0 bits are known if clear or set in both the LHS & RHS.
231 APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
232 (RHSKnown.One & LHSKnown.One);
233 // Output known-1 are known to be set if set in only one of the LHS, RHS.
234 APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
235 (RHSKnown.One & LHSKnown.Zero);
236
237 // If the client is only demanding bits that we know, return the known
238 // constant.
239 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
240 return Constant::getIntegerValue(VTy, IKnownOne);
241
242 // If all of the demanded bits are known zero on one side, return the other.
243 // These bits cannot contribute to the result of the 'xor'.
244 if (DemandedMask.isSubsetOf(RHSKnown.Zero))
245 return I->getOperand(0);
246 if (DemandedMask.isSubsetOf(LHSKnown.Zero))
247 return I->getOperand(1);
248
249 // If all of the demanded bits are known to be zero on one side or the
250 // other, turn this into an *inclusive* or.
251 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
252 if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
253 Instruction *Or =
254 BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
255 I->getName());
256 return InsertNewInstWith(Or, *I);
257 }
258
259 // If all of the demanded bits on one side are known, and all of the set
260 // bits on that side are also known to be set on the other side, turn this
261 // into an AND, as we know the bits will be cleared.
262 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
263 if (DemandedMask.isSubsetOf(RHSKnown.Zero|RHSKnown.One) &&
264 RHSKnown.One.isSubsetOf(LHSKnown.One)) {
265 Constant *AndC = Constant::getIntegerValue(VTy,
266 ~RHSKnown.One & DemandedMask);
267 Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
268 return InsertNewInstWith(And, *I);
269 }
270
271 // If the RHS is a constant, see if we can simplify it.
272 // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
273 if (ShrinkDemandedConstant(I, 1, DemandedMask))
274 return I;
275
276 // If our LHS is an 'and' and if it has one use, and if any of the bits we
277 // are flipping are known to be set, then the xor is just resetting those
278 // bits to zero. We can just knock out bits from the 'and' and the 'xor',
279 // simplifying both of them.
280 if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
281 if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
282 isa<ConstantInt>(I->getOperand(1)) &&
283 isa<ConstantInt>(LHSInst->getOperand(1)) &&
284 (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
285 ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
286 ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
287 APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);
288
289 Constant *AndC =
290 ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
291 Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
292 InsertNewInstWith(NewAnd, *I);
293
294 Constant *XorC =
295 ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
296 Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
297 return InsertNewInstWith(NewXor, *I);
298 }
299
300 // Output known-0 bits are known if clear or set in both the LHS & RHS.
301 Known.Zero = std::move(IKnownZero);
302 // Output known-1 are known to be set if set in only one of the LHS, RHS.
303 Known.One = std::move(IKnownOne);
304 break;
305 }
306 case Instruction::Select:
307 // If this is a select as part of a min/max pattern, don't simplify any
308 // further in case we break the structure.
309 Value *LHS, *RHS;
310 if (matchSelectPattern(I, LHS, RHS).Flavor != SPF_UNKNOWN)
311 return nullptr;
312
313 if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1) ||
314 SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1))
315 return I;
316 assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
317 assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
318
319 // If the operands are constants, see if we can simplify them.
320 if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
321 ShrinkDemandedConstant(I, 2, DemandedMask))
322 return I;
323
324 // Only known if known in both the LHS and RHS.
325 Known.One = RHSKnown.One & LHSKnown.One;
326 Known.Zero = RHSKnown.Zero & LHSKnown.Zero;
327 break;
328 case Instruction::ZExt:
329 case Instruction::Trunc: {
330 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
331
332 APInt InputDemandedMask = DemandedMask.zextOrTrunc(SrcBitWidth);
333 KnownBits InputKnown(SrcBitWidth);
334 if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Depth + 1))
335 return I;
336 Known = Known.zextOrTrunc(BitWidth);
337 // Any top bits are known to be zero.
338 if (BitWidth > SrcBitWidth)
339 Known.Zero.setBitsFrom(SrcBitWidth);
340 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
341 break;
342 }
343 case Instruction::BitCast:
344 if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
345 return nullptr; // vector->int or fp->int?
346
347 if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
348 if (VectorType *SrcVTy =
349 dyn_cast<VectorType>(I->getOperand(0)->getType())) {
350 if (DstVTy->getNumElements() != SrcVTy->getNumElements())
351 // Don't touch a bitcast between vectors of different element counts.
352 return nullptr;
353 } else
354 // Don't touch a scalar-to-vector bitcast.
355 return nullptr;
356 } else if (I->getOperand(0)->getType()->isVectorTy())
357 // Don't touch a vector-to-scalar bitcast.
358 return nullptr;
359
360 if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
361 return I;
362 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
363 break;
364 case Instruction::SExt: {
365 // Compute the bits in the result that are not present in the input.
366 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
367
368 APInt InputDemandedBits = DemandedMask.trunc(SrcBitWidth);
369
370 // If any of the sign extended bits are demanded, we know that the sign
371 // bit is demanded.
372 if (DemandedMask.getActiveBits() > SrcBitWidth)
373 InputDemandedBits.setBit(SrcBitWidth-1);
374
375 KnownBits InputKnown(SrcBitWidth);
376 if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Depth + 1))
377 return I;
378
379 // If the input sign bit is known zero, or if the NewBits are not demanded,
380 // convert this into a zero extension.
381 if (InputKnown.isNonNegative() ||
382 DemandedMask.getActiveBits() <= SrcBitWidth) {
383 // Convert to ZExt cast.
384 CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
385 return InsertNewInstWith(NewCast, *I);
386 }
387
388 // If the sign bit of the input is known set or clear, then we know the
389 // top bits of the result.
390 Known = InputKnown.sext(BitWidth);
391 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
392 break;
393 }
394 case Instruction::Add:
395 case Instruction::Sub: {
396 // If the high bits of an ADD/SUB are not demanded, then we do not care
397 // about the high bits of the operands.
398 unsigned NLZ = DemandedMask.countLeadingZeros();
399 // Right fill the mask of bits for this ADD/SUB to demand the most
400 // significant bit and all those below it.
401 APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
402 if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
403 SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1) ||
404 ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
405 SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1)) {
406 if (NLZ > 0) {
407 // Disable the nsw and nuw flags here: We can no longer guarantee that
408 // we won't wrap after simplification. Removing the nsw/nuw flags is
409 // legal here because the top bit is not demanded.
410 BinaryOperator &BinOP = *cast<BinaryOperator>(I);
411 BinOP.setHasNoSignedWrap(false);
412 BinOP.setHasNoUnsignedWrap(false);
413 }
414 return I;
415 }
416
417 // If we are known to be adding/subtracting zeros to every bit below
418 // the highest demanded bit, we just return the other side.
419 if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
420 return I->getOperand(0);
421 // We can't do this with the LHS for subtraction, unless we are only
422 // demanding the LSB.
423 if ((I->getOpcode() == Instruction::Add ||
424 DemandedFromOps.isOneValue()) &&
425 DemandedFromOps.isSubsetOf(LHSKnown.Zero))
426 return I->getOperand(1);
427
428 // Otherwise just compute the known bits of the result.
429 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
430 Known = KnownBits::computeForAddSub(I->getOpcode() == Instruction::Add,
431 NSW, LHSKnown, RHSKnown);
432 break;
433 }
434 case Instruction::Shl: {
435 const APInt *SA;
436 if (match(I->getOperand(1), m_APInt(SA))) {
437 const APInt *ShrAmt;
438 if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt)))) {
439 Instruction *Shr = cast<Instruction>(I->getOperand(0));
440 if (Value *R = simplifyShrShlDemandedBits(
441 Shr, *ShrAmt, I, *SA, DemandedMask, Known))
442 return R;
443 }
444
445 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
446 APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
447
448 // If the shift is NUW/NSW, then it does demand the high bits.
449 ShlOperator *IOp = cast<ShlOperator>(I);
450 if (IOp->hasNoSignedWrap())
451 DemandedMaskIn.setHighBits(ShiftAmt+1);
452 else if (IOp->hasNoUnsignedWrap())
453 DemandedMaskIn.setHighBits(ShiftAmt);
454
455 if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
456 return I;
457 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
458 Known.Zero <<= ShiftAmt;
459 Known.One <<= ShiftAmt;
460 // low bits known zero.
461 if (ShiftAmt)
462 Known.Zero.setLowBits(ShiftAmt);
463 }
464 break;
465 }
466 case Instruction::LShr: {
467 const APInt *SA;
468 if (match(I->getOperand(1), m_APInt(SA))) {
469 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
470
471 // Unsigned shift right.
472 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
473
474 // If the shift is exact, then it does demand the low bits (and knows that
475 // they are zero).
476 if (cast<LShrOperator>(I)->isExact())
477 DemandedMaskIn.setLowBits(ShiftAmt);
478
479 if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
480 return I;
481 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
482 Known.Zero.lshrInPlace(ShiftAmt);
483 Known.One.lshrInPlace(ShiftAmt);
484 if (ShiftAmt)
485 Known.Zero.setHighBits(ShiftAmt); // high bits known zero.
486 }
487 break;
488 }
489 case Instruction::AShr: {
490 // If this is an arithmetic shift right and only the low-bit is set, we can
491 // always convert this into a logical shr, even if the shift amount is
492 // variable. The low bit of the shift cannot be an input sign bit unless
493 // the shift amount is >= the size of the datatype, which is undefined.
494 if (DemandedMask.isOneValue()) {
495 // Perform the logical shift right.
496 Instruction *NewVal = BinaryOperator::CreateLShr(
497 I->getOperand(0), I->getOperand(1), I->getName());
498 return InsertNewInstWith(NewVal, *I);
499 }
500
501 // If the sign bit is the only bit demanded by this ashr, then there is no
502 // need to do it, the shift doesn't change the high bit.
503 if (DemandedMask.isSignMask())
504 return I->getOperand(0);
505
506 const APInt *SA;
507 if (match(I->getOperand(1), m_APInt(SA))) {
508 uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
509
510 // Signed shift right.
511 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
512 // If any of the high bits are demanded, we should set the sign bit as
513 // demanded.
514 if (DemandedMask.countLeadingZeros() <= ShiftAmt)
515 DemandedMaskIn.setSignBit();
516
517 // If the shift is exact, then it does demand the low bits (and knows that
518 // they are zero).
519 if (cast<AShrOperator>(I)->isExact())
520 DemandedMaskIn.setLowBits(ShiftAmt);
521
522 if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
523 return I;
524
525 unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Depth + 1, CxtI);
526
527 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
528 // Compute the new bits that are at the top now plus sign bits.
529 APInt HighBits(APInt::getHighBitsSet(
530 BitWidth, std::min(SignBits + ShiftAmt - 1, BitWidth)));
531 Known.Zero.lshrInPlace(ShiftAmt);
532 Known.One.lshrInPlace(ShiftAmt);
533
534 // If the input sign bit is known to be zero, or if none of the top bits
535 // are demanded, turn this into an unsigned shift right.
536 assert(BitWidth > ShiftAmt && "Shift amount not saturated?");
537 if (Known.Zero[BitWidth-ShiftAmt-1] ||
538 !DemandedMask.intersects(HighBits)) {
539 BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
540 I->getOperand(1));
541 LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
542 return InsertNewInstWith(LShr, *I);
543 } else if (Known.One[BitWidth-ShiftAmt-1]) { // New bits are known one.
544 Known.One |= HighBits;
545 }
546 }
547 break;
548 }
549 case Instruction::SRem:
550 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
551 // X % -1 demands all the bits because we don't want to introduce
552 // INT_MIN % -1 (== undef) by accident.
553 if (Rem->isMinusOne())
554 break;
555 APInt RA = Rem->getValue().abs();
556 if (RA.isPowerOf2()) {
557 if (DemandedMask.ult(RA)) // srem won't affect demanded bits
558 return I->getOperand(0);
559
560 APInt LowBits = RA - 1;
561 APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
562 if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1))
563 return I;
564
565 // The low bits of LHS are unchanged by the srem.
566 Known.Zero = LHSKnown.Zero & LowBits;
567 Known.One = LHSKnown.One & LowBits;
568
569 // If LHS is non-negative or has all low bits zero, then the upper bits
570 // are all zero.
571 if (LHSKnown.isNonNegative() || LowBits.isSubsetOf(LHSKnown.Zero))
572 Known.Zero |= ~LowBits;
573
574 // If LHS is negative and not all low bits are zero, then the upper bits
575 // are all one.
576 if (LHSKnown.isNegative() && LowBits.intersects(LHSKnown.One))
577 Known.One |= ~LowBits;
578
579 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
580 break;
581 }
582 }
583
584 // The sign bit is the LHS's sign bit, except when the result of the
585 // remainder is zero.
586 if (DemandedMask.isSignBitSet()) {
587 computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
588 // If it's known zero, our sign bit is also zero.
589 if (LHSKnown.isNonNegative())
590 Known.makeNonNegative();
591 }
592 break;
593 case Instruction::URem: {
594 KnownBits Known2(BitWidth);
595 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
596 if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
597 SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
598 return I;
599
600 unsigned Leaders = Known2.countMinLeadingZeros();
601 Known.Zero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
602 break;
603 }
604 case Instruction::Call:
605 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
606 switch (II->getIntrinsicID()) {
607 default: break;
608 case Intrinsic::bswap: {
609 // If the only bits demanded come from one byte of the bswap result,
610 // just shift the input byte into position to eliminate the bswap.
611 unsigned NLZ = DemandedMask.countLeadingZeros();
612 unsigned NTZ = DemandedMask.countTrailingZeros();
613
614 // Round NTZ down to the next byte. If we have 11 trailing zeros, then
615 // we need all the bits down to bit 8. Likewise, round NLZ. If we
616 // have 14 leading zeros, round to 8.
617 NLZ &= ~7;
618 NTZ &= ~7;
619 // If we need exactly one byte, we can do this transformation.
620 if (BitWidth-NLZ-NTZ == 8) {
621 unsigned ResultBit = NTZ;
622 unsigned InputBit = BitWidth-NTZ-8;
623
624 // Replace this with either a left or right shift to get the byte into
625 // the right place.
626 Instruction *NewVal;
627 if (InputBit > ResultBit)
628 NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
629 ConstantInt::get(I->getType(), InputBit-ResultBit));
630 else
631 NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
632 ConstantInt::get(I->getType(), ResultBit-InputBit));
633 NewVal->takeName(I);
634 return InsertNewInstWith(NewVal, *I);
635 }
636
637 // TODO: Could compute known zero/one bits based on the input.
638 break;
639 }
640 case Intrinsic::x86_mmx_pmovmskb:
641 case Intrinsic::x86_sse_movmsk_ps:
642 case Intrinsic::x86_sse2_movmsk_pd:
643 case Intrinsic::x86_sse2_pmovmskb_128:
644 case Intrinsic::x86_avx_movmsk_ps_256:
645 case Intrinsic::x86_avx_movmsk_pd_256:
646 case Intrinsic::x86_avx2_pmovmskb: {
647 // MOVMSK copies the vector elements' sign bits to the low bits
648 // and zeros the high bits.
649 unsigned ArgWidth;
650 if (II->getIntrinsicID() == Intrinsic::x86_mmx_pmovmskb) {
651 ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
652 } else {
653 auto Arg = II->getArgOperand(0);
654 auto ArgType = cast<VectorType>(Arg->getType());
655 ArgWidth = ArgType->getNumElements();
656 }
657
658 // If we don't need any of the low bits, then return zero; we know
659 // that DemandedMask is non-zero already.
660 APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
661 if (DemandedElts.isNullValue())
662 return ConstantInt::getNullValue(VTy);
663
664 // We know that the upper bits are set to zero.
665 Known.Zero.setBitsFrom(ArgWidth);
666 return nullptr;
667 }
668 case Intrinsic::x86_sse42_crc32_64_64:
669 Known.Zero.setBitsFrom(32);
670 return nullptr;
671 }
672 }
673 computeKnownBits(V, Known, Depth, CxtI);
674 break;
675 }
676
677 // If the client is only demanding bits that we know, return the known
678 // constant.
679 if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
680 return Constant::getIntegerValue(VTy, Known.One);
681 return nullptr;
682}
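
As a concrete check of the known-bits rules above, here is a hedged C++ model of the Xor case, using plain 32-bit masks in place of APInt/KnownBits (the struct and function names are illustrative): a result bit is known zero where the operands provably agree and known one where they provably differ, so an RHS that is known zero everywhere contributes nothing, which is what justifies the 'return I->getOperand(0)' fold when DemandedMask.isSubsetOf(RHSKnown.Zero).

    #include <cassert>
    #include <cstdint>

    struct Known { uint32_t Zero, One; }; // disjoint, as the asserts require

    // Mirrors the IKnownZero/IKnownOne computation in the Xor case above.
    static Known xorKnown(Known L, Known R) {
      return {(L.Zero & R.Zero) | (L.One & R.One),
              (L.Zero & R.One) | (L.One & R.Zero)};
    }

    int main() {
      Known L{0xFFFF0000u, 0x0000FF00u}; // low byte of the LHS is unknown
      Known R{0xFFFFFFFFu, 0x00000000u}; // RHS known zero in every bit
      Known Out = xorKnown(L, R);
      // Xor with a known-zero value adds no information, bit for bit.
      assert(Out.Zero == L.Zero && Out.One == L.One);
    }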
683
684/// Helper routine of SimplifyDemandedUseBits. It computes Known
685/// bits. It also tries to handle simplifications that can be done based on
686/// DemandedMask, but without modifying the Instruction.
687Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
688 const APInt &DemandedMask,
689 KnownBits &Known,
690 unsigned Depth,
691 Instruction *CxtI) {
692 unsigned BitWidth = DemandedMask.getBitWidth();
693 Type *ITy = I->getType();
694
695 KnownBits LHSKnown(BitWidth);
696 KnownBits RHSKnown(BitWidth);
697
698 // Despite the fact that we can't simplify this instruction in all users'
699 // contexts, we can at least compute the known bits, and we can
700 // do simplifications that apply to *just* the one user if we know that
701 // this instruction has a simpler value in that context.
702 switch (I->getOpcode()) {
703 case Instruction::And: {
704 // If either the LHS or the RHS are Zero, the result is zero.
705 computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
706 computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
707 CxtI);
708
709 // Output known-0 are known to be clear if zero in either the LHS | RHS.
710 APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
711 // Output known-1 bits are only known if set in both the LHS & RHS.
712 APInt IKnownOne = RHSKnown.One & LHSKnown.One;
713
714 // If the client is only demanding bits that we know, return the known
715 // constant.
716 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
717 return Constant::getIntegerValue(ITy, IKnownOne);
718
719 // If all of the demanded bits are known 1 on one side, return the other.
720 // These bits cannot contribute to the result of the 'and' in this
721 // context.
722 if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
723 return I->getOperand(0);
724 if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
725 return I->getOperand(1);
726
727 Known.Zero = std::move(IKnownZero);
728 Known.One = std::move(IKnownOne);
729 break;
730 }
731 case Instruction::Or: {
732 // We can simplify (X|Y) -> X or Y in the user's context if we know that
733 // only bits from X or Y are demanded.
734
735 // If either the LHS or the RHS are One, the result is One.
736 computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
737 computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
738 CxtI);
739
740 // Output known-0 bits are only known if clear in both the LHS & RHS.
741 APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
742 // Output known-1 are known to be set if set in either the LHS | RHS.
743 APInt IKnownOne = RHSKnown.One | LHSKnown.One;
744
745 // If the client is only demanding bits that we know, return the known
746 // constant.
747 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
748 return Constant::getIntegerValue(ITy, IKnownOne);
749
750 // If all of the demanded bits are known zero on one side, return the
751 // other. These bits cannot contribute to the result of the 'or' in this
752 // context.
753 if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
754 return I->getOperand(0);
755 if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
756 return I->getOperand(1);
757
758 Known.Zero = std::move(IKnownZero);
759 Known.One = std::move(IKnownOne);
760 break;
761 }
762 case Instruction::Xor: {
763 // We can simplify (X^Y) -> X or Y in the user's context if we know that
764 // only bits from X or Y are demanded.
765
766 computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
767 computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
768 CxtI);
769
770 // Output known-0 bits are known if clear or set in both the LHS & RHS.
771 APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
772 (RHSKnown.One & LHSKnown.One);
773 // Output known-1 are known to be set if set in only one of the LHS, RHS.
774 APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
775 (RHSKnown.One & LHSKnown.Zero);
776
777 // If the client is only demanding bits that we know, return the known
778 // constant.
779 if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
780 return Constant::getIntegerValue(ITy, IKnownOne);
781
782 // If all of the demanded bits are known zero on one side, return the
783 // other.
784 if (DemandedMask.isSubsetOf(RHSKnown.Zero))
785 return I->getOperand(0);
786 if (DemandedMask.isSubsetOf(LHSKnown.Zero))
787 return I->getOperand(1);
788
789 // Output known-0 bits are known if clear or set in both the LHS & RHS.
790 Known.Zero = std::move(IKnownZero);
791 // Output known-1 are known to be set if set in only one of the LHS, RHS.
792 Known.One = std::move(IKnownOne);
793 break;
794 }
795 default:
796 // Compute the Known bits to simplify things downstream.
797 computeKnownBits(I, Known, Depth, CxtI);
798
799 // If this user is only demanding bits that we know, return the known
800 // constant.
801 if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
802 return Constant::getIntegerValue(ITy, Known.One);
803
804 break;
805 }
806
807 return nullptr;
808}
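
A hedged sketch of the single-user idea in this helper, again with plain 32-bit masks and illustrative names: when one particular user demands only bits that Y is known not to set, (X | Y) behaves as X for that user, and the helper can hand back operand 0 without touching the multi-use instruction itself.

    #include <cassert>
    #include <cstdint>

    // Plain-mask version of DemandedMask.isSubsetOf(RHSKnown.Zero).
    static bool orSimplifiesToLHS(uint32_t DemandedMask, uint32_t YKnownZero) {
      return (DemandedMask & ~YKnownZero) == 0;
    }

    int main() {
      uint32_t YKnownZero = 0x000000FFu; // Y = V << 8: low byte known zero
      // A user computing '(X | Y) & 0xFF' demands only the low byte, so in
      // that user's context the 'or' can be replaced by X alone.
      assert(orSimplifiesToLHS(0x000000FFu, YKnownZero));
    }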
809
810
811/// Helper routine of SimplifyDemandedUseBits. It tries to simplify
812/// "E1 = (X lsr C1) << C2", where the C1 and C2 are constant, into
813/// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign
814/// of "C2-C1".
815///
816/// Suppose E1 and E2 are generally different in bits S={bm, bm+1,
817/// ..., bn}, without considering the specific value X is holding.
818/// This transformation is legal iff one of the following conditions holds:
819/// 1) All the bits in S are 0; in this case E1 == E2.
820/// 2) We don't care about those bits in S, per the input DemandedMask.
821/// 3) A combination of 1) and 2): some bits in S are 0, and we don't care
822/// about the rest.
823///
824/// Currently we only test condition 2).
825///
826/// As with SimplifyDemandedUseBits, it returns NULL if the simplification was
827/// not successful.
828Value *
829InstCombiner::simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
830 Instruction *Shl, const APInt &ShlOp1,
831 const APInt &DemandedMask,
832 KnownBits &Known) {
833 if (!ShlOp1 || !ShrOp1)
834 return nullptr; // No-op.
835
836 Value *VarX = Shr->getOperand(0);
837 Type *Ty = VarX->getType();
838 unsigned BitWidth = Ty->getScalarSizeInBits();
839 if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
840 return nullptr; // Undef.
841
842 unsigned ShlAmt = ShlOp1.getZExtValue();
843 unsigned ShrAmt = ShrOp1.getZExtValue();
844
845 Known.One.clearAllBits();
846 Known.Zero.setLowBits(ShlAmt - 1);
847 Known.Zero &= DemandedMask;
848
849 APInt BitMask1(APInt::getAllOnesValue(BitWidth));
850 APInt BitMask2(APInt::getAllOnesValue(BitWidth));
851
852 bool isLshr = (Shr->getOpcode() == Instruction::LShr);
853 BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
854 (BitMask1.ashr(ShrAmt) << ShlAmt);
855
856 if (ShrAmt <= ShlAmt) {
857 BitMask2 <<= (ShlAmt - ShrAmt);
858 } else {
859 BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt):
860 BitMask2.ashr(ShrAmt - ShlAmt);
861 }
862
863 // Check if condition 2) (see the comment on this function) is satisfied.
864 if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
865 if (ShrAmt == ShlAmt)
866 return VarX;
867
868 if (!Shr->hasOneUse())
869 return nullptr;
870
871 BinaryOperator *New;
872 if (ShrAmt < ShlAmt) {
873 Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
874 New = BinaryOperator::CreateShl(VarX, Amt);
875 BinaryOperator *Orig = cast<BinaryOperator>(Shl);
876 New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
877 New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
878 } else {
879 Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
880 New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
881 BinaryOperator::CreateAShr(VarX, Amt);
882 if (cast<BinaryOperator>(Shr)->isExact())
883 New->setIsExact(true);
884 }
885
886 return InsertNewInstWith(New, *Shl);
887 }
888
889 return nullptr;
890}
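
A hedged arithmetic check of condition 2) above, taking C1 == C2 == 4: E1 = (X lshr 4) shl 4 can differ from X only in the low four bits, so whenever the DemandedMask excludes those bits the shift pair collapses to X, which is the 'return VarX' path when ShrAmt == ShlAmt.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t DemandedMask = ~0xFu; // the low four bits are never read
      for (uint32_t X = 0; X < 4096; ++X) {
        uint32_t E1 = (X >> 4) << 4; // (X lshr C1) << C2 with C1 == C2 == 4
        assert((E1 & DemandedMask) == (X & DemandedMask));
      }
    }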
891
892/// The specified value produces a vector with any number of elements.
893/// DemandedElts contains the set of elements that are actually used by the
894/// caller. This method analyzes which elements of the operand are undef and
895/// returns that information in UndefElts.
896///
897/// If the information about demanded elements can be used to simplify the
898/// operation, the operation is simplified and the resultant value is
899/// returned. This returns null if no change was made.
900Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
901 APInt &UndefElts,
902 unsigned Depth) {
903 unsigned VWidth = V->getType()->getVectorNumElements();
904 APInt EltMask(APInt::getAllOnesValue(VWidth));
905 assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
906
907 if (isa<UndefValue>(V)) {
908 // If the entire vector is undefined, just return this info.
909 UndefElts = EltMask;
910 return nullptr;
911 }
912
913 if (DemandedElts.isNullValue()) { // If nothing is demanded, provide undef.
914 UndefElts = EltMask;
915 return UndefValue::get(V->getType());
916 }
917
918 UndefElts = 0;
919
920 // Handle ConstantAggregateZero, ConstantVector, ConstantDataSequential.
921 if (Constant *C = dyn_cast<Constant>(V)) {
922 // Check if this is identity. If so, return 0 since we are not simplifying
923 // anything.
924 if (DemandedElts.isAllOnesValue())
925 return nullptr;
926
927 Type *EltTy = cast<VectorType>(V->getType())->getElementType();
928 Constant *Undef = UndefValue::get(EltTy);
929
930 SmallVector<Constant*, 16> Elts;
931 for (unsigned i = 0; i != VWidth; ++i) {
932 if (!DemandedElts[i]) { // If not demanded, set to undef.
933 Elts.push_back(Undef);
934 UndefElts.setBit(i);
935 continue;
936 }
937
938 Constant *Elt = C->getAggregateElement(i);
939 if (!Elt) return nullptr;
940
941 if (isa<UndefValue>(Elt)) { // Already undef.
942 Elts.push_back(Undef);
943 UndefElts.setBit(i);
944 } else { // Otherwise, defined.
945 Elts.push_back(Elt);
946 }
947 }
948
949 // If we changed the constant, return it.
950 Constant *NewCV = ConstantVector::get(Elts);
951 return NewCV != C ? NewCV : nullptr;
952 }
953
954 // Limit search depth.
955 if (Depth == 10)
956 return nullptr;
957
958 // If multiple users are using the root value, proceed with
959 // simplification conservatively assuming that all elements
960 // are needed.
961 if (!V->hasOneUse()) {
962 // Quit if we find multiple users of a non-root value though.
963 // They'll be handled when it's their turn to be visited by
964 // the main instcombine process.
965 if (Depth != 0)
966 // TODO: Just compute the UndefElts information recursively.
967 return nullptr;
968
969 // Conservatively assume that all elements are needed.
970 DemandedElts = EltMask;
971 }
972
973 Instruction *I = dyn_cast<Instruction>(V);
974 if (!I) return nullptr; // Only analyze instructions.
975
976 bool MadeChange = false;
977 APInt UndefElts2(VWidth, 0);
978 APInt UndefElts3(VWidth, 0);
979 Value *TmpV;
980 switch (I->getOpcode()) {
981 default: break;
982
983 case Instruction::InsertElement: {
984 // If this is a variable index, we don't know which element it overwrites,
985 // so demand exactly the same input as we produce.
986 ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
987 if (!Idx) {
988 // Note that we can't propagate undef elt info, because we don't know
989 // which elt is getting updated.
990 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
991 UndefElts2, Depth + 1);
992 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
993 break;
994 }
995
996 // The element inserted overwrites whatever was there, so the input demanded
997 // set is simpler than the output set.
998 unsigned IdxNo = Idx->getZExtValue();
999 APInt PreInsertDemandedElts = DemandedElts;
1000 if (IdxNo < VWidth)
1001 PreInsertDemandedElts.clearBit(IdxNo);
1002 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), PreInsertDemandedElts,
1003 UndefElts, Depth + 1);
1004 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1005
1006 // If this is inserting an element that isn't demanded, remove this
1007 // insertelement.
1008 if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
1009 Worklist.Add(I);
1010 return I->getOperand(0);
1011 }
1012
1013 // The inserted element is defined.
1014 UndefElts.clearBit(IdxNo);
1015 break;
1016 }
1017 case Instruction::ShuffleVector: {
1018 ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
1019 unsigned LHSVWidth =
1020 Shuffle->getOperand(0)->getType()->getVectorNumElements();
1021 APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
1022 for (unsigned i = 0; i < VWidth; i++) {
1023 if (DemandedElts[i]) {
1024 unsigned MaskVal = Shuffle->getMaskValue(i);
1025 if (MaskVal != -1u) {
1026 assert(MaskVal < LHSVWidth * 2 &&
1027 "shufflevector mask index out of range!");
1028 if (MaskVal < LHSVWidth)
1029 LeftDemanded.setBit(MaskVal);
1030 else
1031 RightDemanded.setBit(MaskVal - LHSVWidth);
1032 }
1033 }
1034 }
1035
1036 APInt LHSUndefElts(LHSVWidth, 0);
1037 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
1038 LHSUndefElts, Depth + 1);
1039 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1040
1041 APInt RHSUndefElts(LHSVWidth, 0);
1042 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
1043 RHSUndefElts, Depth + 1);
1044 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1045
1046 bool NewUndefElts = false;
1047 unsigned LHSIdx = -1u, LHSValIdx = -1u;
1048 unsigned RHSIdx = -1u, RHSValIdx = -1u;
1049 bool LHSUniform = true;
1050 bool RHSUniform = true;
1051 for (unsigned i = 0; i < VWidth; i++) {
1052 unsigned MaskVal = Shuffle->getMaskValue(i);
1053 if (MaskVal == -1u) {
1054 UndefElts.setBit(i);
1055 } else if (!DemandedElts[i]) {
1056 NewUndefElts = true;
1057 UndefElts.setBit(i);
1058 } else if (MaskVal < LHSVWidth) {
1059 if (LHSUndefElts[MaskVal]) {
1060 NewUndefElts = true;
1061 UndefElts.setBit(i);
1062 } else {
1063 LHSIdx = LHSIdx == -1u ? i : LHSVWidth;
1064 LHSValIdx = LHSValIdx == -1u ? MaskVal : LHSVWidth;
1065 LHSUniform = LHSUniform && (MaskVal == i);
1066 }
1067 } else {
1068 if (RHSUndefElts[MaskVal - LHSVWidth]) {
1069 NewUndefElts = true;
1070 UndefElts.setBit(i);
1071 } else {
1072 RHSIdx = RHSIdx == -1u ? i : LHSVWidth;
1073 RHSValIdx = RHSValIdx == -1u ? MaskVal - LHSVWidth : LHSVWidth;
1074 RHSUniform = RHSUniform && (MaskVal - LHSVWidth == i);
1075 }
1076 }
1077 }
1078
1079 // Try to transform a shuffle with a constant vector and a single element
1080 // from this constant vector into a single insertelement instruction.
1081 // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
1082 // insertelement V, C[ci], ci-n
1083 if (LHSVWidth == Shuffle->getType()->getNumElements()) {
1084 Value *Op = nullptr;
1085 Constant *Value = nullptr;
1086 unsigned Idx = -1u;
1087
1088 // Find constant vector with the single element in shuffle (LHS or RHS).
1089 if (LHSIdx < LHSVWidth && RHSUniform) {
1090 if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
1091 Op = Shuffle->getOperand(1);
1092 Value = CV->getOperand(LHSValIdx);
1093 Idx = LHSIdx;
1094 }
1095 }
1096 if (RHSIdx < LHSVWidth && LHSUniform) {
1097 if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
1098 Op = Shuffle->getOperand(0);
1099 Value = CV->getOperand(RHSValIdx);
1100 Idx = RHSIdx;
1101 }
1102 }
1103 // Found constant vector with single element - convert to insertelement.
1104 if (Op && Value) {
1105 Instruction *New = InsertElementInst::Create(
1106 Op, Value, ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
1107 Shuffle->getName());
1108 InsertNewInstWith(New, *Shuffle);
1109 return New;
1110 }
1111 }
1112 if (NewUndefElts) {
1113 // Add additional discovered undefs.
1114 SmallVector<Constant*, 16> Elts;
1115 for (unsigned i = 0; i < VWidth; ++i) {
1116 if (UndefElts[i])
1117 Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
1118 else
1119 Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
1120 Shuffle->getMaskValue(i)));
1121 }
1122 I->setOperand(2, ConstantVector::get(Elts));
1123 MadeChange = true;
1124 }
1125 break;
1126 }
1127 case Instruction::Select: {
1128 APInt LeftDemanded(DemandedElts), RightDemanded(DemandedElts);
1129 if (ConstantVector* CV = dyn_cast<ConstantVector>(I->getOperand(0))) {
1130 for (unsigned i = 0; i < VWidth; i++) {
1131 Constant *CElt = CV->getAggregateElement(i);
1132 // Method isNullValue always returns false when called on a
1133 // ConstantExpr. If CElt is a ConstantExpr then skip it in order to
1134 // avoid propagating incorrect information.
1135 if (isa<ConstantExpr>(CElt))
1136 continue;
1137 if (CElt->isNullValue())
1138 LeftDemanded.clearBit(i);
1139 else
1140 RightDemanded.clearBit(i);
1141 }
1142 }
1143
1144 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), LeftDemanded, UndefElts,
1145 Depth + 1);
1146 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1147
1148 TmpV = SimplifyDemandedVectorElts(I->getOperand(2), RightDemanded,
1149 UndefElts2, Depth + 1);
1150 if (TmpV) { I->setOperand(2, TmpV); MadeChange = true; }
1151
1152 // Output elements are undefined if both are undefined.
1153 UndefElts &= UndefElts2;
1154 break;
1155 }
1156 case Instruction::BitCast: {
1157 // Vector->vector casts only.
1158 VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
1159 if (!VTy) break;
1160 unsigned InVWidth = VTy->getNumElements();
1161 APInt InputDemandedElts(InVWidth, 0);
1162 UndefElts2 = APInt(InVWidth, 0);
1163 unsigned Ratio;
1164
1165 if (VWidth == InVWidth) {
1166 // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
1167 // elements as are demanded of us.
1168 Ratio = 1;
1169 InputDemandedElts = DemandedElts;
1170 } else if ((VWidth % InVWidth) == 0) {
1171 // If the number of elements in the output is a multiple of the number of
1172 // elements in the input then an input element is live if any of the
1173 // corresponding output elements are live.
1174 Ratio = VWidth / InVWidth;
1175 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1176 if (DemandedElts[OutIdx])
1177 InputDemandedElts.setBit(OutIdx / Ratio);
1178 } else if ((InVWidth % VWidth) == 0) {
1179 // If the number of elements in the input is a multiple of the number of
1180 // elements in the output then an input element is live if the
1181 // corresponding output element is live.
1182 Ratio = InVWidth / VWidth;
1183 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1184 if (DemandedElts[InIdx / Ratio])
1185 InputDemandedElts.setBit(InIdx);
1186 } else {
1187 // Unsupported so far.
1188 break;
1189 }
1190
1191 // Simplify the input operand with the translated demanded elements.
1192 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
1193 UndefElts2, Depth + 1);
1194 if (TmpV) {
1195 I->setOperand(0, TmpV);
1196 MadeChange = true;
1197 }
1198
1199 if (VWidth == InVWidth) {
1200 UndefElts = UndefElts2;
1201 } else if ((VWidth % InVWidth) == 0) {
1202 // If the number of elements in the output is a multiple of the number of
1203 // elements in the input then an output element is undef if the
1204 // corresponding input element is undef.
1205 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1206 if (UndefElts2[OutIdx / Ratio])
1207 UndefElts.setBit(OutIdx);
1208 } else if ((InVWidth % VWidth) == 0) {
1209 // If the number of elements in the input is a multiple of the number of
1210 // elements in the output then an output element is undef if all of the
1211 // corresponding input elements are undef.
1212 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1213 APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
1214 if (SubUndef.countPopulation() == Ratio)
1215 UndefElts.setBit(OutIdx);
1216 }
1217 } else {
1218 llvm_unreachable("Unimp")
1219 }
1220 break;
1221 }
1222 case Instruction::And:
1223 case Instruction::Or:
1224 case Instruction::Xor:
1225 case Instruction::Add:
1226 case Instruction::Sub:
1227 case Instruction::Mul:
1228 // div/rem demand all inputs, because they don't want divide by zero.
1229 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
1230 Depth + 1);
1231 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1232 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
1233 UndefElts2, Depth + 1);
1234 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1235
1236 // An output element is undefined only if it is undefined in both operands:
1237 // consider undef & 0, whose result is known zero, not undef.
1238 UndefElts &= UndefElts2;
1239 break;
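// Worked example (illustrative IR): in
//   %r = and <2 x i32> <i32 undef, i32 7>, <i32 0, i32 undef>
// element 0 is undef only in the first operand, yet element 0 of %r is a
// known zero (undef & 0 == 0), so the undef sets must be intersected
// rather than unioned.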
1240 case Instruction::FPTrunc:
1241 case Instruction::FPExt:
1242 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
1243 Depth + 1);
1244 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1245 break;
1246
1247 case Instruction::Call: {
1248 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1249 if (!II) break;
1250 switch (II->getIntrinsicID()) {
1251 default: break;
1252
1253 case Intrinsic::x86_xop_vfrcz_ss:
1254 case Intrinsic::x86_xop_vfrcz_sd:
1255 // The instructions for these intrinsics are specified to zero the upper
1256 // bits rather than pass them through like other scalar intrinsics. So we
1257 // shouldn't just use Arg0 when DemandedElts[0] is clear, as we do for
1258 // other intrinsics. Instead we should return a zero vector.
1259 if (!DemandedElts[0]) {
1260 Worklist.Add(II);
1261 return ConstantAggregateZero::get(II->getType());
1262 }
1263
1264 // Only the lower element is used.
1265 DemandedElts = 1;
1266 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
1267 UndefElts, Depth + 1);
1268 if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1269
1270 // Only the lower element may be undefined. The high elements are zero.
1271 UndefElts = UndefElts[0];
1272 break;
1273
1274 // Unary scalar-as-vector operations that work column-wise.
1275 case Intrinsic::x86_sse_rcp_ss:
1276 case Intrinsic::x86_sse_rsqrt_ss:
1277 case Intrinsic::x86_sse_sqrt_ss:
1278 case Intrinsic::x86_sse2_sqrt_sd:
1279 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
1280 UndefElts, Depth + 1);
1281 if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1282
1283 // If the lowest element of a scalar op isn't used then use Arg0.
1284 if (!DemandedElts[0]) {
1285 Worklist.Add(II);
1286 return II->getArgOperand(0);
1287 }
1288 // TODO: If only the low element is used, lower SQRT to FSQRT (with
1289 // rounding/exception checks).
1290 break;
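// Illustrative semantics (assuming the usual SSE definition): rcp_ss on
//   %a = <4 x float> { a0, a1, a2, a3 }
// yields { rcp(a0), a1, a2, a3 }, so when element 0 is not demanded the
// whole call can be replaced by %a itself, as done above.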
1291
1292 // Binary scalar-as-vector operations that work column-wise. The high
1293 // elements come from operand 0. The low element is a function of both
1294 // operands.
1295 case Intrinsic::x86_sse_min_ss:
1296 case Intrinsic::x86_sse_max_ss:
1297 case Intrinsic::x86_sse_cmp_ss:
1298 case Intrinsic::x86_sse2_min_sd:
1299 case Intrinsic::x86_sse2_max_sd:
1300 case Intrinsic::x86_sse2_cmp_sd: {
1301 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
1302 UndefElts, Depth + 1);
1303 if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1304
1305 // If the lowest element of a scalar op isn't used then use Arg0.
1306 if (!DemandedElts[0]) {
1307 Worklist.Add(II);
1308 return II->getArgOperand(0);
1309 }
1310
1311 // Only the lower element is used for operand 1.
1312 DemandedElts = 1;
1313 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
1314 UndefElts2, Depth + 1);
1315 if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
1316
1317 // The lower element is undefined only if both lower elements are
1318 // undefined: consider undef & 0, whose result is known zero, not undef.
1319 if (!UndefElts2[0])
1320 UndefElts.clearBit(0);
1321
1322 break;
1323 }
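// Worked example (illustrative, assuming the usual SSE semantics):
//   %r = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a, <4 x float> %b)
// computes { min(a0, b0), a1, a2, a3 }; elements 1-3 never read %b, which
// is why only bit 0 of the demanded set is kept for operand 1 above.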
1324
1325 // Binary scalar-as-vector operations that work column-wise. The high
1326 // elements come from operand 0 and the low element comes from operand 1.
1327 case Intrinsic::x86_sse41_round_ss:
1328 case Intrinsic::x86_sse41_round_sd: {
1329 // The result's low element comes from operand 1, so the low element of operand 0 is never used.
1330 APInt DemandedElts2 = DemandedElts;
1331 DemandedElts2.clearBit(0);
1332 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts2,
1333 UndefElts, Depth + 1);
1334 if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1335
1336 // If the lowest element of a scalar op isn't used then use Arg0.
1337 if (!DemandedElts[0]) {
1338 Worklist.Add(II);
1339 return II->getArgOperand(0);
1340 }
1341
1342 // Only the lower element is used for operand 1.
1343 DemandedElts = 1;
1344 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
1345 UndefElts2, Depth + 1);
1346 if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
1347
1348 // Take the high undef elements from operand 0 and take the lower element
1349 // from operand 1.
1350 UndefElts.clearBit(0);
1351 UndefElts |= UndefElts2[0];
1352 break;
1353 }
1354
1355 // Three-input scalar-as-vector operations that work column-wise. The high
1356 // elements come from operand 0 and the low element is a function of all
1357 // three inputs.
1358 case Intrinsic::x86_avx512_mask_add_ss_round:
1359 case Intrinsic::x86_avx512_mask_div_ss_round:
1360 case Intrinsic::x86_avx512_mask_mul_ss_round:
1361 case Intrinsic::x86_avx512_mask_sub_ss_round:
1362 case Intrinsic::x86_avx512_mask_max_ss_round:
1363 case Intrinsic::x86_avx512_mask_min_ss_round:
1364 case Intrinsic::x86_avx512_mask_add_sd_round:
1365 case Intrinsic::x86_avx512_mask_div_sd_round:
1366 case Intrinsic::x86_avx512_mask_mul_sd_round:
1367 case Intrinsic::x86_avx512_mask_sub_sd_round:
1368 case Intrinsic::x86_avx512_mask_max_sd_round:
1369 case Intrinsic::x86_avx512_mask_min_sd_round:
1370 case Intrinsic::x86_fma_vfmadd_ss:
1371 case Intrinsic::x86_fma_vfmsub_ss:
1372 case Intrinsic::x86_fma_vfnmadd_ss:
1373 case Intrinsic::x86_fma_vfnmsub_ss:
1374 case Intrinsic::x86_fma_vfmadd_sd:
1375 case Intrinsic::x86_fma_vfmsub_sd:
1376 case Intrinsic::x86_fma_vfnmadd_sd:
1377 case Intrinsic::x86_fma_vfnmsub_sd:
1378 case Intrinsic::x86_avx512_mask_vfmadd_ss:
1379 case Intrinsic::x86_avx512_mask_vfmadd_sd:
1380 case Intrinsic::x86_avx512_maskz_vfmadd_ss:
1381 case Intrinsic::x86_avx512_maskz_vfmadd_sd:
1382 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
1383 UndefElts, Depth + 1);
1384 if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1385
1386 // If the lowest element of a scalar op isn't used then use Arg0.
1387 if (!DemandedElts[0]) {
1388 Worklist.Add(II);
1389 return II->getArgOperand(0);
1390 }
1391
1392 // Only the lower element is used for operands 1 and 2.
1393 DemandedElts = 1;
1394 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
1395 UndefElts2, Depth + 1);
1396 if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
1397 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
1398 UndefElts3, Depth + 1);
1399 if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }
1400
1401 // The lower element is undefined only if all three lower elements are
1402 // undefined: consider undef & 0, whose result is known zero, not undef.
1403 if (!UndefElts2[0] || !UndefElts3[0])
1404 UndefElts.clearBit(0);
1405
1406 break;
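// Illustrative semantics (assuming the usual scalar FMA definition):
// vfmadd_ss(%a, %b, %c) computes { a0*b0 + c0, a1, a2, a3 }, so the high
// (passthru) elements read only operand 0 while the low element reads all
// three operands.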
1407
1408 case Intrinsic::x86_avx512_mask3_vfmadd_ss:
1409 case Intrinsic::x86_avx512_mask3_vfmadd_sd:
1410 case Intrinsic::x86_avx512_mask3_vfmsub_ss:
1411 case Intrinsic::x86_avx512_mask3_vfmsub_sd:
1412 case Intrinsic::x86_avx512_mask3_vfnmsub_ss:
1413 case Intrinsic::x86_avx512_mask3_vfnmsub_sd:
1414 // These intrinsics get the passthru bits from operand 2.
1415 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
1416 UndefElts, Depth + 1);
1417 if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }
1418
1419 // If the lowest element of a scalar op isn't used then use Arg2.
1420 if (!DemandedElts[0]) {
1421 Worklist.Add(II);
1422 return II->getArgOperand(2);
1423 }
1424
1425 // Only the lower element is used for operands 0 and 1.
1426 DemandedElts = 1;
1427 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
1428 UndefElts2, Depth + 1);
1429 if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1430 TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
1431 UndefElts3, Depth + 1);
1432 if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
1433
1434 // The lower element is undefined only if all three lower elements are
1435 // undefined: consider undef & 0, whose result is known zero, not undef.
1436 if (!UndefElts2[0] || !UndefElts3[0])
1437 UndefElts.clearBit(0);
1438
1439 break;
1440
1441 case Intrinsic::x86_sse2_pmulu_dq:
1442 case Intrinsic::x86_sse41_pmuldq:
1443 case Intrinsic::x86_avx2_pmul_dq:
1444 case Intrinsic::x86_avx2_pmulu_dq:
1445 case Intrinsic::x86_avx512_pmul_dq_512:
1446 case Intrinsic::x86_avx512_pmulu_dq_512: {
1447 Value *Op0 = II->getArgOperand(0);
1448 Value *Op1 = II->getArgOperand(1);
1449 unsigned InnerVWidth = Op0->getType()->getVectorNumElements();
1450 assert((VWidth * 2) == InnerVWidth && "Unexpected input size")(((VWidth * 2) == InnerVWidth && "Unexpected input size") ? static_cast<void> (0) : __assert_fail ("(VWidth * 2) == InnerVWidth && \"Unexpected input size\"", "/build/llvm-toolchain-snapshot-6.0~svn318211/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp", 1450, __PRETTY_FUNCTION__));
1451
1452 APInt InnerDemandedElts(InnerVWidth, 0);
1453 for (unsigned i = 0; i != VWidth; ++i)
1454 if (DemandedElts[i])
1455 InnerDemandedElts.setBit(i * 2);
1456
1457 UndefElts2 = APInt(InnerVWidth, 0);
1458 TmpV = SimplifyDemandedVectorElts(Op0, InnerDemandedElts, UndefElts2,
1459 Depth + 1);
1460 if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1461
1462 UndefElts3 = APInt(InnerVWidth, 0);
1463 TmpV = SimplifyDemandedVectorElts(Op1, InnerDemandedElts, UndefElts3,
1464 Depth + 1);
1465 if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
1466
1467 break;
1468 }
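// Worked example (illustrative, assuming the usual PMULUDQ semantics): for
// @llvm.x86.sse2.pmulu.dq the result is v2i64 (VWidth = 2) and the inputs
// are v4i32 (InnerVWidth = 4). Demanding output element i demands input
// element 2*i, the low 32-bit dword of each 64-bit lane, which is all the
// multiply actually reads.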
1469
1470 case Intrinsic::x86_sse2_packssdw_128:
1471 case Intrinsic::x86_sse2_packsswb_128:
1472 case Intrinsic::x86_sse2_packuswb_128:
1473 case Intrinsic::x86_sse41_packusdw:
1474 case Intrinsic::x86_avx2_packssdw:
1475 case Intrinsic::x86_avx2_packsswb:
1476 case Intrinsic::x86_avx2_packusdw:
1477 case Intrinsic::x86_avx2_packuswb:
1478 case Intrinsic::x86_avx512_packssdw_512:
1479 case Intrinsic::x86_avx512_packsswb_512:
1480 case Intrinsic::x86_avx512_packusdw_512:
1481 case Intrinsic::x86_avx512_packuswb_512: {
1482 auto *Ty0 = II->getArgOperand(0)->getType();
1483 unsigned InnerVWidth = Ty0->getVectorNumElements();
1484 assert(VWidth == (InnerVWidth * 2) && "Unexpected input size")((VWidth == (InnerVWidth * 2) && "Unexpected input size") ? static_cast<void> (0) : __assert_fail ("VWidth == (InnerVWidth * 2) && \"Unexpected input size\"", "/build/llvm-toolchain-snapshot-6.0~svn318211/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp", 1484, __PRETTY_FUNCTION__));
1485
1486 unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
1487 unsigned VWidthPerLane = VWidth / NumLanes;
1488 unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;
1489
1490 // Per lane, pack the elements of the first input and then the second.
1491 // e.g.
1492 // v8i16 PACK(v4i32 X, v4i32 Y) - (X[0..3],Y[0..3])
1493 // v32i8 PACK(v16i16 X, v16i16 Y) - (X[0..7],Y[0..7]),(X[8..15],Y[8..15])
1494 for (int OpNum = 0; OpNum != 2; ++OpNum) {
1495 APInt OpDemandedElts(InnerVWidth, 0);
1496 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1497 unsigned LaneIdx = Lane * VWidthPerLane;
1498 for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
1499 unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
1500 if (DemandedElts[Idx])
1501 OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
1502 }
1503 }
1504
1505 // Demand elements from the operand.
1506 auto *Op = II->getArgOperand(OpNum);
1507 APInt OpUndefElts(InnerVWidth, 0);
1508 TmpV = SimplifyDemandedVectorElts(Op, OpDemandedElts, OpUndefElts,
1509 Depth + 1);
1510 if (TmpV) {
1511 II->setArgOperand(OpNum, TmpV);
1512 MadeChange = true;
1513 }
1514
1515 // Pack the operand's UNDEF elements, one lane at a time.
1516 OpUndefElts = OpUndefElts.zext(VWidth);
1517 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1518 APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
1519 LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
1520 LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
1521 UndefElts |= LaneElts;
1522 }
1523 }
1524 break;
1525 }
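// Worked example (illustrative, following the loop above): for
// @llvm.x86.avx2.packssdw (a v16i16 result from two v8i32 inputs),
// NumLanes = 2, VWidthPerLane = 8 and InnerVWidthPerLane = 4. Demanding
// output element 10 (lane 1, element 2 of the lane's first half, OpNum = 0)
// sets bit Lane*InnerVWidthPerLane + Elt = 4 + 2 = 6 of operand 0's
// demanded set.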
1526
1527 // PSHUFB
1528 case Intrinsic::x86_ssse3_pshuf_b_128:
1529 case Intrinsic::x86_avx2_pshuf_b:
1530 case Intrinsic::x86_avx512_pshuf_b_512:
1531 // PERMILVAR
1532 case Intrinsic::x86_avx_vpermilvar_ps:
1533 case Intrinsic::x86_avx_vpermilvar_ps_256:
1534 case Intrinsic::x86_avx512_vpermilvar_ps_512:
1535 case Intrinsic::x86_avx_vpermilvar_pd:
1536 case Intrinsic::x86_avx_vpermilvar_pd_256:
1537 case Intrinsic::x86_avx512_vpermilvar_pd_512:
1538 // PERMV
1539 case Intrinsic::x86_avx2_permd:
1540 case Intrinsic::x86_avx2_permps: {
1541 Value *Op1 = II->getArgOperand(1);
1542 TmpV = SimplifyDemandedVectorElts(Op1, DemandedElts, UndefElts,
1543 Depth + 1);
1544 if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
1545 break;
1546 }
1547
1548 // SSE4A instructions leave the upper 64 bits of the 128-bit result
1549 // in an undefined state.
1550 case Intrinsic::x86_sse4a_extrq:
1551 case Intrinsic::x86_sse4a_extrqi:
1552 case Intrinsic::x86_sse4a_insertq:
1553 case Intrinsic::x86_sse4a_insertqi:
1554 UndefElts.setHighBits(VWidth / 2);
1555 break;
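// e.g. (illustrative) for the v2i64 result of extrq, VWidth / 2 = 1, so
// element 1 (the upper 64 bits) is marked undef.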
1556 case Intrinsic::amdgcn_buffer_load:
1557 case Intrinsic::amdgcn_buffer_load_format:
1558 case Intrinsic::amdgcn_image_sample:
1559 case Intrinsic::amdgcn_image_sample_cl:
1560 case Intrinsic::amdgcn_image_sample_d:
1561 case Intrinsic::amdgcn_image_sample_d_cl:
1562 case Intrinsic::amdgcn_image_sample_l:
1563 case Intrinsic::amdgcn_image_sample_b:
1564 case Intrinsic::amdgcn_image_sample_b_cl:
1565 case Intrinsic::amdgcn_image_sample_lz:
1566 case Intrinsic::amdgcn_image_sample_cd:
1567 case Intrinsic::amdgcn_image_sample_cd_cl:
1568
1569 case Intrinsic::amdgcn_image_sample_c:
1570 case Intrinsic::amdgcn_image_sample_c_cl:
1571 case Intrinsic::amdgcn_image_sample_c_d:
1572 case Intrinsic::amdgcn_image_sample_c_d_cl:
1573 case Intrinsic::amdgcn_image_sample_c_l:
1574 case Intrinsic::amdgcn_image_sample_c_b:
1575 case Intrinsic::amdgcn_image_sample_c_b_cl:
1576 case Intrinsic::amdgcn_image_sample_c_lz:
1577 case Intrinsic::amdgcn_image_sample_c_cd:
1578 case Intrinsic::amdgcn_image_sample_c_cd_cl:
1579
1580 case Intrinsic::amdgcn_image_sample_o:
1581 case Intrinsic::amdgcn_image_sample_cl_o:
1582 case Intrinsic::amdgcn_image_sample_d_o:
1583 case Intrinsic::amdgcn_image_sample_d_cl_o:
1584 case Intrinsic::amdgcn_image_sample_l_o:
1585 case Intrinsic::amdgcn_image_sample_b_o:
1586 case Intrinsic::amdgcn_image_sample_b_cl_o:
1587 case Intrinsic::amdgcn_image_sample_lz_o:
1588 case Intrinsic::amdgcn_image_sample_cd_o:
1589 case Intrinsic::amdgcn_image_sample_cd_cl_o:
1590
1591 case Intrinsic::amdgcn_image_sample_c_o:
1592 case Intrinsic::amdgcn_image_sample_c_cl_o:
1593 case Intrinsic::amdgcn_image_sample_c_d_o:
1594 case Intrinsic::amdgcn_image_sample_c_d_cl_o:
1595 case Intrinsic::amdgcn_image_sample_c_l_o:
1596 case Intrinsic::amdgcn_image_sample_c_b_o:
1597 case Intrinsic::amdgcn_image_sample_c_b_cl_o:
1598 case Intrinsic::amdgcn_image_sample_c_lz_o:
1599 case Intrinsic::amdgcn_image_sample_c_cd_o:
1600 case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
1601
1602 case Intrinsic::amdgcn_image_getlod: {
1603 if (VWidth == 1 || !DemandedElts.isMask())
1604 return nullptr;
1605
1606 // TODO: Handle 3-element vectors when supported in code gen.
1607 unsigned NewNumElts = PowerOf2Ceil(DemandedElts.countTrailingOnes());
1608 if (NewNumElts == VWidth)
1609 return nullptr;
1610
1611 Module *M = II->getParent()->getParent()->getParent();
1612 Type *EltTy = V->getType()->getVectorElementType();
1613
1614 Type *NewTy = (NewNumElts == 1) ? EltTy :
1615 VectorType::get(EltTy, NewNumElts);
1616
1617 auto IID = II->getIntrinsicID();
1618
1619 bool IsBuffer = IID == Intrinsic::amdgcn_buffer_load ||
1620 IID == Intrinsic::amdgcn_buffer_load_format;
1621
1622 Function *NewIntrin = IsBuffer ?
1623 Intrinsic::getDeclaration(M, IID, NewTy) :
1624 // Samplers have 3 mangled types.
1625 Intrinsic::getDeclaration(M, IID,
1626 { NewTy, II->getArgOperand(0)->getType(),
1627 II->getArgOperand(1)->getType()});
1628
1629 SmallVector<Value *, 5> Args;
1630 for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
1631 Args.push_back(II->getArgOperand(I));
1632
1633 IRBuilderBase::InsertPointGuard Guard(Builder);
1634 Builder.SetInsertPoint(II);
1635
1636 CallInst *NewCall = Builder.CreateCall(NewIntrin, Args);
1637 NewCall->takeName(II);
1638 NewCall->copyMetadata(*II);
1639
1640 if (!IsBuffer) {
1641 ConstantInt *DMask = dyn_cast<ConstantInt>(NewCall->getArgOperand(3));
1642 if (DMask) {
1643 unsigned DMaskVal = DMask->getZExtValue() & 0xf;
1644
1645 unsigned PopCnt = 0;
1646 unsigned NewDMask = 0;
1647 for (unsigned I = 0; I < 4; ++I) {
1648 const unsigned Bit = 1 << I;
1649 if (!!(DMaskVal & Bit)) {
1650 if (++PopCnt > NewNumElts)
1651 break;
1652
1653 NewDMask |= Bit;
1654 }
1655 }
1656
1657 NewCall->setArgOperand(3, ConstantInt::get(DMask->getType(), NewDMask));
1658 }
1659 }
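// Worked example (illustrative trace of the loop above): with
// DMaskVal = 0b1011 and NewNumElts = 2, the loop keeps the first two set
// bits and produces NewDMask = 0b0011, so the narrowed call writes only the
// x and y channels.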
1660
1661
1662 if (NewNumElts == 1) {
1663 return Builder.CreateInsertElement(UndefValue::get(V->getType()),
1664 NewCall, static_cast<uint64_t>(0));
1665 }
1666
1667 SmallVector<uint32_t, 8> EltMask;
1668 for (unsigned I = 0; I < VWidth; ++I)
1669 EltMask.push_back(I);
1670
1671 Value *Shuffle = Builder.CreateShuffleVector(
1672 NewCall, UndefValue::get(NewTy), EltMask);
1673
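// Note that this path returns Shuffle directly rather than falling through
// to the function's final `return MadeChange ? I : nullptr;`, so the store
// on the next line is dead, which is what the analyzer reports below.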
1674 MadeChange = true;
Value stored to 'MadeChange' is never read
1675 return Shuffle;
1676 }
1677 }
1678 break;
1679 }
1680 }
1681 return MadeChange ? I : nullptr;
1682}