LLVM 23.0.0git
SeparateConstOffsetFromGEP.cpp
Go to the documentation of this file.
1//===- SeparateConstOffsetFromGEP.cpp -------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Loop unrolling may create many similar GEPs for array accesses.
10// e.g., a 2-level loop
11//
12// float a[32][32]; // global variable
13//
14// for (int i = 0; i < 2; ++i) {
15// for (int j = 0; j < 2; ++j) {
16// ...
17// ... = a[x + i][y + j];
18// ...
19// }
20// }
21//
22// will probably be unrolled to:
23//
24// gep %a, 0, %x, %y; load
25// gep %a, 0, %x, %y + 1; load
26// gep %a, 0, %x + 1, %y; load
27// gep %a, 0, %x + 1, %y + 1; load
28//
29// LLVM's GVN does not use partial redundancy elimination yet, and is thus
30// unable to reuse (gep %a, 0, %x, %y). As a result, this misoptimization incurs
31// significant slowdown in targets with limited addressing modes. For instance,
32// because the PTX target does not support the reg+reg addressing mode, the
33// NVPTX backend emits PTX code that literally computes the pointer address of
34// each GEP, wasting tons of registers. It emits the following PTX for the
35// first load and similar PTX for other loads.
36//
37// mov.u32 %r1, %x;
38// mov.u32 %r2, %y;
39// mul.wide.u32 %rl2, %r1, 128;
40// mov.u64 %rl3, a;
41// add.s64 %rl4, %rl3, %rl2;
42// mul.wide.u32 %rl5, %r2, 4;
43// add.s64 %rl6, %rl4, %rl5;
44// ld.global.f32 %f1, [%rl6];
45//
46// To reduce the register pressure, the optimization implemented in this file
47// merges the common part of a group of GEPs, so we can compute each pointer
48// address by adding a simple offset to the common part, saving many registers.
49//
50// It works by splitting each GEP into a variadic base and a constant offset.
51// The variadic base can be computed once and reused by multiple GEPs, and the
52// constant offsets can be nicely folded into the reg+immediate addressing mode
53// (supported by most targets) without using any extra register.
54//
55// For instance, we transform the four GEPs and four loads in the above example
56// into:
57//
58// base = gep a, 0, x, y
59// load base
60// load base + 1 * sizeof(float)
61// load base + 32 * sizeof(float)
62// load base + 33 * sizeof(float)
63//
64// Given the transformed IR, a backend that supports the reg+immediate
65// addressing mode can easily fold the pointer arithmetics into the loads. For
66// example, the NVPTX backend can easily fold the pointer arithmetics into the
67// ld.global.f32 instructions, and the resultant PTX uses much fewer registers.
68//
69// mov.u32 %r1, %tid.x;
70// mov.u32 %r2, %tid.y;
71// mul.wide.u32 %rl2, %r1, 128;
72// mov.u64 %rl3, a;
73// add.s64 %rl4, %rl3, %rl2;
74// mul.wide.u32 %rl5, %r2, 4;
75// add.s64 %rl6, %rl4, %rl5;
76// ld.global.f32 %f1, [%rl6]; // so far the same as unoptimized PTX
77// ld.global.f32 %f2, [%rl6+4]; // much better
78// ld.global.f32 %f3, [%rl6+128]; // much better
79// ld.global.f32 %f4, [%rl6+132]; // much better
80//
81// Another improvement enabled by the LowerGEP flag is to lower a GEP with
82// multiple indices to multiple GEPs with a single index.
83// Such transformation can have following benefits:
84// (1) It can always extract constants in the indices of structure type.
85// (2) After such Lowering, there are more optimization opportunities such as
86// CSE, LICM and CGP.
87//
88// E.g. The following GEPs have multiple indices:
89// BB1:
90// %p = getelementptr [10 x %struct], ptr %ptr, i64 %i, i64 %j1, i32 3
91// load %p
92// ...
93// BB2:
94// %p2 = getelementptr [10 x %struct], ptr %ptr, i64 %i, i64 %j2, i32 2
95// load %p2
96// ...
97//
98// We can not do CSE to the common part related to index "i64 %i". Lowering
99// GEPs can achieve such goals.
100//
101// This pass will lower a GEP with multiple indices into multiple GEPs with a
102// single index:
103// BB1:
104// %2 = mul i64 %i, length_of_10xstruct ; CSE opportunity
105// %3 = getelementptr i8, ptr %ptr, i64 %2 ; CSE opportunity
106// %4 = mul i64 %j1, length_of_struct
107// %5 = getelementptr i8, ptr %3, i64 %4
108// %p = getelementptr i8, ptr %5, struct_field_3 ; Constant offset
109// load %p
110// ...
111// BB2:
112// %8 = mul i64 %i, length_of_10xstruct ; CSE opportunity
113// %9 = getelementptr i8, ptr %ptr, i64 %8 ; CSE opportunity
114// %10 = mul i64 %j2, length_of_struct
115// %11 = getelementptr i8, ptr %9, i64 %10
116// %p2 = getelementptr i8, ptr %11, struct_field_2 ; Constant offset
117// load %p2
118// ...
119//
120// Lowering GEPs can also benefit other passes such as LICM and CGP.
121// LICM (Loop Invariant Code Motion) can not hoist/sink a GEP of multiple
122// indices if one of the index is variant. If we lower such GEP into invariant
123// parts and variant parts, LICM can hoist/sink those invariant parts.
124// CGP (CodeGen Prepare) tries to sink address calculations that match the
125// target's addressing modes. A GEP with multiple indices may not match and will
126// not be sunk. If we lower such GEP into smaller parts, CGP may sink some of
127// them. So we end up with a better addressing mode.
128//
129//===----------------------------------------------------------------------===//
130
132#include "llvm/ADT/APInt.h"
133#include "llvm/ADT/DenseMap.h"
135#include "llvm/ADT/SmallVector.h"
141#include "llvm/IR/BasicBlock.h"
142#include "llvm/IR/Constant.h"
143#include "llvm/IR/Constants.h"
144#include "llvm/IR/DataLayout.h"
145#include "llvm/IR/DerivedTypes.h"
146#include "llvm/IR/Dominators.h"
147#include "llvm/IR/Function.h"
149#include "llvm/IR/IRBuilder.h"
150#include "llvm/IR/InstrTypes.h"
151#include "llvm/IR/Instruction.h"
152#include "llvm/IR/Instructions.h"
153#include "llvm/IR/Module.h"
154#include "llvm/IR/PassManager.h"
155#include "llvm/IR/PatternMatch.h"
156#include "llvm/IR/Type.h"
157#include "llvm/IR/User.h"
158#include "llvm/IR/Value.h"
160#include "llvm/Pass.h"
161#include "llvm/Support/Casting.h"
167#include <cassert>
168#include <cstdint>
169#include <string>
170
171using namespace llvm;
172using namespace llvm::PatternMatch;
173
175 "disable-separate-const-offset-from-gep", cl::init(false),
176 cl::desc("Do not separate the constant offset from a GEP instruction"),
177 cl::Hidden);
178
179// Setting this flag may emit false positives when the input module already
180// contains dead instructions. Therefore, we set it only in unit tests that are
181// free of dead code.
182static cl::opt<bool>
183 VerifyNoDeadCode("reassociate-geps-verify-no-dead-code", cl::init(false),
184 cl::desc("Verify this pass produces no dead code"),
185 cl::Hidden);
186
187namespace {
188
189/// A helper class for separating a constant offset from a GEP index.
190///
191/// In real programs, a GEP index may be more complicated than a simple addition
192/// of something and a constant integer which can be trivially splitted. For
193/// example, to split ((a << 3) | 5) + b, we need to search deeper for the
194/// constant offset, so that we can separate the index to (a << 3) + b and 5.
195///
196/// Therefore, this class looks into the expression that computes a given GEP
197/// index, and tries to find a constant integer that can be hoisted to the
198/// outermost level of the expression as an addition. Not every constant in an
199/// expression can jump out. e.g., we cannot transform (b * (a + 5)) to (b * a +
200/// 5); nor can we transform (3 * (a + 5)) to (3 * a + 5), however in this case,
201/// -instcombine probably already optimized (3 * (a + 5)) to (3 * a + 15).
202class ConstantOffsetExtractor {
203public:
204 /// Extracts a constant offset from the given GEP index. It returns the
205 /// new index representing the remainder (equal to the original index minus
206 /// the constant offset), or nullptr if we cannot extract a constant offset.
207 /// \p Idx The given GEP index
208 /// \p GEP The given GEP
209 /// \p UserChainTail Outputs the tail of UserChain so that we can
210 /// garbage-collect unused instructions in UserChain.
211 /// \p PreservesNUW Outputs whether the extraction allows preserving the
212 /// GEP's nuw flag, if it has one.
213 static Value *Extract(Value *Idx, GetElementPtrInst *GEP,
214 User *&UserChainTail, bool &PreservesNUW);
215
216 /// Looks for a constant offset from the given GEP index without extracting
217 /// it. It returns the numeric value of the extracted constant offset (0 if
218 /// failed). The meaning of the arguments are the same as Extract.
219 static APInt Find(Value *Idx, GetElementPtrInst *GEP);
220
221private:
222 ConstantOffsetExtractor(BasicBlock::iterator InsertionPt)
223 : IP(InsertionPt), DL(InsertionPt->getDataLayout()) {}
224
225 /// Searches the expression that computes V for a non-zero constant C s.t.
226 /// V can be reassociated into the form V' + C. If the searching is
227 /// successful, returns C and update UserChain as a def-use chain from C to V;
228 /// otherwise, UserChain is empty.
229 ///
230 /// \p V The given expression
231 /// \p GEP The base GEP instruction, used for determining relevant
232 /// types, flags, and non-negativity needed for safe
233 /// reassociation
234 /// \p Idx The original index of the GEP
235 /// \p SignExtended Whether V will be sign-extended in the computation of
236 /// the GEP index
237 /// \p ZeroExtended Whether V will be zero-extended in the computation of
238 /// the GEP index
239 APInt find(Value *V, GetElementPtrInst *GEP, Value *Idx, bool SignExtended,
240 bool ZeroExtended);
241
242 /// A helper function to look into both operands of a binary operator.
243 APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
244 bool ZeroExtended);
245
246 /// After finding the constant offset C from the GEP index I, we build a new
247 /// index I' s.t. I' + C = I. This function builds and returns the new
248 /// index I' according to UserChain produced by function "find".
249 ///
250 /// The building conceptually takes two steps:
251 /// 1) iteratively distribute sext/zext/trunc towards the leaves of the
252 /// expression tree that computes I
253 /// 2) reassociate the expression tree to the form I' + C.
254 ///
255 /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute
256 /// sext to a, b and 5 so that we have
257 /// sext(a) + (sext(b) + 5).
258 /// Then, we reassociate it to
259 /// (sext(a) + sext(b)) + 5.
260 /// Given this form, we know I' is sext(a) + sext(b).
261 Value *rebuildWithoutConstOffset();
262
263 /// After the first step of rebuilding the GEP index without the constant
264 /// offset, distribute sext/zext/trunc to the operands of all operators in
265 /// UserChain. e.g., zext(sext(a + (b + 5)) (assuming no overflow) =>
266 /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
267 ///
268 /// The function also updates UserChain to point to new subexpressions after
269 /// distributing sext/zext/trunc. e.g., the old UserChain of the above example
270 /// is
271 /// 5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)),
272 /// and the new UserChain is
273 /// zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) ->
274 /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))
275 ///
276 /// \p ChainIndex The index to UserChain. ChainIndex is initially
277 /// UserChain.size() - 1, and is decremented during
278 /// the recursion.
279 Value *distributeCastsAndCloneChain(unsigned ChainIndex);
280
281 /// Reassociates the GEP index to the form I' + C and returns I'.
282 Value *removeConstOffset(unsigned ChainIndex);
283
284 /// A helper function to apply CastInsts, a list of sext/zext/trunc, to value
285 /// V. e.g., if CastInsts = [sext i32 to i64, zext i16 to i32], this function
286 /// returns "sext i32 (zext i16 V to i32) to i64".
287 Value *applyCasts(Value *V);
288
289 /// A helper function that returns whether we can trace into the operands
290 /// of binary operator BO for a constant offset.
291 ///
292 /// \p SignExtended Whether BO is surrounded by sext
293 /// \p ZeroExtended Whether BO is surrounded by zext
294 /// \p GEP The base GEP instruction, used for determining relevant
295 /// types and flags needed for safe reassociation.
296 /// \p Idx The original index of the GEP
297 bool canTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
298 GetElementPtrInst *GEP, Value *Idx);
299
300 /// The path from the constant offset to the old GEP index. e.g., if the GEP
301 /// index is "a * b + (c + 5)". After running function find, UserChain[0] will
302 /// be the constant 5, UserChain[1] will be the subexpression "c + 5", and
303 /// UserChain[2] will be the entire expression "a * b + (c + 5)".
304 ///
305 /// This path helps to rebuild the new GEP index.
306 SmallVector<User *, 8> UserChain;
307
308 /// A data structure used in rebuildWithoutConstOffset. Contains all
309 /// sext/zext/trunc instructions along UserChain.
311
312 /// Insertion position of cloned instructions.
314
315 const DataLayout &DL;
316};
317
318/// A pass that tries to split every GEP in the function into a variadic
319/// base and a constant offset. It is a FunctionPass because searching for the
320/// constant offset may inspect other basic blocks.
321class SeparateConstOffsetFromGEPLegacyPass : public FunctionPass {
322public:
323 static char ID;
324
325 SeparateConstOffsetFromGEPLegacyPass(bool LowerGEP = false)
326 : FunctionPass(ID), LowerGEP(LowerGEP) {
329 }
330
331 void getAnalysisUsage(AnalysisUsage &AU) const override {
332 AU.addRequired<DominatorTreeWrapperPass>();
333 AU.addRequired<TargetTransformInfoWrapperPass>();
334 AU.addRequired<LoopInfoWrapperPass>();
335 AU.setPreservesCFG();
336 AU.addRequired<TargetLibraryInfoWrapperPass>();
337 }
338
339 bool runOnFunction(Function &F) override;
340
341private:
342 bool LowerGEP;
343};
344
/// Shared implementation of the transform: tries to split every GEP in the
/// function into a variadic base and a constant offset. It operates on a
/// whole function (rather than a single GEP at a time) because searching for
/// the constant offset may inspect other basic blocks. Invoked by the pass
/// wrappers (e.g. SeparateConstOffsetFromGEPLegacyPass above).
class SeparateConstOffsetFromGEP {
public:
  SeparateConstOffsetFromGEP(
      DominatorTree *DT, LoopInfo *LI, TargetLibraryInfo *TLI,
      function_ref<TargetTransformInfo &(Function &)> GetTTI, bool LowerGEP)
      : DT(DT), LI(LI), TLI(TLI), GetTTI(GetTTI), LowerGEP(LowerGEP) {}

  /// Runs the transform over \p F; returns true if anything changed.
  bool run(Function &F);

private:
  /// Track the operands of an add or sub.
  using ExprKey = std::pair<Value *, Value *>;

  /// Create a pair for use as a map key for a commutable operation.
  static ExprKey createNormalizedCommutablePair(Value *A, Value *B) {
    // Order by pointer value so (A, B) and (B, A) produce the same key.
    if (A < B)
      return {A, B};
    return {B, A};
  }

  /// Tries to split the given GEP into a variadic base and a constant offset,
  /// and returns true if the splitting succeeds.
  bool splitGEP(GetElementPtrInst *GEP);

  /// Tries to reorder the given GEP with the GEP that produces the base if
  /// doing so results in producing a constant offset as the outermost
  /// index.
  bool reorderGEP(GetElementPtrInst *GEP, TargetTransformInfo &TTI);

  /// Lower a GEP with multiple indices into multiple GEPs with a single index.
  /// Function splitGEP already split the original GEP into a variadic part and
  /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
  /// variadic part into a set of GEPs with a single index and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic              The variadic part of the original GEP.
  /// \p AccumulativeByteOffset The constant offset.
  void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
                              const APInt &AccumulativeByteOffset);

  /// Finds the constant offset within each index and accumulates them. If
  /// LowerGEP is true, it finds in indices of both sequential and structure
  /// types, otherwise it only finds in sequential indices. The output
  /// NeedsExtraction indicates whether we successfully find a non-zero constant
  /// offset.
  APInt accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);

  /// Canonicalize array indices to pointer-size integers. This helps to
  /// simplify the logic of splitting a GEP. For example, if a + b is a
  /// pointer-size integer, we have
  ///   gep base, a + b = gep (gep base, a), b
  /// However, this equality may not hold if the size of a + b is smaller than
  /// the pointer size, because LLVM conceptually sign-extends GEP indices to
  /// pointer size before computing the address
  /// (http://llvm.org/docs/LangRef.html#id181).
  ///
  /// This canonicalization is very likely already done in clang and
  /// instcombine. Therefore, the program will probably remain the same.
  ///
  /// Returns true if the module changes.
  ///
  /// Verified in @i32_add in split-gep.ll
  bool canonicalizeArrayIndicesToIndexSize(GetElementPtrInst *GEP);

  /// Optimize sext(a)+sext(b) to sext(a+b) when a+b can't sign overflow.
  /// SeparateConstOffsetFromGEP distributes a sext to leaves before extracting
  /// the constant offset. After extraction, it becomes desirable to reunion the
  /// distributed sexts. For example,
  ///
  ///                              &a[sext(i +nsw (j +nsw 5)]
  ///   => distribute             &a[sext(i) +nsw (sext(j) +nsw 5)]
  ///   => constant extraction    &a[sext(i) + sext(j)] + 5
  ///   => reunion                &a[sext(i +nsw j)] + 5
  bool reuniteExts(Function &F);

  /// A helper that reunites sexts in an instruction.
  bool reuniteExts(Instruction *I);

  /// Find the closest dominator of <Dominatee> that is equivalent to <Key>.
  Instruction *findClosestMatchingDominator(
      ExprKey Key, Instruction *Dominatee,
      DenseMap<ExprKey, SmallVector<Instruction *, 2>> &DominatingExprs);

  /// Verify F is free of dead code.
  void verifyNoDeadCode(Function &F);

  // Whether \p v has more than one use inside loop \p L, per the name-level
  // contract (definition appears later in this file).
  bool hasMoreThanOneUseInLoop(Value *v, Loop *L);

  // Swap the index operand of two GEP.
  void swapGEPOperand(GetElementPtrInst *First, GetElementPtrInst *Second);

  // Check if it is safe to swap operand of two GEP.
  bool isLegalToSwapOperand(GetElementPtrInst *First, GetElementPtrInst *Second,
                            Loop *CurLoop);

  // Analyses and helpers supplied via the constructor.
  const DataLayout *DL = nullptr;
  DominatorTree *DT = nullptr;
  LoopInfo *LI;
  TargetLibraryInfo *TLI;
  // Retrieved lazily since not always used.
  function_ref<TargetTransformInfo &(Function &)> GetTTI;

  /// Whether to lower a GEP with multiple indices into arithmetic operations or
  /// multiple GEPs with a single index.
  bool LowerGEP;

  // Dominating add/sub expressions keyed by their normalized operand pair;
  // consulted through findClosestMatchingDominator during reuniteExts.
  DenseMap<ExprKey, SmallVector<Instruction *, 2>> DominatingAdds;
  DenseMap<ExprKey, SmallVector<Instruction *, 2>> DominatingSubs;
};
456
457} // end anonymous namespace
458
459char SeparateConstOffsetFromGEPLegacyPass::ID = 0;
460
462 SeparateConstOffsetFromGEPLegacyPass, "separate-const-offset-from-gep",
463 "Split GEPs to a variadic base and a constant offset for better CSE", false,
464 false)
471 SeparateConstOffsetFromGEPLegacyPass, "separate-const-offset-from-gep",
472 "Split GEPs to a variadic base and a constant offset for better CSE", false,
473 false)
474
476 return new SeparateConstOffsetFromGEPLegacyPass(LowerGEP);
477}
478
479// Checks if it is safe to reorder an add/sext result used in a GEP.
480//
481// An inbounds GEP does not guarantee that the index is non-negative.
482// This helper checks first if the index is known non-negative. If the index is
483// non-negative, the transform is always safe.
484// Second, it checks whether the GEP is inbounds and directly based on a global
485// or an alloca, which are required to prove futher transform validity.
486// If the GEP:
487// - Has a zero offset from the base, the index is non-negative (any negative
488// value would produce poison/UB)
489// - Has ObjectSize < (2^(N-1) - C + 1) * stride, where C is a constant from the
490// add, stride is the element size of Idx, and N is bitwidth of Idx.
491// This is because with this pattern:
492// %add = add iN %val, C
493// %sext = sext iN %add to i64
494// %gep = getelementptr inbounds TYPE, %sext
495// The worst-case is when %val sign-flips to produce the smallest magnitude
496// negative value, at 2^(N-1)-1. In this case, the add/sext is -(2^(N-1)-C+1),
497// and the sext/add is 2^(N-1)+C-1 (2^N difference). The original add/sext
498// only produces a defined GEP when -(2^(N-1)-C+1) is inbounds. So, if
499// ObjectSize < (2^(N-1) - C + 1) * stride, it is impossible for the
500// worst-case sign-flip to be defined.
501// Note that in this case the GEP is not neccesarily non-negative, but any
502// negative results will still produce the same behavior in the reordered
503// version with a defined GEP.
504// This can also work for negative C, but the threshold is instead
505// (2^(N-1)+C)*stride, since the sign-flip is done in reverse and is instead
506// producing a large positive value that still needs to be inbounds to the
507// object size. If C is negative, we cannot make any useful assumptions based
508// on the offset, since it would need to be extremely large.
510 const Value *Idx, const BinaryOperator *Add,
511 const DataLayout &DL) {
512 if (isKnownNonNegative(Idx, DL))
513 return true;
514
515 if (!GEP->isInBounds())
516 return false;
517
518 const Value *Ptr = GEP->getPointerOperand();
519 int64_t Offset = 0;
520 const Value *Base =
521 GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL);
522
523 // We need one of the operands to be a constant to be able to trace into the
524 // operator.
525 const ConstantInt *CI = dyn_cast<ConstantInt>(Add->getOperand(0));
526 if (!CI)
527 CI = dyn_cast<ConstantInt>(Add->getOperand(1));
528 if (!CI)
529 return false;
530 // Calculate the threshold
531 APInt Threshold;
532 unsigned N = Add->getType()->getIntegerBitWidth();
533 uint64_t Stride =
534 DL.getTypeAllocSize(GEP->getSourceElementType()).getFixedValue();
535 if (!CI->isNegative()) {
536 // (2^(N-1) - C + 1) * stride
537 Threshold = (APInt::getSignedMinValue(N).zext(128) -
538 CI->getValue().zextOrTrunc(128) + 1) *
539 APInt(128, Stride);
540 } else {
541 // (2^(N-1) + C) * stride
542 Threshold = (APInt::getSignedMinValue(N).zext(128) +
543 CI->getValue().zextOrTrunc(128)) *
544 APInt(128, Stride);
545 }
546
548 !CI->isNegative()) {
549 // If the offset is zero from an alloca or global, inbounds is sufficient to
550 // prove non-negativity if one add operand is non-negative
551 if (Offset == 0)
552 return true;
553
554 // Check if the Offset < Threshold (positive CI only) otherwise
555 if (Offset < 0)
556 return true;
557 if (APInt(128, (uint64_t)Offset).ult(Threshold))
558 return true;
559 } else {
560 // If we can't determine the offset from the base object, we can still use
561 // the underlying object and type size constraints
563 // Can only prove non-negativity if the base object is known
565 return false;
566 }
567
568 // Check if the ObjectSize < Threshold (for both positive or negative C)
569 uint64_t ObjSize = 0;
570 if (const auto *AI = dyn_cast<AllocaInst>(Base)) {
571 if (auto AllocSize = AI->getAllocationSize(DL))
572 if (!AllocSize->isScalable())
573 ObjSize = AllocSize->getFixedValue();
574 } else if (const auto *GV = dyn_cast<GlobalVariable>(Base)) {
575 ObjSize = DL.getTypeAllocSize(GV->getValueType()).getFixedValue();
576 }
577 if (ObjSize > 0 && APInt(128, ObjSize).ult(Threshold))
578 return true;
579
580 return false;
581}
582
583bool ConstantOffsetExtractor::canTraceInto(bool SignExtended, bool ZeroExtended,
584 BinaryOperator *BO,
585 GetElementPtrInst *GEP, Value *Idx) {
586 // We only consider ADD, SUB and OR, because a non-zero constant found in
587 // expressions composed of these operations can be easily hoisted as a
588 // constant offset by reassociation.
589 if (BO->getOpcode() != Instruction::Add &&
590 BO->getOpcode() != Instruction::Sub &&
591 BO->getOpcode() != Instruction::Or) {
592 return false;
593 }
594
595 // Do not trace into "or" unless it is equivalent to "add nuw nsw".
596 // This is the case if the or's disjoint flag is set.
597 if (BO->getOpcode() == Instruction::Or &&
598 !cast<PossiblyDisjointInst>(BO)->isDisjoint())
599 return false;
600
601 // FIXME: We don't currently support constants from the RHS of subs,
602 // when we are zero-extended, because we need a way to zero-extended
603 // them before they are negated.
604 if (ZeroExtended && !SignExtended && BO->getOpcode() == Instruction::Sub)
605 return false;
606
607 // In addition, tracing into BO requires that its surrounding sext/zext/trunc
608 // (if any) is distributable to both operands.
609 //
610 // Suppose BO = A op B.
611 // SignExtended | ZeroExtended | Distributable?
612 // --------------+--------------+----------------------------------
613 // 0 | 0 | true because no s/zext exists
614 // 0 | 1 | zext(BO) == zext(A) op zext(B)
615 // 1 | 0 | sext(BO) == sext(A) op sext(B)
616 // 1 | 1 | zext(sext(BO)) ==
617 // | | zext(sext(A)) op zext(sext(B))
618 if (BO->getOpcode() == Instruction::Add && !ZeroExtended && GEP) {
619 // If a + b >= 0 and (a >= 0 or b >= 0), then
620 // sext(a + b) = sext(a) + sext(b)
621 // even if the addition is not marked nsw.
622 //
623 // Leveraging this invariant, we can trace into an sext'ed inbound GEP
624 // index under certain conditions (see canReorderAddSextToGEP).
625 //
626 // Verified in @sext_add in split-gep.ll.
627 if (canReorderAddSextToGEP(GEP, Idx, BO, DL))
628 return true;
629 }
630
631 // For a sext(add nuw), allow tracing through when the enclosing GEP is both
632 // inbounds and nuw.
633 bool GEPInboundsNUW =
634 GEP ? (GEP->isInBounds() && GEP->hasNoUnsignedWrap()) : false;
635 if (BO->getOpcode() == Instruction::Add && SignExtended && !ZeroExtended &&
636 GEPInboundsNUW && BO->hasNoUnsignedWrap())
637 return true;
638
639 // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
640 // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
641 if (BO->getOpcode() == Instruction::Add ||
642 BO->getOpcode() == Instruction::Sub) {
643 if (SignExtended && !BO->hasNoSignedWrap())
644 return false;
645 if (ZeroExtended && !BO->hasNoUnsignedWrap())
646 return false;
647 }
648
649 return true;
650}
651
652APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
653 bool SignExtended,
654 bool ZeroExtended) {
655 // Save off the current height of the chain, in case we need to restore it.
656 size_t ChainLength = UserChain.size();
657
658 // BO cannot use information from the base GEP at this point, so clear it.
659 APInt ConstantOffset =
660 find(BO->getOperand(0), nullptr, nullptr, SignExtended, ZeroExtended);
661 // If we found a constant offset in the left operand, stop and return that.
662 // This shortcut might cause us to miss opportunities of combining the
663 // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
664 // However, such cases are probably already handled by -instcombine,
665 // given this pass runs after the standard optimizations.
666 if (ConstantOffset != 0) return ConstantOffset;
667
668 // Reset the chain back to where it was when we started exploring this node,
669 // since visiting the LHS didn't pan out.
670 UserChain.resize(ChainLength);
671
672 ConstantOffset =
673 find(BO->getOperand(1), nullptr, nullptr, SignExtended, ZeroExtended);
674 // If U is a sub operator, negate the constant offset found in the right
675 // operand.
676 if (BO->getOpcode() == Instruction::Sub)
677 ConstantOffset = -ConstantOffset;
678
679 // If RHS wasn't a suitable candidate either, reset the chain again.
680 if (ConstantOffset == 0)
681 UserChain.resize(ChainLength);
682
683 return ConstantOffset;
684}
685
686APInt ConstantOffsetExtractor::find(Value *V, GetElementPtrInst *GEP,
687 Value *Idx, bool SignExtended,
688 bool ZeroExtended) {
689 // TODO(jingyue): We could trace into integer/pointer casts, such as
690 // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
691 // integers because it gives good enough results for our benchmarks.
692 unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
693
694 // We cannot do much with Values that are not a User, such as an Argument.
695 User *U = dyn_cast<User>(V);
696 if (U == nullptr) return APInt(BitWidth, 0);
697
698 APInt ConstantOffset(BitWidth, 0);
699 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
700 // Hooray, we found it!
701 ConstantOffset = CI->getValue();
702 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
703 // Trace into subexpressions for more hoisting opportunities.
704 if (canTraceInto(SignExtended, ZeroExtended, BO, GEP, Idx))
705 ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
706 } else if (isa<TruncInst>(V)) {
707 ConstantOffset =
708 find(U->getOperand(0), GEP, Idx, SignExtended, ZeroExtended)
709 .trunc(BitWidth);
710 } else if (isa<SExtInst>(V)) {
711 ConstantOffset =
712 find(U->getOperand(0), GEP, Idx, /* SignExtended */ true, ZeroExtended)
713 .sext(BitWidth);
714 } else if (isa<ZExtInst>(V)) {
715 // As an optimization, we can clear the SignExtended flag because
716 // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
717 ConstantOffset = find(U->getOperand(0), GEP, Idx, /* SignExtended */ false,
718 /* ZeroExtended */ true)
719 .zext(BitWidth);
720 }
721
722 // If we found a non-zero constant offset, add it to the path for
723 // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't
724 // help this optimization.
725 if (ConstantOffset != 0)
726 UserChain.push_back(U);
727 return ConstantOffset;
728}
729
730Value *ConstantOffsetExtractor::applyCasts(Value *V) {
731 Value *Current = V;
732 // CastInsts is built in the use-def order. Therefore, we apply them to V
733 // in the reversed order.
734 for (CastInst *I : llvm::reverse(CastInsts)) {
735 if (Constant *C = dyn_cast<Constant>(Current)) {
736 // Try to constant fold the cast.
737 Current = ConstantFoldCastOperand(I->getOpcode(), C, I->getType(), DL);
738 if (Current)
739 continue;
740 }
741
742 Instruction *Cast = I->clone();
743 Cast->setOperand(0, Current);
744 // In ConstantOffsetExtractor::find we do not analyze nuw/nsw for trunc, so
745 // we assume that it is ok to redistribute trunc over add/sub/or. But for
746 // example (add (trunc nuw A), (trunc nuw B)) is more poisonous than (trunc
747 // nuw (add A, B))). To make such redistributions legal we drop all the
748 // poison generating flags from cloned trunc instructions here.
749 if (isa<TruncInst>(Cast))
751 Cast->insertBefore(*IP->getParent(), IP);
752 Current = Cast;
753 }
754 return Current;
755}
756
757Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
758 distributeCastsAndCloneChain(UserChain.size() - 1);
759 // Remove all nullptrs (used to be sext/zext/trunc) from UserChain.
760 unsigned NewSize = 0;
761 for (User *I : UserChain) {
762 if (I != nullptr) {
763 UserChain[NewSize] = I;
764 NewSize++;
765 }
766 }
767 UserChain.resize(NewSize);
768 return removeConstOffset(UserChain.size() - 1);
769}
770
771Value *
772ConstantOffsetExtractor::distributeCastsAndCloneChain(unsigned ChainIndex) {
773 User *U = UserChain[ChainIndex];
774 if (ChainIndex == 0) {
776 // If U is a ConstantInt, applyCasts will return a ConstantInt as well.
777 return UserChain[ChainIndex] = cast<ConstantInt>(applyCasts(U));
778 }
779
780 if (CastInst *Cast = dyn_cast<CastInst>(U)) {
781 assert(
782 (isa<SExtInst>(Cast) || isa<ZExtInst>(Cast) || isa<TruncInst>(Cast)) &&
783 "Only following instructions can be traced: sext, zext & trunc");
784 CastInsts.push_back(Cast);
785 UserChain[ChainIndex] = nullptr;
786 return distributeCastsAndCloneChain(ChainIndex - 1);
787 }
788
789 // Function find only trace into BinaryOperator and CastInst.
790 BinaryOperator *BO = cast<BinaryOperator>(U);
791 // OpNo = which operand of BO is UserChain[ChainIndex - 1]
792 unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
793 Value *TheOther = applyCasts(BO->getOperand(1 - OpNo));
794 Value *NextInChain = distributeCastsAndCloneChain(ChainIndex - 1);
795
796 BinaryOperator *NewBO = nullptr;
797 if (OpNo == 0) {
798 NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
799 BO->getName(), IP);
800 } else {
801 NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
802 BO->getName(), IP);
803 }
804 return UserChain[ChainIndex] = NewBO;
805}
806
807Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
808 if (ChainIndex == 0) {
809 assert(isa<ConstantInt>(UserChain[ChainIndex]));
810 return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
811 }
812
813 BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
814 assert((BO->use_empty() || BO->hasOneUse()) &&
815 "distributeCastsAndCloneChain clones each BinaryOperator in "
816 "UserChain, so no one should be used more than "
817 "once");
818
819 unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
820 assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
821 Value *NextInChain = removeConstOffset(ChainIndex - 1);
822 Value *TheOther = BO->getOperand(1 - OpNo);
823
824 // If NextInChain is 0 and not the LHS of a sub, we can simplify the
825 // sub-expression to be just TheOther.
826 if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
827 if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
828 return TheOther;
829 }
830
831 BinaryOperator::BinaryOps NewOp = BO->getOpcode();
832 if (BO->getOpcode() == Instruction::Or) {
833 // Rebuild "or" as "add", because "or" may be invalid for the new
834 // expression.
835 //
836 // For instance, given
837 // a | (b + 5) where a and b + 5 have no common bits,
838 // we can extract 5 as the constant offset.
839 //
840 // However, reusing the "or" in the new index would give us
841 // (a | b) + 5
842 // which does not equal a | (b + 5).
843 //
844 // Replacing the "or" with "add" is fine, because
845 // a | (b + 5) = a + (b + 5) = (a + b) + 5
846 NewOp = Instruction::Add;
847 }
848
849 BinaryOperator *NewBO;
850 if (OpNo == 0) {
851 NewBO = BinaryOperator::Create(NewOp, NextInChain, TheOther, "", IP);
852 } else {
853 NewBO = BinaryOperator::Create(NewOp, TheOther, NextInChain, "", IP);
854 }
855 NewBO->takeName(BO);
856 return NewBO;
857}
858
859/// A helper function to check if reassociating through an entry in the user
860/// chain would invalidate the GEP's nuw flag.
861static bool allowsPreservingNUW(const User *U) {
862 if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
863 // Binary operations need to be effectively add nuw.
864 auto Opcode = BO->getOpcode();
865 if (Opcode == BinaryOperator::Or) {
866 // Ors are only considered here if they are disjoint. The addition that
867 // they represent in this case is NUW.
868 assert(cast<PossiblyDisjointInst>(BO)->isDisjoint());
869 return true;
870 }
871 return Opcode == BinaryOperator::Add && BO->hasNoUnsignedWrap();
872 }
873 // UserChain can only contain ConstantInt, CastInst, or BinaryOperator.
874 // Among the possible CastInsts, only trunc without nuw is a problem: If it
875 // is distributed through an add nuw, wrapping may occur:
876 // "add nuw trunc(a), trunc(b)" is more poisonous than "trunc(add nuw a, b)"
877 if (const TruncInst *TI = dyn_cast<TruncInst>(U))
878 return TI->hasNoUnsignedWrap();
879 assert((isa<CastInst>(U) || isa<ConstantInt>(U)) && "Unexpected User.");
880 return true;
881}
882
883Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP,
884 User *&UserChainTail,
885 bool &PreservesNUW) {
886 ConstantOffsetExtractor Extractor(GEP->getIterator());
887 // Find a non-zero constant offset first.
888 APInt ConstantOffset = Extractor.find(Idx, GEP, Idx, /* SignExtended */ false,
889 /* ZeroExtended */ false);
890 if (ConstantOffset == 0) {
891 UserChainTail = nullptr;
892 PreservesNUW = true;
893 return nullptr;
894 }
895
896 PreservesNUW = all_of(Extractor.UserChain, allowsPreservingNUW);
897
898 // Separates the constant offset from the GEP index.
899 Value *IdxWithoutConstOffset = Extractor.rebuildWithoutConstOffset();
900 UserChainTail = Extractor.UserChain.back();
901 return IdxWithoutConstOffset;
902}
903
904APInt ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP) {
905 return ConstantOffsetExtractor(GEP->getIterator())
906 .find(Idx, GEP, Idx, /* SignExtended */ false, /* ZeroExtended */ false);
907}
908
909bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToIndexSize(
910 GetElementPtrInst *GEP) {
911 bool Changed = false;
912 Type *PtrIdxTy = DL->getIndexType(GEP->getType());
914 for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
915 I != E; ++I, ++GTI) {
916 // Skip struct member indices which must be i32.
917 if (GTI.isSequential()) {
918 if ((*I)->getType() != PtrIdxTy) {
919 *I = CastInst::CreateIntegerCast(*I, PtrIdxTy, true, "idxprom",
920 GEP->getIterator());
921 Changed = true;
922 }
923 }
924 }
925 return Changed;
926}
927
928APInt SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
929 bool &NeedsExtraction) {
930 NeedsExtraction = false;
931 unsigned IdxWidth = DL->getIndexTypeSizeInBits(GEP->getType());
932 APInt AccumulativeByteOffset(IdxWidth, 0);
934 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
935 if (GTI.isSequential()) {
936 // Constant offsets of scalable types are not really constant.
937 if (GTI.getIndexedType()->isScalableTy())
938 continue;
939
940 // Tries to extract a constant offset from this GEP index.
941 APInt ConstantOffset =
942 ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP)
943 .sextOrTrunc(IdxWidth);
944 if (ConstantOffset != 0) {
945 NeedsExtraction = true;
946 // A GEP may have multiple indices. We accumulate the extracted
947 // constant offset to a byte offset, and later offset the remainder of
948 // the original GEP with this byte offset.
949 AccumulativeByteOffset +=
950 ConstantOffset * APInt(IdxWidth,
952 /*IsSigned=*/true, /*ImplicitTrunc=*/true);
953 }
954 } else if (LowerGEP) {
955 StructType *StTy = GTI.getStructType();
956 uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
957 // Skip field 0 as the offset is always 0.
958 if (Field != 0) {
959 NeedsExtraction = true;
960 AccumulativeByteOffset +=
961 APInt(IdxWidth, DL->getStructLayout(StTy)->getElementOffset(Field),
962 /*IsSigned=*/true, /*ImplicitTrunc=*/true);
963 }
964 }
965 }
966 return AccumulativeByteOffset;
967}
968
969void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
970 GetElementPtrInst *Variadic, const APInt &AccumulativeByteOffset) {
971 IRBuilder<> Builder(Variadic);
972 Type *PtrIndexTy = DL->getIndexType(Variadic->getType());
973
974 Value *ResultPtr = Variadic->getOperand(0);
975 Loop *L = LI->getLoopFor(Variadic->getParent());
976 // Check if the base is not loop invariant or used more than once.
977 bool isSwapCandidate =
978 L && L->isLoopInvariant(ResultPtr) &&
979 !hasMoreThanOneUseInLoop(ResultPtr, L);
980 Value *FirstResult = nullptr;
981
982 gep_type_iterator GTI = gep_type_begin(*Variadic);
983 // Create an ugly GEP for each sequential index. We don't create GEPs for
984 // structure indices, as they are accumulated in the constant offset index.
985 for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
986 if (GTI.isSequential()) {
987 Value *Idx = Variadic->getOperand(I);
988 // Skip zero indices.
989 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
990 if (CI->isZero())
991 continue;
992
993 APInt ElementSize = APInt(PtrIndexTy->getIntegerBitWidth(),
995 // Scale the index by element size.
996 if (ElementSize != 1) {
997 if (ElementSize.isPowerOf2()) {
998 Idx = Builder.CreateShl(
999 Idx, ConstantInt::get(PtrIndexTy, ElementSize.logBase2()));
1000 } else {
1001 Idx =
1002 Builder.CreateMul(Idx, ConstantInt::get(PtrIndexTy, ElementSize));
1003 }
1004 }
1005 // Create an ugly GEP with a single index for each index.
1006 ResultPtr = Builder.CreatePtrAdd(ResultPtr, Idx, "uglygep");
1007 if (FirstResult == nullptr)
1008 FirstResult = ResultPtr;
1009 }
1010 }
1011
1012 // Create a GEP with the constant offset index.
1013 if (AccumulativeByteOffset != 0) {
1014 Value *Offset = ConstantInt::get(PtrIndexTy, AccumulativeByteOffset);
1015 ResultPtr = Builder.CreatePtrAdd(ResultPtr, Offset, "uglygep");
1016 } else
1017 isSwapCandidate = false;
1018
1019 // If we created a GEP with constant index, and the base is loop invariant,
1020 // then we swap the first one with it, so LICM can move constant GEP out
1021 // later.
1022 auto *FirstGEP = dyn_cast_or_null<GetElementPtrInst>(FirstResult);
1023 auto *SecondGEP = dyn_cast<GetElementPtrInst>(ResultPtr);
1024 if (isSwapCandidate && isLegalToSwapOperand(FirstGEP, SecondGEP, L))
1025 swapGEPOperand(FirstGEP, SecondGEP);
1026
1027 Variadic->replaceAllUsesWith(ResultPtr);
1028 Variadic->eraseFromParent();
1029}
1030
1031bool SeparateConstOffsetFromGEP::reorderGEP(GetElementPtrInst *GEP,
1032 TargetTransformInfo &TTI) {
1033 auto PtrGEP = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
1034 if (!PtrGEP)
1035 return false;
1036
1037 bool NestedNeedsExtraction;
1038 APInt NestedByteOffset = accumulateByteOffset(PtrGEP, NestedNeedsExtraction);
1039 if (!NestedNeedsExtraction)
1040 return false;
1041
1042 unsigned AddrSpace = PtrGEP->getPointerAddressSpace();
1043 if (!TTI.isLegalAddressingMode(GEP->getResultElementType(),
1044 /*BaseGV=*/nullptr,
1045 NestedByteOffset.getSExtValue(),
1046 /*HasBaseReg=*/true, /*Scale=*/0, AddrSpace))
1047 return false;
1048
1049 bool GEPInBounds = GEP->isInBounds();
1050 bool PtrGEPInBounds = PtrGEP->isInBounds();
1051 bool IsChainInBounds = GEPInBounds && PtrGEPInBounds;
1052 if (IsChainInBounds) {
1053 auto IsKnownNonNegative = [this](Value *V) {
1054 return isKnownNonNegative(V, *DL);
1055 };
1056 IsChainInBounds &= all_of(GEP->indices(), IsKnownNonNegative);
1057 if (IsChainInBounds)
1058 IsChainInBounds &= all_of(PtrGEP->indices(), IsKnownNonNegative);
1059 }
1060
1061 IRBuilder<> Builder(GEP);
1062 // For trivial GEP chains, we can swap the indices.
1063 Value *NewSrc = Builder.CreateGEP(
1064 GEP->getSourceElementType(), PtrGEP->getPointerOperand(),
1065 SmallVector<Value *, 4>(GEP->indices()), "", IsChainInBounds);
1066 Value *NewGEP = Builder.CreateGEP(PtrGEP->getSourceElementType(), NewSrc,
1067 SmallVector<Value *, 4>(PtrGEP->indices()),
1068 "", IsChainInBounds);
1069 GEP->replaceAllUsesWith(NewGEP);
1071 return true;
1072}
1073
1074bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
1075 // Skip vector GEPs.
1076 if (GEP->getType()->isVectorTy())
1077 return false;
1078
1079 // If the base of this GEP is a ptradd of a constant, lets pass the constant
1080 // along. This ensures that when we have a chain of GEPs the constant
1081 // offset from each is accumulated.
1082 Value *NewBase;
1083 const APInt *BaseOffset;
1084 bool ExtractBase = match(GEP->getPointerOperand(),
1085 m_PtrAdd(m_Value(NewBase), m_APInt(BaseOffset)));
1086
1087 unsigned IdxWidth = DL->getIndexTypeSizeInBits(GEP->getType());
1088 APInt BaseByteOffset =
1089 ExtractBase ? BaseOffset->sextOrTrunc(IdxWidth) : APInt(IdxWidth, 0);
1090
1091 // The backend can already nicely handle the case where all indices are
1092 // constant.
1093 if (GEP->hasAllConstantIndices() && !ExtractBase)
1094 return false;
1095
1096 bool Changed = canonicalizeArrayIndicesToIndexSize(GEP);
1097
1098 bool NeedsExtraction;
1099 APInt NonBaseByteOffset = accumulateByteOffset(GEP, NeedsExtraction);
1100 APInt AccumulativeByteOffset = BaseByteOffset + NonBaseByteOffset;
1101
1102 TargetTransformInfo &TTI = GetTTI(*GEP->getFunction());
1103
1104 if (!NeedsExtraction && !ExtractBase) {
1105 Changed |= reorderGEP(GEP, TTI);
1106 return Changed;
1107 }
1108
1109 // If LowerGEP is disabled, before really splitting the GEP, check whether the
1110 // backend supports the addressing mode we are about to produce. If no, this
1111 // splitting probably won't be beneficial.
1112 // If LowerGEP is enabled, even the extracted constant offset can not match
1113 // the addressing mode, we can still do optimizations to other lowered parts
1114 // of variable indices. Therefore, we don't check for addressing modes in that
1115 // case.
1116 if (!LowerGEP) {
1117 unsigned AddrSpace = GEP->getPointerAddressSpace();
1119 GEP->getResultElementType(),
1120 /*BaseGV=*/nullptr, AccumulativeByteOffset.getSExtValue(),
1121 /*HasBaseReg=*/true, /*Scale=*/0, AddrSpace)) {
1122 // If the addressing mode was not legal and the base byte offset was not
1123 // 0, it could be a case where the total offset became too large for
1124 // the addressing mode. Try again without extracting the base offset.
1125 if (!ExtractBase)
1126 return Changed;
1127 ExtractBase = false;
1128 BaseByteOffset = APInt(IdxWidth, 0);
1129 AccumulativeByteOffset = NonBaseByteOffset;
1131 GEP->getResultElementType(),
1132 /*BaseGV=*/nullptr, AccumulativeByteOffset.getSExtValue(),
1133 /*HasBaseReg=*/true, /*Scale=*/0, AddrSpace))
1134 return Changed;
1135 // We can proceed with just extracting the other (non-base) offsets.
1136 NeedsExtraction = true;
1137 }
1138 }
1139
1140 // Track information for preserving GEP flags.
1141 bool AllOffsetsNonNegative = AccumulativeByteOffset.isNonNegative();
1142 bool AllNUWPreserved = GEP->hasNoUnsignedWrap();
1143 bool NewGEPInBounds = GEP->isInBounds();
1144 bool NewGEPNUSW = GEP->hasNoUnsignedSignedWrap();
1145
1146 // Remove the constant offset in each sequential index. The resultant GEP
1147 // computes the variadic base.
1148 // Notice that we don't remove struct field indices here. If LowerGEP is
1149 // disabled, a structure index is not accumulated and we still use the old
1150 // one. If LowerGEP is enabled, a structure index is accumulated in the
1151 // constant offset. LowerToSingleIndexGEPs will later handle the constant
1152 // offset and won't need a new structure index.
1154 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
1155 if (GTI.isSequential()) {
1156 // Constant offsets of scalable types are not really constant.
1157 if (GTI.getIndexedType()->isScalableTy())
1158 continue;
1159
1160 // Splits this GEP index into a variadic part and a constant offset, and
1161 // uses the variadic part as the new index.
1162 Value *Idx = GEP->getOperand(I);
1163 User *UserChainTail;
1164 bool PreservesNUW;
1165 Value *NewIdx = ConstantOffsetExtractor::Extract(Idx, GEP, UserChainTail,
1166 PreservesNUW);
1167 if (NewIdx != nullptr) {
1168 // Switches to the index with the constant offset removed.
1169 GEP->setOperand(I, NewIdx);
1170 // After switching to the new index, we can garbage-collect UserChain
1171 // and the old index if they are not used.
1174 Idx = NewIdx;
1175 AllNUWPreserved &= PreservesNUW;
1176 }
1177 AllOffsetsNonNegative =
1178 AllOffsetsNonNegative && isKnownNonNegative(Idx, *DL);
1179 }
1180 }
1181 if (ExtractBase) {
1182 GEPOperator *Base = cast<GEPOperator>(GEP->getPointerOperand());
1183 AllNUWPreserved &= Base->hasNoUnsignedWrap();
1184 NewGEPInBounds &= Base->isInBounds();
1185 NewGEPNUSW &= Base->hasNoUnsignedSignedWrap();
1186 AllOffsetsNonNegative &= BaseByteOffset.isNonNegative();
1187
1188 GEP->setOperand(0, NewBase);
1190 }
1191
1192 // Clear the inbounds attribute because the new index may be off-bound.
1193 // e.g.,
1194 //
1195 // b = add i64 a, 5
1196 // addr = gep inbounds float, float* p, i64 b
1197 //
1198 // is transformed to:
1199 //
1200 // addr2 = gep float, float* p, i64 a ; inbounds removed
1201 // addr = gep float, float* addr2, i64 5 ; inbounds removed
1202 //
1203 // If a is -4, although the old index b is in bounds, the new index a is
1204 // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
1205 // inbounds keyword is not present, the offsets are added to the base
1206 // address with silently-wrapping two's complement arithmetic".
1207 // Therefore, the final code will be a semantically equivalent.
1208 GEPNoWrapFlags NewGEPFlags = GEPNoWrapFlags::none();
1209
1210 // If the initial GEP was inbounds/nusw and all variable indices and the
1211 // accumulated offsets are non-negative, they can be added in any order and
1212 // the intermediate results are in bounds and don't overflow in a nusw sense.
1213 // So, we can preserve the inbounds/nusw flag for both GEPs.
1214 bool CanPreserveInBoundsNUSW = AllOffsetsNonNegative;
1215
1216 // If the initial GEP was NUW and all operations that we reassociate were NUW
1217 // additions, the resulting GEPs are also NUW.
1218 if (AllNUWPreserved) {
1219 NewGEPFlags |= GEPNoWrapFlags::noUnsignedWrap();
1220 // If the initial GEP additionally had NUSW (or inbounds, which implies
1221 // NUSW), we know that the indices in the initial GEP must all have their
1222 // signbit not set. For indices that are the result of NUW adds, the
1223 // add-operands therefore also don't have their signbit set. Therefore, all
1224 // indices of the resulting GEPs are non-negative -> we can preserve
1225 // the inbounds/nusw flag.
1226 CanPreserveInBoundsNUSW |= NewGEPNUSW;
1227 }
1228
1229 if (CanPreserveInBoundsNUSW) {
1230 if (NewGEPInBounds)
1231 NewGEPFlags |= GEPNoWrapFlags::inBounds();
1232 else if (NewGEPNUSW)
1233 NewGEPFlags |= GEPNoWrapFlags::noUnsignedSignedWrap();
1234 }
1235
1236 GEP->setNoWrapFlags(NewGEPFlags);
1237
1238 // Lowers a GEP to GEPs with a single index.
1239 if (LowerGEP) {
1240 lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
1241 return true;
1242 }
1243
1244 // No need to create another GEP if the accumulative byte offset is 0.
1245 if (AccumulativeByteOffset == 0)
1246 return true;
1247
1248 // Offsets the base with the accumulative byte offset.
1249 //
1250 // %gep ; the base
1251 // ... %gep ...
1252 //
1253 // => add the offset
1254 //
1255 // %gep2 ; clone of %gep
1256 // %new.gep = gep i8, %gep2, %offset
1257 // %gep ; will be removed
1258 // ... %gep ...
1259 //
1260 // => replace all uses of %gep with %new.gep and remove %gep
1261 //
1262 // %gep2 ; clone of %gep
1263 // %new.gep = gep i8, %gep2, %offset
1264 // ... %new.gep ...
1265 Instruction *NewGEP = GEP->clone();
1266 NewGEP->insertBefore(GEP->getIterator());
1267
1268 Type *PtrIdxTy = DL->getIndexType(GEP->getType());
1269 IRBuilder<> Builder(GEP);
1270 NewGEP = cast<Instruction>(Builder.CreatePtrAdd(
1271 NewGEP, ConstantInt::get(PtrIdxTy, AccumulativeByteOffset),
1272 GEP->getName(), NewGEPFlags));
1273 NewGEP->copyMetadata(*GEP);
1274
1275 GEP->replaceAllUsesWith(NewGEP);
1276 GEP->eraseFromParent();
1277
1278 return true;
1279}
1280
1281bool SeparateConstOffsetFromGEPLegacyPass::runOnFunction(Function &F) {
1282 if (skipFunction(F))
1283 return false;
1284 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1285 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1286 auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1287 auto GetTTI = [this](Function &F) -> TargetTransformInfo & {
1288 return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1289 };
1290 SeparateConstOffsetFromGEP Impl(DT, LI, TLI, GetTTI, LowerGEP);
1291 return Impl.run(F);
1292}
1293
1294bool SeparateConstOffsetFromGEP::run(Function &F) {
1296 return false;
1297
1298 DL = &F.getDataLayout();
1299 bool Changed = false;
1300
1301 ReversePostOrderTraversal<Function *> RPOT(&F);
1302 for (BasicBlock *B : RPOT) {
1303 if (!DT->isReachableFromEntry(B))
1304 continue;
1305
1306 for (Instruction &I : llvm::make_early_inc_range(*B))
1307 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I))
1308 Changed |= splitGEP(GEP);
1309 // No need to split GEP ConstantExprs because all its indices are constant
1310 // already.
1311 }
1312
1313 Changed |= reuniteExts(F);
1314
1315 if (VerifyNoDeadCode)
1316 verifyNoDeadCode(F);
1317
1318 return Changed;
1319}
1320
1321Instruction *SeparateConstOffsetFromGEP::findClosestMatchingDominator(
1322 ExprKey Key, Instruction *Dominatee,
1323 DenseMap<ExprKey, SmallVector<Instruction *, 2>> &DominatingExprs) {
1324 auto Pos = DominatingExprs.find(Key);
1325 if (Pos == DominatingExprs.end())
1326 return nullptr;
1327
1328 auto &Candidates = Pos->second;
1329 // Because we process the basic blocks in pre-order of the dominator tree, a
1330 // candidate that doesn't dominate the current instruction won't dominate any
1331 // future instruction either. Therefore, we pop it out of the stack. This
1332 // optimization makes the algorithm O(n).
1333 while (!Candidates.empty()) {
1334 Instruction *Candidate = Candidates.back();
1335 if (DT->dominates(Candidate, Dominatee))
1336 return Candidate;
1337 Candidates.pop_back();
1338 }
1339 return nullptr;
1340}
1341
1342bool SeparateConstOffsetFromGEP::reuniteExts(Instruction *I) {
1343 if (!I->getType()->isIntOrIntVectorTy())
1344 return false;
1345
1346 // Dom: LHS+RHS
1347 // I: sext(LHS)+sext(RHS)
1348 // If Dom can't sign overflow and Dom dominates I, optimize I to sext(Dom).
1349 // TODO: handle zext
1350 Value *LHS = nullptr, *RHS = nullptr;
1351 if (match(I, m_Add(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
1352 if (LHS->getType() == RHS->getType()) {
1353 ExprKey Key = createNormalizedCommutablePair(LHS, RHS);
1354 if (auto *Dom = findClosestMatchingDominator(Key, I, DominatingAdds)) {
1355 Instruction *NewSExt =
1356 new SExtInst(Dom, I->getType(), "", I->getIterator());
1357 NewSExt->takeName(I);
1358 I->replaceAllUsesWith(NewSExt);
1359 NewSExt->setDebugLoc(I->getDebugLoc());
1361 return true;
1362 }
1363 }
1364 } else if (match(I, m_Sub(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
1365 if (LHS->getType() == RHS->getType()) {
1366 if (auto *Dom =
1367 findClosestMatchingDominator({LHS, RHS}, I, DominatingSubs)) {
1368 Instruction *NewSExt =
1369 new SExtInst(Dom, I->getType(), "", I->getIterator());
1370 NewSExt->takeName(I);
1371 I->replaceAllUsesWith(NewSExt);
1372 NewSExt->setDebugLoc(I->getDebugLoc());
1374 return true;
1375 }
1376 }
1377 }
1378
1379 // Add I to DominatingExprs if it's an add/sub that can't sign overflow.
1380 if (match(I, m_NSWAdd(m_Value(LHS), m_Value(RHS)))) {
1382 ExprKey Key = createNormalizedCommutablePair(LHS, RHS);
1383 DominatingAdds[Key].push_back(I);
1384 }
1385 } else if (match(I, m_NSWSub(m_Value(LHS), m_Value(RHS)))) {
1387 DominatingSubs[{LHS, RHS}].push_back(I);
1388 }
1389 return false;
1390}
1391
1392bool SeparateConstOffsetFromGEP::reuniteExts(Function &F) {
1393 bool Changed = false;
1394 DominatingAdds.clear();
1395 DominatingSubs.clear();
1396 for (const auto Node : depth_first(DT)) {
1397 BasicBlock *BB = Node->getBlock();
1398 for (Instruction &I : llvm::make_early_inc_range(*BB))
1399 Changed |= reuniteExts(&I);
1400 }
1401 return Changed;
1402}
1403
1404void SeparateConstOffsetFromGEP::verifyNoDeadCode(Function &F) {
1405 for (BasicBlock &B : F) {
1406 for (Instruction &I : B) {
1408 std::string ErrMessage;
1409 raw_string_ostream RSO(ErrMessage);
1410 RSO << "Dead instruction detected!\n" << I << "\n";
1411 llvm_unreachable(RSO.str().c_str());
1412 }
1413 }
1414 }
1415}
1416
1417bool SeparateConstOffsetFromGEP::isLegalToSwapOperand(
1418 GetElementPtrInst *FirstGEP, GetElementPtrInst *SecondGEP, Loop *CurLoop) {
1419 if (!FirstGEP || !FirstGEP->hasOneUse())
1420 return false;
1421
1422 if (!SecondGEP || FirstGEP->getParent() != SecondGEP->getParent())
1423 return false;
1424
1425 if (FirstGEP == SecondGEP)
1426 return false;
1427
1428 unsigned FirstNum = FirstGEP->getNumOperands();
1429 unsigned SecondNum = SecondGEP->getNumOperands();
1430 // Give up if the number of operands are not 2.
1431 if (FirstNum != SecondNum || FirstNum != 2)
1432 return false;
1433
1434 Value *FirstBase = FirstGEP->getOperand(0);
1435 Value *SecondBase = SecondGEP->getOperand(0);
1436 Value *FirstOffset = FirstGEP->getOperand(1);
1437 // Give up if the index of the first GEP is loop invariant.
1438 if (CurLoop->isLoopInvariant(FirstOffset))
1439 return false;
1440
1441 // Give up if base doesn't have same type.
1442 if (FirstBase->getType() != SecondBase->getType())
1443 return false;
1444
1445 Instruction *FirstOffsetDef = dyn_cast<Instruction>(FirstOffset);
1446
1447 // Check if the second operand of first GEP has constant coefficient.
1448 // For an example, for the following code, we won't gain anything by
1449 // hoisting the second GEP out because the second GEP can be folded away.
1450 // %scevgep.sum.ur159 = add i64 %idxprom48.ur, 256
1451 // %67 = shl i64 %scevgep.sum.ur159, 2
1452 // %uglygep160 = getelementptr i8* %65, i64 %67
1453 // %uglygep161 = getelementptr i8* %uglygep160, i64 -1024
1454
1455 // Skip constant shift instruction which may be generated by Splitting GEPs.
1456 if (FirstOffsetDef && FirstOffsetDef->isShift() &&
1457 isa<ConstantInt>(FirstOffsetDef->getOperand(1)))
1458 FirstOffsetDef = dyn_cast<Instruction>(FirstOffsetDef->getOperand(0));
1459
1460 // Give up if FirstOffsetDef is an Add or Sub with constant.
1461 // Because it may not profitable at all due to constant folding.
1462 if (FirstOffsetDef)
1463 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FirstOffsetDef)) {
1464 unsigned opc = BO->getOpcode();
1465 if ((opc == Instruction::Add || opc == Instruction::Sub) &&
1466 (isa<ConstantInt>(BO->getOperand(0)) ||
1468 return false;
1469 }
1470 return true;
1471}
1472
1473bool SeparateConstOffsetFromGEP::hasMoreThanOneUseInLoop(Value *V, Loop *L) {
1474 // TODO: Could look at uses of globals, but we need to make sure we are
1475 // looking at the correct function.
1476 if (isa<Constant>(V))
1477 return false;
1478
1479 int UsesInLoop = 0;
1480 for (User *U : V->users()) {
1481 if (Instruction *User = dyn_cast<Instruction>(U))
1482 if (L->contains(User))
1483 if (++UsesInLoop > 1)
1484 return true;
1485 }
1486 return false;
1487}
1488
1489void SeparateConstOffsetFromGEP::swapGEPOperand(GetElementPtrInst *First,
1490 GetElementPtrInst *Second) {
1491 Value *Offset1 = First->getOperand(1);
1492 Value *Offset2 = Second->getOperand(1);
1493 First->setOperand(1, Offset2);
1494 Second->setOperand(1, Offset1);
1495
1496 // We changed p+o+c to p+c+o, p+c may not be inbound anymore.
1497 const DataLayout &DAL = First->getDataLayout();
1498 APInt Offset(DAL.getIndexSizeInBits(
1499 cast<PointerType>(First->getType())->getAddressSpace()),
1500 0);
1501 Value *NewBase =
1503 uint64_t ObjectSize;
1504 if (!getObjectSize(NewBase, ObjectSize, DAL, TLI) ||
1505 Offset.ugt(ObjectSize)) {
1506 // TODO(gep_nowrap): Make flag preservation more precise.
1507 First->setNoWrapFlags(GEPNoWrapFlags::none());
1509 } else
1510 First->setIsInBounds(true);
1511}
1512
1514 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
1516 ->printPipeline(OS, MapClassName2PassName);
1517 OS << '<';
1518 if (LowerGEP)
1519 OS << "lower-gep";
1520 OS << '>';
1521}
1522
1525 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1526 auto *LI = &AM.getResult<LoopAnalysis>(F);
1527 auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
1528 auto GetTTI = [&AM](Function &F) -> TargetTransformInfo & {
1529 return AM.getResult<TargetIRAnalysis>(F);
1530 };
1531 SeparateConstOffsetFromGEP Impl(DT, LI, TLI, GetTTI, LowerGEP);
1532 if (!Impl.run(F))
1533 return PreservedAnalyses::all();
1536 return PA;
1537}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
static const T * Find(StringRef S, ArrayRef< T > A)
Find KV in array using binary search.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
OptimizedStructLayoutField Field
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
static cl::opt< bool > DisableSeparateConstOffsetFromGEP("disable-separate-const-offset-from-gep", cl::init(false), cl::desc("Do not separate the constant offset from a GEP instruction"), cl::Hidden)
static bool allowsPreservingNUW(const User *U)
A helper function to check if reassociating through an entry in the user chain would invalidate the G...
static cl::opt< bool > VerifyNoDeadCode("reassociate-geps-verify-no-dead-code", cl::init(false), cl::desc("Verify this pass produces no dead code"), cl::Hidden)
static bool canReorderAddSextToGEP(const GetElementPtrInst *GEP, const Value *Idx, const BinaryOperator *Add, const DataLayout &DL)
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1023
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1044
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1052
unsigned logBase2() const
Definition APInt.h:1776
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:996
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
AnalysisUsage & addRequired()
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isNegative() const
Definition Constants.h:214
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
unsigned getIndexSizeInBits(unsigned AS) const
The size in bits of indices used for address calculation in getelementptr and for addresses in the gi...
Definition DataLayout.h:502
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:321
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags noUnsignedWrap()
static GEPNoWrapFlags noUnsignedSignedWrap()
static GEPNoWrapFlags none()
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Definition LoopInfo.h:596
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:66
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &)
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0, Instruction *I=nullptr, int64_t ScalableOffset=0) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
This class represents a truncation of integer types.
LLVM_ABI unsigned getIntegerBitWidth() const
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
Use * op_iterator
Definition User.h:254
void setOperand(unsigned i, Value *Val)
Definition User.h:212
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:761
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:440
bool use_empty() const
Definition Value.h:347
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
Definition ilist_node.h:34
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
@ User
could "use" a pointer
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1765
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:538
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
LLVM_ABI void initializeSeparateConstOffsetFromGEPLegacyPassPass(PassRegistry &)
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:406
LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
TargetTransformInfo TTI
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
@ Add
Sum of integers.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
iterator_range< df_iterator< T > > depth_first(const T &G)
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
#define N
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70