Go to the documentation of this file.
8 #define DEBUG_TYPE "vncoerce"
11 namespace VNCoercion {
22 if (StoredTy == LoadTy)
31 uint64_t StoreSize =
DL.getTypeSizeInBits(StoredTy).getFixedSize();
38 if (StoreSize <
DL.getTypeSizeInBits(LoadTy).getFixedSize())
44 if (StoredNI != LoadNI) {
48 if (
auto *CI = dyn_cast<Constant>(StoredVal))
49 return CI->isNullValue();
51 }
else if (StoredNI && LoadNI &&
61 if (StoredNI && StoreSize !=
DL.getTypeSizeInBits(LoadTy).getFixedSize())
67 template <
class T,
class HelperClass>
72 "precondition violation - materialization can't fail");
73 if (
auto *
C = dyn_cast<Constant>(StoredVal))
77 Type *StoredValTy = StoredVal->getType();
79 uint64_t StoredValSize =
DL.getTypeSizeInBits(StoredValTy).getFixedSize();
80 uint64_t LoadedValSize =
DL.getTypeSizeInBits(LoadedTy).getFixedSize();
83 if (StoredValSize == LoadedValSize) {
86 StoredVal = Helper.CreateBitCast(StoredVal, LoadedTy);
90 StoredValTy =
DL.getIntPtrType(StoredValTy);
91 StoredVal = Helper.CreatePtrToInt(StoredVal, StoredValTy);
94 Type *TypeToCastTo = LoadedTy;
96 TypeToCastTo =
DL.getIntPtrType(TypeToCastTo);
98 if (StoredValTy != TypeToCastTo)
99 StoredVal = Helper.CreateBitCast(StoredVal, TypeToCastTo);
103 StoredVal = Helper.CreateIntToPtr(StoredVal, LoadedTy);
106 if (
auto *
C = dyn_cast<ConstantExpr>(StoredVal))
114 assert(StoredValSize >= LoadedValSize &&
115 "canCoerceMustAliasedValueToLoad fail");
119 StoredValTy =
DL.getIntPtrType(StoredValTy);
120 StoredVal = Helper.CreatePtrToInt(StoredVal, StoredValTy);
126 StoredVal = Helper.CreateBitCast(StoredVal, StoredValTy);
131 if (
DL.isBigEndian()) {
132 uint64_t ShiftAmt =
DL.getTypeStoreSizeInBits(StoredValTy).getFixedSize() -
133 DL.getTypeStoreSizeInBits(LoadedTy).getFixedSize();
134 StoredVal = Helper.CreateLShr(
140 StoredVal = Helper.CreateTruncOrBitCast(StoredVal, NewIntTy);
142 if (LoadedTy != NewIntTy) {
145 StoredVal = Helper.CreateIntToPtr(StoredVal, LoadedTy);
148 StoredVal = Helper.CreateBitCast(StoredVal, LoadedTy);
151 if (
auto *
C = dyn_cast<Constant>(StoredVal))
186 int64_t StoreOffset = 0, LoadOffset = 0;
190 if (StoreBase != LoadBase)
193 uint64_t LoadSize =
DL.getTypeSizeInBits(LoadTy).getFixedSize();
195 if ((WriteSizeInBits & 7) | (LoadSize & 7))
197 uint64_t StoreSize = WriteSizeInBits / 8;
204 if (StoreOffset > LoadOffset ||
205 StoreOffset + int64_t(StoreSize) < LoadOffset + int64_t(LoadSize))
210 return LoadOffset - StoreOffset;
257 const Value *LIBase =
262 if (LIBase != MemLocBase)
272 if (MemLocOffs < LIOffs)
282 int64_t MemLocEnd = MemLocOffs + MemLocSize;
285 if (LIOffs + LoadAlign < MemLocEnd)
296 if (NewLoadByteSize > LoadAlign ||
297 !
DL.fitsInLegalInteger(NewLoadByteSize * 8))
300 if (LIOffs + NewLoadByteSize > MemLocEnd &&
302 Attribute::SanitizeAddress) ||
304 Attribute::SanitizeHWAddress)))
311 if (LIOffs + NewLoadByteSize >= MemLocEnd)
312 return NewLoadByteSize;
314 NewLoadByteSize <<= 1;
338 int64_t LoadOffs = 0;
339 const Value *LoadBase =
341 unsigned LoadSize =
DL.getTypeStoreSize(LoadTy).getFixedSize();
350 assert(DepLI->
isSimple() &&
"Cannot widen volatile/atomic load!");
359 ConstantInt *SizeCst = dyn_cast<ConstantInt>(
MI->getLength());
366 if (
MI->getIntrinsicID() == Intrinsic::memset) {
368 auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(
MI)->getValue());
369 if (!CI || !CI->isZero())
397 unsigned IndexSize =
DL.getIndexTypeSizeInBits(Src->getType());
403 template <
class T,
class HelperClass>
407 LLVMContext &Ctx = SrcVal->getType()->getContext();
412 if (SrcVal->getType()->isPointerTy() && LoadTy->
isPointerTy() &&
413 cast<PointerType>(SrcVal->getType())->getAddressSpace() ==
414 cast<PointerType>(LoadTy)->getAddressSpace()) {
419 (
DL.getTypeSizeInBits(SrcVal->getType()).getFixedSize() + 7) / 8;
420 uint64_t LoadSize = (
DL.getTypeSizeInBits(LoadTy).getFixedSize() + 7) / 8;
423 if (SrcVal->getType()->isPtrOrPtrVectorTy())
424 SrcVal = Helper.CreatePtrToInt(SrcVal,
DL.getIntPtrType(SrcVal->getType()));
425 if (!SrcVal->getType()->isIntegerTy())
426 SrcVal = Helper.CreateBitCast(SrcVal,
IntegerType::get(Ctx, StoreSize * 8));
430 if (
DL.isLittleEndian())
431 ShiftAmt = Offset * 8;
433 ShiftAmt = (StoreSize - LoadSize - Offset) * 8;
435 SrcVal = Helper.CreateLShr(SrcVal,
438 if (LoadSize != StoreSize)
439 SrcVal = Helper.CreateTruncOrBitCast(SrcVal,
472 unsigned SrcValStoreSize =
473 DL.getTypeStoreSize(SrcVal->
getType()).getFixedSize();
474 unsigned LoadSize =
DL.getTypeStoreSize(LoadTy).getFixedSize();
475 if (Offset + LoadSize > SrcValStoreSize) {
476 assert(SrcVal->
isSimple() &&
"Cannot widen volatile/atomic load!");
480 unsigned NewLoadSize = Offset + LoadSize;
493 PtrVal =
Builder.CreateBitCast(PtrVal, DestPTy);
504 if (
DL.isBigEndian())
505 RV =
Builder.CreateLShr(RV, (NewLoadSize - SrcValStoreSize) * 8);
517 unsigned SrcValStoreSize =
518 DL.getTypeStoreSize(SrcVal->
getType()).getFixedSize();
519 unsigned LoadSize =
DL.getTypeStoreSize(LoadTy).getFixedSize();
520 if (Offset + LoadSize > SrcValStoreSize)
525 template <
class T,
class HelperClass>
527 Type *LoadTy, HelperClass &Helper,
530 uint64_t LoadSize =
DL.getTypeSizeInBits(LoadTy).getFixedSize() / 8;
534 if (
MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
537 T *Val = cast<T>(MSI->getValue());
544 for (
unsigned NumBytesSet = 1; NumBytesSet != LoadSize;) {
546 if (NumBytesSet * 2 <= LoadSize) {
547 T *ShVal = Helper.CreateShl(
549 Val = Helper.CreateOr(Val, ShVal);
556 Val = Helper.CreateOr(OneElt, ShVal);
569 unsigned IndexSize =
DL.getIndexTypeSizeInBits(Src->getType());
571 Src, LoadTy,
APInt(IndexSize, Offset),
DL);
580 return getMemInstValueForLoadHelper<Value, IRBuilder<>>(SrcInst, Offset,
588 if (
auto *MSI = dyn_cast<MemSetInst>(SrcInst))
589 if (!isa<Constant>(MSI->getValue()))
592 return getMemInstValueForLoadHelper<Constant, ConstantFolder>(SrcInst, Offset,
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory int...
This is an optimization pass for GlobalISel generic memory operations.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
A parsed version of the target data layout string in and methods for querying it.
Value * getStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingStore returned an offset, this function can be used to actually perform t...
InstListType::iterator iterator
Instruction iterators...
const Function * getParent() const
Return the enclosing method, or null if none.
bool isPointerTy() const
True if this is an instance of PointerType.
static int analyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr, Value *WritePtr, uint64_t WriteSizeInBits, const DataLayout &DL)
This function is called when we have a memdep query of a load that ends up being a clobbering memory ...
This class wraps the llvm.memcpy/memmove intrinsics.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
The instances of the Type class are immutable: once they are created, they are never changed.
This is the common base class for memset/memcpy/memmove.
Value * coerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy, IRBuilderBase &IRB, const DataLayout &DL)
If we saw a store of a value to memory, and then a load from a must-aliased pointer of a different ty...
Value * getPointerOperand()
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Align getAlign() const
Return the alignment of the access that is being performed.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
This is the shared class of boolean and integer constants.
bool isArrayTy() const
True if this is an instance of ArrayType.
ConstantFolder - Create constants with minimum, target independent, folding.
Value * getValueOperand()
(vector float) vec_cmpeq(*A, *B) C
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
uint64_t getAlignment() const
Return the alignment of the access that is being performed.
This class wraps the llvm.memset intrinsic.
static T * coerceAvailableValueToLoadTypeHelper(T *StoredVal, Type *LoadedTy, HelperClass &Helper, const DataLayout &DL)
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform...
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
An instruction for storing to memory.
This is an important base class in LLVM.
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This is an important class for using LLVM in a threaded context.
void setAlignment(Align Align)
T * getMemInstValueForLoadHelper(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, HelperClass &Helper, const DataLayout &DL)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Class for arbitrary precision integers.
Constant * getConstantLoadValueForLoad(Constant *SrcVal, unsigned Offset, Type *LoadTy, const DataLayout &DL)
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Common base class shared among various IRBuilders.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
static bool isFirstClassAggregateOrScalableType(Type *Ty)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
An instruction for reading from memory.
Value * getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingLoad returned an offset, this function can be used to actually perform th...
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
Constant * getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, const DataLayout &DL)
static unsigned getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize, const LoadInst *LI)
Looks at a memory location for a load (specified by MemLocBase, Offs, and Size) and compares it again...
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at D...
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, const DataLayout &DL)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
Constant * getConstantStoreValueForLoad(Constant *SrcVal, unsigned Offset, Type *LoadTy, const DataLayout &DL)
bool isStructTy() const
True if this is an instance of StructType.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Value * getPointerOperand()
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
const BasicBlock * getParent() const
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at De...
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
void takeName(Value *V)
Transfer the name from V to this value.
static T * getStoreValueForLoadHelper(T *SrcVal, unsigned Offset, Type *LoadTy, HelperClass &Helper, const DataLayout &DL)
LLVM Value Representation.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Value * getSource() const
This is just like getRawSource, but it strips off any cast instructions that feed it,...