#include "llvm/IR/IntrinsicsSPIRV.h"

// ...
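// Everything below is part of the SPIRVLegalizePointerCast FunctionPass,
// which rewrites users of the spv_ptrcast intrinsic so they access the
// original pointer with its deduced pointee type.

// buildAssignType: wraps |Arg| in an assign-type intrinsic and registers the
// call with the global registry, so |Arg| is known to have SPIR-V type |Ty|.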
  void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg) {
    Value *OfType = PoisonValue::get(Ty);
    CallInst *AssignCI = buildIntrWithMD(Intrinsic::spv_assign_type,
                                         {Arg->getType()}, OfType, Arg, {}, B);
    GR->addAssignPtrTypeInstr(Arg, AssignCI);
  }
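// loadVectorFromVector: loads the value behind |Source| as |SourceType| and
// reshapes it into |TargetType|, using a spv_bitcast (bit widths asserted
// equal) and/or a shufflevector to adjust the element count.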
  Value *loadVectorFromVector(IRBuilder<> &B, FixedVectorType *SourceType,
                              FixedVectorType *TargetType, Value *Source) {
    LoadInst *NewLoad = B.CreateLoad(SourceType, Source);
    buildAssignType(B, SourceType, NewLoad);
    Value *AssignValue = NewLoad;

    // ...
    const DataLayout &DL = B.GetInsertBlock()->getModule()->getDataLayout();
    [[maybe_unused]] TypeSize TargetTypeSize = DL.getTypeSizeInBits(TargetType);
    [[maybe_unused]] TypeSize SourceTypeSize = DL.getTypeSizeInBits(SourceType);
    assert(TargetTypeSize == SourceTypeSize);
    AssignValue = B.CreateIntrinsic(Intrinsic::spv_bitcast,
                                    {TargetType, SourceType}, {NewLoad});
    buildAssignType(B, TargetType, AssignValue);

    // ... (a shuffle mask |Mask| is built here to adjust the element count)
    Value *Output = B.CreateShuffleVector(AssignValue, AssignValue, Mask);
    buildAssignType(B, TargetType, Output);
    return Output;
  }
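// loadFirstValueFromAggregate: the bad load wanted a value that is in fact
// the first member of the aggregate behind |Source|, so build a spv_gep down
// to that member and load |ElementType| through it instead.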
  Value *loadFirstValueFromAggregate(IRBuilder<> &B, Type *ElementType,
                                     Value *Source, LoadInst *BadLoad) {
    // ...
    SmallVector<Value *, 8> Args{B.getInt1(false), Source};
    // ...
    Type *AggregateType = GR->findDeducedElementType(Source);
    assert(AggregateType && "Could not deduce aggregate type");
    buildGEPIndexChain(B, ElementType, AggregateType, Args);
    // ... (|Types| holds the spv_gep intrinsic's overload types)
    auto *GEP = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
    GR->buildAssignPtr(B, ElementType, GEP);

    LoadInst *LI = B.CreateLoad(ElementType, GEP);
    // ...
    buildAssignType(B, ElementType, LI);
    return LI;
  }
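// loadVectorFromArray: reassembles a |TargetType| vector from an array with
// the same element type: each element is loaded through its own spv_gep and
// inserted into the result with spv_insertelt.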
  Value *loadVectorFromArray(IRBuilder<> &B, FixedVectorType *TargetType,
                             Value *Source) {
    // ... (per-element loop: |i| is the array index, |Args| the spv_gep args)
      Args.push_back(B.getInt1(false));
      Args.push_back(Source);
      Args.push_back(B.getInt32(0));
      Args.push_back(ConstantInt::get(B.getInt32Ty(), i));
      auto *ElementPtr = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
    // ...

    // ... (|NewVector| is assembled from the loaded elements)
    buildAssignType(B, TargetType, NewVector);
    // ...
      NewVector = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
      buildAssignType(B, TargetType, NewVector);
    // ...
    return NewVector;
  }
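// storeArrayFromVector: the store-side counterpart: each element of
// |SrcVector| is extracted with spv_extractelt and stored into the matching
// slot of the destination array through a per-element spv_gep.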
  void storeArrayFromVector(IRBuilder<> &B, Value *SrcVector,
                            Value *DstArrayPtr, ArrayType *ArrTy,
                            Align Alignment) {
    // ... (|VecTy| is the fixed vector type of |SrcVector|)
    assert(VecTy->getElementType() == ArrTy->getElementType() &&
           "Element types of array and vector must be the same.");
    // ...
    for (unsigned i = 0; i < VecTy->getNumElements(); ++i) {
      // GEP to the i-th element of the destination array.
      // ...
      Args.push_back(B.getInt1(false));
      Args.push_back(DstArrayPtr);
      Args.push_back(B.getInt32(0));
      Args.push_back(ConstantInt::get(B.getInt32Ty(), i));
      auto *ElementPtr = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
      GR->buildAssignPtr(B, ArrTy->getElementType(), ElementPtr);
      // ... (|Index| is a constant holding i)
      // Extract the i-th element of the source vector.
      SmallVector<Value *, 2> EltArgs = {SrcVector, Index};
      auto *Element =
          B.CreateIntrinsic(Intrinsic::spv_extractelt, {EltTypes}, {EltArgs});
      buildAssignType(B, VecTy->getElementType(), Element);
      // ...
      // Store the element through the GEP; the 2 appears to be the SPIR-V
      // 'Aligned' memory-operand flag, followed by the alignment value.
      Args = {Element, ElementPtr, B.getInt16(2), B.getInt8(Alignment.value())};
      B.CreateIntrinsic(Intrinsic::spv_store, {Types}, {Args});
    }
  }
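// transformLoad: dispatches a load that went through the ptrcast to one of
// the helpers above, based on the pointee types deduced for the original and
// casted pointers, then retires the offending load.
//
// Schematic example (illustrative, not from a test): a load of <4 x float>
// through a pointer whose deduced pointee type is { <4 x float>, i32 } is
// routed to loadFirstValueFromAggregate.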
  void transformLoad(IRBuilder<> &B, LoadInst *LI, Value *CastedOperand,
                     Value *OriginalOperand) {
    Type *FromTy = GR->findDeducedElementType(OriginalOperand);
    Type *ToTy = GR->findDeducedElementType(CastedOperand);
    Value *Output = nullptr;
    // ... (SVT/SAT: FromTy as vector/array; DVT: ToTy as vector; obtained
    //      via dyn_cast)
    B.SetInsertPoint(LI);
    // ...
    if (isTypeFirstElementAggregate(ToTy, FromTy))
      Output = loadFirstValueFromAggregate(B, ToTy, OriginalOperand, LI);
    // ...
    else if (SVT && DVT)
      Output = loadVectorFromVector(B, SVT, DVT, OriginalOperand);
    else if (SAT && DVT && SAT->getElementType() == DVT->getElementType())
      Output = loadVectorFromArray(B, DVT, OriginalOperand);
    // ...
    GR->replaceAllUsesWith(LI, Output, true);
    DeadInstructions.push_back(LI);
  }
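// The two helpers below wrap spv_insertelt and spv_extractelt and tag each
// result with buildAssignType so its SPIR-V type is recorded
// (makeInsertElement is the one used by storeVectorFromVector further down).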
  Value *makeInsertElement(IRBuilder<> &B, Value *Vector, Value *Element,
                           unsigned Index) {
    // ... (|Types| / |Args| set up for the spv_insertelt call)
    auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
    buildAssignType(B, Vector->getType(), NewI);
    return NewI;
  }

  // ... (matching extract-element helper)
    auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
    buildAssignType(B, ElementType, NewI);
    return NewI;
  }
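// storeVectorFromVector: stores vector |Src| through |Dst| whose deduced
// pointee is a different vector type: either a spv_bitcast followed by a
// plain store (total bit widths asserted equal), or a read-modify-write
// sequence that loads the old destination vector, overwrites its leading
// elements, and stores the merged value back.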
  void storeVectorFromVector(IRBuilder<> &B, Value *Src, Value *Dst,
                             Align Alignment) {
    FixedVectorType *SrcType = cast<FixedVectorType>(Src->getType());
    FixedVectorType *DstType =
        cast<FixedVectorType>(GR->findDeducedElementType(Dst));
    // ...

    // Bitcast the source and store it directly.
    [[maybe_unused]] auto dstBitWidth =
        DstType->getScalarSizeInBits() * DstType->getNumElements();
    [[maybe_unused]] auto srcBitWidth =
        SrcType->getScalarSizeInBits() * SrcType->getNumElements();
    assert(dstBitWidth == srcBitWidth &&
           "Unsupported bitcast between vectors of different sizes.");
    // ...
    Src = B.CreateIntrinsic(Intrinsic::spv_bitcast, {DstType, SrcType}, {Src});
    buildAssignType(B, DstType, Src);
    // ...
    StoreInst *SI = B.CreateStore(Src, Dst);
    SI->setAlignment(Alignment);
    // ...

    // Otherwise: load the destination vector, overwrite its leading elements
    // with |Src|, and store the merged value back.
    LoadInst *LI = B.CreateLoad(DstType, Dst);
    // ...
    Value *OldValues = LI;
    buildAssignType(B, OldValues->getType(), OldValues);
    Value *NewValues = Src;
    // ... (for each source element |I|: extract |Element| from |NewValues|)
      OldValues = makeInsertElement(B, OldValues, Element, I);
    // ...
    StoreInst *SI = B.CreateStore(OldValues, Dst);
    SI->setAlignment(Alignment);
  }
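// buildGEPIndexChain: appends to |Indices| the chain of indices needed to
// reach |Search| by repeatedly descending into the first member/element of
// |Aggregate|.
//
// Illustrative example (hypothetical types): looking for `float` inside
// `{ [4 x float], i32 }` descends struct -> array -> element, yielding the
// index chain 0, 0.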
  void buildGEPIndexChain(IRBuilder<> &B, Type *Search, Type *Aggregate,
                          SmallVectorImpl<Value *> &Indices) {
    // ... (a 32-bit 0 index is appended for each level that is descended)
    if (Search == Aggregate)
      return;
    // ...
    if (auto *ST = dyn_cast<StructType>(Aggregate))
      buildGEPIndexChain(B, Search, ST->getTypeAtIndex(0u), Indices);
    else if (auto *AT = dyn_cast<ArrayType>(Aggregate))
      buildGEPIndexChain(B, Search, AT->getElementType(), Indices);
    else if (auto *VT = dyn_cast<VectorType>(Aggregate))
      buildGEPIndexChain(B, Search, VT->getElementType(), Indices);
    // ...
  }
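// storeToFirstValueAggregate: stores |Src| into the leading member of the
// aggregate behind |Dst| whose type matches |Src|, reached through the GEP
// index chain built above.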
  void storeToFirstValueAggregate(IRBuilder<> &B, Value *Src, Value *Dst,
                                  Type *DstPointeeType, Align Alignment) {
    // ... (|Types| holds the spv_gep overload types)
    SmallVector<Value *, 8> Args{B.getInt1(true), Dst};
    buildGEPIndexChain(B, Src->getType(), DstPointeeType, Args);
    auto *GEP = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
    GR->buildAssignPtr(B, Src->getType(), GEP);
    StoreInst *SI = B.CreateStore(Src, GEP);
    SI->setAlignment(Alignment);
  }
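// isTypeFirstElementAggregate: true when |Search| is the type of the first
// member/element of |Aggregate| at any nesting depth, e.g. float inside
// { { float, i32 }, i8 }.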
  bool isTypeFirstElementAggregate(Type *Search, Type *Aggregate) {
    if (Search == Aggregate)
      return true;
    // ...
    if (auto *ST = dyn_cast<StructType>(Aggregate))
      return isTypeFirstElementAggregate(Search, ST->getTypeAtIndex(0u));
    if (auto *VT = dyn_cast<FixedVectorType>(Aggregate))
      return isTypeFirstElementAggregate(Search, VT->getElementType());
    if (auto *AT = dyn_cast<ArrayType>(Aggregate))
      return isTypeFirstElementAggregate(Search, AT->getElementType());
    return false;
  }
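// transformStore: the store-side counterpart of transformLoad: picks a store
// helper based on the deduced destination type (struct, vector, or array)
// and the type of the value being stored, then retires the offending store.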
  void transformStore(IRBuilder<> &B, Instruction *BadStore, Value *Src,
                      Value *Dst, Align Alignment) {
    Type *ToTy = GR->findDeducedElementType(Dst);
    Type *FromTy = Src->getType();
    // ... (S_VT: source vector type; D_ST/D_VT/D_AT: destination
    //      struct/vector/array type; obtained via dyn_cast)
    B.SetInsertPoint(BadStore);
    if (D_ST && isTypeFirstElementAggregate(FromTy, D_ST))
      storeToFirstValueAggregate(B, Src, Dst, D_ST, Alignment);
    else if (D_VT && S_VT)
      storeVectorFromVector(B, Src, Dst, Alignment);
    else if (D_VT && !S_VT && FromTy == D_VT->getElementType())
      storeToFirstValueAggregate(B, Src, Dst, D_VT, Alignment);
    else if (D_AT && S_VT && S_VT->getElementType() == D_AT->getElementType())
      storeArrayFromVector(B, Src, Dst, D_AT, Alignment);
    // ...
    DeadInstructions.push_back(BadStore);
  }
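// legalizePointerCast: rewrites every user of one spv_ptrcast (loads, stores,
// and the spv_gep / spv_store / spv_assign_ptr_type intrinsics) to work on
// the original pointer, then marks the cast itself as dead.
//
// Roughly (schematic IR, not from a real test):
//   %cast = call ptr @llvm.spv.ptrcast...(ptr %orig, ...)
//   %val  = load <4 x float>, ptr %cast
// becomes a legalized access built directly on %orig, and %cast is erased
// once all of its users are gone.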
  void legalizePointerCast(IntrinsicInst *II) {
    // ... (|CastedOperand| is the ptrcast result |II| itself; |B| is an
    //      IRBuilder for the current function)
    Value *OriginalOperand = II->getOperand(0);
    // ...
    std::vector<Value *> Users;
    for (Use &U : II->uses())
      Users.push_back(U.getUser());

    // Rewrite every user so it operates on |OriginalOperand| directly.
    // ... (the statements below sit in a loop over |Users|; LI/SI/Intrin are
    //      the current user dyn_cast to LoadInst/StoreInst/IntrinsicInst)
      transformLoad(B, LI, CastedOperand, OriginalOperand);
    // ...
      transformStore(B, SI, SI->getValueOperand(), OriginalOperand,
                     SI->getAlign());
    // ...
      // The assign_ptr_type annotation attached to the cast becomes dead.
      if (Intrin->getIntrinsicID() == Intrinsic::spv_assign_ptr_type) {
        DeadInstructions.push_back(Intrin);
        // ...
      }
      // GEPs can index the original pointer directly.
      if (Intrin->getIntrinsicID() == Intrinsic::spv_gep) {
        GR->replaceAllUsesWith(CastedOperand, OriginalOperand, /* ... */);
        // ...
      }
      // spv_store through the cast is handled like a regular store.
      if (Intrin->getIntrinsicID() == Intrinsic::spv_store) {
        // ... (|Alignment| is taken from the intrinsic's alignment
        //      operand |C| when present)
          Alignment = Align(C->getZExtValue());
        transformStore(B, Intrin, Intrin->getArgOperand(0), OriginalOperand,
                       Alignment);
        // ...
      }
    // ...

    DeadInstructions.push_back(II);
  }
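// Pass boilerplate: runOnFunction gathers every spv_ptrcast in the function
// into a worklist, legalizes each one, erases the instructions collected in
// DeadInstructions, and reports whether anything changed.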
  SPIRVLegalizePointerCast(SPIRVTargetMachine *TM) : FunctionPass(ID), TM(TM) {}

  bool runOnFunction(Function &F) override {
    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(F);
    GR = ST.getSPIRVGlobalRegistry();
    DeadInstructions.clear();

    // Collect every spv_ptrcast first, since legalization mutates the IR.
    std::vector<IntrinsicInst *> WorkList;
    // ... (walk every instruction in |F|; |II| is the instruction viewed as
    //      an IntrinsicInst via dyn_cast)
        if (II && II->getIntrinsicID() == Intrinsic::spv_ptrcast)
          WorkList.push_back(II);
    // ...

    for (IntrinsicInst *II : WorkList)
      legalizePointerCast(II);

    for (Instruction *I : DeadInstructions)
      I->eraseFromParent();

    return DeadInstructions.size() != 0;
  }

  // ...
  SPIRVTargetMachine *TM = nullptr;
  SPIRVGlobalRegistry *GR = nullptr;
  std::vector<Instruction *> DeadInstructions;
};
char SPIRVLegalizePointerCast::ID = 0;

INITIALIZE_PASS(SPIRVLegalizePointerCast, /* ... */,
                "SPIRV legalize bitcast pass", false, false)

FunctionPass *llvm::createSPIRVLegalizePointerCastPass(SPIRVTargetMachine *TM) {
  return new SPIRVLegalizePointerCast(TM);
}