//===-- SPIRVEmitIntrinsics.cpp - emit SPIRV intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The pass emits SPIRV intrinsics keeping essential high-level information for
// the translation of LLVM IR to SPIR-V.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVBuiltins.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/IR/TypedPointerType.h"

#include <cassert>
#include <queue>
#include <unordered_set>

// This pass performs the following transformations at the LLVM IR level,
// required for the subsequent translation to SPIR-V:
// - replaces direct usages of aggregate constants with target-specific
//   intrinsics;
// - replaces aggregate-related instructions (extract/insert, ld/st, etc.)
//   with target-specific intrinsics;
// - emits intrinsics for the global variable initializers since IRTranslator
//   doesn't handle them and it's not very convenient to translate them
//   ourselves;
// - emits intrinsics to keep track of the string names assigned to the values;
// - emits intrinsics to keep track of constants (this is necessary to have an
//   LLVM IR constant after the IRTranslation is completed) for their further
//   deduplication;
// - emits intrinsics to keep track of original LLVM types of the values
//   to be able to emit proper SPIR-V types eventually.
//
// TODO: consider removing spv.track.constant in favor of spv.assign.type.

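// Illustrative sketch (intrinsic name mangling and metadata shape are
// abbreviated here, not authoritative): a typed allocation such as
//   %p = alloca i32
// is annotated by this pass roughly as
//   %p = alloca i32
//   call void @llvm.spv.assign.ptr.type.p0(ptr %p, metadata i32 poison, i32 0)
// so the original pointee type survives until the IRTranslator runs.
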
using namespace llvm;

static cl::opt<bool>
    SpirvEmitOpNames("spirv-emit-op-names",
                     cl::desc("Emit OpName for all instructions"),
                     cl::init(false));

namespace llvm::SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
} // namespace llvm::SPIRV

namespace {

class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  SPIRVTargetMachine *TM = nullptr;
  SPIRVGlobalRegistry *GR = nullptr;
  Function *CurrF = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  std::unordered_set<Value *> Named;

  // map of function declarations to <pointer arg index => element type>
  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  // a register of Instructions that don't have a complete type definition
  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
  void insertTodoType(Value *Op) {
    // TODO: add isa<CallInst>(Op) to no-insert
    if (CanTodoType && !isa<GetElementPtrInst>(Op)) {
      auto It = TodoType.try_emplace(Op, true);
      if (It.second)
        ++TodoTypeSz;
    }
  }
  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {
      It->second = false;
      --TodoTypeSz;
    }
  }
  bool isTodoType(Value *Op) {
    if (!CanTodoType)
      return false;
    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;
  }
  // a register of Instructions that were visited by deduceOperandElementType()
  // to validate operand types with an instruction
  std::unordered_set<Instruction *> TypeValidated;

  // well known result types of builtins
  enum WellKnownTypes { Event };

  // deduce element type of untyped pointers
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  void maybeAssignPtrType(Type *&Ty, Value *I, Type *RefTy,
                          bool UnknownElemTypeI8);

  // deduce nested types of composites
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *Ty,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);

  // deduce Types of operands of the Instruction if possible
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);

  void preprocessCompositeConstants(IRBuilder<> &B);
  void preprocessUndefs(IRBuilder<> &B);

  Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
                        bool IsPostprocessing);

  void replaceMemInstrUses(Instruction *Old, Instruction *New, IRBuilder<> &B);
  void processInstrAfterVisit(Instruction *I, IRBuilder<> &B);
  bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
                                bool UnknownElemTypeI8);
  void insertAssignTypeIntrs(Instruction *I, IRBuilder<> &B);
  void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
                                    IRBuilder<> &B);
  void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
                                        Type *ExpectedElementType,
                                        unsigned OperandToReplace,
                                        IRBuilder<> &B);
  void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
  bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
  void insertSpirvDecorations(Instruction *I, IRBuilder<> &B);
  void insertConstantsForFPFastMathDefault(Module &M);
  void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
  void processParamTypes(Function *F, IRBuilder<> &B);
  void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
                                  std::unordered_set<Function *> &FVisited);

  bool deduceOperandElementTypeCalledFunction(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool &Incomplete);
  void deduceOperandElementTypeFunctionPointer(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool IsPostprocessing);
  bool deduceOperandElementTypeFunctionRet(
      Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
      const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
      Type *&KnownElemTy, Value *Op, Function *F);

  CallInst *buildSpvPtrcast(Function *F, Value *Op, Type *ElemTy);
  void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
                                   DenseMap<Function *, CallInst *> Ptrcasts);
  void propagateElemType(Value *Op, Type *ElemTy,
                         DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void
  propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                       DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
                            std::unordered_set<Value *> &Visited,
                            DenseMap<Function *, CallInst *> Ptrcasts);

  void replaceAllUsesWith(Value *Src, Value *Dest, bool DeleteOld = true);
  void replaceAllUsesWithAndErase(IRBuilder<> &B, Instruction *Src,
                                  Instruction *Dest, bool DeleteOld = true);

  void applyDemangledPtrArgTypes(IRBuilder<> &B);

  GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);

  bool runOnFunction(Function &F);
  bool postprocessTypes(Module &M);
  bool processFunctionPointers(Module &M);
  void parseFunDeclarations(Module &M);

  void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);

  // Tries to walk the type accessed by the given GEP instruction.
  // For each nested type access, one of the 2 callbacks is called:
  //  - OnLiteralIndexing when the index is a known constant value.
  //    Parameters:
  //      PointedType: the pointed type resulting from this indexing.
  //      Index: index of the element in the parent type. If the parent type is
  //        an array, this is the index in the array; if the parent type is a
  //        struct, this is the field index.
  //  - OnDynamicIndexing when the index is a non-constant value.
  //    This callback is only called when indexing into an array.
  //    Parameters:
  //      ElementType: the type of the elements stored in the parent array.
  //      Offset: the Value* containing the byte offset into the array.
  // Returns true if an error occurred during the walk, false otherwise.
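  // Example (an illustrative sketch, not taken from the source tree): walking
  //   getelementptr i8, ptr %p, i64 12
  // where %p is deduced to point to { [4 x i32], float } invokes
  //   OnLiteralIndexing([4 x i32], 0)  ; field 0 contains byte offset 12
  //   OnLiteralIndexing(i32, 3)        ; 12 bytes / 4-byte elements -> index 3
  // while a non-constant index into an array invokes OnDynamicIndexing with
  // the element type and the byte-offset Value instead.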
  bool walkLogicalAccessChain(
      GetElementPtrInst &GEP,
      const std::function<void(Type *PointedType, uint64_t Index)>
          &OnLiteralIndexing,
      const std::function<void(Type *ElementType, Value *Offset)>
          &OnDynamicIndexing);

  // Returns the type accessed using the given GEP instruction by relying
  // on the GEP type.
  // FIXME: GEP types are not supposed to be used to retrieve the pointed
  // type. This must be fixed.
  Type *getGEPType(GetElementPtrInst *GEP);

  // Returns the type accessed using the given GEP instruction by walking
  // the source type using the GEP indices.
  // FIXME: without help from the frontend, this method can neither reliably
  // retrieve the stored type nor robustly determine the depth of the type
  // we are accessing.
  Type *getGEPTypeLogical(GetElementPtrInst *GEP);

  Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);

public:
  static char ID;
  SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
      : ModulePass(ID), TM(TM) {}
  Instruction *visitInstruction(Instruction &I) { return &I; }
  Instruction *visitSwitchInst(SwitchInst &I);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitBitCastInst(BitCastInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitInsertValueInst(InsertValueInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitLoadInst(LoadInst &I);
  Instruction *visitStoreInst(StoreInst &I);
  Instruction *visitAllocaInst(AllocaInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *visitCallInst(CallInst &I);

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);
  }
};

bool isConvergenceIntrinsic(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;

  return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
}

bool expectIgnoredInIRTranslation(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;
  switch (II->getIntrinsicID()) {
  case Intrinsic::invariant_start:
  case Intrinsic::spv_resource_handlefrombinding:
  case Intrinsic::spv_resource_getpointer:
    return true;
  default:
    return false;
  }
}

// Returns the source pointer from `I` ignoring intermediate ptrcast.
Value *getPointerRoot(Value *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
      Value *V = II->getArgOperand(0);
      return getPointerRoot(V);
    }
  }
  return I;
}

} // namespace

char SPIRVEmitIntrinsics::ID = 0;

INITIALIZE_PASS(SPIRVEmitIntrinsics, "emit-intrinsics", "SPIRV emit intrinsics",
                false, false)

static inline bool isAssignTypeInstr(const Instruction *I) {
  return isa<IntrinsicInst>(I) &&
         cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::spv_assign_type;
}

static bool isMemInstrToReplace(Instruction *I) {
  return isa<StoreInst>(I) || isa<LoadInst>(I) || isa<InsertValueInst>(I) ||
         isa<ExtractValueInst>(I) || isa<AtomicCmpXchgInst>(I);
}

static bool isAggrConstForceInt32(const Value *V) {
  return isa<ConstantArray>(V) || isa<ConstantStruct>(V) ||
         isa<ConstantDataArray>(V) ||
         (isa<ConstantAggregateZero>(V) && !V->getType()->isVectorTy());
}

static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I) {
  if (isa<PHINode>(I))
    B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
  else
    B.SetInsertPoint(I);
}

static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I) {
  B.SetCurrentDebugLocation(I->getDebugLoc());
  if (I->getType()->isVoidTy())
    B.SetInsertPoint(I->getNextNode());
  else
    B.SetInsertPoint(*I->getInsertionPointAfterDef());
}

static bool requireAssignType(Instruction *I) {
  if (const auto *Intr = dyn_cast<IntrinsicInst>(I)) {
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
      return false;
    }
  }
  return true;
}

static inline void reportFatalOnTokenType(const Instruction *I) {
  if (I->getType()->isTokenTy())
    report_fatal_error("A token is encountered but SPIR-V without extensions "
                       "does not support token type",
                       false);
}

static void emitAssignName(Instruction *I, IRBuilder<> &B) {
  if (!I->hasName() || I->getType()->isAggregateType() ||
      expectIgnoredInIRTranslation(I))
    return;

  // We want to be conservative when adding the names because they can
  // interfere with later optimizations.
  bool KeepName = SpirvEmitOpNames;
  if (!KeepName) {
    if (isa<AllocaInst>(I)) {
      KeepName = true;
    } else if (auto *CI = dyn_cast<CallBase>(I)) {
      Function *F = CI->getCalledFunction();
      if (F && F->getName().starts_with("llvm.spv.alloca"))
        KeepName = true;
    }
  }

  if (!KeepName)
    return;

  reportFatalOnTokenType(I);
  setInsertPointAfterDef(B, I);
  LLVMContext &Ctx = I->getContext();
  std::vector<Value *> Args = {
      I, MetadataAsValue::get(
             Ctx, MDNode::get(Ctx, MDString::get(Ctx, I->getName())))};
  B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
}
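
// For instance (a sketch; the intrinsic's type-mangled suffix is abbreviated
// and not authoritative), `%buf = alloca i32` gains a companion call such as
//   call void @llvm.spv.assign.name.p0(ptr %buf, metadata !"buf")
// which later becomes an OpName in the emitted SPIR-V module.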

void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
                                             bool DeleteOld) {
  GR->replaceAllUsesWith(Src, Dest, DeleteOld);
  // Update incomplete type records if any
  if (isTodoType(Src)) {
    if (DeleteOld)
      eraseTodoType(Src);
    insertTodoType(Dest);
  }
}

void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
                                                     Instruction *Src,
                                                     Instruction *Dest,
                                                     bool DeleteOld) {
  replaceAllUsesWith(Src, Dest, DeleteOld);
  std::string Name = Src->hasName() ? Src->getName().str() : "";
  Src->eraseFromParent();
  if (!Name.empty()) {
    Dest->setName(Name);
    if (Named.insert(Dest).second)
      emitAssignName(Dest, B);
  }
}

static bool IsKernelArgInt8(Function *F, StoreInst *SI) {
  return SI && F->getCallingConv() == CallingConv::SPIR_KERNEL &&
         isPointerTy(SI->getValueOperand()->getType()) &&
         isa<Argument>(SI->getValueOperand());
}

// Maybe restore original function return type.
static inline Type *restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I,
                                       Type *Ty) {
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
      !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
    return Ty;
  if (Type *OriginalTy = GR->findMutated(CI->getCalledFunction()))
    return OriginalTy;
  return Ty;
}

// Reconstruct type with nested element types according to deduced type info.
// Return nullptr if no detailed type info is available.
Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
                                           bool IsPostprocessing) {
  Type *Ty = Op->getType();
  if (auto *OpI = dyn_cast<Instruction>(Op))
    Ty = restoreMutatedType(GR, OpI, Ty);
  if (!isUntypedPointerTy(Ty))
    return Ty;
  // try to find the pointee type
  if (Type *NestedTy = GR->findDeducedElementType(Op))
    return getTypedPointerWrapper(NestedTy, getPointerAddressSpace(Ty));
  // not a pointer according to the type info (e.g., Event object)
  CallInst *CI = GR->findAssignPtrTypeInstr(Op);
  if (CI) {
    MetadataAsValue *MD = cast<MetadataAsValue>(CI->getArgOperand(1));
    return cast<ConstantAsMetadata>(MD->getMetadata())->getType();
  }
  if (UnknownElemTypeI8) {
    if (!IsPostprocessing)
      insertTodoType(Op);
    return getTypedPointerWrapper(IntegerType::getInt8Ty(Op->getContext()),
                                  getPointerAddressSpace(Ty));
  }
  return nullptr;
}

CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
                                               Type *ElemTy) {
  IRBuilder<> B(Op->getContext());
  if (auto *OpI = dyn_cast<Instruction>(Op)) {
    // spv_ptrcast's argument Op denotes an instruction that generates
    // a value, and we may use getInsertionPointAfterDef()
    setInsertPointAfterDef(B, OpI);
  } else if (auto *OpA = dyn_cast<Argument>(Op)) {
    B.SetInsertPointPastAllocas(OpA->getParent());
    B.SetCurrentDebugLocation(DebugLoc());
  } else {
    B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
  }
  Type *OpTy = Op->getType();
  SmallVector<Type *, 2> Types = {OpTy, OpTy};
  SmallVector<Value *, 2> Args = {Op, buildMD(getNormalizedPoisonValue(ElemTy)),
                                  B.getInt32(getPointerAddressSpace(OpTy))};
  CallInst *PtrCasted =
      B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  GR->buildAssignPtr(B, ElemTy, PtrCasted);
  return PtrCasted;
}
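
// Sketch of the emitted pattern (intrinsic suffixes abbreviated, not a
// verbatim dump): casting %op to a pointer-to-ElemTy in address space AS gives
//   %c = call ptr @llvm.spv.ptrcast(ptr %op, metadata ElemTy poison, i32 AS)
// followed by the spv_assign_ptr_type annotation built by buildAssignPtr(),
// so every use rewritten to %c carries the expected element type.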

void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
    Value *Op, Type *ElemTy, Instruction *I,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  Function *F = I->getParent()->getParent();
  CallInst *PtrCastedI = nullptr;
  auto It = Ptrcasts.find(F);
  if (It == Ptrcasts.end()) {
    PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
    Ptrcasts[F] = PtrCastedI;
  } else {
    PtrCastedI = It->second;
  }
  I->replaceUsesOfWith(Op, PtrCastedI);
}

void SPIRVEmitIntrinsics::propagateElemType(
    Value *Op, Type *ElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = dyn_cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
  }
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  std::unordered_set<Value *> Visited;
  DenseMap<Function *, CallInst *> Ptrcasts;
  propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
                       std::move(Ptrcasts));
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
    std::unordered_set<Value *> &Visited,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  if (!Visited.insert(Op).second)
    return;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = dyn_cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
  }
}

// Set the element pointer type to the given ValueTy and try to specify
// this type further (recursively) by the Operand value, if needed.

Type *
SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
                                      UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
    Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  Type *Ty = ValueTy;
  if (Operand) {
    if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
      if (Type *NestedTy =
              deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
        Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
    } else {
      Ty = deduceNestedTypeHelper(dyn_cast<User>(Operand), Ty, Visited,
                                  UnknownElemTypeI8);
    }
  }
  return Ty;
}

// Traverse User instructions to deduce an element pointer type of the operand.
Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
    Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
  if (!Op || !isPointerTy(Op->getType()) || isa<ConstantPointerNull>(Op) ||
      isa<UndefValue>(Op))
    return nullptr;

  if (auto ElemTy = getPointeeType(Op->getType()))
    return ElemTy;

  // maybe we already know operand's element type
  if (Type *KnownTy = GR->findDeducedElementType(Op))
    return KnownTy;

  for (User *OpU : Op->users()) {
    if (Instruction *Inst = dyn_cast<Instruction>(OpU)) {
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
        return Ty;
    }
  }
  return nullptr;
}

// Implements what we know in advance about intrinsics and builtin calls
// TODO: consider feasibility of this particular case to be generalized by
// encoding knowledge about intrinsics and builtin calls by corresponding
// specification rules
static Type *getPointeeTypeByCallInst(const std::string &DemangledName,
                                      Function *CalledF, unsigned OpIdx) {
  if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
       DemangledName.starts_with("printf(")) &&
      OpIdx == 0)
    return IntegerType::getInt8Ty(CalledF->getContext());
  return nullptr;
}

// Deduce and return a successfully deduced Type of the Instruction,
// or nullptr otherwise.
Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
                                                   bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);
}

void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                                             bool UnknownElemTypeI8) {
  if (isUntypedPointerTy(RefTy)) {
    if (!UnknownElemTypeI8)
      return;
    insertTodoType(Op);
  }
  Ty = RefTy;
}

bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
    GetElementPtrInst &GEP,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
  // We only rewrite i8* GEPs. Others should be left as-is.
  // A valid i8* GEP must always have a single index.
  assert(GEP.getSourceElementType() ==
         IntegerType::getInt8Ty(CurrF->getContext()));
  assert(GEP.getNumIndices() == 1);

  auto &DL = CurrF->getDataLayout();
  Value *Src = getPointerRoot(GEP.getPointerOperand());
  Type *CurType = deduceElementType(Src, true);

  Value *Operand = *GEP.idx_begin();
  ConstantInt *CI = dyn_cast<ConstantInt>(Operand);
  if (!CI) {
    ArrayType *AT = dyn_cast<ArrayType>(CurType);
    // Operand is not constant. Either we have an array and accept it, or we
    // give up.
    if (AT)
      OnDynamicIndexing(AT->getElementType(), Operand);
    return AT == nullptr;
  }

  assert(CI);
  uint64_t Offset = CI->getZExtValue();

  do {
    if (ArrayType *AT = dyn_cast<ArrayType>(CurType)) {
      uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
      assert(Offset < AT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset = Offset - (Index * EltTypeSize);
      CurType = AT->getElementType();
      OnLiteralIndexing(CurType, Index);
    } else if (StructType *ST = dyn_cast<StructType>(CurType)) {
      uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
      assert(Offset < StructSize);
      (void)StructSize;
      const auto &STL = DL.getStructLayout(ST);
      unsigned Element = STL->getElementContainingOffset(Offset);
      Offset -= STL->getElementOffset(Element);
      CurType = ST->getElementType(Element);
      OnLiteralIndexing(CurType, Element);
    } else if (auto *VT = dyn_cast<FixedVectorType>(CurType)) {
      Type *EltTy = VT->getElementType();
      TypeSize EltSizeBits = DL.getTypeSizeInBits(EltTy);
      assert(EltSizeBits % 8 == 0 &&
             "Element type size in bits must be a multiple of 8.");
      uint32_t EltTypeSize = EltSizeBits / 8;
      assert(Offset < VT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset -= Index * EltTypeSize;
      CurType = EltTy;
      OnLiteralIndexing(CurType, Index);
    } else {
      // Unknown composite kind; give up.
      return true;
    }
  } while (Offset > 0);

  return false;
}

Instruction *
SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
  auto &DL = CurrF->getDataLayout();
  IRBuilder<> B(GEP.getParent());
  B.SetInsertPoint(&GEP);

  std::vector<Value *> Indices;
  Indices.push_back(ConstantInt::get(
      IntegerType::getInt32Ty(CurrF->getContext()), 0, /* Signed= */ false));
  walkLogicalAccessChain(
      GEP,
      [&Indices, &B](Type *EltType, uint64_t Index) {
        Indices.push_back(
            ConstantInt::get(B.getInt64Ty(), Index, /* Signed= */ false));
      },
      [&Indices, &B, &DL](Type *EltType, Value *Offset) {
        uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
        Value *Index = B.CreateUDiv(
            Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
                                     /* Signed= */ false));
        Indices.push_back(Index);
      });

  SmallVector<Type *, 2> Types = {GEP.getType(), GEP.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(GEP.isInBounds()));
  Args.push_back(GEP.getOperand(0));
  llvm::append_range(Args, Indices);
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &GEP, NewI);
  return NewI;
}

Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {
  Type *CurType = GEP->getResultElementType();

  bool Interrupted = walkLogicalAccessChain(
      *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
      [&CurType](Type *EltType, Value *Index) { CurType = EltType; });

  return Interrupted ? GEP->getResultElementType() : CurType;
}

Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
  if (Ref->getSourceElementType() ==
          IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtarget<SPIRVSubtarget>(*CurrF).isLogicalSPIRV()) {
    return getGEPTypeLogical(Ref);
  }

  Type *Ty = nullptr;
  // TODO: not sure if GetElementPtrInst::getTypeAtIndex() does anything
  // useful here
  if (isNestedPointer(Ref->getSourceElementType())) {
    Ty = Ref->getSourceElementType();
    for (Use &U : drop_begin(Ref->indices()))
      Ty = GetElementPtrInst::getTypeAtIndex(Ty, U.get());
  } else {
    Ty = Ref->getResultElementType();
  }
  return Ty;
}

Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
    Value *I, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8,
    bool IgnoreKnownType) {
  // allow to pass nullptr as an argument
  if (!I)
    return nullptr;

  // maybe already known
  if (!IgnoreKnownType)
    if (Type *KnownTy = GR->findDeducedElementType(I))
      return KnownTy;

  // maybe a cycle
  if (!Visited.insert(I).second)
    return nullptr;

  // fallback value in case when we fail to deduce a type
  Type *Ty = nullptr;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<AllocaInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getAllocatedType(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    Ty = getGEPType(Ref);
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    Value *Op = Ref->getPointerOperand();
    Type *KnownTy = GR->findDeducedElementType(Op);
    if (!KnownTy)
      KnownTy = Op->getType();
    if (Type *ElemTy = getPointeeType(KnownTy))
      maybeAssignPtrType(Ty, I, ElemTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GlobalValue>(I)) {
    if (auto *Fn = dyn_cast<Function>(Ref)) {
      Ty = SPIRV::getOriginalFunctionType(*Fn);
      GR->addDeducedElementType(I, Ty);
    } else {
      Ty = deduceElementTypeByValueDeep(
          Ref->getValueType(),
          Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr, Visited,
          UnknownElemTypeI8);
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    Type *RefTy = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
                                          UnknownElemTypeI8);
    maybeAssignPtrType(Ty, I, RefTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<IntToPtrInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getDestTy(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
        isPointerTy(Src) && isPointerTy(Dest))
      Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited,
                                   UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Op = Ref->getNewValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    Value *Op = Ref->getValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<PHINode>(I)) {
    Type *BestTy = nullptr;
    unsigned MaxN = 1;
    DenseMap<Type *, unsigned> PhiTys;
    for (int i = Ref->getNumIncomingValues() - 1; i >= 0; --i) {
      Ty = deduceElementTypeByUsersDeep(Ref->getIncomingValue(i), Visited,
                                        UnknownElemTypeI8);
      if (!Ty)
        continue;
      auto It = PhiTys.try_emplace(Ty, 1);
      if (!It.second) {
        ++It.first->second;
        if (It.first->second > MaxN) {
          MaxN = It.first->second;
          BestTy = Ty;
        }
      }
    }
    if (BestTy)
      Ty = BestTy;
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    for (Value *Op : {Ref->getTrueValue(), Ref->getFalseValue()}) {
      Ty = deduceElementTypeByUsersDeep(Op, Visited, UnknownElemTypeI8);
      if (Ty)
        break;
    }
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    static StringMap<unsigned> ResTypeByArg = {
        {"to_global", 0},
        {"to_local", 0},
        {"to_private", 0},
        {"__spirv_GenericCastToPtr_ToGlobal", 0},
        {"__spirv_GenericCastToPtr_ToLocal", 0},
        {"__spirv_GenericCastToPtr_ToPrivate", 0},
        {"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
    // TODO: maybe improve performance by caching demangled names

    auto *II = dyn_cast<IntrinsicInst>(CI);
    if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
      auto *HandleType = cast<TargetExtType>(II->getOperand(0)->getType());
      if (HandleType->getTargetExtName() == "spirv.Image" ||
          HandleType->getTargetExtName() == "spirv.SignedImage") {
        for (User *U : II->users()) {
          Ty = cast<Instruction>(U)->getAccessType();
          if (Ty)
            break;
        }
      } else if (HandleType->getTargetExtName() == "spirv.VulkanBuffer") {
        // This call is supposed to index into an array
        Ty = HandleType->getTypeParameter(0);
        if (Ty->isArrayTy())
          Ty = Ty->getArrayElementType();
        else {
          assert(Ty && Ty->isStructTy());
          uint32_t Index = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
          Ty = cast<StructType>(Ty)->getElementType(Index);
        }
      } else {
        llvm_unreachable("Unknown handle type for spv_resource_getpointer.");
      }
    } else if (II && II->getIntrinsicID() ==
                         Intrinsic::spv_generic_cast_to_ptr_explicit) {
      Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
                                   UnknownElemTypeI8);
    } else if (Function *CalledF = CI->getCalledFunction()) {
      std::string DemangledName =
          getOclOrSpirvBuiltinDemangledName(CalledF->getName());
      if (DemangledName.length() > 0)
        DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
      auto AsArgIt = ResTypeByArg.find(DemangledName);
      if (AsArgIt != ResTypeByArg.end())
        Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
                                     Visited, UnknownElemTypeI8);
      else if (Type *KnownRetTy = GR->findDeducedElementType(CalledF))
        Ty = KnownRetTy;
    }
  }

  // remember the found relationship
  if (Ty && !IgnoreKnownType) {
    // specify nested types if needed, otherwise return unchanged
    GR->addDeducedElementType(I, normalizeType(Ty));
  }

  return Ty;
}

// Re-create a type of the value if it has untyped pointer fields, also nested.
// Return the original value type if no corrections of untyped pointer
// information is found or needed.
Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceNestedTypeHelper(U, U->getType(), Visited, UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
    User *U, Type *OrigTy, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  if (!U)
    return OrigTy;

  // maybe already known
  if (Type *KnownTy = GR->findDeducedCompositeType(U))
    return KnownTy;

  // maybe a cycle
  if (!Visited.insert(U).second)
    return OrigTy;

  if (isa<StructType>(OrigTy)) {
    SmallVector<Type *> Tys;
    bool Change = false;
    for (unsigned i = 0; i < U->getNumOperands(); ++i) {
      Value *Op = U->getOperand(i);
      assert(Op && "Operands should not be null.");
      Type *OpTy = Op->getType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      Tys.push_back(Ty);
      Change |= Ty != OpTy;
    }
    if (Change) {
      Type *NewTy = StructType::create(Tys);
      GR->addDeducedCompositeType(U, NewTy);
      return NewTy;
    }
  } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = ArrTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  } else if (auto *VecTy = dyn_cast<VectorType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = VecTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  }

  return OrigTy;
}

Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
  if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
    return Ty;
  if (!UnknownElemTypeI8)
    return nullptr;
  insertTodoType(I);
  return IntegerType::getInt8Ty(I->getContext());
}

static inline Type *getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I,
                                    Value *PointerOperand) {
  Type *PointeeTy = GR->findDeducedElementType(PointerOperand);
  if (PointeeTy && !isUntypedPointerTy(PointeeTy))
    return nullptr;
  auto *PtrTy = dyn_cast<PointerType>(I->getType());
  if (!PtrTy)
    return I->getType();
  if (Type *NestedTy = GR->findDeducedElementType(I))
    return getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
  return nullptr;
}

// Try to deduce element type for a call base. Returns false if this is an
// indirect function invocation, and true otherwise.
bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool &Incomplete) {
  Function *CalledF = CI->getCalledFunction();
  if (!CalledF)
    return false;
  std::string DemangledName =
      getOclOrSpirvBuiltinDemangledName(CalledF->getName());
  if (DemangledName.length() > 0 &&
      !StringRef(DemangledName).starts_with("llvm.")) {
    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*CalledF);
    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
        DemangledName, ST.getPreferredInstructionSet());
    if (Opcode == SPIRV::OpGroupAsyncCopy) {
      for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2; ++i) {
        Value *Op = CI->getArgOperand(i);
        if (!isPointerTy(Op->getType()))
          continue;
        ++PtrCnt;
        if (Type *ElemTy = GR->findDeducedElementType(Op))
          KnownElemTy = ElemTy; // src will rewrite dest if both are defined
        Ops.push_back(std::make_pair(Op, i));
      }
    } else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
      if (CI->arg_size() == 0)
        return true;
      Value *Op = CI->getArgOperand(0);
      if (!isPointerTy(Op->getType()))
        return true;
      switch (Opcode) {
      case SPIRV::OpAtomicFAddEXT:
      case SPIRV::OpAtomicFMinEXT:
      case SPIRV::OpAtomicFMaxEXT:
      case SPIRV::OpAtomicLoad:
      case SPIRV::OpAtomicCompareExchangeWeak:
      case SPIRV::OpAtomicCompareExchange:
      case SPIRV::OpAtomicExchange:
      case SPIRV::OpAtomicIAdd:
      case SPIRV::OpAtomicISub:
      case SPIRV::OpAtomicOr:
      case SPIRV::OpAtomicXor:
      case SPIRV::OpAtomicAnd:
      case SPIRV::OpAtomicUMin:
      case SPIRV::OpAtomicUMax:
      case SPIRV::OpAtomicSMin:
      case SPIRV::OpAtomicSMax: {
        KnownElemTy = isPointerTy(CI->getType()) ? getAtomicElemTy(GR, CI, Op)
                                                 : CI->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      case SPIRV::OpAtomicStore: {
        if (CI->arg_size() < 4)
          return true;
        Value *ValOp = CI->getArgOperand(3);
        KnownElemTy = isPointerTy(ValOp->getType())
                          ? getAtomicElemTy(GR, CI, Op)
                          : ValOp->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      }
    }
  }
  return true;
}

// Try to deduce element type for a function pointer.
void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool IsPostprocessing) {
  Value *Op = CI->getCalledOperand();
  if (!Op || !isPointerTy(Op->getType()))
    return;
  Ops.push_back(std::make_pair(Op, std::numeric_limits<unsigned>::max()));
  FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
  bool IsNewFTy = false, IsIncomplete = false;
  SmallVector<Type *, 4> ArgTys;
  for (auto &&[ParmIdx, Arg] : llvm::enumerate(CI->args())) {
    Type *ArgTy = Arg->getType();
    if (ArgTy->isPointerTy()) {
      if (Type *ElemTy = GR->findDeducedElementType(Arg)) {
        IsNewFTy = true;
        ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
        if (isTodoType(Arg))
          IsIncomplete = true;
      } else {
        IsIncomplete = true;
      }
    } else {
      ArgTy = FTy->getFunctionParamType(ParmIdx);
    }
    ArgTys.push_back(ArgTy);
  }
  Type *RetTy = FTy->getReturnType();
  if (CI->getType()->isPointerTy()) {
    if (Type *ElemTy = GR->findDeducedElementType(CI)) {
      IsNewFTy = true;
      RetTy =
          getTypedPointerWrapper(ElemTy, getPointerAddressSpace(CI->getType()));
      if (isTodoType(CI))
        IsIncomplete = true;
    } else {
      IsIncomplete = true;
    }
  }
  if (!IsPostprocessing && IsIncomplete)
    insertTodoType(Op);
  KnownElemTy =
      IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
}

bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
    Type *&KnownElemTy, Value *Op, Function *F) {
  KnownElemTy = GR->findDeducedElementType(F);
  if (KnownElemTy)
    return false;
  if (Type *OpElemTy = GR->findDeducedElementType(Op)) {
    OpElemTy = normalizeType(OpElemTy);
    GR->addDeducedElementType(F, OpElemTy);
    GR->addReturnType(
        F, TypedPointerType::get(OpElemTy,
                                 getPointerAddressSpace(F->getReturnType())));
    // non-recursive update of types in function uses
    DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(I, Op)};
    for (User *U : F->users()) {
      CallInst *CI = dyn_cast<CallInst>(U);
      if (!CI || CI->getCalledFunction() != F)
        continue;
      if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(CI)) {
        if (Type *PrevElemTy = GR->findDeducedElementType(CI)) {
          GR->updateAssignType(AssignCI, CI,
                               getNormalizedPoisonValue(OpElemTy));
          propagateElemType(CI, PrevElemTy, VisitedSubst);
        }
      }
    }
    // Non-recursive update of types in the function's incomplete returns.
    // This may happen just once per function; the latch is the pair of
    // findDeducedElementType(F) / addDeducedElementType(F, ...).
    // With or without the latch it is a non-recursive call due to
    // IncompleteRets set to nullptr in this call.
    if (IncompleteRets)
      for (Instruction *IncompleteRetI : *IncompleteRets)
        deduceOperandElementType(IncompleteRetI, nullptr, AskOps,
                                 IsPostprocessing);
  } else if (IncompleteRets) {
    IncompleteRets->insert(I);
  }
  TypeValidated.insert(I);
  return true;
}

// If the Instruction has Pointer operands with unresolved types, this function
// tries to deduce them. If the Instruction has Pointer operands with known
// types which differ from expected, this function tries to insert a bitcast to
// resolve the issue.
void SPIRVEmitIntrinsics::deduceOperandElementType(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing) {
  SmallVector<std::pair<Value *, unsigned>> Ops;
  Type *KnownElemTy = nullptr;
  bool Incomplete = false;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<PHINode>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumIncomingValues(); i++) {
      Value *Op = Ref->getIncomingValue(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getPointerOperand(), 0));
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (!isPointerTy(I->getType()))
      return;
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getOperand(0), 0));
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    if (GR->findDeducedElementType(Ref->getPointerOperand()))
      return;
    KnownElemTy = Ref->getSourceElementType();
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 GetElementPtrInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    KnownElemTy = I->getType();
    if (isUntypedPointerTy(KnownElemTy))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 LoadInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<StoreInst>(I)) {
    if (!(KnownElemTy =
              reconstructType(Ref->getValueOperand(), false, IsPostprocessing)))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 StoreInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicCmpXchgInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicRMWInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumOperands(); i++) {
      Value *Op = Ref->getOperand(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<ReturnInst>(I)) {
    if (!isPointerTy(CurrF->getReturnType()))
      return;
    Value *Op = Ref->getReturnValue();
    if (!Op)
      return;
    if (deduceOperandElementTypeFunctionRet(I, IncompleteRets, AskOps,
                                            IsPostprocessing, KnownElemTy, Op,
                                            CurrF))
      return;
    Incomplete = isTodoType(CurrF);
    Ops.push_back(std::make_pair(Op, 0));
  } else if (auto *Ref = dyn_cast<ICmpInst>(I)) {
    if (!isPointerTy(Ref->getOperand(0)->getType()))
      return;
    Value *Op0 = Ref->getOperand(0);
    Value *Op1 = Ref->getOperand(1);
    bool Incomplete0 = isTodoType(Op0);
    bool Incomplete1 = isTodoType(Op1);
    Type *ElemTy1 = GR->findDeducedElementType(Op1);
    Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
                        ? nullptr
                        : GR->findDeducedElementType(Op0);
    if (ElemTy0) {
      KnownElemTy = ElemTy0;
      Incomplete = Incomplete0;
      Ops.push_back(std::make_pair(Op1, 1));
    } else if (ElemTy1) {
      KnownElemTy = ElemTy1;
      Incomplete = Incomplete1;
      Ops.push_back(std::make_pair(Op0, 0));
    }
  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
    if (!CI->isIndirectCall())
      deduceOperandElementTypeCalledFunction(CI, Ops, KnownElemTy, Incomplete);
    else if (HaveFunPtrs)
      deduceOperandElementTypeFunctionPointer(CI, Ops, KnownElemTy,
                                              IsPostprocessing);
  }

  // There is not enough info to deduce types, or all is valid.
  if (!KnownElemTy || Ops.size() == 0)
    return;

  LLVMContext &Ctx = CurrF->getContext();
  IRBuilder<> B(Ctx);
  for (auto &OpIt : Ops) {
    Value *Op = OpIt.first;
    if (AskOps && !AskOps->contains(Op))
      continue;
    Type *AskTy = nullptr;
    CallInst *AskCI = nullptr;
    if (IsPostprocessing && AskOps) {
      AskTy = GR->findDeducedElementType(Op);
      AskCI = GR->findAssignPtrTypeInstr(Op);
      assert(AskTy && AskCI);
    }
    Type *Ty = AskTy ? AskTy : GR->findDeducedElementType(Op);
    if (Ty == KnownElemTy)
      continue;
    Value *OpTyVal = getNormalizedPoisonValue(KnownElemTy);
    Type *OpTy = Op->getType();
    if (Op->hasUseList() &&
        (!Ty || AskTy || isUntypedPointerTy(Ty) || isTodoType(Op))) {
      Type *PrevElemTy = GR->findDeducedElementType(Op);
      GR->addDeducedElementType(Op, normalizeType(KnownElemTy));
      // check if KnownElemTy is complete
      if (!Incomplete)
        eraseTodoType(Op);
      else if (!IsPostprocessing)
        insertTodoType(Op);
      // check if there is existing Intrinsic::spv_assign_ptr_type instruction
      CallInst *AssignCI = AskCI ? AskCI : GR->findAssignPtrTypeInstr(Op);
      if (AssignCI == nullptr) {
        Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
        setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
        CallInst *CI =
            buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
                            {B.getInt32(getPointerAddressSpace(OpTy))}, B);
        GR->addAssignPtrTypeInstr(Op, CI);
      } else {
        GR->updateAssignType(AssignCI, Op, OpTyVal);
        DenseSet<std::pair<Value *, Value *>> VisitedSubst{
            std::make_pair(I, Op)};
        propagateElemTypeRec(Op, KnownElemTy, PrevElemTy, VisitedSubst);
      }
    } else {
      eraseTodoType(Op);
      CallInst *PtrCastI =
          buildSpvPtrcast(I->getParent()->getParent(), Op, KnownElemTy);
      if (OpIt.second == std::numeric_limits<unsigned>::max())
        dyn_cast<CallInst>(I)->setCalledOperand(PtrCastI);
      else
        I->setOperand(OpIt.second, PtrCastI);
    }
  }
  TypeValidated.insert(I);
}

void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
                                              Instruction *New,
                                              IRBuilder<> &B) {
  while (!Old->user_empty()) {
    auto *U = Old->user_back();
    if (isAssignTypeInstr(U)) {
      B.SetInsertPoint(U);
      SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
      CallInst *AssignCI =
          B.CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
      GR->addAssignPtrTypeInstr(New, AssignCI);
      U->eraseFromParent();
    } else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
               isa<CallInst>(U)) {
      U->replaceUsesOfWith(Old, New);
    } else {
      llvm_unreachable("illegal aggregate intrinsic user");
    }
  }
  New->copyMetadata(*Old);
  Old->eraseFromParent();
}

void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    Instruction *I = Worklist.front();
    bool BPrepared = false;
    Worklist.pop();

    for (auto &Op : I->operands()) {
      auto *AggrUndef = dyn_cast<UndefValue>(Op);
      if (!AggrUndef || !Op->getType()->isAggregateType())
        continue;

      if (!BPrepared) {
        setInsertPointSkippingPhis(B, I);
        BPrepared = true;
      }
      auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {});
      Worklist.push(IntrUndef);
      I->replaceUsesOfWith(Op, IntrUndef);
      AggrConsts[IntrUndef] = AggrUndef;
      AggrConstTypes[IntrUndef] = AggrUndef->getType();
    }
  }
}
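
// Illustrative sketch of the rewrite above (intrinsic suffix abbreviated):
//   %v = insertvalue {i32, i32} undef, i32 %x, 0
// becomes
//   %u = call {i32, i32} @llvm.spv.undef()
//   %v = insertvalue {i32, i32} %u, i32 %x, 0
// so the aggregate undef is tracked as a value through instruction selection.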

void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    auto *I = Worklist.front();
    bool IsPhi = isa<PHINode>(I), BPrepared = false;
    assert(I);
    bool KeepInst = false;
    for (const auto &Op : I->operands()) {
      Constant *AggrConst = nullptr;
      Type *ResTy = nullptr;
      if (auto *COp = dyn_cast<ConstantVector>(Op)) {
        AggrConst = COp;
        ResTy = COp->getType();
      } else if (auto *COp = dyn_cast<ConstantArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantStruct>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantDataArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantAggregateZero>(Op)) {
        AggrConst = COp;
        ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
      }
      if (AggrConst) {
        SmallVector<Value *> Args;
        if (auto *COp = dyn_cast<ConstantDataSequential>(Op))
          for (unsigned i = 0; i < COp->getNumElements(); ++i)
            Args.push_back(COp->getElementAsConstant(i));
        else
          llvm::append_range(Args, AggrConst->operands());
        if (!BPrepared) {
          IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
                : B.SetInsertPoint(I);
          BPrepared = true;
        }
        auto *CI =
            B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {Args});
        Worklist.push(CI);
        I->replaceUsesOfWith(Op, CI);
        KeepInst = true;
        AggrConsts[CI] = AggrConst;
        AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst, false);
      }
    }
    if (!KeepInst)
      Worklist.pop();
  }
}

static void createDecorationIntrinsic(Instruction *I, MDNode *Node,
                                      IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  setInsertPointAfterDef(B, I);
  B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                    {I, MetadataAsValue::get(Ctx, MDNode::get(Ctx, {Node}))});
}

static void createRoundingModeDecoration(Instruction *I,
                                         unsigned RoundingModeDeco,
                                         IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *RoundingModeNode = MDNode::get(
      Ctx,
      {ConstantAsMetadata::get(
           ConstantInt::get(Int32Ty, SPIRV::Decoration::FPRoundingMode)),
       ConstantAsMetadata::get(ConstantInt::get(Int32Ty, RoundingModeDeco))});
  createDecorationIntrinsic(I, RoundingModeNode, B);
}

static void createSaturatedConversionDecoration(Instruction *I,
                                                IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *SaturatedConversionNode =
      MDNode::get(Ctx, {ConstantAsMetadata::get(ConstantInt::get(
                           Int32Ty, SPIRV::Decoration::SaturatedConversion))});
  createDecorationIntrinsic(I, SaturatedConversionNode, B);
}

static void applySaturatedConversionDecorations(Instruction *I,
                                                IRBuilder<> &B) {
  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (Function *Fu = CI->getCalledFunction()) {
      if (Fu->isIntrinsic()) {
        unsigned const int IntrinsicId = Fu->getIntrinsicID();
        switch (IntrinsicId) {
        case Intrinsic::fptosi_sat:
        case Intrinsic::fptoui_sat:
          createSaturatedConversionDecoration(CI, B);
          break;
        default:
          break;
        }
      }
    }
  }
}

Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) {
  if (!Call.isInlineAsm())
    return &Call;

  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  LLVMContext &Ctx = CurrF->getContext();

  Constant *TyC = UndefValue::get(IA->getFunctionType());
  MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
  SmallVector<Value *> Args = {
      buildMD(TyC),
      MetadataAsValue::get(Ctx, MDNode::get(Ctx, ConstraintString))};
  for (unsigned OpIdx = 0; OpIdx < Call.arg_size(); OpIdx++)
    Args.push_back(Call.getArgOperand(OpIdx));

  IRBuilder<> B(Call.getParent());
  B.SetInsertPoint(&Call);
  B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args});
  return &Call;
}

// Use a tip about rounding mode to create a decoration.
void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
                                          IRBuilder<> &B) {
  std::optional<RoundingMode> RM = FPI->getRoundingMode();
  if (!RM.has_value())
    return;
  unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
  switch (RM.value()) {
  default:
    // ignore unknown rounding modes
    break;
  case RoundingMode::NearestTiesToEven:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
    break;
  case RoundingMode::TowardNegative:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
    break;
  case RoundingMode::TowardPositive:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
    break;
  case RoundingMode::TowardZero:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
    break;
  case RoundingMode::Dynamic:
  case RoundingMode::NearestTiesToAway:
    // TODO: check if supported
    break;
  }
  if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
    return;
  // Convert the tip about rounding mode into a decoration record.
  createRoundingModeDecoration(FPI, RoundingModeDeco, B);
}
1580
1581Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
1582 BasicBlock *ParentBB = I.getParent();
1583 Function *F = ParentBB->getParent();
1584 IRBuilder<> B(ParentBB);
1585 B.SetInsertPoint(&I);
1588 Args.push_back(I.getCondition());
1589 BBCases.push_back(I.getDefaultDest());
1590 Args.push_back(BlockAddress::get(F, I.getDefaultDest()));
1591 for (auto &Case : I.cases()) {
1592 Args.push_back(Case.getCaseValue());
1593 BBCases.push_back(Case.getCaseSuccessor());
1594 Args.push_back(BlockAddress::get(F, Case.getCaseSuccessor()));
1595 }
1596 CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
1597 {I.getOperand(0)->getType()}, {Args});
1598 // remove switch to avoid its unneeded and undesirable unwrap into branches
1599 // and conditions
1600 replaceAllUsesWith(&I, NewI);
1601 I.eraseFromParent();
1602 // insert artificial and temporary instruction to preserve valid CFG,
1603 // it will be removed after IR translation pass
1604 B.SetInsertPoint(ParentBB);
1605 IndirectBrInst *BrI = B.CreateIndirectBr(
1606 Constant::getNullValue(PointerType::getUnqual(ParentBB->getContext())),
1607 BBCases.size());
1608 for (BasicBlock *BBCase : BBCases)
1609 BrI->addDestination(BBCase);
1610 return BrI;
1611}
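// Example (illustrative, names shown schematically): a switch
//   switch i32 %v, label %def [ i32 1, label %bb1 ]
// is rewritten into a spv_switch intrinsic call carrying the condition and
// (case value, blockaddress) pairs, followed by the temporary placeholder
//   indirectbr ptr null, [label %def, label %bb1]
// which keeps all successors reachable until IR translation drops it.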
1612
1613static bool isFirstIndexZero(const GetElementPtrInst *GEP) {
1614 if (GEP->getNumIndices() == 0)
1615 return false;
1616 if (const auto *CI = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
1617 return CI->getZExtValue() == 0;
1618 }
1619 return false;
1620}
1621
1622Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
1623 IRBuilder<> B(I.getParent());
1624 B.SetInsertPoint(&I);
1625
1626 if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(&I)) {
1627 // Logical SPIR-V cannot use the OpPtrAccessChain instruction. If the first
1628 // index of the GEP is not 0, then we need to try to adjust it.
1629 //
1630 // If the GEP is doing byte addressing, try to rebuild the full access chain
1631 // from the type of the pointer.
1632 if (I.getSourceElementType() ==
1633 IntegerType::getInt8Ty(CurrF->getContext())) {
1634 return buildLogicalAccessChainFromGEP(I);
1635 }
1636
1637 // Look for the array-to-pointer decay. If this is the pattern
1638 // we can adjust the types, and prepend a 0 to the indices.
1639 Value *PtrOp = I.getPointerOperand();
1640 Type *SrcElemTy = I.getSourceElementType();
1641 Type *DeducedPointeeTy = deduceElementType(PtrOp, true);
1642
1643 if (auto *ArrTy = dyn_cast<ArrayType>(DeducedPointeeTy)) {
1644 if (ArrTy->getElementType() == SrcElemTy) {
1645 SmallVector<Value *> NewIndices;
1646 Type *FirstIdxType = I.getOperand(1)->getType();
1647 NewIndices.push_back(ConstantInt::get(FirstIdxType, 0));
1648 for (Value *Idx : I.indices())
1649 NewIndices.push_back(Idx);
1650
1651 SmallVector<Type *, 2> Types = {I.getType(), I.getPointerOperandType()};
1652 SmallVector<Value *, 4> Args;
1653 Args.push_back(B.getInt1(I.isInBounds()));
1654 Args.push_back(I.getPointerOperand());
1655 Args.append(NewIndices.begin(), NewIndices.end());
1656
1657 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
1658 replaceAllUsesWithAndErase(B, &I, NewI);
1659 return NewI;
1660 }
1661 }
1662 }
1663
1664 SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
1665 SmallVector<Value *, 4> Args;
1666 Args.push_back(B.getInt1(I.isInBounds()));
1667 llvm::append_range(Args, I.operands());
1668 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
1669 replaceAllUsesWithAndErase(B, &I, NewI);
1670 return NewI;
1671}
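// Example (illustrative array-to-pointer decay): if %p was deduced to point
// at [4 x i32] while the GEP reads
//   %q = getelementptr i32, ptr %p, i64 %i
// the branch above prepends a zero index, so the emitted spv_gep behaves like
//   %q = getelementptr [4 x i32], ptr %p, i64 0, i64 %i
// which logical SPIR-V can lower to OpAccessChain instead of
// OpPtrAccessChain.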
1672
1673Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
1674 IRBuilder<> B(I.getParent());
1675 B.SetInsertPoint(&I);
1676 Value *Source = I.getOperand(0);
1677
1678 // SPIR-V, contrary to LLVM 17+ IR, supports bitcasts between pointers of
1679 // varying element types. In case of IR coming from older versions of LLVM
1680 // such bitcasts do not provide sufficient information and should simply be
1681 // skipped here, to be handled in insertPtrCastOrAssignTypeInstr.
1682 if (isPointerTy(I.getType())) {
1683 replaceAllUsesWith(&I, Source);
1684 I.eraseFromParent();
1685 return nullptr;
1686 }
1687
1688 SmallVector<Type *, 2> Types = {I.getType(), Source->getType()};
1689 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1690 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
1691 replaceAllUsesWithAndErase(B, &I, NewI);
1692 return NewI;
1693}
1694
1695void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1696 TargetExtType *AssignedType, Value *V, IRBuilder<> &B) {
1697 Type *VTy = V->getType();
1698
1699 // A couple of sanity checks.
1700 assert((isPointerTy(VTy)) && "Expect a pointer type!");
1701 if (Type *ElemTy = getPointeeType(VTy))
1702 if (ElemTy != AssignedType)
1703 report_fatal_error("Unexpected pointer element type!");
1704
1705 CallInst *AssignCI = GR->findAssignPtrTypeInstr(V);
1706 if (!AssignCI) {
1707 GR->buildAssignType(B, AssignedType, V);
1708 return;
1709 }
1710
1711 Type *CurrentType =
1712 cast<ValueAsMetadata>(
1713 cast<MetadataAsValue>(AssignCI->getOperand(1))->getMetadata())
1714 ->getType();
1715 if (CurrentType == AssignedType)
1716 return;
1717
1718 // Builtin types cannot be redeclared or casted.
1719 if (CurrentType->isTargetExtTy())
1720 report_fatal_error("Type mismatch " + CurrentType->getTargetExtName() +
1721 "/" + AssignedType->getTargetExtName() +
1722 " for value " + V->getName(),
1723 false);
1724
1725 // Our previous guess about the type seems to be wrong, let's update
1726 // inferred type according to a new, more precise type information.
1727 GR->updateAssignType(AssignCI, V, getNormalizedPoisonValue(AssignedType));
1728}
1729
1730void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1731 Instruction *I, Value *Pointer, Type *ExpectedElementType,
1732 unsigned OperandToReplace, IRBuilder<> &B) {
1733 TypeValidated.insert(I);
1734
1735 // Do not emit spv_ptrcast if Pointer's element type is ExpectedElementType
1736 Type *PointerElemTy = deduceElementTypeHelper(Pointer, false);
1737 if (PointerElemTy == ExpectedElementType ||
1738 isEquivalentTypes(PointerElemTy, ExpectedElementType))
1739 return;
1740
1741 setInsertPointSkippingPhis(B, I);
1742 Value *ExpectedElementVal = getNormalizedPoisonValue(ExpectedElementType);
1743 MetadataAsValue *VMD = buildMD(ExpectedElementVal);
1744 unsigned AddressSpace = getPointerAddressSpace(Pointer->getType());
1745 bool FirstPtrCastOrAssignPtrType = true;
1746
1747 // Do not emit new spv_ptrcast if equivalent one already exists or when
1748 // spv_assign_ptr_type already targets this pointer with the same element
1749 // type.
1750 if (Pointer->hasUseList()) {
1751 for (auto User : Pointer->users()) {
1752 auto *II = dyn_cast<IntrinsicInst>(User);
1753 if (!II ||
1754 (II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1755 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1756 II->getOperand(0) != Pointer)
1757 continue;
1758
1759 // There is some spv_ptrcast/spv_assign_ptr_type already targeting this
1760 // pointer.
1761 FirstPtrCastOrAssignPtrType = false;
1762 if (II->getOperand(1) != VMD ||
1763 dyn_cast<ConstantInt>(II->getOperand(2))->getSExtValue() !=
1764 (int64_t)AddressSpace)
1765 continue;
1766
1767 // The spv_ptrcast/spv_assign_ptr_type targeting this pointer is of the
1768 // same element type and address space.
1769 if (II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1770 return;
1771
1772 // This must be a spv_ptrcast, do not emit new if this one has the same BB
1773 // as I. Otherwise, search for other spv_ptrcast/spv_assign_ptr_type.
1774 if (II->getParent() != I->getParent())
1775 continue;
1776
1777 I->setOperand(OperandToReplace, II);
1778 return;
1779 }
1780 }
1781
1782 if (isa<Instruction>(Pointer) || isa<Argument>(Pointer)) {
1783 if (FirstPtrCastOrAssignPtrType) {
1784 // If this would be the first spv_ptrcast, do not emit spv_ptrcast and
1785 // emit spv_assign_ptr_type instead.
1786 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1787 return;
1788 } else if (isTodoType(Pointer)) {
1789 eraseTodoType(Pointer);
1790 if (!isa<CallInst>(Pointer) && !isa<GetElementPtrInst>(Pointer)) {
1791 // If this wouldn't be the first spv_ptrcast but existing type info is
1792 // incomplete, update spv_assign_ptr_type arguments.
1793 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Pointer)) {
1794 Type *PrevElemTy = GR->findDeducedElementType(Pointer);
1795 assert(PrevElemTy);
1796 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1797 std::make_pair(I, Pointer)};
1798 GR->updateAssignType(AssignCI, Pointer, ExpectedElementVal);
1799 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1800 } else {
1801 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1802 }
1803 return;
1804 }
1805 }
1806 }
1807
1808 // Emit spv_ptrcast
1809 SmallVector<Type *, 2> Types = {Pointer->getType(), Pointer->getType()};
1810 SmallVector<Value *, 2> Args = {Pointer, VMD, B.getInt32(AddressSpace)};
1811 auto *PtrCastI = B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
1812 I->setOperand(OperandToReplace, PtrCastI);
1813 // We need to set up a pointee type for the newly created spv_ptrcast.
1814 GR->buildAssignPtr(B, ExpectedElementType, PtrCastI);
1815}
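// Example (illustrative, shown schematically): when a float is stored through
// %ptr whose pointee type is still unknown, the first encounter emits
//   call void @llvm.spv.assign.ptr.type(ptr %ptr, metadata float poison, i32 AS)
// while a later use expecting a different pointee type emits a spv_ptrcast
// whose result replaces the instruction's pointer operand.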
1816
1817void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
1818 IRBuilder<> &B) {
1819 // Handle basic instructions:
1820 StoreInst *SI = dyn_cast<StoreInst>(I);
1821 if (IsKernelArgInt8(CurrF, SI)) {
1822 replacePointerOperandWithPtrCast(
1823 I, SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->getContext()),
1824 0, B);
1825 }
1826 if (SI) {
1827 Value *Op = SI->getValueOperand();
1828 Value *Pointer = SI->getPointerOperand();
1829 Type *OpTy = Op->getType();
1830 if (auto *OpI = dyn_cast<Instruction>(Op))
1831 OpTy = restoreMutatedType(GR, OpI, OpTy);
1832 if (OpTy == Op->getType())
1833 OpTy = deduceElementTypeByValueDeep(OpTy, Op, false);
1834 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 1, B);
1835 return;
1836 }
1837 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1838 Value *Pointer = LI->getPointerOperand();
1839 Type *OpTy = LI->getType();
1840 if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
1841 if (Type *ElemTy = GR->findDeducedElementType(LI)) {
1842 OpTy = getTypedPointerWrapper(ElemTy, PtrTy->getAddressSpace());
1843 } else {
1844 Type *NewOpTy = OpTy;
1845 OpTy = deduceElementTypeByValueDeep(OpTy, LI, false);
1846 if (OpTy == NewOpTy)
1847 insertTodoType(Pointer);
1848 }
1849 }
1850 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1851 return;
1852 }
1853 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
1854 Value *Pointer = GEPI->getPointerOperand();
1855 Type *OpTy = nullptr;
1856
1857 // Logical SPIR-V is not allowed to use Op*PtrAccessChain instructions. If
1858 // the first index is 0, then we can trivially lower to OpAccessChain. If
1859 // not we need to try to rewrite the GEP. We avoid adding a pointer cast at
1860 // this time, and will rewrite the GEP when visiting it.
1861 if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(GEPI)) {
1862 return;
1863 }
1864
1865 // In all cases, fall back to the GEP type if type scavenging failed.
1866 if (!OpTy)
1867 OpTy = GEPI->getSourceElementType();
1868
1869 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1870 if (isNestedPointer(OpTy))
1871 insertTodoType(Pointer);
1872 return;
1873 }
1874
1875 // TODO: review and merge with existing logics:
1876 // Handle calls to builtins (non-intrinsics):
1877 CallInst *CI = dyn_cast<CallInst>(I);
1878 if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
1879 !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
1880 return;
1881
1882 // collect information about formal parameter types
1883 std::string DemangledName =
1884 getOclOrSpirvBuiltinDemangledName(CI->getCalledFunction()->getName());
1885 Function *CalledF = CI->getCalledFunction();
1886 SmallVector<Type *, 4> CalledArgTys;
1887 bool HaveTypes = false;
1888 for (unsigned OpIdx = 0; OpIdx < CalledF->arg_size(); ++OpIdx) {
1889 Argument *CalledArg = CalledF->getArg(OpIdx);
1890 Type *ArgType = CalledArg->getType();
1891 if (!isPointerTy(ArgType)) {
1892 CalledArgTys.push_back(nullptr);
1893 } else if (Type *ArgTypeElem = getPointeeType(ArgType)) {
1894 CalledArgTys.push_back(ArgTypeElem);
1895 HaveTypes = true;
1896 } else {
1897 Type *ElemTy = GR->findDeducedElementType(CalledArg);
1898 if (!ElemTy && hasPointeeTypeAttr(CalledArg))
1899 ElemTy = getPointeeTypeByAttr(CalledArg);
1900 if (!ElemTy) {
1901 ElemTy = getPointeeTypeByCallInst(DemangledName, CalledF, OpIdx);
1902 if (ElemTy) {
1903 GR->addDeducedElementType(CalledArg, normalizeType(ElemTy));
1904 } else {
1905 for (User *U : CalledArg->users()) {
1906 if (Instruction *Inst = dyn_cast<Instruction>(U)) {
1907 if ((ElemTy = deduceElementTypeHelper(Inst, false)) != nullptr)
1908 break;
1909 }
1910 }
1911 }
1912 }
1913 HaveTypes |= ElemTy != nullptr;
1914 CalledArgTys.push_back(ElemTy);
1915 }
1916 }
1917
1918 if (DemangledName.empty() && !HaveTypes)
1919 return;
1920
1921 for (unsigned OpIdx = 0; OpIdx < CI->arg_size(); OpIdx++) {
1922 Value *ArgOperand = CI->getArgOperand(OpIdx);
1923 if (!isPointerTy(ArgOperand->getType()))
1924 continue;
1925
1926 // Constants (nulls/undefs) are handled in insertAssignPtrTypeIntrs()
1927 if (!isa<Instruction>(ArgOperand) && !isa<Argument>(ArgOperand)) {
1928 // However, we may have assumptions about the formal argument's type and
1929 // may have a need to insert a ptr cast for the actual parameter of this
1930 // call.
1931 Argument *CalledArg = CalledF->getArg(OpIdx);
1932 if (!GR->findDeducedElementType(CalledArg))
1933 continue;
1934 }
1935
1936 Type *ExpectedType =
1937 OpIdx < CalledArgTys.size() ? CalledArgTys[OpIdx] : nullptr;
1938 if (!ExpectedType && !DemangledName.empty())
1939 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
1940 DemangledName, OpIdx, I->getContext());
1941 if (!ExpectedType || ExpectedType->isVoidTy())
1942 continue;
1943
1944 if (ExpectedType->isTargetExtTy() &&
1945 !isTypedPointerWrapper(cast<TargetExtType>(ExpectedType)))
1946 insertAssignPtrTypeTargetExt(cast<TargetExtType>(ExpectedType),
1947 ArgOperand, B);
1948 else
1949 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType, OpIdx, B);
1950 }
1951}
1952
1953Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
1954 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1955 // type in LLT and IRTranslator will replace it by the scalar.
1956 if (isVector1(I.getType()))
1957 return &I;
1958
1959 SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
1960 I.getOperand(1)->getType(),
1961 I.getOperand(2)->getType()};
1962 IRBuilder<> B(I.getParent());
1963 B.SetInsertPoint(&I);
1964 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1965 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
1966 replaceAllUsesWithAndErase(B, &I, NewI);
1967 return NewI;
1968}
1969
1970Instruction *
1971SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
1972 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1973 // type in LLT and IRTranslator will replace it by the scalar.
1974 if (isVector1(I.getVectorOperandType()))
1975 return &I;
1976
1977 IRBuilder<> B(I.getParent());
1978 B.SetInsertPoint(&I);
1979 SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperandType(),
1980 I.getIndexOperand()->getType()};
1981 SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
1982 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
1983 replaceAllUsesWithAndErase(B, &I, NewI);
1984 return NewI;
1985}
1986
1987Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
1988 IRBuilder<> B(I.getParent());
1989 B.SetInsertPoint(&I);
1990 SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
1991 SmallVector<Value *> Args;
1992 Value *AggregateOp = I.getAggregateOperand();
1993 if (isa<UndefValue>(AggregateOp))
1994 Args.push_back(UndefValue::get(B.getInt32Ty()));
1995 else
1996 Args.push_back(AggregateOp);
1997 Args.push_back(I.getInsertedValueOperand());
1998 for (auto &Op : I.indices())
1999 Args.push_back(B.getInt32(Op));
2000 Instruction *NewI =
2001 B.CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
2002 replaceMemInstrUses(&I, NewI, B);
2003 return NewI;
2004}
2005
2006Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
2007 if (I.getAggregateOperand()->getType()->isAggregateType())
2008 return &I;
2009 IRBuilder<> B(I.getParent());
2010 B.SetInsertPoint(&I);
2011 SmallVector<Value *> Args(I.operands());
2012 for (auto &Op : I.indices())
2013 Args.push_back(B.getInt32(Op));
2014 auto *NewI =
2015 B.CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
2016 replaceAllUsesWithAndErase(B, &I, NewI);
2017 return NewI;
2018}
2019
2020Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
2021 if (!I.getType()->isAggregateType())
2022 return &I;
2023 IRBuilder<> B(I.getParent());
2024 B.SetInsertPoint(&I);
2025 TrackConstants = false;
2026 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
2027 MachineMemOperand::Flags Flags =
2028 TLI->getLoadMemOperandFlags(I, CurrF->getDataLayout());
2029 auto *NewI =
2030 B.CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
2031 {I.getPointerOperand(), B.getInt16(Flags),
2032 B.getInt8(I.getAlign().value())});
2033 replaceMemInstrUses(&I, NewI, B);
2034 return NewI;
2035}
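// Example (illustrative, operands shown schematically): an aggregate load
//   %v = load %struct.S, ptr %p, align 8
// becomes a spv_load intrinsic taking the pointer, an i16 with the memory
// operand flags computed above, and an i8 holding the alignment (8 here);
// replaceMemInstrUses then rewires the aggregate users.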
2036
2037Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
2038 if (!AggrStores.contains(&I))
2039 return &I;
2040 IRBuilder<> B(I.getParent());
2041 B.SetInsertPoint(&I);
2042 TrackConstants = false;
2043 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
2044 MachineMemOperand::Flags Flags =
2045 TLI->getStoreMemOperandFlags(I, CurrF->getDataLayout());
2046 auto *PtrOp = I.getPointerOperand();
2047 auto *NewI = B.CreateIntrinsic(
2048 Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
2049 {I.getValueOperand(), PtrOp, B.getInt16(Flags),
2050 B.getInt8(I.getAlign().value())});
2051 NewI->copyMetadata(I);
2052 I.eraseFromParent();
2053 return NewI;
2054}
2055
2056Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
2057 Value *ArraySize = nullptr;
2058 if (I.isArrayAllocation()) {
2059 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I.getFunction());
2060 if (!STI->canUseExtension(
2061 SPIRV::Extension::SPV_INTEL_variable_length_array))
2063 "array allocation: this instruction requires the following "
2064 "SPIR-V extension: SPV_INTEL_variable_length_array",
2065 false);
2066 ArraySize = I.getArraySize();
2067 }
2068 IRBuilder<> B(I.getParent());
2069 B.SetInsertPoint(&I);
2070 TrackConstants = false;
2071 Type *PtrTy = I.getType();
2072 auto *NewI =
2073 ArraySize
2074 ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2075 {PtrTy, ArraySize->getType()},
2076 {ArraySize, B.getInt8(I.getAlign().value())})
2077 : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
2078 {B.getInt8(I.getAlign().value())});
2079 replaceAllUsesWithAndErase(B, &I, NewI);
2080 return NewI;
2081}
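// Example (illustrative): a variable-length allocation such as
//   %buf = alloca i32, i64 %n, align 4
// is only accepted when SPV_INTEL_variable_length_array is available and is
// rewritten to spv_alloca_array with %n and the alignment as operands; a
// fixed-size alloca becomes spv_alloca carrying just the alignment.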
2082
2083Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2084 assert(I.getType()->isAggregateType() && "Aggregate result is expected");
2085 IRBuilder<> B(I.getParent());
2086 B.SetInsertPoint(&I);
2087 SmallVector<Value *> Args(I.operands());
2088 Args.push_back(B.getInt32(
2089 static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
2090 Args.push_back(B.getInt32(
2091 static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
2092 Args.push_back(B.getInt32(
2093 static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
2094 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2095 {I.getPointerOperand()->getType()}, {Args});
2096 replaceMemInstrUses(&I, NewI, B);
2097 return NewI;
2098}
2099
2100Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) {
2101 IRBuilder<> B(I.getParent());
2102 B.SetInsertPoint(&I);
2103 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2104 return &I;
2105}
2106
2107void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2108 IRBuilder<> &B) {
2109 // Skip special artificial variables.
2110 static const StringSet<> ArtificialGlobals{"llvm.global.annotations",
2111 "llvm.compiler.used"};
2112
2113 if (ArtificialGlobals.contains(GV.getName()))
2114 return;
2115
2116 Constant *Init = nullptr;
2117 if (hasInitializer(&GV)) {
2118 // Deduce element type and store results in Global Registry.
2119 // Result is ignored, because TypedPointerType is not supported
2120 // by llvm IR general logic.
2121 deduceElementTypeHelper(&GV, false);
2122 Init = GV.getInitializer();
2123 Type *Ty = isAggrConstForceInt32(Init) ? B.getInt32Ty() : Init->getType();
2124 Constant *Const = isAggrConstForceInt32(Init) ? B.getInt32(1) : Init;
2125 auto *InitInst = B.CreateIntrinsic(Intrinsic::spv_init_global,
2126 {GV.getType(), Ty}, {&GV, Const});
2127 InitInst->setArgOperand(1, Init);
2128 }
2129 if (!Init && GV.use_empty())
2130 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
2131}
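// Example (illustrative): for a global definition
//   @g = addrspace(1) global i32 42
// the code above emits spv_init_global(@g, 42) so the initializer survives
// IRTranslation, while an uninitialized global without uses is tagged with
// spv_unref_global to keep it from being dropped.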
2132
2133 // Return true if we can't decide on the pointee type yet and will get
2134 // back to the question later. Return false if spv_assign_ptr_type is not
2135 // needed or can be inserted immediately.
2136bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
2137 IRBuilder<> &B,
2138 bool UnknownElemTypeI8) {
2139 reportFatalOnTokenType(I);
2140 if (!isPointerTy(I->getType()) || !requireAssignType(I))
2141 return false;
2142
2143 setInsertPointSkippingPhis(B, I);
2144 if (Type *ElemTy = deduceElementType(I, UnknownElemTypeI8)) {
2145 GR->buildAssignPtr(B, ElemTy, I);
2146 return false;
2147 }
2148 return true;
2149}
2150
2151void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
2152 IRBuilder<> &B) {
2153 // TODO: extend the list of functions with known result types
2154 static StringMap<unsigned> ResTypeWellKnown = {
2155 {"async_work_group_copy", WellKnownTypes::Event},
2156 {"async_work_group_strided_copy", WellKnownTypes::Event},
2157 {"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2158
2159 reportFatalOnTokenType(I);
2160
2161 bool IsKnown = false;
2162 if (auto *CI = dyn_cast<CallInst>(I)) {
2163 if (!CI->isIndirectCall() && !CI->isInlineAsm() &&
2164 CI->getCalledFunction() && !CI->getCalledFunction()->isIntrinsic()) {
2165 Function *CalledF = CI->getCalledFunction();
2166 std::string DemangledName =
2168 FPDecorationId DecorationId = FPDecorationId::NONE;
2169 if (DemangledName.length() > 0)
2170 DemangledName =
2171 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2172 auto ResIt = ResTypeWellKnown.find(DemangledName);
2173 if (ResIt != ResTypeWellKnown.end()) {
2174 IsKnown = true;
2175 setInsertPointAfterDef(B, I);
2176 switch (ResIt->second) {
2177 case WellKnownTypes::Event:
2178 GR->buildAssignType(
2179 B, TargetExtType::get(I->getContext(), "spirv.Event"), I);
2180 break;
2181 }
2182 }
2183 // check if a floating rounding mode or saturation info is present
2184 switch (DecorationId) {
2185 default:
2186 break;
2187 case FPDecorationId::SAT:
2188 createSaturatedConversionDecoration(CI, B);
2189 break;
2190 case FPDecorationId::RTE:
2191 createRoundingModeDecoration(
2192 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE, B);
2193 break;
2194 case FPDecorationId::RTZ:
2195 createRoundingModeDecoration(
2196 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ, B);
2197 break;
2198 case FPDecorationId::RTP:
2199 createRoundingModeDecoration(
2200 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP, B);
2201 break;
2202 case FPDecorationId::RTN:
2203 createRoundingModeDecoration(
2204 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN, B);
2205 break;
2206 }
2207 }
2208 }
2209
2210 Type *Ty = I->getType();
2211 if (!IsKnown && !Ty->isVoidTy() && !isPointerTy(Ty) && requireAssignType(I)) {
2212 setInsertPointAfterDef(B, I);
2213 Type *TypeToAssign = Ty;
2214 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2215 if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2216 II->getIntrinsicID() == Intrinsic::spv_undef) {
2217 auto It = AggrConstTypes.find(II);
2218 if (It == AggrConstTypes.end())
2219 report_fatal_error("Unknown composite intrinsic type");
2220 TypeToAssign = It->second;
2221 }
2222 }
2223 TypeToAssign = restoreMutatedType(GR, I, TypeToAssign);
2224 GR->buildAssignType(B, TypeToAssign, I);
2225 }
2226 for (const auto &Op : I->operands()) {
2227 if (isa<ConstantPointerNull>(Op) || isa<UndefValue>(Op) ||
2228 // Check GetElementPtrConstantExpr case.
2229 (isa<ConstantExpr>(Op) &&
2230 (isa<GEPOperator>(Op) ||
2231 (cast<ConstantExpr>(Op)->getOpcode() == CastInst::IntToPtr)))) {
2232 setInsertPointSkippingPhis(B, I);
2233 Type *OpTy = Op->getType();
2234 if (isa<UndefValue>(Op) && OpTy->isAggregateType()) {
2235 CallInst *AssignCI =
2236 buildIntrWithMD(Intrinsic::spv_assign_type, {B.getInt32Ty()}, Op,
2237 UndefValue::get(B.getInt32Ty()), {}, B);
2238 GR->addAssignPtrTypeInstr(Op, AssignCI);
2239 } else if (!isa<Instruction>(Op)) {
2240 Type *OpTy = Op->getType();
2241 Type *OpTyElem = getPointeeType(OpTy);
2242 if (OpTyElem) {
2243 GR->buildAssignPtr(B, OpTyElem, Op);
2244 } else if (isPointerTy(OpTy)) {
2245 Type *ElemTy = GR->findDeducedElementType(Op);
2246 GR->buildAssignPtr(B, ElemTy ? ElemTy : deduceElementType(Op, true),
2247 Op);
2248 } else {
2249 Value *OpTyVal = Op;
2250 if (OpTy->isTargetExtTy()) {
2251 // We need to do this in order to be consistent with how target ext
2252 // types are handled in `processInstrAfterVisit`
2253 OpTyVal = getNormalizedPoisonValue(OpTy);
2254 }
2255 CallInst *AssignCI =
2256 buildIntrWithMD(Intrinsic::spv_assign_type, {OpTy},
2257 getNormalizedPoisonValue(OpTy), OpTyVal, {}, B);
2258 GR->addAssignPtrTypeInstr(OpTyVal, AssignCI);
2259 }
2260 }
2261 }
2262 }
2263}
2264
2265bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2266 Instruction *Inst) {
2267 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*Inst->getFunction());
2268 if (!STI->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2269 return false;
2270 // Add aliasing decorations to internal load and store intrinsics
2271 // and atomic instructions, skipping atomic store as it won't have ID to
2272 // attach the decoration.
2273 CallInst *CI = dyn_cast<CallInst>(Inst);
2274 if (!CI)
2275 return false;
2276 if (Function *Fun = CI->getCalledFunction()) {
2277 if (Fun->isIntrinsic()) {
2278 switch (Fun->getIntrinsicID()) {
2279 case Intrinsic::spv_load:
2280 case Intrinsic::spv_store:
2281 return true;
2282 default:
2283 return false;
2284 }
2285 }
2286 std::string Name = getOclOrSpirvBuiltinDemangledName(Fun->getName());
2287 const std::string Prefix = "__spirv_Atomic";
2288 const bool IsAtomic = Name.find(Prefix) == 0;
2289
2290 if (!Fun->getReturnType()->isVoidTy() && IsAtomic)
2291 return true;
2292 }
2293 return false;
2294}
2295
2296void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
2297 IRBuilder<> &B) {
2298 if (MDNode *MD = I->getMetadata("spirv.Decorations")) {
2299 setInsertPointAfterDef(B, I);
2300 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
2301 {I, MetadataAsValue::get(I->getContext(), MD)});
2302 }
2303 // Lower alias.scope/noalias metadata
2304 {
2305 auto processMemAliasingDecoration = [&](unsigned Kind) {
2306 if (MDNode *AliasListMD = I->getMetadata(Kind)) {
2307 if (shouldTryToAddMemAliasingDecoration(I)) {
2308 uint32_t Dec = Kind == LLVMContext::MD_alias_scope
2309 ? SPIRV::Decoration::AliasScopeINTEL
2310 : SPIRV::Decoration::NoAliasINTEL;
2311 SmallVector<Value *, 3> Args = {
2312 I, ConstantInt::get(B.getInt32Ty(), Dec),
2313 MetadataAsValue::get(I->getContext(), AliasListMD)};
2314 setInsertPointAfterDef(B, I);
2315 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2316 {I->getType()}, {Args});
2317 }
2318 }
2319 };
2320 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2321 processMemAliasingDecoration(LLVMContext::MD_noalias);
2322 }
2323 // MD_fpmath
2324 if (MDNode *MD = I->getMetadata(LLVMContext::MD_fpmath)) {
2325 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I->getFunction());
2326 bool AllowFPMaxError =
2327 STI->canUseExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
2328 if (!AllowFPMaxError)
2329 return;
2330
2331 setInsertPointAfterDef(B, I);
2332 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2333 {I->getType()},
2334 {I, MetadataAsValue::get(I->getContext(), MD)});
2335 }
2336}
2337
2338static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec(
2339 const Module &M,
2340 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2341 &FPFastMathDefaultInfoMap,
2342 Function *F) {
2343 auto it = FPFastMathDefaultInfoMap.find(F);
2344 if (it != FPFastMathDefaultInfoMap.end())
2345 return it->second;
2346
2347 // If the map does not contain the entry, create a new one. Initialize it to
2348 // contain all 3 elements sorted by bit width of target type: {half, float,
2349 // double}.
2350 SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
2351 FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
2352 SPIRV::FPFastMathMode::None);
2353 FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
2354 SPIRV::FPFastMathMode::None);
2355 FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
2356 SPIRV::FPFastMathMode::None);
2357 return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
2358}
2359
2360static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo(
2361 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
2362 const Type *Ty) {
2363 size_t BitWidth = Ty->getScalarSizeInBits();
2364 int Index =
2365 SPIRV::FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex(
2366 BitWidth);
2367 assert(Index >= 0 && Index < 3 &&
2368 "Expected FPFastMathDefaultInfo for half, float, or double");
2369 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2370 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2371 return FPFastMathDefaultInfoVec[Index];
2372}
2373
2374void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
2375 const SPIRVSubtarget *ST = TM->getSubtargetImpl();
2376 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2377 return;
2378
2379 // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
2380 // We need the entry point (function) as the key, and the target
2381 // type and flags as the value.
2382 // We also need to check ContractionOff and SignedZeroInfNanPreserve
2383 // execution modes, as they are now deprecated and must be replaced
2384 // with FPFastMathDefaultInfo.
2385 auto Node = M.getNamedMetadata("spirv.ExecutionMode");
2386 if (!Node) {
2387 if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
2388 // This requires emitting ContractionOff. However, because
2389 // ContractionOff is now deprecated, we need to replace it with
2390 // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0.
2391 // We need to create the constant for that.
2392
2393 // Create constant instruction with the bitmask flags.
2394 Constant *InitValue =
2395 ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
2396 // TODO: Reuse constant if there is one already with the required
2397 // value.
2398 [[maybe_unused]] GlobalVariable *GV =
2399 new GlobalVariable(M, // Module
2400 Type::getInt32Ty(M.getContext()), // Type
2401 true, // isConstant
2402 GlobalValue::InternalLinkage, // Linkage
2403 InitValue // Initializer
2404 );
2405 }
2406 return;
2407 }
2408
2409 // The table maps function pointers to their default FP fast math info. It
2410 // can be assumed that the SmallVector is sorted by the bit width of the
2411 // type. The first element is the smallest bit width, and the last element
2412 // is the largest bit width, therefore, we will have {half, float, double}
2413 // in the order of their bit widths.
2414 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2415 FPFastMathDefaultInfoMap;
2416
2417 for (unsigned i = 0; i < Node->getNumOperands(); i++) {
2418 MDNode *MDN = cast<MDNode>(Node->getOperand(i));
2419 assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
2420 Function *F = cast<Function>(
2421 cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
2422 const auto EM =
2423 cast<ConstantInt>(
2424 cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
2425 ->getZExtValue();
2426 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2427 assert(MDN->getNumOperands() == 4 &&
2428 "Expected 4 operands for FPFastMathDefault");
2429 const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
2430 unsigned Flags =
2431 cast<ConstantInt>(
2432 cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
2433 ->getZExtValue();
2434 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2435 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2436 SPIRV::FPFastMathDefaultInfo &Info =
2437 getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
2438 Info.FastMathFlags = Flags;
2439 Info.FPFastMathDefault = true;
2440 } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2441 assert(MDN->getNumOperands() == 2 &&
2442 "Expected no operands for ContractionOff");
2443
2444 // We need to save this info for every possible FP type, i.e. {half,
2445 // float, double, fp128}.
2446 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2447 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2448 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2449 Info.ContractionOff = true;
2450 }
2451 } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2452 assert(MDN->getNumOperands() == 3 &&
2453 "Expected 1 operand for SignedZeroInfNanPreserve");
2454 unsigned TargetWidth =
2455 cast<ConstantInt>(
2456 cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
2457 ->getZExtValue();
2458 // We need to save this info only for the FP type with TargetWidth.
2459 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2460 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2461 int Index = SPIRV::FPFastMathDefaultInfoVector::
2462 computeFPFastMathDefaultInfoVecIndex(TargetWidth);
2463 assert(Index >= 0 && Index < 3 &&
2464 "Expected FPFastMathDefaultInfo for half, float, or double");
2465 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2466 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2467 FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
2468 }
2469 }
2470
2471 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2472 for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2473 if (FPFastMathDefaultInfoVec.empty())
2474 continue;
2475
2476 for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2477 assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
2478 // Skip if none of the execution modes was used.
2479 unsigned Flags = Info.FastMathFlags;
2480 if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
2481 !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
2482 continue;
2483
2484 // Check if flags are compatible.
2485 if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2486 report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
2487 "and AllowContract");
2488
2489 if (Info.SignedZeroInfNanPreserve &&
2490 !(Flags &
2491 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2492 SPIRV::FPFastMathMode::NSZ))) {
2493 if (Info.FPFastMathDefault)
2494 report_fatal_error("Conflicting FPFastMathFlags: "
2495 "SignedZeroInfNanPreserve but at least one of "
2496 "NotNaN/NotInf/NSZ is enabled.");
2497 }
2498
2499 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2500 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2501 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2502 report_fatal_error("Conflicting FPFastMathFlags: "
2503 "AllowTransform requires AllowReassoc and "
2504 "AllowContract to be set.");
2505 }
2506
2507 auto it = GlobalVars.find(Flags);
2508 GlobalVariable *GV = nullptr;
2509 if (it != GlobalVars.end()) {
2510 // Reuse existing global variable.
2511 GV = it->second;
2512 } else {
2513 // Create constant instruction with the bitmask flags.
2514 Constant *InitValue =
2515 ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
2516 // TODO: Reuse constant if there is one already with the required
2517 // value.
2518 GV = new GlobalVariable(M, // Module
2519 Type::getInt32Ty(M.getContext()), // Type
2520 true, // isConstant
2521 GlobalValue::InternalLinkage, // Linkage
2522 InitValue // Initializer
2523 );
2524 GlobalVars[Flags] = GV;
2525 }
2526 }
2527 }
2528}
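// Example (illustrative, operand layout as consumed above): an entry of the
// !spirv.ExecutionMode named metadata of the schematic form
//   !{ptr @kernel, i32 FPFastMathDefault, float poison, i32 Flags}
// stores Flags into the float slot of @kernel's per-type info vector, while
// ContractionOff and SignedZeroInfNanPreserve entries only set the matching
// booleans; conflicting combinations are rejected with report_fatal_error.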
2529
2530void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
2531 IRBuilder<> &B) {
2532 auto *II = dyn_cast<IntrinsicInst>(I);
2533 bool IsConstComposite =
2534 II && II->getIntrinsicID() == Intrinsic::spv_const_composite;
2535 if (IsConstComposite && TrackConstants) {
2536 setInsertPointAfterDef(B, I);
2537 auto t = AggrConsts.find(I);
2538 assert(t != AggrConsts.end());
2539 auto *NewOp =
2540 buildIntrWithMD(Intrinsic::spv_track_constant,
2541 {II->getType(), II->getType()}, t->second, I, {}, B);
2542 replaceAllUsesWith(I, NewOp, false);
2543 NewOp->setArgOperand(0, I);
2544 }
2545 bool IsPhi = isa<PHINode>(I), BPrepared = false;
2546 for (const auto &Op : I->operands()) {
2547 if (isa<PHINode>(I) || isa<SwitchInst>(I) ||
2548 !(isa<ConstantData>(Op) || isa<ConstantExpr>(Op)))
2549 continue;
2550 unsigned OpNo = Op.getOperandNo();
2551 if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2552 (II->paramHasAttr(OpNo, Attribute::ImmArg))))
2553 continue;
2554
2555 if (!BPrepared) {
2556 IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
2557 : B.SetInsertPoint(I);
2558 BPrepared = true;
2559 }
2560 Type *OpTy = Op->getType();
2561 Type *OpElemTy = GR->findDeducedElementType(Op);
2562 Value *NewOp = Op;
2563 if (OpTy->isTargetExtTy()) {
2564 // Since this value is replaced by poison, we need to do the same in
2565 // `insertAssignTypeIntrs`.
2566 Value *OpTyVal = getNormalizedPoisonValue(OpTy);
2567 NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
2568 {OpTy, OpTyVal->getType()}, Op, OpTyVal, {}, B);
2569 }
2570 if (!IsConstComposite && isPointerTy(OpTy) && OpElemTy != nullptr &&
2571 OpElemTy != IntegerType::getInt8Ty(I->getContext())) {
2572 SmallVector<Type *, 2> Types = {OpTy, OpTy};
2573 SmallVector<Value *, 2> Args = {
2574 NewOp, buildMD(getNormalizedPoisonValue(OpElemTy)),
2575 B.getInt32(getPointerAddressSpace(OpTy))};
2576 CallInst *PtrCasted =
2577 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
2578 GR->buildAssignPtr(B, OpElemTy, PtrCasted);
2579 NewOp = PtrCasted;
2580 }
2581 if (NewOp != Op)
2582 I->setOperand(OpNo, NewOp);
2583 }
2584 if (Named.insert(I).second)
2585 emitAssignName(I, B);
2586}
2587
2588Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
2589 unsigned OpIdx) {
2590 std::unordered_set<Function *> FVisited;
2591 return deduceFunParamElementType(F, OpIdx, FVisited);
2592}
2593
2594Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2595 Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2596 // maybe a cycle
2597 if (!FVisited.insert(F).second)
2598 return nullptr;
2599
2600 std::unordered_set<Value *> Visited;
2601 SmallVector<std::pair<Function *, unsigned>> Lookup;
2602 // search in function's call sites
2603 for (User *U : F->users()) {
2604 CallInst *CI = dyn_cast<CallInst>(U);
2605 if (!CI || OpIdx >= CI->arg_size())
2606 continue;
2607 Value *OpArg = CI->getArgOperand(OpIdx);
2608 if (!isPointerTy(OpArg->getType()))
2609 continue;
2610 // maybe we already know operand's element type
2611 if (Type *KnownTy = GR->findDeducedElementType(OpArg))
2612 return KnownTy;
2613 // try to deduce from the operand itself
2614 Visited.clear();
2615 if (Type *Ty = deduceElementTypeHelper(OpArg, Visited, false))
2616 return Ty;
2617 // search in actual parameter's users
2618 for (User *OpU : OpArg->users()) {
2619 Instruction *Inst = dyn_cast<Instruction>(OpU);
2620 if (!Inst || Inst == CI)
2621 continue;
2622 Visited.clear();
2623 if (Type *Ty = deduceElementTypeHelper(Inst, Visited, false))
2624 return Ty;
2625 }
2626 // check if it's a formal parameter of the outer function
2627 if (!CI->getParent() || !CI->getParent()->getParent())
2628 continue;
2629 Function *OuterF = CI->getParent()->getParent();
2630 if (FVisited.find(OuterF) != FVisited.end())
2631 continue;
2632 for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
2633 if (OuterF->getArg(i) == OpArg) {
2634 Lookup.push_back(std::make_pair(OuterF, i));
2635 break;
2636 }
2637 }
2638 }
2639
2640 // search in function parameters
2641 for (auto &Pair : Lookup) {
2642 if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2643 return Ty;
2644 }
2645
2646 return nullptr;
2647}
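// Example (illustrative): given
//   define void @f(ptr %p) { ... }
//   call void @f(ptr %q)
// the walk above tries, in order: a type already deduced for %q, deduction
// from %q itself, deduction from other users of %q, and finally recursion
// into the caller when %q is itself a formal parameter of the outer function.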
2648
2649void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *F,
2650 IRBuilder<> &B) {
2651 B.SetInsertPointPastAllocas(F);
2652 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2653 Argument *Arg = F->getArg(OpIdx);
2654 if (!isUntypedPointerTy(Arg->getType()))
2655 continue;
2656 Type *ElemTy = GR->findDeducedElementType(Arg);
2657 if (ElemTy)
2658 continue;
2659 if (hasPointeeTypeAttr(Arg) &&
2660 (ElemTy = getPointeeTypeByAttr(Arg)) != nullptr) {
2661 GR->buildAssignPtr(B, ElemTy, Arg);
2662 continue;
2663 }
2664 // search in function's call sites
2665 for (User *U : F->users()) {
2666 CallInst *CI = dyn_cast<CallInst>(U);
2667 if (!CI || OpIdx >= CI->arg_size())
2668 continue;
2669 Value *OpArg = CI->getArgOperand(OpIdx);
2670 if (!isPointerTy(OpArg->getType()))
2671 continue;
2672 // maybe we already know operand's element type
2673 if ((ElemTy = GR->findDeducedElementType(OpArg)) != nullptr)
2674 break;
2675 }
2676 if (ElemTy) {
2677 GR->buildAssignPtr(B, ElemTy, Arg);
2678 continue;
2679 }
2680 if (HaveFunPtrs) {
2681 for (User *U : Arg->users()) {
2682 CallInst *CI = dyn_cast<CallInst>(U);
2683 if (CI && !isa<IntrinsicInst>(CI) && CI->isIndirectCall() &&
2684 CI->getCalledOperand() == Arg &&
2685 CI->getParent()->getParent() == CurrF) {
2686 SmallVector<std::pair<Value *, unsigned>> Ops;
2687 deduceOperandElementTypeFunctionPointer(CI, Ops, ElemTy, false);
2688 if (ElemTy) {
2689 GR->buildAssignPtr(B, ElemTy, Arg);
2690 break;
2691 }
2692 }
2693 }
2694 }
2695 }
2696}
2697
2698void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
2699 B.SetInsertPointPastAllocas(F);
2700 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2701 Argument *Arg = F->getArg(OpIdx);
2702 if (!isUntypedPointerTy(Arg->getType()))
2703 continue;
2704 Type *ElemTy = GR->findDeducedElementType(Arg);
2705 if (!ElemTy && (ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
2706 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Arg)) {
2707 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2708 GR->updateAssignType(AssignCI, Arg, getNormalizedPoisonValue(ElemTy));
2709 propagateElemType(Arg, IntegerType::getInt8Ty(F->getContext()),
2710 VisitedSubst);
2711 } else {
2712 GR->buildAssignPtr(B, ElemTy, Arg);
2713 }
2714 }
2715 }
2716}
2717
2718static FunctionType *getFunctionPointerElemType(Function *F,
2719 SPIRVGlobalRegistry *GR) {
2720 FunctionType *FTy = F->getFunctionType();
2721 bool IsNewFTy = false;
2722 SmallVector<Type *, 4> ArgTys;
2723 for (Argument &Arg : F->args()) {
2724 Type *ArgTy = Arg.getType();
2725 if (ArgTy->isPointerTy())
2726 if (Type *ElemTy = GR->findDeducedElementType(&Arg)) {
2727 IsNewFTy = true;
2728 ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
2729 }
2730 ArgTys.push_back(ArgTy);
2731 }
2732 return IsNewFTy
2733 ? FunctionType::get(FTy->getReturnType(), ArgTys, FTy->isVarArg())
2734 : FTy;
2735}
2736
2737bool SPIRVEmitIntrinsics::processFunctionPointers(Module &M) {
2738 SmallVector<Function *> Worklist;
2739 for (auto &F : M) {
2740 if (F.isIntrinsic())
2741 continue;
2742 if (F.isDeclaration()) {
2743 for (User *U : F.users()) {
2744 CallInst *CI = dyn_cast<CallInst>(U);
2745 if (!CI || CI->getCalledFunction() != &F) {
2746 Worklist.push_back(&F);
2747 break;
2748 }
2749 }
2750 } else {
2751 if (F.user_empty())
2752 continue;
2753 Type *FPElemTy = GR->findDeducedElementType(&F);
2754 if (!FPElemTy)
2755 FPElemTy = getFunctionPointerElemType(&F, GR);
2756 for (User *U : F.users()) {
2757 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2758 if (!II || II->arg_size() != 3 || II->getOperand(0) != &F)
2759 continue;
2760 if (II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2761 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2762 GR->updateAssignType(II, &F, getNormalizedPoisonValue(FPElemTy));
2763 break;
2764 }
2765 }
2766 }
2767 }
2768 if (Worklist.empty())
2769 return false;
2770
2771 std::string ServiceFunName = SPIRV_BACKEND_SERVICE_FUN_NAME;
2772 if (!getVacantFunctionName(M, ServiceFunName))
2774 "cannot allocate a name for the internal service function");
2775 LLVMContext &Ctx = M.getContext();
2776 Function *SF =
2777 Function::Create(FunctionType::get(Type::getVoidTy(Ctx), {}, false),
2778 GlobalValue::PrivateLinkage, ServiceFunName, M);
2779 SF->addFnAttr(SPIRV_BACKEND_SERVICE_FUN_NAME, "");
2780 BasicBlock *BB = BasicBlock::Create(Ctx, "entry", SF);
2781 IRBuilder<> IRB(BB);
2782
2783 for (Function *F : Worklist) {
2784 SmallVector<Value *> Args;
2785 for (const auto &Arg : F->args())
2786 Args.push_back(getNormalizedPoisonValue(Arg.getType()));
2787 IRB.CreateCall(F, Args);
2788 }
2789 IRB.CreateRetVoid();
2790
2791 return true;
2792}
2793
2794// Apply types parsed from demangled function declarations.
2795void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(IRBuilder<> &B) {
2796 DenseMap<Function *, CallInst *> Ptrcasts;
2797 for (auto It : FDeclPtrTys) {
2798 Function *F = It.first;
2799 for (auto *U : F->users()) {
2800 CallInst *CI = dyn_cast<CallInst>(U);
2801 if (!CI || CI->getCalledFunction() != F)
2802 continue;
2803 unsigned Sz = CI->arg_size();
2804 for (auto [Idx, ElemTy] : It.second) {
2805 if (Idx >= Sz)
2806 continue;
2807 Value *Param = CI->getArgOperand(Idx);
2808 if (GR->findDeducedElementType(Param) || isa<GlobalValue>(Param))
2809 continue;
2810 if (Argument *Arg = dyn_cast<Argument>(Param)) {
2811 if (!hasPointeeTypeAttr(Arg)) {
2812 B.SetInsertPointPastAllocas(Arg->getParent());
2813 B.SetCurrentDebugLocation(DebugLoc());
2814 GR->buildAssignPtr(B, ElemTy, Arg);
2815 }
2816 } else if (isa<GetElementPtrInst>(Param)) {
2817 replaceUsesOfWithSpvPtrcast(Param, normalizeType(ElemTy), CI,
2818 Ptrcasts);
2819 } else if (isa<Instruction>(Param)) {
2820 GR->addDeducedElementType(Param, normalizeType(ElemTy));
2821 // insertAssignTypeIntrs() will complete buildAssignPtr()
2822 } else {
2823 B.SetInsertPoint(CI->getParent()
2824 ->getParent()
2825 ->getEntryBlock()
2826 .getFirstNonPHIOrDbgOrAlloca());
2827 GR->buildAssignPtr(B, ElemTy, Param);
2828 }
2829 CallInst *Ref = dyn_cast<CallInst>(Param);
2830 if (!Ref)
2831 continue;
2832 Function *RefF = Ref->getCalledFunction();
2833 if (!RefF || !isPointerTy(RefF->getReturnType()) ||
2834 GR->findDeducedElementType(RefF))
2835 continue;
2836 ElemTy = normalizeType(ElemTy);
2837 GR->addDeducedElementType(RefF, ElemTy);
2838 GR->addReturnType(
2839 RefF, TypedPointerType::get(
2840 ElemTy, getPointerAddressSpace(RefF->getReturnType())));
2841 }
2842 }
2843 }
2844}
2845
2846GetElementPtrInst *
2847SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
2848 // getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I.
2849 // If the type is a 0-length array and the first index is 0 (zero), drop both
2850 // the 0-length array type and the first index. This is a common pattern in
2851 // the IR, e.g. when a zero-length array is used as a placeholder for a
2852 // flexible (unbounded) array.
2853 assert(GEP && "GEP is null");
2854 Type *SrcTy = GEP->getSourceElementType();
2855 SmallVector<Value *, 8> Indices(GEP->indices());
2856 ArrayType *ArrTy = dyn_cast<ArrayType>(SrcTy);
2857 if (ArrTy && ArrTy->getNumElements() == 0 &&
2858 isFirstIndexZero(GEP)) {
2859 Indices.erase(Indices.begin());
2860 SrcTy = ArrTy->getElementType();
2861 return GetElementPtrInst::Create(SrcTy, GEP->getPointerOperand(), Indices,
2862 GEP->getNoWrapFlags(), "",
2863 GEP->getIterator());
2864 }
2865 return nullptr;
2866}
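// Example (illustrative): the placeholder pattern
//   %q = getelementptr [0 x i32], ptr %p, i64 0, i64 %i
// is rebuilt by the helper above as
//   %q = getelementptr i32, ptr %p, i64 %i
// dropping both the zero-length array type and the leading zero index.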
2867
2868bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
2869 if (Func.isDeclaration())
2870 return false;
2871
2872 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
2873 GR = ST.getSPIRVGlobalRegistry();
2874
2875 if (!CurrF)
2876 HaveFunPtrs =
2877 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
2878
2879 CurrF = &Func;
2880 IRBuilder<> B(Func.getContext());
2881 AggrConsts.clear();
2882 AggrConstTypes.clear();
2883 AggrStores.clear();
2884
2885 // Fix GEP result types ahead of inference, and simplify if possible.
2886 // Data structure for dead instructions that were simplified and replaced.
2887 SmallPtrSet<Instruction *, 4> DeadInsts;
2888 for (auto &I : instructions(Func)) {
2889 auto *Ref = dyn_cast<GetElementPtrInst>(&I);
2890 if (!Ref || GR->findDeducedElementType(Ref))
2891 continue;
2892
2893 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(Ref);
2894 if (NewGEP) {
2895 Ref->replaceAllUsesWith(NewGEP);
2896 DeadInsts.insert(Ref);
2897 Ref = NewGEP;
2898 }
2899 if (Type *GepTy = getGEPType(Ref))
2900 GR->addDeducedElementType(Ref, normalizeType(GepTy));
2901 }
2902 // Remove dead instructions that were simplified and replaced.
2903 for (auto *I : DeadInsts) {
2904 assert(I->use_empty() && "Dead instruction should not have any uses left");
2905 I->eraseFromParent();
2906 }
2907
2908 processParamTypesByFunHeader(CurrF, B);
2909
2910 // StoreInst's operand type can be changed during the next
2911 // transformations, so we need to store it in the set. Also store already
2912 // transformed types.
2913 for (auto &I : instructions(Func)) {
2914 StoreInst *SI = dyn_cast<StoreInst>(&I);
2915 if (!SI)
2916 continue;
2917 Type *ElTy = SI->getValueOperand()->getType();
2918 if (ElTy->isAggregateType() || ElTy->isVectorTy())
2919 AggrStores.insert(&I);
2920 }
2921
2922 B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
2923 for (auto &GV : Func.getParent()->globals())
2924 processGlobalValue(GV, B);
2925
2926 preprocessUndefs(B);
2927 preprocessCompositeConstants(B);
2928 SmallVector<Instruction *> Worklist(
2929 llvm::make_pointer_range(instructions(Func)));
2930
2931 applyDemangledPtrArgTypes(B);
2932
2933 // Pass forward: use operand to deduce instructions result.
2934 for (auto &I : Worklist) {
2935 // Don't emit intrinsics for convergence intrinsics.
2936 if (isConvergenceIntrinsic(I))
2937 continue;
2938
2939 bool Postpone = insertAssignPtrTypeIntrs(I, B, false);
2940 // if Postpone is true, we can't decide on pointee type yet
2941 insertAssignTypeIntrs(I, B);
2942 insertPtrCastOrAssignTypeInstr(I, B);
2943 insertSpirvDecorations(I, B);
2944 // if instruction requires a pointee type set, let's check if we know it
2945 // already, and force it to be i8 if not
2946 if (Postpone && !GR->findAssignPtrTypeInstr(I))
2947 insertAssignPtrTypeIntrs(I, B, true);
2948
2949 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I))
2950 useRoundingMode(FPI, B);
2951 }
2952
2953 // Pass backward: use instructions results to specify/update/cast operands
2954 // where needed.
2955 SmallPtrSet<Instruction *, 4> IncompleteRets;
2956 for (auto &I : llvm::reverse(instructions(Func)))
2957 deduceOperandElementType(&I, &IncompleteRets);
2958
2959 // Pass forward for PHIs only; their operands do not necessarily precede
2960 // the instruction in the order given by `instructions(Func)`.
2961 for (BasicBlock &BB : Func)
2962 for (PHINode &Phi : BB.phis())
2963 if (isPointerTy(Phi.getType()))
2964 deduceOperandElementType(&Phi, nullptr);
2965
2966 for (auto *I : Worklist) {
2967 TrackConstants = true;
2968 if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
2969 setInsertPointAfterDef(B, I);
2970 // Visitors return either the original/newly created instruction for
2971 // further processing, nullptr otherwise.
2972 I = visit(*I);
2973 if (!I)
2974 continue;
2975
2976 // Don't emit intrinsics for convergence operations.
2977 if (isConvergenceIntrinsic(I))
2978 continue;
2979
2981 processInstrAfterVisit(I, B);
2982 }
2983
2984 return true;
2985}
2986
2987// Try to deduce a better type for pointers to untyped ptr.
2988bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
2989 if (!GR || TodoTypeSz == 0)
2990 return false;
2991
2992 unsigned SzTodo = TodoTypeSz;
2993 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
2994 for (auto [Op, Enabled] : TodoType) {
2995 // TODO: add isa<CallInst>(Op) to continue
2996 if (!Enabled || isa<GetElementPtrInst>(Op))
2997 continue;
2998 CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
2999 Type *KnownTy = GR->findDeducedElementType(Op);
3000 if (!KnownTy || !AssignCI)
3001 continue;
3002 assert(Op == AssignCI->getArgOperand(0));
3003 // Try to improve the type deduced after all Functions are processed.
3004 if (auto *CI = dyn_cast<Instruction>(Op)) {
3005 CurrF = CI->getParent()->getParent();
3006 std::unordered_set<Value *> Visited;
3007 if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
3008 if (ElemTy != KnownTy) {
3009 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
3010 propagateElemType(CI, ElemTy, VisitedSubst);
3011 eraseTodoType(Op);
3012 continue;
3013 }
3014 }
3015 }
3016
3017 if (Op->hasUseList()) {
3018 for (User *U : Op->users()) {
3019 Instruction *Inst = dyn_cast<Instruction>(U);
3020 if (Inst && !isa<IntrinsicInst>(Inst))
3021 ToProcess[Inst].insert(Op);
3022 }
3023 }
3024 }
3025 if (TodoTypeSz == 0)
3026 return true;
3027
3028 for (auto &F : M) {
3029 CurrF = &F;
3030 SmallPtrSet<Instruction *, 4> IncompleteRets;
3031 for (auto &I : llvm::reverse(instructions(F))) {
3032 auto It = ToProcess.find(&I);
3033 if (It == ToProcess.end())
3034 continue;
3035 It->second.remove_if([this](Value *V) { return !isTodoType(V); });
3036 if (It->second.size() == 0)
3037 continue;
3038 deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
3039 if (TodoTypeSz == 0)
3040 return true;
3041 }
3042 }
3043
3044 return SzTodo > TodoTypeSz;
3045}
3046
3047// Parse and store argument types of function declarations where needed.
3048void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
3049 for (auto &F : M) {
3050 if (!F.isDeclaration() || F.isIntrinsic())
3051 continue;
3052 // get the demangled name
3053 std::string DemangledName = getOclOrSpirvBuiltinDemangledName(F.getName());
3054 if (DemangledName.empty())
3055 continue;
3056 // allow only OpGroupAsyncCopy use case at the moment
3057 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(F);
3058 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3059 DemangledName, ST.getPreferredInstructionSet());
3060 if (Opcode != SPIRV::OpGroupAsyncCopy)
3061 continue;
3062 // find pointer arguments
3063 SmallVector<unsigned> Idxs;
3064 for (unsigned OpIdx = 0; OpIdx < F.arg_size(); ++OpIdx) {
3065 Argument *Arg = F.getArg(OpIdx);
3066 if (isPointerTy(Arg->getType()) && !hasPointeeTypeAttr(Arg))
3067 Idxs.push_back(OpIdx);
3068 }
3069 if (!Idxs.size())
3070 continue;
3071 // parse function arguments
3072 LLVMContext &Ctx = F.getContext();
3073 SmallVector<StringRef> TypeStrs;
3074 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3075 if (!TypeStrs.size())
3076 continue;
3077 // find type info for pointer arguments
3078 for (unsigned Idx : Idxs) {
3079 if (Idx >= TypeStrs.size())
3080 continue;
3081 if (Type *ElemTy =
3082 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3083 if (TypedPointerType::isValidElementType(ElemTy) &&
3084 !ElemTy->isTargetExtTy())
3085 FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
3086 }
3087 }
3088}
3089
3090bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3091 bool Changed = false;
3092
3093 parseFunDeclarations(M);
3094 insertConstantsForFPFastMathDefault(M);
3095
3096 TodoType.clear();
3097 for (auto &F : M)
3098 Changed |= runOnFunction(F);
3099
3100 // Specify function parameters after all functions were processed.
3101 for (auto &F : M) {
3102 // check if function parameter types are set
3103 CurrF = &F;
3104 if (!F.isDeclaration() && !F.isIntrinsic()) {
3105 IRBuilder<> B(F.getContext());
3106 processParamTypes(&F, B);
3107 }
3108 }
3109
3110 CanTodoType = false;
3111 Changed |= postprocessTypes(M);
3112
3113 if (HaveFunPtrs)
3114 Changed |= processFunctionPointers(M);
3115
3116 return Changed;
3117}
3118
3119ModulePass *llvm::createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM) {
3120 return new SPIRVEmitIntrinsics(TM);
3121}
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
Definition InstVisitor.h:78
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
static unsigned getPointerOperandIndex()
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:608
Flags
Flags values. These may be or'd together.
static LLVM_ABI MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:104
Metadata * getMetadata() const
Definition Metadata.h:201
ModulePass class - This class is used to implement unstructured interprocedural optimizations and ana...
Definition Pass.h:255
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
bool contains(StringRef key) const
Check if the set contains the given key.
Definition StringSet.h:60
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:619
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition Type.cpp:907
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
Type * getArrayElementType() const
Definition Type.h:408
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
bool isTargetExtTy() const
Return true if this is a target extension type.
Definition Type.h:203
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:285
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:284
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:282
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as a element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
op_range operands()
Definition User.h:293
void setOperand(unsigned i, Value *Val)
Definition User.h:238
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:233
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
bool user_empty() const
Definition Value.h:389
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
const ParentTy * getParent() const
Definition ilist_node.h:34
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
DenseSetImpl< ValueT, DenseMap< ValueT, DenseSetEmpty, ValueInfoT, DenseSetPair< ValueT > >, ValueInfoT > DenseSet
Definition DenseSet.h:264
ElementType
The element type of an SRV or UAV resource.
Definition DXILABI.h:60
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
bool getVacantFunctionName(Module &M, std::string &Name)
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
Definition SPIRVUtils.h:403
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2530
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:367
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2184
FPDecorationId
Definition SPIRVUtils.h:549
bool isNestedPointer(const Type *Ty)
MetadataAsValue * buildMD(Value *Arg)
Definition SPIRVUtils.h:513
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
Definition SPIRVUtils.h:398
bool isVector1(Type *Ty)
Definition SPIRVUtils.h:491
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:361
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
Type * getPointeeTypeByAttr(Argument *Arg)
Definition SPIRVUtils.h:380
bool hasPointeeTypeAttr(Argument *Arg)
Definition SPIRVUtils.h:375
constexpr unsigned BitWidth
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
Definition SPIRVUtils.h:453
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
bool hasInitializer(const GlobalVariable *GV)
Definition SPIRVUtils.h:346
Type * normalizeType(Type *Ty)
Definition SPIRVUtils.h:499
@ Enabled
Convert any .debug_str_offsets tables to DWARF64 if needed.
Definition DWP.h:27
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
PoisonValue * getNormalizedPoisonValue(Type *Ty)
Definition SPIRVUtils.h:509
bool isUntypedPointerTy(const Type *T)
Definition SPIRVUtils.h:356
Type * reconstitutePeeledArrayType(Type *Ty)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)
Definition SPIRVUtils.h:146