//===-- SPIRVEmitIntrinsics.cpp - emit SPIRV intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The pass emits SPIRV intrinsics keeping essential high-level information for
// the translation of LLVM IR to SPIR-V.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVBuiltins.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/IR/TypedPointerType.h"

#include <cassert>
#include <queue>
#include <unordered_set>

// This pass performs the following transformations on the LLVM IR level that
// are required for the subsequent translation to SPIR-V:
// - replaces direct usages of aggregate constants with target-specific
//   intrinsics;
// - replaces aggregate-related instructions (extract/insert, ld/st, etc)
//   with target-specific intrinsics;
// - emits intrinsics for the global variable initializers since IRTranslator
//   doesn't handle them and it's not very convenient to translate them
//   ourselves;
// - emits intrinsics to keep track of the string names assigned to the values;
// - emits intrinsics to keep track of constants (this is necessary to have an
//   LLVM IR constant after the IRTranslation is completed) for their further
//   deduplication;
// - emits intrinsics to keep track of original LLVM types of the values
//   to be able to emit proper SPIR-V types eventually.
//
// TODO: consider removing spv.track.constant in favor of spv.assign.type.
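//
// As a rough illustration (the exact intrinsic shapes are defined in
// llvm/IR/IntrinsicsSPIRV.td; the IR below is a sketch, not verbatim output),
// a use of a constant aggregate such as
//   store %struct.S { i32 1, i32 2 }, ptr %p
// is re-expressed through target-specific intrinsics along the lines of
//   %c = call i32 @llvm.spv.const.composite(i32 1, i32 2)
//   call void @llvm.spv.store(...)
// while spv.assign.type / spv.assign.ptr.type calls record the original LLVM
// types so that proper SPIR-V types can be emitted later.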

using namespace llvm;

namespace llvm::SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
} // namespace llvm::SPIRV

namespace {

class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  SPIRVTargetMachine *TM = nullptr;
  SPIRVGlobalRegistry *GR = nullptr;
  Function *CurrF = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  std::unordered_set<Value *> Named;

  // map of function declarations to <pointer arg index => element type>
  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  // a register of Instructions that don't have a complete type definition
  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
  void insertTodoType(Value *Op) {
    // TODO: add isa<CallInst>(Op) to no-insert
    if (CanTodoType && !isa<GetElementPtrInst>(Op)) {
      auto It = TodoType.try_emplace(Op, true);
      if (It.second)
        ++TodoTypeSz;
    }
  }
  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {
      It->second = false;
      --TodoTypeSz;
    }
  }
  bool isTodoType(Value *Op) {
    if (isa<GetElementPtrInst>(Op))
      return false;
    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;
  }
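  // Note: eraseTodoType() flips the entry to false instead of erasing it, so
  // insert/erase cycles for the same value never invalidate iterators into
  // TodoType; TodoTypeSz tracks the number of live (true) entries.
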
  // a register of Instructions that were visited by deduceOperandElementType()
  // to validate operand types with an instruction
  std::unordered_set<Instruction *> TypeValidated;

  // well-known result types of builtins
  enum WellKnownTypes { Event };

  // deduce element type of untyped pointers
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  void maybeAssignPtrType(Type *&Ty, Value *I, Type *RefTy,
                          bool UnknownElemTypeI8);

  // deduce nested types of composites
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *Ty,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);

  // deduce Types of operands of the Instruction if possible
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);

  void preprocessCompositeConstants(IRBuilder<> &B);
  void preprocessUndefs(IRBuilder<> &B);

  Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
                        bool IsPostprocessing);

  void replaceMemInstrUses(Instruction *Old, Instruction *New, IRBuilder<> &B);
  void processInstrAfterVisit(Instruction *I, IRBuilder<> &B);
  bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
                                bool UnknownElemTypeI8);
  void insertAssignTypeIntrs(Instruction *I, IRBuilder<> &B);
  void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
                                    IRBuilder<> &B);
  void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
                                        Type *ExpectedElementType,
                                        unsigned OperandToReplace,
                                        IRBuilder<> &B);
  void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
  bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
  void insertSpirvDecorations(Instruction *I, IRBuilder<> &B);
  void insertConstantsForFPFastMathDefault(Module &M);
  void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
  void processParamTypes(Function *F, IRBuilder<> &B);
  void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
                                  std::unordered_set<Function *> &FVisited);

  bool deduceOperandElementTypeCalledFunction(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool &Incomplete);
  void deduceOperandElementTypeFunctionPointer(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool IsPostprocessing);
  bool deduceOperandElementTypeFunctionRet(
      Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
      const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
      Type *&KnownElemTy, Value *Op, Function *F);

  CallInst *buildSpvPtrcast(Function *F, Value *Op, Type *ElemTy);
  void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
                                   DenseMap<Function *, CallInst *> Ptrcasts);
  void propagateElemType(Value *Op, Type *ElemTy,
                         DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void
  propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                       DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
                            std::unordered_set<Value *> &Visited,
                            DenseMap<Function *, CallInst *> Ptrcasts);

  void replaceAllUsesWith(Value *Src, Value *Dest, bool DeleteOld = true);
  void replaceAllUsesWithAndErase(IRBuilder<> &B, Instruction *Src,
                                  Instruction *Dest, bool DeleteOld = true);

  void applyDemangledPtrArgTypes(IRBuilder<> &B);

  GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);

  bool runOnFunction(Function &F);
  bool postprocessTypes(Module &M);
  bool processFunctionPointers(Module &M);
  void parseFunDeclarations(Module &M);

  void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);

  // Tries to walk the type accessed by the given GEP instruction.
  // For each nested type access, one of the 2 callbacks is called:
  //  - OnLiteralIndexing when the index is a known constant value.
  //    Parameters:
  //      PointedType: the pointed type resulting from this indexing.
  //      Index: index of the element in the parent type.
  //        If the parent type is an array, this is the index in the array.
  //        If the parent type is a struct, this is the field index.
  //  - OnDynamicIndexing when the index is a non-constant value.
  //    This callback is only called when indexing into an array.
  //    Parameters:
  //      ElementType: the type of the elements stored in the parent array.
  //      Offset: the Value* containing the byte offset into the array.
  // Returns true if an error occurred during the walk, false otherwise.
  bool walkLogicalAccessChain(
      GetElementPtrInst &GEP,
      const std::function<void(Type *PointedType, uint64_t Index)>
          &OnLiteralIndexing,
      const std::function<void(Type *ElementType, Value *Offset)>
          &OnDynamicIndexing);

  // Returns the type accessed using the given GEP instruction by relying
  // on the GEP type.
  // FIXME: GEP types are not supposed to be used to retrieve the pointed
  // type. This must be fixed.
  Type *getGEPType(GetElementPtrInst *GEP);

  // Returns the type accessed using the given GEP instruction by walking
  // the source type using the GEP indices.
  // FIXME: without help from the frontend, this method can neither reliably
  // retrieve the stored type nor robustly determine the depth of the type
  // we are accessing.
  Type *getGEPTypeLogical(GetElementPtrInst *GEP);

  Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);

public:
  static char ID;
  SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
      : ModulePass(ID), TM(TM) {}
  Instruction *visitInstruction(Instruction &I) { return &I; }
  Instruction *visitSwitchInst(SwitchInst &I);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitBitCastInst(BitCastInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitInsertValueInst(InsertValueInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitLoadInst(LoadInst &I);
  Instruction *visitStoreInst(StoreInst &I);
  Instruction *visitAllocaInst(AllocaInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *visitCallInst(CallInst &I);

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);
  }
};

bool isConvergenceIntrinsic(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;

  return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
}

bool expectIgnoredInIRTranslation(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;
  switch (II->getIntrinsicID()) {
  case Intrinsic::invariant_start:
  case Intrinsic::spv_resource_handlefrombinding:
  case Intrinsic::spv_resource_getpointer:
    return true;
  default:
    return false;
  }
}

// Returns the source pointer from `I` ignoring intermediate ptrcasts.
Value *getPointerRoot(Value *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
      Value *V = II->getArgOperand(0);
      return getPointerRoot(V);
    }
  }
  return I;
}

} // namespace

char SPIRVEmitIntrinsics::ID = 0;

INITIALIZE_PASS(SPIRVEmitIntrinsics, "emit-intrinsics", "SPIRV emit intrinsics",
                false, false)

static inline bool isAssignTypeInstr(const Instruction *I) {
  return isa<IntrinsicInst>(I) &&
         cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::spv_assign_type;
}

static bool isMemInstrToReplace(Instruction *I) {
  return isa<LoadInst>(I) || isa<StoreInst>(I) || isa<ExtractValueInst>(I) ||
         isa<InsertValueInst>(I) || isa<AtomicCmpXchgInst>(I);
}

static bool isAggrConstForceInt32(const Value *V) {
  return isa<ConstantArray>(V) || isa<ConstantStruct>(V) ||
         isa<ConstantDataArray>(V) ||
         (isa<ConstantAggregateZero>(V) && !V->getType()->isVectorTy());
}

static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I) {
  if (isa<PHINode>(I))
    B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
  else
    B.SetInsertPoint(I);
}

static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I) {
  B.SetCurrentDebugLocation(I->getDebugLoc());
  if (I->getType()->isVoidTy())
    B.SetInsertPoint(I->getNextNode());
  else
    B.SetInsertPoint(*I->getInsertionPointAfterDef());
}

static bool requireAssignType(Instruction *I) {
  if (const auto *Intr = dyn_cast<IntrinsicInst>(I)) {
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
      return false;
    }
  }
  return true;
}

static inline void reportFatalOnTokenType(const Instruction *I) {
  if (I->getType()->isTokenTy())
    report_fatal_error("A token is encountered but SPIR-V without extensions "
                       "does not support token type",
                       false);
}

static void emitAssignName(Instruction *I, IRBuilder<> &B) {
  if (!I->hasName() || I->getType()->isAggregateType() ||
      expectIgnoredInIRTranslation(I))
    return;

  if (isa<CallBase>(I)) {
    // TODO: this is a temporary workaround meant to prevent inserting internal
    // noise into the generated binary; remove once we rework the entire
    // aggregate removal machinery.
    StringRef Name = I->getName();
    if (Name.starts_with("spv.mutated_callsite"))
      return;
    if (Name.starts_with("spv.named_mutated_callsite"))
      I->setName(Name.substr(Name.rfind('.') + 1));
  }
  reportFatalOnTokenType(I);
  setInsertPointAfterDef(B, I);
  LLVMContext &Ctx = I->getContext();
  std::vector<Value *> Args = {
      I, MetadataAsValue::get(
             Ctx, MDNode::get(Ctx, MDString::get(Ctx, I->getName())))};
  B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
}
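
// For a value %v of type T named "v", the intrinsic emitted above looks
// roughly like (intrinsic name mangling and metadata wrapping elided):
//   call void @llvm.spv.assign.name(T %v, metadata !"v")
// so the original value name survives into the IRTranslator stage.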

void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
                                             bool DeleteOld) {
  GR->replaceAllUsesWith(Src, Dest, DeleteOld);
  // Update incomplete type records if any
  if (isTodoType(Src)) {
    if (DeleteOld)
      eraseTodoType(Src);
    insertTodoType(Dest);
  }
}

void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
                                                     Instruction *Src,
                                                     Instruction *Dest,
                                                     bool DeleteOld) {
  replaceAllUsesWith(Src, Dest, DeleteOld);
  std::string Name = Src->hasName() ? Src->getName().str() : "";
  Src->eraseFromParent();
  if (!Name.empty()) {
    Dest->setName(Name);
    if (Named.insert(Dest).second)
      emitAssignName(Dest, B);
  }
}

static bool IsKernelArgInt8(Function *F, StoreInst *SI) {
  return SI && F->getCallingConv() == CallingConv::SPIR_KERNEL &&
         isPointerTy(SI->getValueOperand()->getType()) &&
         isa<Argument>(SI->getValueOperand());
}

// Maybe restore original function return type.
static inline Type *restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I,
                                       Type *Ty) {
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
      !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
    return Ty;
  if (Type *OriginalTy = GR->findMutated(CI->getCalledFunction()))
    return OriginalTy;
  return Ty;
}

// Reconstruct type with nested element types according to deduced type info.
// Return nullptr if no detailed type info is available.
Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
                                           bool IsPostprocessing) {
  Type *Ty = Op->getType();
  if (auto *OpI = dyn_cast<Instruction>(Op))
    Ty = restoreMutatedType(GR, OpI, Ty);
  if (!isUntypedPointerTy(Ty))
    return Ty;
  // try to find the pointee type
  if (Type *NestedTy = GR->findDeducedElementType(Op))
    return getTypedPointerWrapper(NestedTy, getPointerAddressSpace(Ty));
  // not a pointer according to the type info (e.g., Event object)
  CallInst *CI = GR->findAssignPtrTypeInstr(Op);
  if (CI) {
    MetadataAsValue *MD = cast<MetadataAsValue>(CI->getArgOperand(1));
    return cast<ConstantAsMetadata>(MD->getMetadata())->getType();
  }
  if (UnknownElemTypeI8) {
    if (!IsPostprocessing)
      insertTodoType(Op);
    return getTypedPointerWrapper(IntegerType::getInt8Ty(Op->getContext()),
                                  getPointerAddressSpace(Ty));
  }
  return nullptr;
}

CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
                                               Type *ElemTy) {
  IRBuilder<> B(Op->getContext());
  if (auto *OpI = dyn_cast<Instruction>(Op)) {
    // spv_ptrcast's argument Op denotes an instruction that generates
    // a value, and we may use getInsertionPointAfterDef()
    setInsertPointAfterDef(B, OpI);
  } else if (auto *OpA = dyn_cast<Argument>(Op)) {
    B.SetInsertPointPastAllocas(OpA->getParent());
    B.SetCurrentDebugLocation(DebugLoc());
  } else {
    B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
  }
  Type *OpTy = Op->getType();
  SmallVector<Type *, 2> Types = {OpTy, OpTy};
  SmallVector<Value *, 2> Args = {Op, buildMD(getNormalizedPoisonValue(ElemTy)),
                                  B.getInt32(getPointerAddressSpace(OpTy))};
  CallInst *PtrCasted =
      B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  GR->buildAssignPtr(B, ElemTy, PtrCasted);
  return PtrCasted;
}
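
// The sequence this helper emits is roughly (mangling and the metadata
// encoding of ElemTy elided):
//   %c = call ptr @llvm.spv.ptrcast(ptr %op, metadata <ElemTy poison>, i32 AS)
//   ... @llvm.spv.assign.ptr.type(ptr %c, metadata <ElemTy poison>, i32 AS)
// so the freshly built cast immediately carries its deduced pointee type.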

void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
    Value *Op, Type *ElemTy, Instruction *I,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  Function *F = I->getParent()->getParent();
  CallInst *PtrCastedI = nullptr;
  auto It = Ptrcasts.find(F);
  if (It == Ptrcasts.end()) {
    PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
    Ptrcasts[F] = PtrCastedI;
  } else {
    PtrCastedI = It->second;
  }
  I->replaceUsesOfWith(Op, PtrCastedI);
}

void SPIRVEmitIntrinsics::propagateElemType(
    Value *Op, Type *ElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
  }
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  std::unordered_set<Value *> Visited;
  DenseMap<Function *, CallInst *> Ptrcasts;
  propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
                       std::move(Ptrcasts));
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
    std::unordered_set<Value *> &Visited,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  if (!Visited.insert(Op).second)
    return;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
  }
}

// Sets the element pointer type to the given ValueTy, and tries to refine
// this type further (recursively) from the Operand value, if needed.

Type *
SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
                                      UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
    Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  Type *Ty = ValueTy;
  if (Operand) {
    if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
      if (Type *NestedTy =
              deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
        Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
    } else {
      Ty = deduceNestedTypeHelper(dyn_cast<User>(Operand), Ty, Visited,
                                  UnknownElemTypeI8);
    }
  }
  return Ty;
}

// Traverse User instructions to deduce an element pointer type of the operand.
Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
    Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
  if (!Op || !isPointerTy(Op->getType()) || isa<ConstantPointerNull>(Op) ||
      isa<UndefValue>(Op))
    return nullptr;

  if (auto ElemTy = getPointeeType(Op->getType()))
    return ElemTy;

  // maybe we already know operand's element type
  if (Type *KnownTy = GR->findDeducedElementType(Op))
    return KnownTy;

  for (User *OpU : Op->users()) {
    if (Instruction *Inst = dyn_cast<Instruction>(OpU)) {
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
        return Ty;
    }
  }
  return nullptr;
}

// Implements what we know in advance about intrinsics and builtin calls
// TODO: consider feasibility of this particular case to be generalized by
// encoding knowledge about intrinsics and builtin calls by corresponding
// specification rules
static Type *getPointeeTypeByCallInst(StringRef DemangledName,
                                      Function *CalledF, unsigned OpIdx) {
  if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
       DemangledName.starts_with("printf(")) &&
      OpIdx == 0)
    return IntegerType::getInt8Ty(CalledF->getContext());
  return nullptr;
}

// Deduce and return a successfully deduced Type of the Instruction,
// or nullptr otherwise.
Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
                                                   bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);
}

void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                                             bool UnknownElemTypeI8) {
  if (isUntypedPointerTy(RefTy)) {
    if (!UnknownElemTypeI8)
      return;
    insertTodoType(Op);
  }
  Ty = RefTy;
}

bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
    GetElementPtrInst &GEP,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
  // We only rewrite i8* GEPs. Others should be left as-is.
  // A valid i8* GEP must always have a single index.
  assert(GEP.getSourceElementType() ==
         IntegerType::getInt8Ty(CurrF->getContext()));
  assert(GEP.getNumIndices() == 1);

  auto &DL = CurrF->getDataLayout();
  Value *Src = getPointerRoot(GEP.getPointerOperand());
  Type *CurType = deduceElementType(Src, true);

  Value *Operand = *GEP.idx_begin();
  ConstantInt *CI = dyn_cast<ConstantInt>(Operand);
  if (!CI) {
    ArrayType *AT = dyn_cast<ArrayType>(CurType);
    // Operand is not constant. Either we have an array and accept it, or we
    // give up.
    if (AT)
      OnDynamicIndexing(AT->getElementType(), Operand);
    return AT == nullptr;
  }

  assert(CI);
  uint64_t Offset = CI->getZExtValue();

  do {
    if (ArrayType *AT = dyn_cast<ArrayType>(CurType)) {
      uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
      assert(Offset < AT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset = Offset - (Index * EltTypeSize);
      CurType = AT->getElementType();
      OnLiteralIndexing(CurType, Index);
    } else if (StructType *ST = dyn_cast<StructType>(CurType)) {
      uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
      assert(Offset < StructSize);
      (void)StructSize;
      const auto &STL = DL.getStructLayout(ST);
      unsigned Element = STL->getElementContainingOffset(Offset);
      Offset -= STL->getElementOffset(Element);
      CurType = ST->getElementType(Element);
      OnLiteralIndexing(CurType, Element);
    } else {
      // Vector type indexing should not use GEP.
      // So if we have an index left, something is wrong. Giving up.
      return true;
    }
  } while (Offset > 0);

  return false;
}
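
// Worked example (illustrative): with the deduced source type
//   %S = type { [4 x i32], float }
// and a byte offset of 8, the walk above first resolves struct field 0
// (offset 8 falls inside the [4 x i32] member), invoking
// OnLiteralIndexing([4 x i32], 0), then array element 8 / 4 == 2, invoking
// OnLiteralIndexing(i32, 2); Offset reaches 0 and the walk returns false
// (success).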

Instruction *
SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
  auto &DL = CurrF->getDataLayout();
  IRBuilder<> B(GEP.getParent());
  B.SetInsertPoint(&GEP);

  std::vector<Value *> Indices;
  Indices.push_back(ConstantInt::get(
      IntegerType::getInt32Ty(CurrF->getContext()), 0, /* Signed= */ false));
  walkLogicalAccessChain(
      GEP,
      [&Indices, &B](Type *EltType, uint64_t Index) {
        Indices.push_back(
            ConstantInt::get(B.getInt64Ty(), Index, /* Signed= */ false));
      },
      [&Indices, &B, &DL](Type *EltType, Value *Offset) {
        uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
        Value *Index = B.CreateUDiv(
            Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
                                     /* Signed= */ false));
        Indices.push_back(Index);
      });

  SmallVector<Type *, 2> Types = {GEP.getType(), GEP.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(GEP.isInBounds()));
  Args.push_back(GEP.getOperand(0));
  llvm::append_range(Args, Indices);
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &GEP, NewI);
  return NewI;
}
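
// Continuing the worked example above, this rewrites
//   %q = getelementptr i8, ptr %p, i64 8
// into a sketch like (mangling elided)
//   %q = call ptr @llvm.spv.gep(i1 false, ptr %p, i32 0, i64 0, i64 2)
// i.e. a leading zero index plus one literal index per nested type level.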

Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {

  Type *CurType = GEP->getResultElementType();

  bool Interrupted = walkLogicalAccessChain(
      *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
      [&CurType](Type *EltType, Value *Index) { CurType = EltType; });

  return Interrupted ? GEP->getResultElementType() : CurType;
}

Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
  if (Ref->getSourceElementType() ==
          IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtargetImpl()->isLogicalSPIRV()) {
    return getGEPTypeLogical(Ref);
  }

  Type *Ty = nullptr;
  // TODO: not sure if GetElementPtrInst::getTypeAtIndex() does anything
  // useful here
  if (isNestedPointer(Ref->getSourceElementType())) {
    Ty = Ref->getSourceElementType();
    for (Use &U : drop_begin(Ref->indices()))
      Ty = GetElementPtrInst::getTypeAtIndex(Ty, U.get());
  } else {
    Ty = Ref->getResultElementType();
  }
  return Ty;
}

Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
    Value *I, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8,
    bool IgnoreKnownType) {
  // allow passing nullptr as an argument
  if (!I)
    return nullptr;

  // maybe already known
  if (!IgnoreKnownType)
    if (Type *KnownTy = GR->findDeducedElementType(I))
      return KnownTy;

  // maybe a cycle
  if (!Visited.insert(I).second)
    return nullptr;

  // fallback value in case when we fail to deduce a type
  Type *Ty = nullptr;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<AllocaInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getAllocatedType(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    Ty = getGEPType(Ref);
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    Value *Op = Ref->getPointerOperand();
    Type *KnownTy = GR->findDeducedElementType(Op);
    if (!KnownTy)
      KnownTy = Op->getType();
    if (Type *ElemTy = getPointeeType(KnownTy))
      maybeAssignPtrType(Ty, I, ElemTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GlobalValue>(I)) {
    if (auto *Fn = dyn_cast<Function>(Ref)) {
      Ty = SPIRV::getOriginalFunctionType(*Fn);
      GR->addDeducedElementType(I, Ty);
    } else {
      Ty = deduceElementTypeByValueDeep(
          Ref->getValueType(),
          Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr, Visited,
          UnknownElemTypeI8);
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    Type *RefTy = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
                                          UnknownElemTypeI8);
    maybeAssignPtrType(Ty, I, RefTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<IntToPtrInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getDestTy(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
        isPointerTy(Src) && isPointerTy(Dest))
      Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited,
                                   UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Op = Ref->getNewValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    Value *Op = Ref->getValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<PHINode>(I)) {
    // Pick the most frequently deduced type among the incoming values.
    Type *BestTy = nullptr;
    unsigned MaxN = 1;
    DenseMap<Type *, unsigned> PhiTys;
    for (int i = Ref->getNumIncomingValues() - 1; i >= 0; --i) {
      Ty = deduceElementTypeByUsersDeep(Ref->getIncomingValue(i), Visited,
                                        UnknownElemTypeI8);
      if (!Ty)
        continue;
      auto It = PhiTys.try_emplace(Ty, 1);
      if (!It.second) {
        ++It.first->second;
        if (It.first->second > MaxN) {
          MaxN = It.first->second;
          BestTy = Ty;
        }
      }
    }
    if (BestTy)
      Ty = BestTy;
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    for (Value *Op : {Ref->getTrueValue(), Ref->getFalseValue()}) {
      Ty = deduceElementTypeByUsersDeep(Op, Visited, UnknownElemTypeI8);
      if (Ty)
        break;
    }
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    static StringMap<unsigned> ResTypeByArg = {
        {"to_global", 0},
        {"to_local", 0},
        {"to_private", 0},
        {"__spirv_GenericCastToPtr_ToGlobal", 0},
        {"__spirv_GenericCastToPtr_ToLocal", 0},
        {"__spirv_GenericCastToPtr_ToPrivate", 0},
        {"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
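    // Each entry above maps a demangled builtin name to the index of the call
    // argument whose deduced pointee type also serves as the pointee type of
    // the call result; for these address-space casts it is always argument 0.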
    // TODO: maybe improve performance by caching demangled names

    auto *II = dyn_cast<IntrinsicInst>(CI);
    if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
      auto *HandleType = cast<TargetExtType>(II->getOperand(0)->getType());
      if (HandleType->getTargetExtName() == "spirv.Image" ||
          HandleType->getTargetExtName() == "spirv.SignedImage") {
        for (User *U : II->users()) {
          Ty = cast<Instruction>(U)->getAccessType();
          if (Ty)
            break;
        }
      } else if (HandleType->getTargetExtName() == "spirv.VulkanBuffer") {
        // This call is supposed to index into an array
        Ty = HandleType->getTypeParameter(0);
        if (Ty->isArrayTy())
          Ty = Ty->getArrayElementType();
        else {
          assert(Ty && Ty->isStructTy());
          uint32_t Index = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
          Ty = cast<StructType>(Ty)->getElementType(Index);
        }
      } else {
        llvm_unreachable("Unknown handle type for spv_resource_getpointer.");
      }
    } else if (II && II->getIntrinsicID() ==
                         Intrinsic::spv_generic_cast_to_ptr_explicit) {
      Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
                                   UnknownElemTypeI8);
    } else if (Function *CalledF = CI->getCalledFunction()) {
      std::string DemangledName =
          getOclOrSpirvBuiltinDemangledName(CalledF->getName());
      if (DemangledName.length() > 0)
        DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
      auto AsArgIt = ResTypeByArg.find(DemangledName);
      if (AsArgIt != ResTypeByArg.end())
        Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
                                     Visited, UnknownElemTypeI8);
      else if (Type *KnownRetTy = GR->findDeducedElementType(CalledF))
        Ty = KnownRetTy;
    }
  }

  // remember the found relationship
  if (Ty && !IgnoreKnownType) {
    // specify nested types if needed, otherwise return unchanged
    GR->addDeducedElementType(I, normalizeType(Ty));
  }

  return Ty;
}

// Re-create a type of the value if it has untyped pointer fields, also nested.
// Return the original value type if no corrections of untyped pointer
// information is found or needed.
Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceNestedTypeHelper(U, U->getType(), Visited, UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
    User *U, Type *OrigTy, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  if (!U)
    return OrigTy;

  // maybe already known
  if (Type *KnownTy = GR->findDeducedCompositeType(U))
    return KnownTy;

  // maybe a cycle
  if (!Visited.insert(U).second)
    return OrigTy;

  if (isa<StructType>(OrigTy)) {
    SmallVector<Type *> Tys;
    bool Change = false;
    for (unsigned i = 0; i < U->getNumOperands(); ++i) {
      Value *Op = U->getOperand(i);
      assert(Op && "Operands should not be null.");
      Type *OpTy = Op->getType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      Tys.push_back(Ty);
      Change |= Ty != OpTy;
    }
    if (Change) {
      Type *NewTy = StructType::create(Tys);
      GR->addDeducedCompositeType(U, NewTy);
      return NewTy;
    }
  } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = ArrTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  } else if (auto *VecTy = dyn_cast<VectorType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = VecTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  }

  return OrigTy;
}

Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
  if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
    return Ty;
  if (!UnknownElemTypeI8)
    return nullptr;
  insertTodoType(I);
  return IntegerType::getInt8Ty(I->getContext());
}
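
// Fallback contract: when nothing better can be deduced and UnknownElemTypeI8
// is set, the value is registered as a TodoType and i8 is used as a
// provisional pointee type, to be refined later during postprocessing.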

static Type *getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I,
                             Value *PointerOperand) {
  Type *PointeeTy = GR->findDeducedElementType(PointerOperand);
  if (PointeeTy && !isUntypedPointerTy(PointeeTy))
    return nullptr;
  auto *PtrTy = dyn_cast<PointerType>(I->getType());
  if (!PtrTy)
    return I->getType();
  if (Type *NestedTy = GR->findDeducedElementType(I))
    return getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
  return nullptr;
}

// Try to deduce element type for a call base. Returns false if this is an
// indirect function invocation, and true otherwise.
bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool &Incomplete) {
  Function *CalledF = CI->getCalledFunction();
  if (!CalledF)
    return false;
  std::string DemangledName =
      getOclOrSpirvBuiltinDemangledName(CalledF->getName());
  if (DemangledName.length() > 0 &&
      !StringRef(DemangledName).starts_with("llvm.")) {
    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*CalledF);
    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
        DemangledName, ST.getPreferredInstructionSet());
    if (Opcode == SPIRV::OpGroupAsyncCopy) {
      for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2; ++i) {
        Value *Op = CI->getArgOperand(i);
        if (!isPointerTy(Op->getType()))
          continue;
        ++PtrCnt;
        if (Type *ElemTy = GR->findDeducedElementType(Op))
          KnownElemTy = ElemTy; // src will rewrite dest if both are defined
        Ops.push_back(std::make_pair(Op, i));
      }
    } else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
      if (CI->arg_size() == 0)
        return true;
      Value *Op = CI->getArgOperand(0);
      if (!isPointerTy(Op->getType()))
        return true;
      switch (Opcode) {
      case SPIRV::OpAtomicFAddEXT:
      case SPIRV::OpAtomicFMinEXT:
      case SPIRV::OpAtomicFMaxEXT:
      case SPIRV::OpAtomicLoad:
      case SPIRV::OpAtomicCompareExchangeWeak:
      case SPIRV::OpAtomicCompareExchange:
      case SPIRV::OpAtomicExchange:
      case SPIRV::OpAtomicIAdd:
      case SPIRV::OpAtomicISub:
      case SPIRV::OpAtomicOr:
      case SPIRV::OpAtomicXor:
      case SPIRV::OpAtomicAnd:
      case SPIRV::OpAtomicUMin:
      case SPIRV::OpAtomicUMax:
      case SPIRV::OpAtomicSMin:
      case SPIRV::OpAtomicSMax: {
        KnownElemTy = isPointerTy(CI->getType()) ? getAtomicElemTy(GR, CI, Op)
                                                 : CI->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      case SPIRV::OpAtomicStore: {
        if (CI->arg_size() < 4)
          return true;
        Value *ValOp = CI->getArgOperand(3);
        KnownElemTy = isPointerTy(ValOp->getType())
                          ? getAtomicElemTy(GR, CI, Op)
                          : ValOp->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      }
    }
  }
  return true;
}

// Try to deduce element type for a function pointer.
void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool IsPostprocessing) {
  Value *Op = CI->getCalledOperand();
  if (!Op || !isPointerTy(Op->getType()))
    return;
  Ops.push_back(std::make_pair(Op, std::numeric_limits<unsigned>::max()));
  FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
  bool IsNewFTy = false, IsIncomplete = false;
  SmallVector<Type *, 4> ArgTys;
  for (auto &&[ParmIdx, Arg] : llvm::enumerate(CI->args())) {
    Type *ArgTy = Arg->getType();
    if (ArgTy->isPointerTy()) {
      if (Type *ElemTy = GR->findDeducedElementType(Arg)) {
        IsNewFTy = true;
        ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
        if (isTodoType(Arg))
          IsIncomplete = true;
      } else {
        IsIncomplete = true;
      }
    } else {
      ArgTy = FTy->getFunctionParamType(ParmIdx);
    }
    ArgTys.push_back(ArgTy);
  }
  Type *RetTy = FTy->getReturnType();
  if (CI->getType()->isPointerTy()) {
    if (Type *ElemTy = GR->findDeducedElementType(CI)) {
      IsNewFTy = true;
      RetTy =
          getTypedPointerWrapper(ElemTy, getPointerAddressSpace(CI->getType()));
      if (isTodoType(CI))
        IsIncomplete = true;
    } else {
      IsIncomplete = true;
    }
  }
  if (!IsPostprocessing && IsIncomplete)
    insertTodoType(Op);
  KnownElemTy =
      IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
}

bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
    Type *&KnownElemTy, Value *Op, Function *F) {
  KnownElemTy = GR->findDeducedElementType(F);
  if (KnownElemTy)
    return false;
  if (Type *OpElemTy = GR->findDeducedElementType(Op)) {
    OpElemTy = normalizeType(OpElemTy);
    GR->addDeducedElementType(F, OpElemTy);
    GR->addReturnType(
        F, TypedPointerType::get(OpElemTy,
                                 getPointerAddressSpace(F->getReturnType())));
    // non-recursive update of types in function uses
    DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(I, Op)};
    for (User *U : F->users()) {
      CallInst *CI = dyn_cast<CallInst>(U);
      if (!CI || CI->getCalledFunction() != F)
        continue;
      if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(CI)) {
        if (Type *PrevElemTy = GR->findDeducedElementType(CI)) {
          GR->updateAssignType(AssignCI, CI,
                               getNormalizedPoisonValue(OpElemTy));
          propagateElemType(CI, PrevElemTy, VisitedSubst);
        }
      }
    }
    // Non-recursive update of types in the function's incomplete returns.
    // This may happen just once per function; the latch is the pair of
    // findDeducedElementType(F) / addDeducedElementType(F, ...).
    // With or without the latch it is a non-recursive call due to
    // IncompleteRets set to nullptr in this call.
    if (IncompleteRets)
      for (Instruction *IncompleteRetI : *IncompleteRets)
        deduceOperandElementType(IncompleteRetI, nullptr, AskOps,
                                 IsPostprocessing);
  } else if (IncompleteRets) {
    IncompleteRets->insert(I);
  }
  TypeValidated.insert(I);
  return true;
}

// If the Instruction has Pointer operands with unresolved types, this function
// tries to deduce them. If the Instruction has Pointer operands with known
// types which differ from expected, this function tries to insert a bitcast to
// resolve the issue.
void SPIRVEmitIntrinsics::deduceOperandElementType(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing) {
  SmallVector<std::pair<Value *, unsigned>> Ops;
  Type *KnownElemTy = nullptr;
  bool Incomplete = false;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<PHINode>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumIncomingValues(); i++) {
      Value *Op = Ref->getIncomingValue(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getPointerOperand(), 0));
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (!isPointerTy(I->getType()))
      return;
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getOperand(0), 0));
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    if (GR->findDeducedElementType(Ref->getPointerOperand()))
      return;
    KnownElemTy = Ref->getSourceElementType();
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 GetElementPtrInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    KnownElemTy = I->getType();
    if (isUntypedPointerTy(KnownElemTy))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 LoadInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<StoreInst>(I)) {
    if (!(KnownElemTy =
              reconstructType(Ref->getValueOperand(), false, IsPostprocessing)))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 StoreInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicCmpXchgInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicRMWInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumOperands(); i++) {
      Value *Op = Ref->getOperand(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<ReturnInst>(I)) {
    if (!isPointerTy(CurrF->getReturnType()))
      return;
    Value *Op = Ref->getReturnValue();
    if (!Op)
      return;
    if (deduceOperandElementTypeFunctionRet(I, IncompleteRets, AskOps,
                                            IsPostprocessing, KnownElemTy, Op,
                                            CurrF))
      return;
    Incomplete = isTodoType(CurrF);
    Ops.push_back(std::make_pair(Op, 0));
  } else if (auto *Ref = dyn_cast<ICmpInst>(I)) {
    if (!isPointerTy(Ref->getOperand(0)->getType()))
      return;
    Value *Op0 = Ref->getOperand(0);
    Value *Op1 = Ref->getOperand(1);
    bool Incomplete0 = isTodoType(Op0);
    bool Incomplete1 = isTodoType(Op1);
    Type *ElemTy1 = GR->findDeducedElementType(Op1);
    Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
                        ? nullptr
                        : GR->findDeducedElementType(Op0);
    if (ElemTy0) {
      KnownElemTy = ElemTy0;
      Incomplete = Incomplete0;
      Ops.push_back(std::make_pair(Op1, 1));
    } else if (ElemTy1) {
      KnownElemTy = ElemTy1;
      Incomplete = Incomplete1;
      Ops.push_back(std::make_pair(Op0, 0));
    }
  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
    if (!CI->isIndirectCall())
      deduceOperandElementTypeCalledFunction(CI, Ops, KnownElemTy, Incomplete);
    else if (HaveFunPtrs)
      deduceOperandElementTypeFunctionPointer(CI, Ops, KnownElemTy,
                                              IsPostprocessing);
  }

  // There is not enough info to deduce types, or all is valid.
  if (!KnownElemTy || Ops.size() == 0)
    return;

  LLVMContext &Ctx = CurrF->getContext();
  IRBuilder<> B(Ctx);
  for (auto &OpIt : Ops) {
    Value *Op = OpIt.first;
    if (AskOps && !AskOps->contains(Op))
      continue;
    Type *AskTy = nullptr;
    CallInst *AskCI = nullptr;
    if (IsPostprocessing && AskOps) {
      AskTy = GR->findDeducedElementType(Op);
      AskCI = GR->findAssignPtrTypeInstr(Op);
      assert(AskTy && AskCI);
    }
    Type *Ty = AskTy ? AskTy : GR->findDeducedElementType(Op);
    if (Ty == KnownElemTy)
      continue;
    Value *OpTyVal = getNormalizedPoisonValue(KnownElemTy);
    Type *OpTy = Op->getType();
    if (Op->hasUseList() &&
        (!Ty || AskTy || isUntypedPointerTy(Ty) || isTodoType(Op))) {
      Type *PrevElemTy = GR->findDeducedElementType(Op);
      GR->addDeducedElementType(Op, normalizeType(KnownElemTy));
      // check if KnownElemTy is complete
      if (!Incomplete)
        eraseTodoType(Op);
      else if (!IsPostprocessing)
        insertTodoType(Op);
      // check if there is an existing Intrinsic::spv_assign_ptr_type
      // instruction
      CallInst *AssignCI = AskCI ? AskCI : GR->findAssignPtrTypeInstr(Op);
      if (AssignCI == nullptr) {
        Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
        setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
        CallInst *CI =
            buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
                            {B.getInt32(getPointerAddressSpace(OpTy))}, B);
        GR->addAssignPtrTypeInstr(Op, CI);
      } else {
        GR->updateAssignType(AssignCI, Op, OpTyVal);
        DenseSet<std::pair<Value *, Value *>> VisitedSubst{
            std::make_pair(I, Op)};
        propagateElemTypeRec(Op, KnownElemTy, PrevElemTy, VisitedSubst);
      }
    } else {
      eraseTodoType(Op);
      CallInst *PtrCastI =
          buildSpvPtrcast(I->getParent()->getParent(), Op, KnownElemTy);
      if (OpIt.second == std::numeric_limits<unsigned>::max())
        dyn_cast<CallInst>(I)->setCalledOperand(PtrCastI);
      else
        I->setOperand(OpIt.second, PtrCastI);
    }
  }
  TypeValidated.insert(I);
}
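
// Two repair strategies are used above: when the operand still has a
// rewritable use list and its recorded pointee type is absent, untyped, or
// incomplete, the deduced type is recorded and propagated through the
// existing assign-type intrinsics; otherwise a local spv_ptrcast is emitted
// so that only this instruction observes the expected pointee type.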

void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
                                              Instruction *New,
                                              IRBuilder<> &B) {
  while (!Old->user_empty()) {
    auto *U = Old->user_back();
    if (isAssignTypeInstr(U)) {
      B.SetInsertPoint(U);
      SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
      CallInst *AssignCI =
          B.CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
      GR->addAssignPtrTypeInstr(New, AssignCI);
      U->eraseFromParent();
    } else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
               isa<CallInst>(U)) {
      U->replaceUsesOfWith(Old, New);
    } else {
      llvm_unreachable("illegal aggregate intrinsic user");
    }
  }
  New->copyMetadata(*Old);
  Old->eraseFromParent();
}

void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    Instruction *I = Worklist.front();
    bool BPrepared = false;
    Worklist.pop();

    for (auto &Op : I->operands()) {
      auto *AggrUndef = dyn_cast<UndefValue>(Op);
      if (!AggrUndef || !Op->getType()->isAggregateType())
        continue;

      if (!BPrepared) {
        setInsertPointSkippingPhis(B, I);
        BPrepared = true;
      }
      auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {});
      Worklist.push(IntrUndef);
      I->replaceUsesOfWith(Op, IntrUndef);
      AggrConsts[IntrUndef] = AggrUndef;
      AggrConstTypes[IntrUndef] = AggrUndef->getType();
    }
  }
}

void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    auto *I = Worklist.front();
    bool IsPhi = isa<PHINode>(I), BPrepared = false;
    assert(I);
    bool KeepInst = false;
    for (const auto &Op : I->operands()) {
      Constant *AggrConst = nullptr;
      Type *ResTy = nullptr;
      if (auto *COp = dyn_cast<ConstantVector>(Op)) {
        AggrConst = COp;
        ResTy = COp->getType();
      } else if (auto *COp = dyn_cast<ConstantArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantStruct>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantDataArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantAggregateZero>(Op)) {
        AggrConst = COp;
        ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
      }
      if (AggrConst) {
        SmallVector<Value *> Args;
        if (auto *COp = dyn_cast<ConstantDataSequential>(Op))
          for (unsigned i = 0; i < COp->getNumElements(); ++i)
            Args.push_back(COp->getElementAsConstant(i));
        else
          llvm::append_range(Args, AggrConst->operands());
        if (!BPrepared) {
          IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
                : B.SetInsertPoint(I);
          BPrepared = true;
        }
        auto *CI =
            B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {Args});
        Worklist.push(CI);
        I->replaceUsesOfWith(Op, CI);
        KeepInst = true;
        AggrConsts[CI] = AggrConst;
        AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst, false);
      }
    }
    if (!KeepInst)
      Worklist.pop();
  }
}
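
// For illustration: a constant aggregate operand such as
//   store %struct.S { i32 1, i32 2 }, ptr %p
// is rewritten along the lines of
//   %c = call i32 @llvm.spv.const.composite(i32 1, i32 2)
//   store ... %c ...
// which only stays well-formed because the aggregate-removal machinery later
// rewrites the user instruction itself; the original constant and its deduced
// type are remembered in AggrConsts / AggrConstTypes.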

static void createDecorationIntrinsic(Instruction *I, MDNode *Node,
                                      IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  setInsertPointAfterDef(B, I);
  B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                    {I, MetadataAsValue::get(Ctx, MDNode::get(Ctx, {Node}))});
}

static void createRoundingModeDecoration(Instruction *I,
                                         unsigned RoundingModeDeco,
                                         IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *RoundingModeNode = MDNode::get(
      Ctx,
      {ConstantAsMetadata::get(
           ConstantInt::get(Int32Ty, SPIRV::Decoration::FPRoundingMode)),
       ConstantAsMetadata::get(ConstantInt::get(Int32Ty, RoundingModeDeco))});
  createDecorationIntrinsic(I, RoundingModeNode, B);
}

static void createSaturatedConversionDecoration(Instruction *I,
                                                IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *SaturatedConversionNode =
      MDNode::get(Ctx, {ConstantAsMetadata::get(ConstantInt::get(
                           Int32Ty, SPIRV::Decoration::SaturatedConversion))});
  createDecorationIntrinsic(I, SaturatedConversionNode, B);
}

static void createSaturatedConversionDecorations(Instruction *I,
                                                 IRBuilder<> &B) {
  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (Function *Fu = CI->getCalledFunction()) {
      if (Fu->isIntrinsic()) {
        unsigned const int IntrinsicId = Fu->getIntrinsicID();
        switch (IntrinsicId) {
        case Intrinsic::fptosi_sat:
        case Intrinsic::fptoui_sat:
          createSaturatedConversionDecoration(CI, B);
          break;
        default:
          break;
        }
      }
    }
  }
}

Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) {
  if (!Call.isInlineAsm())
    return &Call;

  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  LLVMContext &Ctx = CurrF->getContext();

  Constant *TyC = UndefValue::get(IA->getFunctionType());
  MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
  SmallVector<Value *> Args = {
      buildMD(TyC),
      MetadataAsValue::get(Ctx, MDNode::get(Ctx, ConstraintString))};
  for (unsigned OpIdx = 0; OpIdx < Call.arg_size(); OpIdx++)
    Args.push_back(Call.getArgOperand(OpIdx));

  IRBuilder<> B(Call.getParent());
  B.SetInsertPoint(&Call);
  B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args});
  return &Call;
}

// Use a tip about rounding mode to create a decoration.
void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
                                          IRBuilder<> &B) {
  std::optional<RoundingMode> RM = FPI->getRoundingMode();
  if (!RM.has_value())
    return;
  unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
  switch (RM.value()) {
  default:
    // ignore unknown rounding modes
    break;
  case RoundingMode::NearestTiesToEven:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
    break;
  case RoundingMode::TowardNegative:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
    break;
  case RoundingMode::TowardPositive:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
    break;
  case RoundingMode::TowardZero:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
    break;
  case RoundingMode::Dynamic:
  case RoundingMode::NearestTiesToAway:
    // TODO: check if supported
    break;
  }
  if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
    return;
  // Convert the tip about rounding mode into a decoration record.
  createRoundingModeDecoration(FPI, RoundingModeDeco, B);
}
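
// Example: for a constrained intrinsic such as
//   call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b,
//        metadata !"round.towardzero", metadata !"fpexcept.strict")
// the "round.towardzero" tip becomes an FPRoundingMode RTZ decoration record
// attached to the call via the spv_assign_decoration intrinsic.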

Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
  BasicBlock *ParentBB = I.getParent();
  IRBuilder<> B(ParentBB);
  B.SetInsertPoint(&I);
  SmallVector<Value *, 4> Args;
  SmallVector<BasicBlock *> BBCases;
  for (auto &Op : I.operands()) {
    if (Op.get()->getType()->isSized()) {
      Args.push_back(Op);
    } else if (BasicBlock *BB = dyn_cast<BasicBlock>(Op.get())) {
      BBCases.push_back(BB);
      Args.push_back(BlockAddress::get(BB->getParent(), BB));
    } else {
      report_fatal_error("Unexpected switch operand");
    }
  }
  CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
                                     {I.getOperand(0)->getType()}, {Args});
  // remove switch to avoid its unneeded and undesirable unwrap into branches
  // and conditions
  replaceAllUsesWith(&I, NewI);
  I.eraseFromParent();
  // insert artificial and temporary instruction to preserve valid CFG,
  // it will be removed after IR translation pass
  B.SetInsertPoint(ParentBB);
  IndirectBrInst *BrI = B.CreateIndirectBr(
      Constant::getNullValue(PointerType::getUnqual(ParentBB->getContext())),
      BBCases.size());
  for (BasicBlock *BBCase : BBCases)
    BrI->addDestination(BBCase);
  return BrI;
}

static bool isFirstIndexZero(const GetElementPtrInst *GEP) {
  if (GEP->getNumIndices() == 0)
    return false;
  if (const auto *CI = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
    return CI->getZExtValue() == 0;
  }
  return false;
}
1599
1600Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
1601 IRBuilder<> B(I.getParent());
1602 B.SetInsertPoint(&I);
1603
1605 // Logical SPIR-V cannot use the OpPtrAccessChain instruction. If the first
1606 // index of the GEP is not 0, then we need to try to adjust it.
1607 //
1608 // If the GEP is doing byte addressing, try to rebuild the full access chain
1609 // from the type of the pointer.
1610 if (I.getSourceElementType() ==
1611 IntegerType::getInt8Ty(CurrF->getContext())) {
1612 return buildLogicalAccessChainFromGEP(I);
1613 }
1614
1615 // Look for the array-to-pointer decay. If this is the pattern,
1616 // we can adjust the types and prepend a 0 to the indices.
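// Illustrative example (added; not from the original source). If %p is
// deduced to point to [4 x float] and the source element type is float:
//   getelementptr float, ptr %p, i64 %i
// is emitted as the spv_gep equivalent of:
//   getelementptr [4 x float], ptr %p, i64 0, i64 %i
// so that logical SPIR-V can lower it with OpAccessChain.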
1617 Value *PtrOp = I.getPointerOperand();
1618 Type *SrcElemTy = I.getSourceElementType();
1619 Type *DeducedPointeeTy = deduceElementType(PtrOp, true);
1620
1621 if (auto *ArrTy = dyn_cast<ArrayType>(DeducedPointeeTy)) {
1622 if (ArrTy->getElementType() == SrcElemTy) {
1623 SmallVector<Value *> NewIndices;
1624 Type *FirstIdxType = I.getOperand(1)->getType();
1625 NewIndices.push_back(ConstantInt::get(FirstIdxType, 0));
1626 for (Value *Idx : I.indices())
1627 NewIndices.push_back(Idx);
1628
1629 SmallVector<Type *, 2> Types = {I.getType(), I.getPointerOperandType()};
1630 SmallVector<Value *, 4> Args;
1631 Args.push_back(B.getInt1(I.isInBounds()));
1632 Args.push_back(I.getPointerOperand());
1633 Args.append(NewIndices.begin(), NewIndices.end());
1634
1635 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
1636 replaceAllUsesWithAndErase(B, &I, NewI);
1637 return NewI;
1638 }
1639 }
1640 }
1641
1642 SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
1643 SmallVector<Value *, 4> Args;
1644 Args.push_back(B.getInt1(I.isInBounds()));
1645 llvm::append_range(Args, I.operands());
1646 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
1647 replaceAllUsesWithAndErase(B, &I, NewI);
1648 return NewI;
1649}
1650
1651Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
1652 IRBuilder<> B(I.getParent());
1653 B.SetInsertPoint(&I);
1654 Value *Source = I.getOperand(0);
1655
1656 // SPIR-V, contrary to LLVM 17+ IR, supports bitcasts between pointers of
1657 // varying element types. In the case of IR coming from older versions of
1658 // LLVM, such bitcasts do not provide sufficient information and should be
1659 // skipped here, to be handled in insertPtrCastOrAssignTypeInstr.
1660 if (isPointerTy(I.getType())) {
1661 replaceAllUsesWith(&I, Source);
1662 I.eraseFromParent();
1663 return nullptr;
1664 }
1665
1666 SmallVector<Type *, 2> Types = {I.getType(), Source->getType()};
1667 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1668 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
1669 replaceAllUsesWithAndErase(B, &I, NewI);
1670 return NewI;
1671}
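// Illustrative example (added; not from the original source). A
// pointer-to-pointer bitcast like:
//   %q = bitcast ptr %p to ptr
// carries no element-type information with opaque pointers, so %q is simply
// replaced by %p, while a value bitcast like:
//   %f = bitcast i32 %x to float
// is rewritten into a call to the spv_bitcast intrinsic.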
1672
1673void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1674 TargetExtType *AssignedType, Value *V, IRBuilder<> &B) {
1675 Type *VTy = V->getType();
1676
1677 // A couple of sanity checks.
1678 assert((isPointerTy(VTy)) && "Expect a pointer type!");
1679 if (Type *ElemTy = getPointeeType(VTy))
1680 if (ElemTy != AssignedType)
1681 report_fatal_error("Unexpected pointer element type!");
1682
1683 CallInst *AssignCI = GR->findAssignPtrTypeInstr(V);
1684 if (!AssignCI) {
1685 GR->buildAssignType(B, AssignedType, V);
1686 return;
1687 }
1688
1689 Type *CurrentType =
1690 cast<ConstantAsMetadata>(
1691 cast<MetadataAsValue>(AssignCI->getOperand(1))->getMetadata())
1692 ->getType();
1693 if (CurrentType == AssignedType)
1694 return;
1695
1696 // Builtin types cannot be redeclared or cast.
1697 if (CurrentType->isTargetExtTy())
1698 report_fatal_error("Type mismatch " + CurrentType->getTargetExtName() +
1699 "/" + AssignedType->getTargetExtName() +
1700 " for value " + V->getName(),
1701 false);
1702
1703 // Our previous guess about the type seems to be wrong; update the
1704 // inferred type according to the new, more precise type information.
1705 GR->updateAssignType(AssignCI, V, getNormalizedPoisonValue(AssignedType));
1706}
1707
1708void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1709 Instruction *I, Value *Pointer, Type *ExpectedElementType,
1710 unsigned OperandToReplace, IRBuilder<> &B) {
1711 TypeValidated.insert(I);
1712
1713 // Do not emit spv_ptrcast if Pointer's element type is ExpectedElementType
1714 Type *PointerElemTy = deduceElementTypeHelper(Pointer, false);
1715 if (PointerElemTy == ExpectedElementType ||
1716 isEquivalentTypes(PointerElemTy, ExpectedElementType))
1717 return;
1718
1719 setInsertPointSkippingPhis(B, I);
1720 Value *ExpectedElementVal = getNormalizedPoisonValue(ExpectedElementType);
1721 MetadataAsValue *VMD = buildMD(ExpectedElementVal);
1722 unsigned AddressSpace = getPointerAddressSpace(Pointer->getType());
1723 bool FirstPtrCastOrAssignPtrType = true;
1724
1725 // Do not emit new spv_ptrcast if equivalent one already exists or when
1726 // spv_assign_ptr_type already targets this pointer with the same element
1727 // type.
1728 if (Pointer->hasUseList()) {
1729 for (auto User : Pointer->users()) {
1730 auto *II = dyn_cast<IntrinsicInst>(User);
1731 if (!II ||
1732 (II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1733 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1734 II->getOperand(0) != Pointer)
1735 continue;
1736
1737 // There is some spv_ptrcast/spv_assign_ptr_type already targeting this
1738 // pointer.
1739 FirstPtrCastOrAssignPtrType = false;
1740 if (II->getOperand(1) != VMD ||
1741 dyn_cast<ConstantInt>(II->getOperand(2))->getSExtValue() !=
1742 AddressSpace)
1743 continue;
1744
1745 // The spv_ptrcast/spv_assign_ptr_type targeting this pointer is of the
1746 // same element type and address space.
1747 if (II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1748 return;
1749
1750 // This must be a spv_ptrcast; do not emit a new one if this one is in the
1751 // same BB as I. Otherwise, keep searching for other spv_ptrcast/spv_assign_ptr_type.
1752 if (II->getParent() != I->getParent())
1753 continue;
1754
1755 I->setOperand(OperandToReplace, II);
1756 return;
1757 }
1758 }
1759
1760 if (isa<Instruction>(Pointer) || isa<Argument>(Pointer)) {
1761 if (FirstPtrCastOrAssignPtrType) {
1762 // If this would be the first spv_ptrcast, do not emit spv_ptrcast and
1763 // emit spv_assign_ptr_type instead.
1764 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1765 return;
1766 } else if (isTodoType(Pointer)) {
1767 eraseTodoType(Pointer);
1768 if (!isa<CallInst>(Pointer) && !isa<GetElementPtrInst>(Pointer)) {
1769 // If this wouldn't be the first spv_ptrcast but the existing type info is
1770 // incomplete, update the spv_assign_ptr_type arguments.
1771 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Pointer)) {
1772 Type *PrevElemTy = GR->findDeducedElementType(Pointer);
1773 assert(PrevElemTy);
1774 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1775 std::make_pair(I, Pointer)};
1776 GR->updateAssignType(AssignCI, Pointer, ExpectedElementVal);
1777 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1778 } else {
1779 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1780 }
1781 return;
1782 }
1783 }
1784 }
1785
1786 // Emit spv_ptrcast
1787 SmallVector<Type *, 2> Types = {Pointer->getType(), Pointer->getType()};
1788 SmallVector<Value *, 2> Args = {Pointer, VMD, B.getInt32(AddressSpace)};
1789 auto *PtrCastI = B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
1790 I->setOperand(OperandToReplace, PtrCastI);
1791 // We need to set up a pointee type for the newly created spv_ptrcast.
1792 GR->buildAssignPtr(B, ExpectedElementType, PtrCastI);
1793}
1794
1795void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
1796 IRBuilder<> &B) {
1797 // Handle basic instructions:
1798 StoreInst *SI = dyn_cast<StoreInst>(I);
1799 if (IsKernelArgInt8(CurrF, SI)) {
1800 replacePointerOperandWithPtrCast(
1801 I, SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->getContext()),
1802 0, B);
1803 }
1804 if (SI) {
1805 Value *Op = SI->getValueOperand();
1806 Value *Pointer = SI->getPointerOperand();
1807 Type *OpTy = Op->getType();
1808 if (auto *OpI = dyn_cast<Instruction>(Op))
1809 OpTy = restoreMutatedType(GR, OpI, OpTy);
1810 if (OpTy == Op->getType())
1811 OpTy = deduceElementTypeByValueDeep(OpTy, Op, false);
1812 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 1, B);
1813 return;
1814 }
1815 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1816 Value *Pointer = LI->getPointerOperand();
1817 Type *OpTy = LI->getType();
1818 if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
1819 if (Type *ElemTy = GR->findDeducedElementType(LI)) {
1820 OpTy = getTypedPointerWrapper(ElemTy, PtrTy->getAddressSpace());
1821 } else {
1822 Type *NewOpTy = OpTy;
1823 OpTy = deduceElementTypeByValueDeep(OpTy, LI, false);
1824 if (OpTy == NewOpTy)
1825 insertTodoType(Pointer);
1826 }
1827 }
1828 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1829 return;
1830 }
1831 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
1832 Value *Pointer = GEPI->getPointerOperand();
1833 Type *OpTy = nullptr;
1834
1835 // Logical SPIR-V is not allowed to use Op*PtrAccessChain instructions. If
1836 // the first index is 0, then we can trivially lower to OpAccessChain. If
1837 // not we need to try to rewrite the GEP. We avoid adding a pointer cast at
1838 // this time, and will rewrite the GEP when visiting it.
1839 if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(GEPI)) {
1840 return;
1841 }
1842
1843 // In all cases, fall back to the GEP type if type scavenging failed.
1844 if (!OpTy)
1845 OpTy = GEPI->getSourceElementType();
1846
1847 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1848 if (isNestedPointer(OpTy))
1849 insertTodoType(Pointer);
1850 return;
1851 }
1852
1853 // TODO: review and merge with existing logics:
1854 // Handle calls to builtins (non-intrinsics):
1855 CallInst *CI = dyn_cast<CallInst>(I);
1856 if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
1857 !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
1858 return;
1859
1860 // collect information about formal parameter types
1861 std::string DemangledName =
1862 getOclOrSpirvBuiltinDemangledName(CI->getCalledFunction()->getName());
1863 Function *CalledF = CI->getCalledFunction();
1864 SmallVector<Type *, 4> CalledArgTys;
1865 bool HaveTypes = false;
1866 for (unsigned OpIdx = 0; OpIdx < CalledF->arg_size(); ++OpIdx) {
1867 Argument *CalledArg = CalledF->getArg(OpIdx);
1868 Type *ArgType = CalledArg->getType();
1869 if (!isPointerTy(ArgType)) {
1870 CalledArgTys.push_back(nullptr);
1871 } else if (Type *ArgTypeElem = getPointeeType(ArgType)) {
1872 CalledArgTys.push_back(ArgTypeElem);
1873 HaveTypes = true;
1874 } else {
1875 Type *ElemTy = GR->findDeducedElementType(CalledArg);
1876 if (!ElemTy && hasPointeeTypeAttr(CalledArg))
1877 ElemTy = getPointeeTypeByAttr(CalledArg);
1878 if (!ElemTy) {
1879 ElemTy = getPointeeTypeByCallInst(DemangledName, CalledF, OpIdx);
1880 if (ElemTy) {
1881 GR->addDeducedElementType(CalledArg, normalizeType(ElemTy));
1882 } else {
1883 for (User *U : CalledArg->users()) {
1884 if (Instruction *Inst = dyn_cast<Instruction>(U)) {
1885 if ((ElemTy = deduceElementTypeHelper(Inst, false)) != nullptr)
1886 break;
1887 }
1888 }
1889 }
1890 }
1891 HaveTypes |= ElemTy != nullptr;
1892 CalledArgTys.push_back(ElemTy);
1893 }
1894 }
1895
1896 if (DemangledName.empty() && !HaveTypes)
1897 return;
1898
1899 for (unsigned OpIdx = 0; OpIdx < CI->arg_size(); OpIdx++) {
1900 Value *ArgOperand = CI->getArgOperand(OpIdx);
1901 if (!isPointerTy(ArgOperand->getType()))
1902 continue;
1903
1904 // Constants (nulls/undefs) are handled in insertAssignPtrTypeIntrs()
1905 if (!isa<Instruction>(ArgOperand) && !isa<Argument>(ArgOperand)) {
1906 // However, we may have assumptions about the formal argument's type and
1907 // may have a need to insert a ptr cast for the actual parameter of this
1908 // call.
1909 Argument *CalledArg = CalledF->getArg(OpIdx);
1910 if (!GR->findDeducedElementType(CalledArg))
1911 continue;
1912 }
1913
1914 Type *ExpectedType =
1915 OpIdx < CalledArgTys.size() ? CalledArgTys[OpIdx] : nullptr;
1916 if (!ExpectedType && !DemangledName.empty())
1917 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
1918 DemangledName, OpIdx, I->getContext());
1919 if (!ExpectedType || ExpectedType->isVoidTy())
1920 continue;
1921
1922 if (ExpectedType->isTargetExtTy() &&
1923 !isTypedPointerWrapper(cast<TargetExtType>(ExpectedType)))
1924 insertAssignPtrTypeTargetExt(cast<TargetExtType>(ExpectedType),
1925 ArgOperand, B);
1926 else
1927 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType, OpIdx, B);
1928 }
1929}
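// Illustrative example (added; not from the original source; the builtin
// name is chosen only for illustration). For a call to an OpenCL builtin
// such as atomic_add(__global int *p, int v), the demangled name and the
// argument position let parseBuiltinCallArgumentBaseType() report i32 as
// the expected pointee type of the pointer argument, and a spv_ptrcast or
// spv_assign_ptr_type is inserted for the actual argument accordingly.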
1930
1931Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
1932 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1933 // type in LLT and IRTranslator will replace it by the scalar.
1934 if (isVector1(I.getType()))
1935 return &I;
1936
1937 SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
1938 I.getOperand(1)->getType(),
1939 I.getOperand(2)->getType()};
1940 IRBuilder<> B(I.getParent());
1941 B.SetInsertPoint(&I);
1942 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1943 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
1944 replaceAllUsesWithAndErase(B, &I, NewI);
1945 return NewI;
1946}
1947
1948 Instruction *
1949 SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
1950 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1951 // type in LLT and IRTranslator will replace it by the scalar.
1952 if (isVector1(I.getVectorOperandType()))
1953 return &I;
1954
1955 IRBuilder<> B(I.getParent());
1956 B.SetInsertPoint(&I);
1957 SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperandType(),
1958 I.getIndexOperand()->getType()};
1959 SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
1960 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
1961 replaceAllUsesWithAndErase(B, &I, NewI);
1962 return NewI;
1963}
1964
1965Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
1966 IRBuilder<> B(I.getParent());
1967 B.SetInsertPoint(&I);
1968 SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
1969 SmallVector<Value *> Args;
1970 Value *AggregateOp = I.getAggregateOperand();
1971 if (isa<UndefValue>(AggregateOp))
1972 Args.push_back(UndefValue::get(B.getInt32Ty()));
1973 else
1974 Args.push_back(AggregateOp);
1975 Args.push_back(I.getInsertedValueOperand());
1976 for (auto &Op : I.indices())
1977 Args.push_back(B.getInt32(Op));
1978 Instruction *NewI =
1979 B.CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
1980 replaceMemInstrUses(&I, NewI, B);
1981 return NewI;
1982}
1983
1984Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
1985 if (I.getAggregateOperand()->getType()->isAggregateType())
1986 return &I;
1987 IRBuilder<> B(I.getParent());
1988 B.SetInsertPoint(&I);
1989 SmallVector<Value *> Args(I.operands());
1990 for (auto &Op : I.indices())
1991 Args.push_back(B.getInt32(Op));
1992 auto *NewI =
1993 B.CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
1994 replaceAllUsesWithAndErase(B, &I, NewI);
1995 return NewI;
1996}
1997
1998Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
1999 if (!I.getType()->isAggregateType())
2000 return &I;
2001 IRBuilder<> B(I.getParent());
2002 B.SetInsertPoint(&I);
2003 TrackConstants = false;
2004 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
2005 MachineMemOperand::Flags Flags =
2006 TLI->getLoadMemOperandFlags(I, CurrF->getDataLayout());
2007 auto *NewI =
2008 B.CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
2009 {I.getPointerOperand(), B.getInt16(Flags),
2010 B.getInt8(I.getAlign().value())});
2011 replaceMemInstrUses(&I, NewI, B);
2012 return NewI;
2013}
2014
2015Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
2016 if (!AggrStores.contains(&I))
2017 return &I;
2018 IRBuilder<> B(I.getParent());
2019 B.SetInsertPoint(&I);
2020 TrackConstants = false;
2021 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
2022 MachineMemOperand::Flags Flags =
2023 TLI->getStoreMemOperandFlags(I, CurrF->getDataLayout());
2024 auto *PtrOp = I.getPointerOperand();
2025 auto *NewI = B.CreateIntrinsic(
2026 Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
2027 {I.getValueOperand(), PtrOp, B.getInt16(Flags),
2028 B.getInt8(I.getAlign().value())});
2029 NewI->copyMetadata(I);
2030 I.eraseFromParent();
2031 return NewI;
2032}
2033
2034Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
2035 Value *ArraySize = nullptr;
2036 if (I.isArrayAllocation()) {
2037 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I.getFunction());
2038 if (!STI->canUseExtension(
2039 SPIRV::Extension::SPV_INTEL_variable_length_array))
2041 "array allocation: this instruction requires the following "
2042 "SPIR-V extension: SPV_INTEL_variable_length_array",
2043 false);
2044 ArraySize = I.getArraySize();
2045 }
2046 IRBuilder<> B(I.getParent());
2047 B.SetInsertPoint(&I);
2048 TrackConstants = false;
2049 Type *PtrTy = I.getType();
2050 auto *NewI =
2051 ArraySize
2052 ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2053 {PtrTy, ArraySize->getType()},
2054 {ArraySize, B.getInt8(I.getAlign().value())})
2055 : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
2056 {B.getInt8(I.getAlign().value())});
2057 replaceAllUsesWithAndErase(B, &I, NewI);
2058 return NewI;
2059}
2060
2061Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2062 assert(I.getType()->isAggregateType() && "Aggregate result is expected");
2063 IRBuilder<> B(I.getParent());
2064 B.SetInsertPoint(&I);
2065 SmallVector<Value *> Args(I.operands());
2066 Args.push_back(B.getInt32(
2067 static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
2068 Args.push_back(B.getInt32(
2069 static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
2070 Args.push_back(B.getInt32(
2071 static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
2072 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2073 {I.getPointerOperand()->getType()}, {Args});
2074 replaceMemInstrUses(&I, NewI, B);
2075 return NewI;
2076}
2077
2078Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) {
2079 IRBuilder<> B(I.getParent());
2080 B.SetInsertPoint(&I);
2081 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2082 return &I;
2083}
2084
2085void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2086 IRBuilder<> &B) {
2087 // Skip special artificial variables.
2088 static const StringSet<> ArtificialGlobals{"llvm.global.annotations",
2089 "llvm.compiler.used"};
2090
2091 if (ArtificialGlobals.contains(GV.getName()))
2092 return;
2093
2094 Constant *Init = nullptr;
2095 if (hasInitializer(&GV)) {
2096 // Deduce element type and store results in Global Registry.
2097 // Result is ignored, because TypedPointerType is not supported
2098 // by llvm IR general logic.
2099 deduceElementTypeHelper(&GV, false);
2100 Init = GV.getInitializer();
2101 Type *Ty = isAggrConstForceInt32(Init) ? B.getInt32Ty() : Init->getType();
2102 Constant *Const = isAggrConstForceInt32(Init) ? B.getInt32(1) : Init;
2103 auto *InitInst = B.CreateIntrinsic(Intrinsic::spv_init_global,
2104 {GV.getType(), Ty}, {&GV, Const});
2105 InitInst->setArgOperand(1, Init);
2106 }
2107 if (!Init && GV.use_empty())
2108 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
2109}
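// Illustrative example (added; not from the original source). For:
//   @g = addrspace(1) global i32 42
// the pass emits spv_init_global(@g, i32 42) so the initializer survives
// IRTranslation; a global without an initializer and without uses instead
// gets a spv_unref_global(@g) reference so it is still kept track of.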
2110
2111 // Return true if we can't decide what the pointee type is now and will get
2112 // back to the question later. Return false if spv_assign_ptr_type is not
2113 // needed or can be inserted immediately.
2114bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
2115 IRBuilder<> &B,
2116 bool UnknownElemTypeI8) {
2117 reportFatalOnTokenType(I);
2118 if (!isPointerTy(I->getType()) || !requireAssignType(I))
2119 return false;
2120
2121 setInsertPointSkippingPhis(B, I);
2122 if (Type *ElemTy = deduceElementType(I, UnknownElemTypeI8)) {
2123 GR->buildAssignPtr(B, ElemTy, I);
2124 return false;
2125 }
2126 return true;
2127}
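// Illustrative example (added; not from the original source). For:
//   %p = alloca i32
// deduceElementType() returns i32, so a spv_assign_ptr_type annotation
// recording i32 as the pointee type of %p is emitted and the function
// returns false; when nothing can be deduced yet, it returns true and the
// decision is postponed.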
2128
2129void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
2130 IRBuilder<> &B) {
2131 // TODO: extend the list of functions with known result types
2132 static StringMap<unsigned> ResTypeWellKnown = {
2133 {"async_work_group_copy", WellKnownTypes::Event},
2134 {"async_work_group_strided_copy", WellKnownTypes::Event},
2135 {"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2136
2137 reportFatalOnTokenType(I);
2138
2139 bool IsKnown = false;
2140 if (auto *CI = dyn_cast<CallInst>(I)) {
2141 if (!CI->isIndirectCall() && !CI->isInlineAsm() &&
2142 CI->getCalledFunction() && !CI->getCalledFunction()->isIntrinsic()) {
2143 Function *CalledF = CI->getCalledFunction();
2144 std::string DemangledName =
2145 getOclOrSpirvBuiltinDemangledName(CalledF->getName());
2146 FPDecorationId DecorationId = FPDecorationId::NONE;
2147 if (DemangledName.length() > 0)
2148 DemangledName =
2149 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2150 auto ResIt = ResTypeWellKnown.find(DemangledName);
2151 if (ResIt != ResTypeWellKnown.end()) {
2152 IsKnown = true;
2153 setInsertPointAfterDef(B, I);
2154 switch (ResIt->second) {
2155 case WellKnownTypes::Event:
2156 GR->buildAssignType(
2157 B, TargetExtType::get(I->getContext(), "spirv.Event"), I);
2158 break;
2159 }
2160 }
2161 // check if a floating rounding mode or saturation info is present
2162 switch (DecorationId) {
2163 default:
2164 break;
2165 case FPDecorationId::SAT:
2166 createSaturatedConversionDecoration(CI, B);
2167 break;
2168 case FPDecorationId::RTE:
2169 createRoundingModeDecoration(
2170 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE, B);
2171 break;
2172 case FPDecorationId::RTZ:
2173 createRoundingModeDecoration(
2174 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ, B);
2175 break;
2176 case FPDecorationId::RTP:
2177 createRoundingModeDecoration(
2178 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP, B);
2179 break;
2180 case FPDecorationId::RTN:
2181 createRoundingModeDecoration(
2182 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN, B);
2183 break;
2184 }
2185 }
2186 }
2187
2188 Type *Ty = I->getType();
2189 if (!IsKnown && !Ty->isVoidTy() && !isPointerTy(Ty) && requireAssignType(I)) {
2190 setInsertPointAfterDef(B, I);
2191 Type *TypeToAssign = Ty;
2192 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2193 if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2194 II->getIntrinsicID() == Intrinsic::spv_undef) {
2195 auto It = AggrConstTypes.find(II);
2196 if (It == AggrConstTypes.end())
2197 report_fatal_error("Unknown composite intrinsic type");
2198 TypeToAssign = It->second;
2199 }
2200 }
2201 TypeToAssign = restoreMutatedType(GR, I, TypeToAssign);
2202 GR->buildAssignType(B, TypeToAssign, I);
2203 }
2204 for (const auto &Op : I->operands()) {
2205 if (isa<ConstantPointerNull>(Op) || isa<UndefValue>(Op) ||
2206 // Check GetElementPtrConstantExpr case.
2207 (isa<ConstantExpr>(Op) &&
2208 (isa<GEPOperator>(Op) ||
2209 (cast<ConstantExpr>(Op)->getOpcode() == CastInst::IntToPtr)))) {
2210 setInsertPointSkippingPhis(B, I);
2211 Type *OpTy = Op->getType();
2212 if (isa<UndefValue>(Op) && OpTy->isAggregateType()) {
2213 CallInst *AssignCI =
2214 buildIntrWithMD(Intrinsic::spv_assign_type, {B.getInt32Ty()}, Op,
2215 UndefValue::get(B.getInt32Ty()), {}, B);
2216 GR->addAssignPtrTypeInstr(Op, AssignCI);
2217 } else if (!isa<Instruction>(Op)) {
2218 Type *OpTy = Op->getType();
2219 Type *OpTyElem = getPointeeType(OpTy);
2220 if (OpTyElem) {
2221 GR->buildAssignPtr(B, OpTyElem, Op);
2222 } else if (isPointerTy(OpTy)) {
2223 Type *ElemTy = GR->findDeducedElementType(Op);
2224 GR->buildAssignPtr(B, ElemTy ? ElemTy : deduceElementType(Op, true),
2225 Op);
2226 } else {
2227 Value *OpTyVal = Op;
2228 if (OpTy->isTargetExtTy()) {
2229 // We need to do this in order to be consistent with how target ext
2230 // types are handled in `processInstrAfterVisit`
2231 OpTyVal = getNormalizedPoisonValue(OpTy);
2232 }
2233 CallInst *AssignCI =
2234 buildIntrWithMD(Intrinsic::spv_assign_type, {OpTy},
2235 getNormalizedPoisonValue(OpTy), OpTyVal, {}, B);
2236 GR->addAssignPtrTypeInstr(OpTyVal, AssignCI);
2237 }
2238 }
2239 }
2240 }
2241}
2242
2243bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2244 Instruction *Inst) {
2245 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*Inst->getFunction());
2246 if (!STI->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2247 return false;
2248 // Add aliasing decorations to internal load and store intrinsics
2249 // and atomic instructions, skipping atomic store as it won't have ID to
2250 // attach the decoration.
2251 CallInst *CI = dyn_cast<CallInst>(Inst);
2252 if (!CI)
2253 return false;
2254 if (Function *Fun = CI->getCalledFunction()) {
2255 if (Fun->isIntrinsic()) {
2256 switch (Fun->getIntrinsicID()) {
2257 case Intrinsic::spv_load:
2258 case Intrinsic::spv_store:
2259 return true;
2260 default:
2261 return false;
2262 }
2263 }
2264 std::string Name = getOclOrSpirvBuiltinDemangledName(Fun->getName());
2265 const std::string Prefix = "__spirv_Atomic";
2266 const bool IsAtomic = Name.find(Prefix) == 0;
2267
2268 if (!Fun->getReturnType()->isVoidTy() && IsAtomic)
2269 return true;
2270 }
2271 return false;
2272}
2273
2274void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
2275 IRBuilder<> &B) {
2276 if (MDNode *MD = I->getMetadata("spirv.Decorations")) {
2277 setInsertPointAfterDef(B, I);
2278 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
2279 {I, MetadataAsValue::get(I->getContext(), MD)});
2280 }
2281 // Lower alias.scope/noalias metadata
2282 {
2283 auto processMemAliasingDecoration = [&](unsigned Kind) {
2284 if (MDNode *AliasListMD = I->getMetadata(Kind)) {
2285 if (shouldTryToAddMemAliasingDecoration(I)) {
2286 uint32_t Dec = Kind == LLVMContext::MD_alias_scope
2287 ? SPIRV::Decoration::AliasScopeINTEL
2288 : SPIRV::Decoration::NoAliasINTEL;
2289 SmallVector<Value *, 3> Args = {
2290 I, ConstantInt::get(B.getInt32Ty(), Dec),
2291 MetadataAsValue::get(I->getContext(), AliasListMD)};
2292 setInsertPointAfterDef(B, I);
2293 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2294 {I->getType()}, {Args});
2295 }
2296 }
2297 };
2298 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2299 processMemAliasingDecoration(LLVMContext::MD_noalias);
2300 }
2301 // MD_fpmath
2302 if (MDNode *MD = I->getMetadata(LLVMContext::MD_fpmath)) {
2303 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I->getFunction());
2304 bool AllowFPMaxError =
2305 STI->canUseExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
2306 if (!AllowFPMaxError)
2307 return;
2308
2309 setInsertPointAfterDef(B, I);
2310 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2311 {I->getType()},
2312 {I, MetadataAsValue::get(I->getContext(), MD)});
2313 }
2314}
2315
2316 static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec(
2317 const Module &M,
2318 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2319 &FPFastMathDefaultInfoMap,
2320 Function *F) {
2321 auto it = FPFastMathDefaultInfoMap.find(F);
2322 if (it != FPFastMathDefaultInfoMap.end())
2323 return it->second;
2324
2325 // If the map does not contain the entry, create a new one. Initialize it to
2326 // contain all 3 elements sorted by bit width of target type: {half, float,
2327 // double}.
2328 SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
2329 FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
2330 SPIRV::FPFastMathMode::None);
2331 FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
2332 SPIRV::FPFastMathMode::None);
2333 FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
2334 SPIRV::FPFastMathMode::None);
2335 return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
2336}
2337
2338 static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo(
2339 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
2340 const Type *Ty) {
2341 size_t BitWidth = Ty->getScalarSizeInBits();
2342 int Index =
2343 SPIRV::FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex(
2344 BitWidth);
2345 assert(Index >= 0 && Index < 3 &&
2346 "Expected FPFastMathDefaultInfo for half, float, or double");
2347 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2348 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2349 return FPFastMathDefaultInfoVec[Index];
2350}
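// Illustrative example (added; not from the original source). With the
// vector sorted as {half, float, double}, a float value (scalar bit width
// 32) maps to index 1, so its FPFastMathDefaultInfo is
// FPFastMathDefaultInfoVec[1].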
2351
2352void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
2353 const SPIRVSubtarget *ST = TM->getSubtargetImpl();
2354 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2355 return;
2356
2357 // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
2358 // We need the entry point (function) as the key, and the target
2359 // type and flags as the value.
2360 // We also need to check ContractionOff and SignedZeroInfNanPreserve
2361 // execution modes, as they are now deprecated and must be replaced
2362 // with FPFastMathDefaultInfo.
2363 auto Node = M.getNamedMetadata("spirv.ExecutionMode");
2364 if (!Node) {
2365 if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
2366 // This requires emitting ContractionOff. However, because
2367 // ContractionOff is now deprecated, we need to replace it with
2368 // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0.
2369 // We need to create the constant for that.
2370
2371 // Create constant instruction with the bitmask flags.
2372 Constant *InitValue =
2373 ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
2374 // TODO: Reuse constant if there is one already with the required
2375 // value.
2376 [[maybe_unused]] GlobalVariable *GV =
2377 new GlobalVariable(M, // Module
2378 Type::getInt32Ty(M.getContext()), // Type
2379 true, // isConstant
2380 GlobalValue::InternalLinkage, // Linkage
2381 InitValue // Initializer
2382 );
2383 }
2384 return;
2385 }
2386
2387 // The table maps function pointers to their default FP fast math info. It
2388 // can be assumed that the SmallVector is sorted by the bit width of the
2389 // type. The first element is the smallest bit width, and the last element
2390 // is the largest bit width, therefore, we will have {half, float, double}
2391 // in the order of their bit widths.
2392 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2393 FPFastMathDefaultInfoMap;
2394
2395 for (unsigned i = 0; i < Node->getNumOperands(); i++) {
2396 MDNode *MDN = cast<MDNode>(Node->getOperand(i));
2397 assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
2398 Function *F = cast<Function>(
2399 cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
2400 const auto EM =
2401 cast<ConstantInt>(
2402 cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
2403 ->getZExtValue();
2404 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2405 assert(MDN->getNumOperands() == 4 &&
2406 "Expected 4 operands for FPFastMathDefault");
2407 const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
2408 unsigned Flags =
2409 cast<ConstantInt>(
2410 cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
2411 ->getZExtValue();
2412 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2413 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2414 SPIRV::FPFastMathDefaultInfo &Info =
2415 getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
2416 Info.FastMathFlags = Flags;
2417 Info.FPFastMathDefault = true;
2418 } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2419 assert(MDN->getNumOperands() == 2 &&
2420 "Expected no operands for ContractionOff");
2421
2422 // We need to save this info for every possible FP type, i.e. {half,
2423 // float, double, fp128}.
2424 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2425 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2426 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2427 Info.ContractionOff = true;
2428 }
2429 } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2430 assert(MDN->getNumOperands() == 3 &&
2431 "Expected 1 operand for SignedZeroInfNanPreserve");
2432 unsigned TargetWidth =
2433 cast<ConstantInt>(
2434 cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
2435 ->getZExtValue();
2436 // We need to save this info only for the FP type with TargetWidth.
2437 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2438 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2439 int Index = SPIRV::FPFastMathDefaultInfoVector::
2440 computeFPFastMathDefaultInfoVecIndex(TargetWidth);
2441 assert(Index >= 0 && Index < 3 &&
2442 "Expected FPFastMathDefaultInfo for half, float, or double");
2443 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2444 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2445 FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
2446 }
2447 }
2448
2449 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2450 for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2451 if (FPFastMathDefaultInfoVec.empty())
2452 continue;
2453
2454 for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2455 assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
2456 // Skip if none of the execution modes was used.
2457 unsigned Flags = Info.FastMathFlags;
2458 if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
2459 !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
2460 continue;
2461
2462 // Check if flags are compatible.
2463 if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2464 report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
2465 "and AllowContract");
2466
2467 if (Info.SignedZeroInfNanPreserve &&
2468 !(Flags &
2469 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2470 SPIRV::FPFastMathMode::NSZ))) {
2471 if (Info.FPFastMathDefault)
2472 report_fatal_error("Conflicting FPFastMathFlags: "
2473 "SignedZeroInfNanPreserve but at least one of "
2474 "NotNaN/NotInf/NSZ is enabled.");
2475 }
2476
2477 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2478 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2479 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2480 report_fatal_error("Conflicting FPFastMathFlags: "
2481 "AllowTransform requires AllowReassoc and "
2482 "AllowContract to be set.");
2483 }
2484
2485 auto it = GlobalVars.find(Flags);
2486 GlobalVariable *GV = nullptr;
2487 if (it != GlobalVars.end()) {
2488 // Reuse existing global variable.
2489 GV = it->second;
2490 } else {
2491 // Create constant instruction with the bitmask flags.
2492 Constant *InitValue =
2493 ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
2494 // TODO: Reuse constant if there is one already with the required
2495 // value.
2496 GV = new GlobalVariable(M, // Module
2497 Type::getInt32Ty(M.getContext()), // Type
2498 true, // isConstant
2499 GlobalValue::InternalLinkage, // Linkage
2500 InitValue // Initializer
2501 );
2502 GlobalVars[Flags] = GV;
2503 }
2504 }
2505 }
2506}
2507
2508void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
2509 IRBuilder<> &B) {
2510 auto *II = dyn_cast<IntrinsicInst>(I);
2511 bool IsConstComposite =
2512 II && II->getIntrinsicID() == Intrinsic::spv_const_composite;
2513 if (IsConstComposite && TrackConstants) {
2514 setInsertPointAfterDef(B, I);
2515 auto t = AggrConsts.find(I);
2516 assert(t != AggrConsts.end());
2517 auto *NewOp =
2518 buildIntrWithMD(Intrinsic::spv_track_constant,
2519 {II->getType(), II->getType()}, t->second, I, {}, B);
2520 replaceAllUsesWith(I, NewOp, false);
2521 NewOp->setArgOperand(0, I);
2522 }
2523 bool IsPhi = isa<PHINode>(I), BPrepared = false;
2524 for (const auto &Op : I->operands()) {
2525 if (isa<PHINode>(I) || isa<SwitchInst>(I) ||
2526 !(isa<ConstantData>(Op) || isa<ConstantExpr>(Op)))
2527 continue;
2528 unsigned OpNo = Op.getOperandNo();
2529 if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2530 (II->paramHasAttr(OpNo, Attribute::ImmArg))))
2531 continue;
2532
2533 if (!BPrepared) {
2534 IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
2535 : B.SetInsertPoint(I);
2536 BPrepared = true;
2537 }
2538 Type *OpTy = Op->getType();
2539 Type *OpElemTy = GR->findDeducedElementType(Op);
2540 Value *NewOp = Op;
2541 if (OpTy->isTargetExtTy()) {
2542 // Since this value is replaced by poison, we need to do the same in
2543 // `insertAssignTypeIntrs`.
2544 Value *OpTyVal = getNormalizedPoisonValue(OpTy);
2545 NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
2546 {OpTy, OpTyVal->getType()}, Op, OpTyVal, {}, B);
2547 }
2548 if (!IsConstComposite && isPointerTy(OpTy) && OpElemTy != nullptr &&
2549 OpElemTy != IntegerType::getInt8Ty(I->getContext())) {
2550 SmallVector<Type *, 2> Types = {OpTy, OpTy};
2551 SmallVector<Value *, 2> Args = {
2552 NewOp, buildMD(getNormalizedPoisonValue(OpElemTy)),
2553 B.getInt32(getPointerAddressSpace(OpTy))};
2554 CallInst *PtrCasted =
2555 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
2556 GR->buildAssignPtr(B, OpElemTy, PtrCasted);
2557 NewOp = PtrCasted;
2558 }
2559 if (NewOp != Op)
2560 I->setOperand(OpNo, NewOp);
2561 }
2562 if (Named.insert(I).second)
2563 emitAssignName(I, B);
2564}
2565
2566Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
2567 unsigned OpIdx) {
2568 std::unordered_set<Function *> FVisited;
2569 return deduceFunParamElementType(F, OpIdx, FVisited);
2570}
2571
2572Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2573 Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2574 // maybe a cycle
2575 if (!FVisited.insert(F).second)
2576 return nullptr;
2577
2578 std::unordered_set<Value *> Visited;
2579 SmallVector<std::pair<Function *, unsigned>> Lookup;
2580 // search in function's call sites
2581 for (User *U : F->users()) {
2582 CallInst *CI = dyn_cast<CallInst>(U);
2583 if (!CI || OpIdx >= CI->arg_size())
2584 continue;
2585 Value *OpArg = CI->getArgOperand(OpIdx);
2586 if (!isPointerTy(OpArg->getType()))
2587 continue;
2588 // maybe we already know operand's element type
2589 if (Type *KnownTy = GR->findDeducedElementType(OpArg))
2590 return KnownTy;
2591 // try to deduce from the operand itself
2592 Visited.clear();
2593 if (Type *Ty = deduceElementTypeHelper(OpArg, Visited, false))
2594 return Ty;
2595 // search in actual parameter's users
2596 for (User *OpU : OpArg->users()) {
2597 Instruction *Inst = dyn_cast<Instruction>(OpU);
2598 if (!Inst || Inst == CI)
2599 continue;
2600 Visited.clear();
2601 if (Type *Ty = deduceElementTypeHelper(Inst, Visited, false))
2602 return Ty;
2603 }
2604 // check if it's a formal parameter of the outer function
2605 if (!CI->getParent() || !CI->getParent()->getParent())
2606 continue;
2607 Function *OuterF = CI->getParent()->getParent();
2608 if (FVisited.find(OuterF) != FVisited.end())
2609 continue;
2610 for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
2611 if (OuterF->getArg(i) == OpArg) {
2612 Lookup.push_back(std::make_pair(OuterF, i));
2613 break;
2614 }
2615 }
2616 }
2617
2618 // search in function parameters
2619 for (auto &Pair : Lookup) {
2620 if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2621 return Ty;
2622 }
2623
2624 return nullptr;
2625}
2626
2627void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *F,
2628 IRBuilder<> &B) {
2629 B.SetInsertPointPastAllocas(F);
2630 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2631 Argument *Arg = F->getArg(OpIdx);
2632 if (!isUntypedPointerTy(Arg->getType()))
2633 continue;
2634 Type *ElemTy = GR->findDeducedElementType(Arg);
2635 if (ElemTy)
2636 continue;
2637 if (hasPointeeTypeAttr(Arg) &&
2638 (ElemTy = getPointeeTypeByAttr(Arg)) != nullptr) {
2639 GR->buildAssignPtr(B, ElemTy, Arg);
2640 continue;
2641 }
2642 // search in function's call sites
2643 for (User *U : F->users()) {
2644 CallInst *CI = dyn_cast<CallInst>(U);
2645 if (!CI || OpIdx >= CI->arg_size())
2646 continue;
2647 Value *OpArg = CI->getArgOperand(OpIdx);
2648 if (!isPointerTy(OpArg->getType()))
2649 continue;
2650 // maybe we already know operand's element type
2651 if ((ElemTy = GR->findDeducedElementType(OpArg)) != nullptr)
2652 break;
2653 }
2654 if (ElemTy) {
2655 GR->buildAssignPtr(B, ElemTy, Arg);
2656 continue;
2657 }
2658 if (HaveFunPtrs) {
2659 for (User *U : Arg->users()) {
2660 CallInst *CI = dyn_cast<CallInst>(U);
2661 if (CI && !isa<IntrinsicInst>(CI) && CI->isIndirectCall() &&
2662 CI->getCalledOperand() == Arg &&
2663 CI->getParent()->getParent() == CurrF) {
2664 SmallVector<std::pair<Value *, unsigned>> Ops;
2665 deduceOperandElementTypeFunctionPointer(CI, Ops, ElemTy, false);
2666 if (ElemTy) {
2667 GR->buildAssignPtr(B, ElemTy, Arg);
2668 break;
2669 }
2670 }
2671 }
2672 }
2673 }
2674}
2675
2676void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
2677 B.SetInsertPointPastAllocas(F);
2678 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2679 Argument *Arg = F->getArg(OpIdx);
2680 if (!isUntypedPointerTy(Arg->getType()))
2681 continue;
2682 Type *ElemTy = GR->findDeducedElementType(Arg);
2683 if (!ElemTy && (ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
2684 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Arg)) {
2685 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2686 GR->updateAssignType(AssignCI, Arg, getNormalizedPoisonValue(ElemTy));
2687 propagateElemType(Arg, IntegerType::getInt8Ty(F->getContext()),
2688 VisitedSubst);
2689 } else {
2690 GR->buildAssignPtr(B, ElemTy, Arg);
2691 }
2692 }
2693 }
2694}
2695
2696 static FunctionType *getFunctionPointerElemType(Function *F,
2697 SPIRVGlobalRegistry *GR) {
2698 FunctionType *FTy = F->getFunctionType();
2699 bool IsNewFTy = false;
2700 SmallVector<Type *, 4> ArgTys;
2701 for (Argument &Arg : F->args()) {
2702 Type *ArgTy = Arg.getType();
2703 if (ArgTy->isPointerTy())
2704 if (Type *ElemTy = GR->findDeducedElementType(&Arg)) {
2705 IsNewFTy = true;
2706 ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
2707 }
2708 ArgTys.push_back(ArgTy);
2709 }
2710 return IsNewFTy
2711 ? FunctionType::get(FTy->getReturnType(), ArgTys, FTy->isVarArg())
2712 : FTy;
2713}
2714
2715bool SPIRVEmitIntrinsics::processFunctionPointers(Module &M) {
2716 SmallVector<Function *> Worklist;
2717 for (auto &F : M) {
2718 if (F.isIntrinsic())
2719 continue;
2720 if (F.isDeclaration()) {
2721 for (User *U : F.users()) {
2722 CallInst *CI = dyn_cast<CallInst>(U);
2723 if (!CI || CI->getCalledFunction() != &F) {
2724 Worklist.push_back(&F);
2725 break;
2726 }
2727 }
2728 } else {
2729 if (F.user_empty())
2730 continue;
2731 Type *FPElemTy = GR->findDeducedElementType(&F);
2732 if (!FPElemTy)
2733 FPElemTy = getFunctionPointerElemType(&F, GR);
2734 for (User *U : F.users()) {
2735 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2736 if (!II || II->arg_size() != 3 || II->getOperand(0) != &F)
2737 continue;
2738 if (II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2739 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2740 GR->updateAssignType(II, &F, getNormalizedPoisonValue(FPElemTy));
2741 break;
2742 }
2743 }
2744 }
2745 }
2746 if (Worklist.empty())
2747 return false;
2748
2749 std::string ServiceFunName = SPIRV_BACKEND_SERVICE_FUN_NAME;
2750 if (!getVacantFunctionName(M, ServiceFunName))
2752 "cannot allocate a name for the internal service function");
2753 LLVMContext &Ctx = M.getContext();
2754 Function *SF =
2755 Function::Create(FunctionType::get(Type::getVoidTy(Ctx), {}, false),
2756 GlobalValue::PrivateLinkage, ServiceFunName, M);
2758 BasicBlock *BB = BasicBlock::Create(Ctx, "entry", SF);
2759 IRBuilder<> IRB(BB);
2760
2761 for (Function *F : Worklist) {
2762 SmallVector<Value *> Args;
2763 for (const auto &Arg : F->args())
2764 Args.push_back(getNormalizedPoisonValue(Arg.getType()));
2765 IRB.CreateCall(F, Args);
2766 }
2767 IRB.CreateRetVoid();
2768
2769 return true;
2770}
2771
2772// Apply types parsed from demangled function declarations.
2773void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(IRBuilder<> &B) {
2774 DenseMap<Function *, CallInst *> Ptrcasts;
2775 for (auto It : FDeclPtrTys) {
2776 Function *F = It.first;
2777 for (auto *U : F->users()) {
2778 CallInst *CI = dyn_cast<CallInst>(U);
2779 if (!CI || CI->getCalledFunction() != F)
2780 continue;
2781 unsigned Sz = CI->arg_size();
2782 for (auto [Idx, ElemTy] : It.second) {
2783 if (Idx >= Sz)
2784 continue;
2785 Value *Param = CI->getArgOperand(Idx);
2786 if (GR->findDeducedElementType(Param) || isa<GlobalValue>(Param))
2787 continue;
2788 if (Argument *Arg = dyn_cast<Argument>(Param)) {
2789 if (!hasPointeeTypeAttr(Arg)) {
2790 B.SetInsertPointPastAllocas(Arg->getParent());
2791 B.SetCurrentDebugLocation(DebugLoc());
2792 GR->buildAssignPtr(B, ElemTy, Arg);
2793 }
2794 } else if (isa<GetElementPtrInst>(Param)) {
2795 replaceUsesOfWithSpvPtrcast(Param, normalizeType(ElemTy), CI,
2796 Ptrcasts);
2797 } else if (isa<Instruction>(Param)) {
2798 GR->addDeducedElementType(Param, normalizeType(ElemTy));
2799 // insertAssignTypeIntrs() will complete buildAssignPtr()
2800 } else {
2801 B.SetInsertPoint(CI->getParent()
2802 ->getParent()
2803 ->getEntryBlock()
2804 .getFirstNonPHIOrDbgOrAlloca());
2805 GR->buildAssignPtr(B, ElemTy, Param);
2806 }
2807 CallInst *Ref = dyn_cast<CallInst>(Param);
2808 if (!Ref)
2809 continue;
2810 Function *RefF = Ref->getCalledFunction();
2811 if (!RefF || !isPointerTy(RefF->getReturnType()) ||
2812 GR->findDeducedElementType(RefF))
2813 continue;
2814 ElemTy = normalizeType(ElemTy);
2815 GR->addDeducedElementType(RefF, ElemTy);
2816 GR->addReturnType(
2817 RefF, TypedPointerType::get(
2818 ElemTy, getPointerAddressSpace(RefF->getReturnType())));
2819 }
2820 }
2821 }
2822}
2823
2824GetElementPtrInst *
2825SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
2826 // getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I.
2827 // If the type is a 0-length array and the first index is 0 (zero), drop
2828 // both the 0-length array type and the first index. This is a common
2829 // pattern in the IR, e.g. when a zero-length array is used as a
2830 // placeholder for a flexible (unbounded) array.
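// Illustrative example (added; not from the original source):
//   getelementptr [0 x i32], ptr %p, i64 0, i64 %i
// is simplified to:
//   getelementptr i32, ptr %p, i64 %i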
2831 assert(GEP && "GEP is null");
2832 Type *SrcTy = GEP->getSourceElementType();
2833 SmallVector<Value *, 8> Indices(GEP->indices());
2834 ArrayType *ArrTy = dyn_cast<ArrayType>(SrcTy);
2835 if (ArrTy && ArrTy->getNumElements() == 0 &&
2836 isFirstIndexZero(GEP)) {
2837 Indices.erase(Indices.begin());
2838 SrcTy = ArrTy->getElementType();
2839 return GetElementPtrInst::Create(SrcTy, GEP->getPointerOperand(), Indices,
2840 GEP->getNoWrapFlags(), "",
2841 GEP->getIterator());
2842 }
2843 return nullptr;
2844}
2845
2846bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
2847 if (Func.isDeclaration())
2848 return false;
2849
2850 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
2851 GR = ST.getSPIRVGlobalRegistry();
2852
2853 if (!CurrF)
2854 HaveFunPtrs =
2855 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
2856
2857 CurrF = &Func;
2858 IRBuilder<> B(Func.getContext());
2859 AggrConsts.clear();
2860 AggrConstTypes.clear();
2861 AggrStores.clear();
2862
2863 // Fix GEP result types ahead of inference, and simplify if possible.
2864 // Data structure for dead instructions that were simplified and replaced.
2865 SmallPtrSet<Instruction *, 4> DeadInsts;
2866 for (auto &I : instructions(Func)) {
2867 auto *Ref = dyn_cast<GetElementPtrInst>(&I);
2868 if (!Ref || GR->findDeducedElementType(Ref))
2869 continue;
2870
2871 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(Ref);
2872 if (NewGEP) {
2873 Ref->replaceAllUsesWith(NewGEP);
2874 DeadInsts.insert(Ref);
2875 Ref = NewGEP;
2876 }
2877 if (Type *GepTy = getGEPType(Ref))
2878 GR->addDeducedElementType(Ref, normalizeType(GepTy));
2879 }
2880 // Remove dead instructions that were simplified and replaced.
2881 for (auto *I : DeadInsts) {
2882 assert(I->use_empty() && "Dead instruction should not have any uses left");
2883 I->eraseFromParent();
2884 }
2885
2886 processParamTypesByFunHeader(CurrF, B);
2887
2888 // A StoreInst's operand type can be changed during the next
2889 // transformations, so we need to store these instructions in the set. Also
2890 // store already transformed types.
2891 for (auto &I : instructions(Func)) {
2892 StoreInst *SI = dyn_cast<StoreInst>(&I);
2893 if (!SI)
2894 continue;
2895 Type *ElTy = SI->getValueOperand()->getType();
2896 if (ElTy->isAggregateType() || ElTy->isVectorTy())
2897 AggrStores.insert(&I);
2898 }
2899
2900 B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
2901 for (auto &GV : Func.getParent()->globals())
2902 processGlobalValue(GV, B);
2903
2904 preprocessUndefs(B);
2905 preprocessCompositeConstants(B);
2906 SmallVector<Instruction *> Worklist(
2907 llvm::make_pointer_range(instructions(Func)));
2908
2909 applyDemangledPtrArgTypes(B);
2910
2911 // Pass forward: use operand to deduce instructions result.
2912 for (auto &I : Worklist) {
2913 // Don't emit intrinsics for convergence intrinsics.
2914 if (isConvergenceIntrinsic(I))
2915 continue;
2916
2917 bool Postpone = insertAssignPtrTypeIntrs(I, B, false);
2918 // if Postpone is true, we can't decide on pointee type yet
2919 insertAssignTypeIntrs(I, B);
2920 insertPtrCastOrAssignTypeInstr(I, B);
2921 insertSpirvDecorations(I, B);
2922 // if instruction requires a pointee type set, let's check if we know it
2923 // already, and force it to be i8 if not
2924 if (Postpone && !GR->findAssignPtrTypeInstr(I))
2925 insertAssignPtrTypeIntrs(I, B, true);
2926
2927 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I))
2928 useRoundingMode(FPI, B);
2929 }
2930
2931 // Pass backward: use instructions results to specify/update/cast operands
2932 // where needed.
2933 SmallPtrSet<Instruction *, 4> IncompleteRets;
2934 for (auto &I : llvm::reverse(instructions(Func)))
2935 deduceOperandElementType(&I, &IncompleteRets);
2936
2937 // Forward pass for PHIs only; their operands do not precede the
2938 // instruction in the order implied by `instructions(Func)`.
2939 for (BasicBlock &BB : Func)
2940 for (PHINode &Phi : BB.phis())
2941 if (isPointerTy(Phi.getType()))
2942 deduceOperandElementType(&Phi, nullptr);
2943
2944 for (auto *I : Worklist) {
2945 TrackConstants = true;
2946 if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
2947 setInsertPointAfterDef(B, I);
2948 // Visitors return either the original/newly created instruction for
2949 // further processing, nullptr otherwise.
2950 I = visit(*I);
2951 if (!I)
2952 continue;
2953
2954 // Don't emit intrinsics for convergence operations.
2955 if (isConvergenceIntrinsic(I))
2956 continue;
2957
2959 processInstrAfterVisit(I, B);
2960 }
2961
2962 return true;
2963}
2964
2965 // Try to deduce a better element type for untyped pointers.
2966bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
2967 if (!GR || TodoTypeSz == 0)
2968 return false;
2969
2970 unsigned SzTodo = TodoTypeSz;
2971 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
2972 for (auto [Op, Enabled] : TodoType) {
2973 // TODO: add isa<CallInst>(Op) to continue
2974 if (!Enabled || isa<GetElementPtrInst>(Op))
2975 continue;
2976 CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
2977 Type *KnownTy = GR->findDeducedElementType(Op);
2978 if (!KnownTy || !AssignCI)
2979 continue;
2980 assert(Op == AssignCI->getArgOperand(0));
2981 // Try to improve the type deduced after all Functions are processed.
2982 if (auto *CI = dyn_cast<Instruction>(Op)) {
2983 CurrF = CI->getParent()->getParent();
2984 std::unordered_set<Value *> Visited;
2985 if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
2986 if (ElemTy != KnownTy) {
2987 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2988 propagateElemType(CI, ElemTy, VisitedSubst);
2989 eraseTodoType(Op);
2990 continue;
2991 }
2992 }
2993 }
2994
2995 if (Op->hasUseList()) {
2996 for (User *U : Op->users()) {
2997 Instruction *Inst = dyn_cast<Instruction>(U);
2998 if (Inst && !isa<IntrinsicInst>(Inst))
2999 ToProcess[Inst].insert(Op);
3000 }
3001 }
3002 }
3003 if (TodoTypeSz == 0)
3004 return true;
3005
3006 for (auto &F : M) {
3007 CurrF = &F;
3008 SmallPtrSet<Instruction *, 4> IncompleteRets;
3009 for (auto &I : llvm::reverse(instructions(F))) {
3010 auto It = ToProcess.find(&I);
3011 if (It == ToProcess.end())
3012 continue;
3013 It->second.remove_if([this](Value *V) { return !isTodoType(V); });
3014 if (It->second.size() == 0)
3015 continue;
3016 deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
3017 if (TodoTypeSz == 0)
3018 return true;
3019 }
3020 }
3021
3022 return SzTodo > TodoTypeSz;
3023}
3024
3025// Parse and store argument types of function declarations where needed.
3026void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
3027 for (auto &F : M) {
3028 if (!F.isDeclaration() || F.isIntrinsic())
3029 continue;
3030 // get the demangled name
3031 std::string DemangledName = getOclOrSpirvBuiltinDemangledName(F.getName());
3032 if (DemangledName.empty())
3033 continue;
3034 // allow only OpGroupAsyncCopy use case at the moment
3035 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(F);
3036 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3037 DemangledName, ST.getPreferredInstructionSet());
3038 if (Opcode != SPIRV::OpGroupAsyncCopy)
3039 continue;
3040 // find pointer arguments
3041 SmallVector<unsigned> Idxs;
3042 for (unsigned OpIdx = 0; OpIdx < F.arg_size(); ++OpIdx) {
3043 Argument *Arg = F.getArg(OpIdx);
3044 if (isPointerTy(Arg->getType()) && !hasPointeeTypeAttr(Arg))
3045 Idxs.push_back(OpIdx);
3046 }
3047 if (!Idxs.size())
3048 continue;
3049 // parse function arguments
3050 LLVMContext &Ctx = F.getContext();
3051 SmallVector<StringRef, 10> TypeStrs;
3052 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3053 if (!TypeStrs.size())
3054 continue;
3055 // find type info for pointer arguments
3056 for (unsigned Idx : Idxs) {
3057 if (Idx >= TypeStrs.size())
3058 continue;
3059 if (Type *ElemTy =
3060 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3061 if (TypedPointerType::isValidElementType(ElemTy) &&
3062 !ElemTy->isTargetExtTy())
3063 FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
3064 }
3065 }
3066}
3067
3068bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3069 bool Changed = false;
3070
3071 parseFunDeclarations(M);
3072 insertConstantsForFPFastMathDefault(M);
3073
3074 TodoType.clear();
3075 for (auto &F : M)
3076 Changed |= runOnFunction(F);
3077
3078 // Specify function parameters after all functions were processed.
3079 for (auto &F : M) {
3080 // check if function parameter types are set
3081 CurrF = &F;
3082 if (!F.isDeclaration() && !F.isIntrinsic()) {
3083 IRBuilder<> B(F.getContext());
3084 processParamTypes(&F, B);
3085 }
3086 }
3087
3088 CanTodoType = false;
3089 Changed |= postprocessTypes(M);
3090
3091 if (HaveFunPtrs)
3092 Changed |= processFunctionPointers(M);
3093
3094 return Changed;
3095}
3096
3097 ModulePass *llvm::createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM) {
3098 return new SPIRVEmitIntrinsics(TM);
3099}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
Function * Fun
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static bool isFirstIndexZero(const GetElementPtrInst *GEP)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
#define SPIRV_BACKEND_SERVICE_FUN_NAME
Definition SPIRVUtils.h:523
StringSet - A set-like wrapper for the StringMap.
DEMANGLE_NAMESPACE_BEGIN bool starts_with(std::string_view self, char C) noexcept
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
const Function * getParent() const
Definition Argument.h:44
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:536
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate for the type of this constant.
Definition Constants.h:168
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
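For illustration, a minimal sketch of the find/try_emplace pattern the DenseMap entries above describe; the cache and helper names are hypothetical:

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"

// Hypothetical memoization helper: try_emplace inserts only when the
// key is absent and reports whether it did so in the .second flag.
static llvm::Type *lookupOrRecord(llvm::DenseMap<llvm::Value *, llvm::Type *> &Cache,
                                  llvm::Value *V) {
  auto It = Cache.find(V);
  if (It != Cache.end())
    return It->second;                          // cache hit
  auto Res = Cache.try_emplace(V, V->getType()); // miss: insert once
  return Res.first->second;
}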
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:640
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
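A minimal sketch of FunctionType::get feeding Function::Create, matching the signatures above; the function name "my.helper" is purely illustrative:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

// Declares a hypothetical "void my.helper(i32)" in module M.
static llvm::Function *declareHelper(llvm::Module &M) {
  llvm::LLVMContext &Ctx = M.getContext();
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      llvm::Type::getVoidTy(Ctx), {llvm::Type::getInt32Ty(Ctx)},
      /*isVarArg=*/false);
  return llvm::Function::Create(FTy, llvm::GlobalValue::ExternalLinkage,
                                /*AddrSpace=*/0, "my.helper", &M);
}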
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Definition Function.cpp:363
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intrinsic if the function is not an intrinsic.
Definition Function.h:244
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:249
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
size_t arg_size() const
Definition Function.h:899
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
Argument * getArg(unsigned i) const
Definition Function.h:884
An instruction for type-safe pointer arithmetic to access elements of arrays and structs.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static unsigned getPointerOperandIndex()
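A minimal sketch of GetElementPtrInst::Create as declared above; it assumes an i32 element type and takes the insertion point as an argument:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

// Builds "getelementptr i32, ptr %Base, <Idx>" immediately before InsertPt.
static llvm::Value *emitGep(llvm::Value *Base, llvm::Value *Idx,
                            llvm::Instruction *InsertPt) {
  llvm::LLVMContext &Ctx = InsertPt->getContext();
  return llvm::GetElementPtrInst::Create(llvm::Type::getInt32Ty(Ctx), Base,
                                         {Idx}, "elt", InsertPt);
}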
PointerType * getType() const
Global values are always pointers.
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2788
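A minimal sketch of the IRBuilder usage pattern: construct the builder at an insertion point, then create instructions through it. The helper name is illustrative:

#include "llvm/IR/IRBuilder.h"

// Inserts "%plus1 = add i32 %I, 1" right after I. Assumes I produces an
// i32 and is not a terminator (so getNextNode() is non-null).
static llvm::Value *emitAddOne(llvm::Instruction *I) {
  llvm::IRBuilder<> B(I->getNextNode());
  return B.CreateAdd(I, B.getInt32(1), "plus1");
}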
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
Definition InstVisitor.h:78
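A minimal InstVisitor sketch, following the CRTP convention of the base class above: override only the visit methods you care about, everything else defaults to a no-op:

#include "llvm/IR/InstVisitor.h"

// Counts store instructions; all other opcodes fall through silently.
struct StoreCounter : public llvm::InstVisitor<StoreCounter> {
  unsigned NumStores = 0;
  void visitStoreInst(llvm::StoreInst &SI) { ++NumStores; }
};
// Usage: StoreCounter C; C.visit(F); // where F is a Function &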
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
static unsigned getPointerOperandIndex()
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:608
Flags
Flags values. These may be or'd together.
static LLVM_ABI MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:104
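A minimal sketch combining MDString::get, MDTuple::get, and MetadataAsValue::get from the entries above to carry a string through a Value operand; the helper name is hypothetical:

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

// Wraps S as the node !{!"S"} and re-wraps that node as a Value.
static llvm::Value *stringAsValue(llvm::LLVMContext &Ctx, llvm::StringRef S) {
  llvm::Metadata *Ops[] = {llvm::MDString::get(Ctx, S)};
  llvm::MDTuple *Node = llvm::MDTuple::get(Ctx, Ops);
  return llvm::MetadataAsValue::get(Ctx, Node);
}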
Metadata * getMetadata() const
Definition Metadata.h:201
ModulePass class - This class is used to implement unstructured interprocedural optimizations and analyses.
Definition Pass.h:255
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
bool contains(StringRef key) const
Check if the set contains the given key.
Definition StringSet.h:60
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:619
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition Type.cpp:907
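A minimal sketch of TargetExtType::get; the type name and parameters are illustrative, not an assertion about any particular target's encoding:

#include "llvm/IR/DerivedTypes.h"

// Builds target("example.handle", i32, 4): one type and one integer parameter.
static llvm::Type *buildHandleTy(llvm::LLVMContext &Ctx) {
  return llvm::TargetExtType::get(Ctx, "example.handle",
                                  {llvm::Type::getInt32Ty(Ctx)}, {4});
}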
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
Type * getArrayElementType() const
Definition Type.h:408
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
bool isTargetExtTy() const
Return true if this is a target extension type.
Definition Type.h:203
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:285
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:284
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:282
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
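A minimal sketch of TypedPointerType::get, which keeps a pointee type alongside the address space where a plain opaque pointer would drop it:

#include "llvm/IR/Type.h"
#include "llvm/IR/TypedPointerType.h"

// An i8 pointer in address space 1, with the pointee type preserved.
static llvm::TypedPointerType *i8PtrAS1(llvm::LLVMContext &Ctx) {
  return llvm::TypedPointerType::get(llvm::Type::getInt8Ty(Ctx),
                                     /*AddressSpace=*/1);
}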
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
op_range operands()
Definition User.h:292
void setOperand(unsigned i, Value *Val)
Definition User.h:237
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:24
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
bool user_empty() const
Definition Value.h:389
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
const ParentTy * getParent() const
Definition ilist_node.h:34
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
DenseSetImpl< ValueT, DenseMap< ValueT, DenseSetEmpty, ValueInfoT, DenseSetPair< ValueT > >, ValueInfoT > DenseSet
Definition DenseSet.h:264
ElementType
The element type of an SRV or UAV resource.
Definition DXILABI.h:60
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
bool getVacantFunctionName(Module &M, std::string &Name)
@ Offset
Definition DWP.cpp:532
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
Definition SPIRVUtils.h:401
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index of the item in the sequence, and B, C, ... are the values from the original input ranges.
Definition STLExtras.h:2472
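A minimal sketch of llvm::enumerate on a single range; the structured binding yields the 0-based index alongside each element:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"

static void dumpIndexed(const llvm::SmallVectorImpl<llvm::Value *> &Ops) {
  for (auto [Idx, Op] : llvm::enumerate(Ops))
    llvm::errs() << Idx << ": " << *Op << "\n"; // Idx counts from 0
}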
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:365
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
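A minimal sketch of the isa / cast / dyn_cast trio on a Value; purely illustrative:

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

// dyn_cast returns null on type mismatch; cast asserts instead, so it
// is only safe after an isa check (or other proof of the dynamic type).
static llvm::Value *pointerOperandOrNull(llvm::Value *V) {
  if (auto *SI = llvm::dyn_cast<llvm::StoreInst>(V))
    return SI->getPointerOperand();
  if (llvm::isa<llvm::LoadInst>(V))
    return llvm::cast<llvm::LoadInst>(V)->getPointerOperand();
  return nullptr;
}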
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
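A minimal sketch of append_range copying a call's argument operands into a vector; the helper name is illustrative:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/InstrTypes.h"

// Appends every argument operand of CB to Out in one call.
static void collectArgs(llvm::CallBase &CB,
                        llvm::SmallVectorImpl<llvm::Value *> &Out) {
  llvm::append_range(Out, CB.args());
}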
FPDecorationId
Definition SPIRVUtils.h:547
bool isNestedPointer(const Type *Ty)
MetadataAsValue * buildMD(Value *Arg)
Definition SPIRVUtils.h:511
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
Definition SPIRVUtils.h:396
bool isVector1(Type *Ty)
Definition SPIRVUtils.h:489
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:359
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
Type * getPointeeTypeByAttr(Argument *Arg)
Definition SPIRVUtils.h:378
bool hasPointeeTypeAttr(Argument *Arg)
Definition SPIRVUtils.h:373
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
Definition SPIRVUtils.h:451
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
bool hasInitializer(const GlobalVariable *GV)
Definition SPIRVUtils.h:344
Type * normalizeType(Type *Ty)
Definition SPIRVUtils.h:497
@ Enabled
Convert any .debug_str_offsets tables to DWARF64 if needed.
Definition DWP.h:27
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Type * getPointeeType(const Type *Ty)
Definition SPIRVUtils.h:428
PoisonValue * getNormalizedPoisonValue(Type *Ty)
Definition SPIRVUtils.h:507
bool isUntypedPointerTy(const Type *T)
Definition SPIRVUtils.h:354
Type * reconstitutePeeledArrayType(Type *Ty)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)
Definition SPIRVUtils.h:146