//===-- SPIRVEmitIntrinsics.cpp - emit SPIRV intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The pass emits SPIRV intrinsics keeping essential high-level information for
// the translation of LLVM IR to SPIR-V.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVBuiltins.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/IR/TypedPointerType.h"

#include <cassert>
#include <queue>
#include <unordered_set>

// This pass performs the following transformation on LLVM IR level required
// for the following translation to SPIR-V:
// - replaces direct usages of aggregate constants with target-specific
//   intrinsics;
// - replaces aggregate-related instructions (extract/insert, ld/st, etc)
//   with target-specific intrinsics;
// - emits intrinsics for the global variable initializers since IRTranslator
//   doesn't handle them and it's not very convenient to translate them
//   ourselves;
// - emits intrinsics to keep track of the string names assigned to the values;
// - emits intrinsics to keep track of constants (this is necessary to have an
//   LLVM IR constant after the IRTranslation is completed) for their further
//   deduplication;
// - emits intrinsics to keep track of original LLVM types of the values
//   to be able to emit proper SPIR-V types eventually.
//
// TODO: consider removing spv.track.constant in favor of spv.assign.type.
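//
// Illustrative sketch (an assumption for intuition, not part of the original
// header): for input IR like
//   %v = load i32, ptr %p
// the pass emits, roughly,
//   call void @llvm.spv.assign.ptr.type.p0(ptr %p, metadata i32 poison, i32 0)
// so the pointee type of %p survives until instruction selection; the exact
// intrinsic overload suffixes and operand order may differ from this sketch.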

using namespace llvm;

namespace llvm::SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
} // namespace llvm::SPIRV

namespace {

class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  SPIRVTargetMachine *TM = nullptr;
  SPIRVGlobalRegistry *GR = nullptr;
  Function *CurrF = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  std::unordered_set<Value *> Named;

  // map of function declarations to <pointer arg index => element type>
  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  // a register of Instructions that don't have a complete type definition
  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
  void insertTodoType(Value *Op) {
    // TODO: add isa<CallInst>(Op) to no-insert
    if (CanTodoType && !isa<GetElementPtrInst>(Op)) {
      auto It = TodoType.try_emplace(Op, true);
      if (It.second)
        ++TodoTypeSz;
    }
  }
  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {
      It->second = false;
      --TodoTypeSz;
    }
  }
  bool isTodoType(Value *Op) {
    if (!CanTodoType)
      return false;
    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;
  }
  // a register of Instructions that were visited by deduceOperandElementType()
  // to validate operand types with an instruction
  std::unordered_set<Instruction *> TypeValidated;

  // well known result types of builtins
  enum WellKnownTypes { Event };

  // deduce element type of untyped pointers
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  void maybeAssignPtrType(Type *&Ty, Value *I, Type *RefTy,
                          bool UnknownElemTypeI8);

  // deduce nested types of composites
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *Ty,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);

  // deduce Types of operands of the Instruction if possible
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);

  void preprocessCompositeConstants(IRBuilder<> &B);
  void preprocessUndefs(IRBuilder<> &B);

  Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
                        bool IsPostprocessing);

  void replaceMemInstrUses(Instruction *Old, Instruction *New, IRBuilder<> &B);
  void processInstrAfterVisit(Instruction *I, IRBuilder<> &B);
  bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
                                bool UnknownElemTypeI8);
  void insertAssignTypeIntrs(Instruction *I, IRBuilder<> &B);
  void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
                                    IRBuilder<> &B);
  void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
                                        Type *ExpectedElementType,
                                        unsigned OperandToReplace,
                                        IRBuilder<> &B);
  void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
  bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
  void insertConstantsForFPFastMathDefault(Module &M);
  void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
  void processParamTypes(Function *F, IRBuilder<> &B);
  void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
                                  std::unordered_set<Function *> &FVisited);

  bool deduceOperandElementTypeCalledFunction(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool &Incomplete);
  void deduceOperandElementTypeFunctionPointer(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool IsPostprocessing);
  bool deduceOperandElementTypeFunctionRet(
      Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
      const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
      Type *&KnownElemTy, Value *Op, Function *F);

  CallInst *buildSpvPtrcast(Function *F, Value *Op, Type *ElemTy);
  void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
                                   DenseMap<Function *, CallInst *> Ptrcasts);
  void propagateElemType(Value *Op, Type *ElemTy,
                         DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void
  propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                       DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
                            std::unordered_set<Value *> &Visited,
                            DenseMap<Function *, CallInst *> Ptrcasts);

  void replaceAllUsesWith(Value *Src, Value *Dest, bool DeleteOld = true);
  void replaceAllUsesWithAndErase(IRBuilder<> &B, Instruction *Src,
                                  Instruction *Dest, bool DeleteOld = true);

  void applyDemangledPtrArgTypes(IRBuilder<> &B);

  GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);

  bool runOnFunction(Function &F);
  bool postprocessTypes(Module &M);
  bool processFunctionPointers(Module &M);
  void parseFunDeclarations(Module &M);

  void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);

  // Tries to walk the type accessed by the given GEP instruction.
  // For each nested type access, one of the 2 callbacks is called:
  //  - OnLiteralIndexing when the index is a known constant value.
  //    Parameters:
  //      PointedType: the pointed type resulting from this indexing.
  //      Index: index of the element in the parent type.
  //        If the parent type is an array, this is the index in the array.
  //        If the parent type is a struct, this is the field index.
  //  - OnDynamicIndexing when the index is a non-constant value.
  //    This callback is only called when indexing into an array.
  //    Parameters:
  //      ElementType: the type of the elements stored in the parent array.
  //      Offset: the Value* containing the byte offset into the array.
  // Returns true if an error occurred during the walk, false otherwise.
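  //
  // Worked example (hypothetical type, shown only for intuition): for
  //   %S = type { [4 x i32], float }
  // a constant byte offset of 16 fires OnLiteralIndexing(float, 1), while an
  // offset of 8 fires OnLiteralIndexing([4 x i32], 0) and then
  // OnLiteralIndexing(i32, 2).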
  bool walkLogicalAccessChain(
      GetElementPtrInst &GEP,
      const std::function<void(Type *PointedType, uint64_t Index)>
          &OnLiteralIndexing,
      const std::function<void(Type *ElementType, Value *Offset)>
          &OnDynamicIndexing);

  // Returns the type accessed using the given GEP instruction by relying
  // on the GEP type.
  // FIXME: GEP types are not supposed to be used to retrieve the pointed
  // type. This must be fixed.
  Type *getGEPType(GetElementPtrInst *GEP);

  // Returns the type accessed using the given GEP instruction by walking
  // the source type using the GEP indices.
  // FIXME: without help from the frontend, this method cannot reliably
  // retrieve the stored type, nor can it robustly determine the depth of
  // the type we are accessing.
  Type *getGEPTypeLogical(GetElementPtrInst *GEP);

  Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);

public:
  static char ID;
  SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
      : ModulePass(ID), TM(TM) {}
  Instruction *visitInstruction(Instruction &I) { return &I; }
  Instruction *visitSwitchInst(SwitchInst &I);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitBitCastInst(BitCastInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitInsertValueInst(InsertValueInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitLoadInst(LoadInst &I);
  Instruction *visitStoreInst(StoreInst &I);
  Instruction *visitAllocaInst(AllocaInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *visitCallInst(CallInst &I);

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);
  }
};

bool isConvergenceIntrinsic(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;

  return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
}

bool expectIgnoredInIRTranslation(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;
  switch (II->getIntrinsicID()) {
  case Intrinsic::invariant_start:
  case Intrinsic::spv_resource_handlefrombinding:
  case Intrinsic::spv_resource_getpointer:
    return true;
  default:
    return false;
  }
}

// Returns the source pointer from `I` ignoring intermediate ptrcast.
Value *getPointerRoot(Value *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
      Value *V = II->getArgOperand(0);
      return getPointerRoot(V);
    }
  }
  return I;
}

} // namespace

char SPIRVEmitIntrinsics::ID = 0;

INITIALIZE_PASS(SPIRVEmitIntrinsics, "emit-intrinsics", "SPIRV emit intrinsics",
                false, false)

static inline bool isAssignTypeInstr(const Instruction *I) {
  return isa<IntrinsicInst>(I) &&
         cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::spv_assign_type;
}

static bool isMemInstrToReplace(Instruction *I) {
  return isa<StoreInst>(I) || isa<LoadInst>(I) || isa<InsertValueInst>(I) ||
         isa<ExtractValueInst>(I) || isa<AtomicCmpXchgInst>(I);
}

static bool isAggrConstForceInt32(const Value *V) {
  return isa<ConstantArray>(V) || isa<ConstantStruct>(V) ||
         isa<ConstantDataArray>(V) ||
         (isa<ConstantAggregateZero>(V) && !V->getType()->isVectorTy());
}

static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I) {
  if (isa<PHINode>(I))
    B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
  else
    B.SetInsertPoint(I);
}

static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I) {
  B.SetCurrentDebugLocation(I->getDebugLoc());
  if (I->getType()->isVoidTy())
    B.SetInsertPoint(I->getNextNode());
  else
    B.SetInsertPoint(*I->getInsertionPointAfterDef());
}

static bool requireAssignType(Instruction *I) {
  if (const auto *Intr = dyn_cast<IntrinsicInst>(I)) {
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
      return false;
    }
  }
  return true;
}

static inline void reportFatalOnTokenType(const Instruction *I) {
  if (I->getType()->isTokenTy())
    report_fatal_error("A token is encountered but SPIR-V without extensions "
                       "does not support token type",
                       false);
}

static void emitAssignName(Instruction *I, IRBuilder<> &B) {
  if (!I->hasName() || I->getType()->isAggregateType() ||
      expectIgnoredInIRTranslation(I))
    return;

  if (isa<CallBase>(I)) {
    // TODO: this is a temporary workaround meant to prevent inserting internal
    // noise into the generated binary; remove once we rework the entire
    // aggregate removal machinery.
    StringRef Name = I->getName();
    if (Name.starts_with("spv.mutated_callsite"))
      return;
    if (Name.starts_with("spv.named_mutated_callsite"))
      I->setName(Name.substr(Name.rfind('.') + 1));
  }
  reportFatalOnTokenType(I);
  setInsertPointAfterDef(B, I);
  LLVMContext &Ctx = I->getContext();
  std::vector<Value *> Args = {
      I, MetadataAsValue::get(
             Ctx, MDNode::get(Ctx, MDString::get(Ctx, I->getName())))};
  B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
}

void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
                                             bool DeleteOld) {
  GR->replaceAllUsesWith(Src, Dest, DeleteOld);
  // Update incomplete type records, if any.
  if (isTodoType(Src)) {
    if (DeleteOld)
      eraseTodoType(Src);
    insertTodoType(Dest);
  }
}

void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
                                                     Instruction *Src,
                                                     Instruction *Dest,
                                                     bool DeleteOld) {
  replaceAllUsesWith(Src, Dest, DeleteOld);
  std::string Name = Src->hasName() ? Src->getName().str() : "";
  Src->eraseFromParent();
  if (!Name.empty()) {
    Dest->setName(Name);
    if (Named.insert(Dest).second)
      emitAssignName(Dest, B);
  }
}

static bool IsKernelArgInt8(Function *F, StoreInst *SI) {
  return SI && F->getCallingConv() == CallingConv::SPIR_KERNEL &&
         isPointerTy(SI->getValueOperand()->getType()) &&
         isa<Argument>(SI->getValueOperand());
}

// Maybe restore original function return type.
static Type *restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I,
                                Type *Ty) {
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
      !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
    return Ty;
  if (Type *OriginalTy = GR->findMutated(CI->getCalledFunction()))
    return OriginalTy;
  return Ty;
}

// Reconstruct type with nested element types according to deduced type info.
// Return nullptr if no detailed type info is available.
Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
                                           bool IsPostprocessing) {
  Type *Ty = Op->getType();
  if (auto *OpI = dyn_cast<Instruction>(Op))
    Ty = restoreMutatedType(GR, OpI, Ty);
  if (!isUntypedPointerTy(Ty))
    return Ty;
  // try to find the pointee type
  if (Type *NestedTy = GR->findDeducedElementType(Op))
    return getTypedPointerWrapper(NestedTy, getPointerAddressSpace(Ty));
  // not a pointer according to the type info (e.g., Event object)
  CallInst *CI = GR->findAssignPtrTypeInstr(Op);
  if (CI) {
    MetadataAsValue *MD = cast<MetadataAsValue>(CI->getArgOperand(1));
    return cast<ConstantAsMetadata>(MD->getMetadata())->getType();
  }
  if (UnknownElemTypeI8) {
    if (!IsPostprocessing)
      insertTodoType(Op);
    return getTypedPointerWrapper(IntegerType::getInt8Ty(Op->getContext()),
                                  getPointerAddressSpace(Ty));
  }
  return nullptr;
}

CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
                                               Type *ElemTy) {
  IRBuilder<> B(Op->getContext());
  if (auto *OpI = dyn_cast<Instruction>(Op)) {
    // spv_ptrcast's argument Op denotes an instruction that generates
    // a value, and we may use getInsertionPointAfterDef()
    setInsertPointAfterDef(B, OpI);
  } else if (auto *OpA = dyn_cast<Argument>(Op)) {
    B.SetInsertPointPastAllocas(OpA->getParent());
    B.SetCurrentDebugLocation(DebugLoc());
  } else {
    B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
  }
  Type *OpTy = Op->getType();
  SmallVector<Type *, 2> Types = {OpTy, OpTy};
  SmallVector<Value *, 2> Args = {Op, buildMD(getNormalizedPoisonValue(ElemTy)),
                                  B.getInt32(getPointerAddressSpace(OpTy))};
  CallInst *PtrCasted =
      B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  GR->buildAssignPtr(B, ElemTy, PtrCasted);
  return PtrCasted;
}

void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
    Value *Op, Type *ElemTy, Instruction *I,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  Function *F = I->getParent()->getParent();
  CallInst *PtrCastedI = nullptr;
  auto It = Ptrcasts.find(F);
  if (It == Ptrcasts.end()) {
    PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
    Ptrcasts[F] = PtrCastedI;
  } else {
    PtrCastedI = It->second;
  }
  I->replaceUsesOfWith(Op, PtrCastedI);
}

void SPIRVEmitIntrinsics::propagateElemType(
    Value *Op, Type *ElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
  }
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  std::unordered_set<Value *> Visited;
  DenseMap<Function *, CallInst *> Ptrcasts;
  propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
                       std::move(Ptrcasts));
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
    std::unordered_set<Value *> &Visited,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  if (!Visited.insert(Op).second)
    return;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
  }
}

// Sets the element pointer type to the given ValueTy, and tries to refine it
// further (recursively) from the Operand value, if needed.

Type *
SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
                                      UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
    Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  Type *Ty = ValueTy;
  if (Operand) {
    if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
      if (Type *NestedTy =
              deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
        Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
    } else {
      Ty = deduceNestedTypeHelper(dyn_cast<User>(Operand), Ty, Visited,
                                  UnknownElemTypeI8);
    }
  }
  return Ty;
}

// Traverse User instructions to deduce an element pointer type of the operand.
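// For example (illustrative): an untyped pointer %op that only appears as
//   %v = load i32, ptr %op
// yields i32 here, because the load's result type pins down the pointee.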
Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
    Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
  if (!Op || !isPointerTy(Op->getType()) || isa<ConstantPointerNull>(Op) ||
      isa<UndefValue>(Op))
    return nullptr;

  if (auto ElemTy = getPointeeType(Op->getType()))
    return ElemTy;

  // maybe we already know operand's element type
  if (Type *KnownTy = GR->findDeducedElementType(Op))
    return KnownTy;

  for (User *OpU : Op->users()) {
    if (Instruction *Inst = dyn_cast<Instruction>(OpU)) {
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
        return Ty;
    }
  }
  return nullptr;
}

// Implements what we know in advance about intrinsics and builtin calls
// TODO: consider feasibility of this particular case to be generalized by
// encoding knowledge about intrinsics and builtin calls by corresponding
// specification rules
static Type *getPointeeTypeByCallInst(StringRef DemangledName,
                                      Function *CalledF, unsigned OpIdx) {
  if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
       DemangledName.starts_with("printf(")) &&
      OpIdx == 0)
    return IntegerType::getInt8Ty(CalledF->getContext());
  return nullptr;
}

// Deduce and return a successfully deduced Type of the Instruction,
// or nullptr otherwise.
Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
                                                   bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);
}

void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                                             bool UnknownElemTypeI8) {
  if (isUntypedPointerTy(RefTy)) {
    if (!UnknownElemTypeI8)
      return;
    insertTodoType(Op);
  }
  Ty = RefTy;
}

bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
    GetElementPtrInst &GEP,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
  // We only rewrite i8* GEPs. Others should be left as-is.
  // A valid i8* GEP must always have a single index.
  assert(GEP.getSourceElementType() ==
         IntegerType::getInt8Ty(CurrF->getContext()));
  assert(GEP.getNumIndices() == 1);

  auto &DL = CurrF->getDataLayout();
  Value *Src = getPointerRoot(GEP.getPointerOperand());
  Type *CurType = deduceElementType(Src, true);

  Value *Operand = *GEP.idx_begin();
  ConstantInt *CI = dyn_cast<ConstantInt>(Operand);
  if (!CI) {
    ArrayType *AT = dyn_cast<ArrayType>(CurType);
    // Operand is not constant. Either we have an array and accept it, or we
    // give up.
    if (AT)
      OnDynamicIndexing(AT->getElementType(), Operand);
    return AT == nullptr;
  }

  assert(CI);
  uint64_t Offset = CI->getZExtValue();

  do {
    if (ArrayType *AT = dyn_cast<ArrayType>(CurType)) {
      uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
      assert(Offset < AT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset = Offset - (Index * EltTypeSize);
      CurType = AT->getElementType();
      OnLiteralIndexing(CurType, Index);
    } else if (StructType *ST = dyn_cast<StructType>(CurType)) {
      uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
      assert(Offset < StructSize);
      (void)StructSize;
      const auto &STL = DL.getStructLayout(ST);
      unsigned Element = STL->getElementContainingOffset(Offset);
      Offset -= STL->getElementOffset(Element);
      CurType = ST->getElementType(Element);
      OnLiteralIndexing(CurType, Element);
    } else {
      // Vector type indexing should not use GEP.
      // So if we have an index left, something is wrong. Giving up.
      return true;
    }
  } while (Offset > 0);

  return false;
}

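// Sketch of the rewrite performed below (hypothetical IR, for intuition
// only): with %p deduced as a pointer to %S = type { [4 x i32], float },
//   %q = getelementptr i8, ptr %p, i64 16
// becomes roughly
//   %q = call ptr @llvm.spv.gep(i1 false, ptr %p, i32 0, i64 1)
// i.e. a typed access chain into field 1; the exact intrinsic mangling and
// index types may differ from this sketch.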
Instruction *
SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
  auto &DL = CurrF->getDataLayout();
  IRBuilder<> B(GEP.getParent());
  B.SetInsertPoint(&GEP);

  std::vector<Value *> Indices;
  Indices.push_back(ConstantInt::get(
      IntegerType::getInt32Ty(CurrF->getContext()), 0, /* Signed= */ false));
  walkLogicalAccessChain(
      GEP,
      [&Indices, &B](Type *EltType, uint64_t Index) {
        Indices.push_back(
            ConstantInt::get(B.getInt64Ty(), Index, /* Signed= */ false));
      },
      [&Indices, &B, &DL](Type *EltType, Value *Offset) {
        uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
        Value *Index = B.CreateUDiv(
            Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
                                     /* Signed= */ false));
        Indices.push_back(Index);
      });

  SmallVector<Type *, 2> Types = {GEP.getType(), GEP.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(GEP.isInBounds()));
  Args.push_back(GEP.getOperand(0));
  llvm::append_range(Args, Indices);
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &GEP, NewI);
  return NewI;
}

Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {

  Type *CurType = GEP->getResultElementType();

  bool Interrupted = walkLogicalAccessChain(
      *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
      [&CurType](Type *EltType, Value *Index) { CurType = EltType; });

  return Interrupted ? GEP->getResultElementType() : CurType;
}

Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
  if (Ref->getSourceElementType() ==
          IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtargetImpl()->isLogicalSPIRV()) {
    return getGEPTypeLogical(Ref);
  }

  Type *Ty = nullptr;
  // TODO: not sure if GetElementPtrInst::getTypeAtIndex() does anything
  // useful here
  if (isNestedPointer(Ref->getSourceElementType())) {
    Ty = Ref->getSourceElementType();
    for (Use &U : drop_begin(Ref->indices()))
      Ty = GetElementPtrInst::getTypeAtIndex(Ty, U.get());
  } else {
    Ty = Ref->getResultElementType();
  }
  return Ty;
}

Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
    Value *I, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8,
    bool IgnoreKnownType) {
  // allow to pass nullptr as an argument
  if (!I)
    return nullptr;

  // maybe already known
  if (!IgnoreKnownType)
    if (Type *KnownTy = GR->findDeducedElementType(I))
      return KnownTy;

  // maybe a cycle
  if (!Visited.insert(I).second)
    return nullptr;

  // fallback value in case when we fail to deduce a type
  Type *Ty = nullptr;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<AllocaInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getAllocatedType(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    Ty = getGEPType(Ref);
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    Value *Op = Ref->getPointerOperand();
    Type *KnownTy = GR->findDeducedElementType(Op);
    if (!KnownTy)
      KnownTy = Op->getType();
    if (Type *ElemTy = getPointeeType(KnownTy))
      maybeAssignPtrType(Ty, I, ElemTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GlobalValue>(I)) {
    if (auto *Fn = dyn_cast<Function>(Ref)) {
      Ty = SPIRV::getOriginalFunctionType(*Fn);
      GR->addDeducedElementType(I, Ty);
    } else {
      Ty = deduceElementTypeByValueDeep(
          Ref->getValueType(),
          Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr, Visited,
          UnknownElemTypeI8);
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    Type *RefTy = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
                                          UnknownElemTypeI8);
    maybeAssignPtrType(Ty, I, RefTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<IntToPtrInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getDestTy(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
        isPointerTy(Src) && isPointerTy(Dest))
      Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited,
                                   UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Op = Ref->getNewValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    Value *Op = Ref->getValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<PHINode>(I)) {
    Type *BestTy = nullptr;
    unsigned MaxN = 1;
    DenseMap<Type *, unsigned> PhiTys;
    for (int i = Ref->getNumIncomingValues() - 1; i >= 0; --i) {
      Ty = deduceElementTypeByUsersDeep(Ref->getIncomingValue(i), Visited,
                                        UnknownElemTypeI8);
      if (!Ty)
        continue;
      auto It = PhiTys.try_emplace(Ty, 1);
      if (!It.second) {
        ++It.first->second;
        if (It.first->second > MaxN) {
          MaxN = It.first->second;
          BestTy = Ty;
        }
      }
    }
    if (BestTy)
      Ty = BestTy;
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    for (Value *Op : {Ref->getTrueValue(), Ref->getFalseValue()}) {
      Ty = deduceElementTypeByUsersDeep(Op, Visited, UnknownElemTypeI8);
      if (Ty)
        break;
    }
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    static StringMap<unsigned> ResTypeByArg = {
        {"to_global", 0},
        {"to_local", 0},
        {"to_private", 0},
        {"__spirv_GenericCastToPtr_ToGlobal", 0},
        {"__spirv_GenericCastToPtr_ToLocal", 0},
        {"__spirv_GenericCastToPtr_ToPrivate", 0},
        {"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
    // TODO: maybe improve performance by caching demangled names

    auto *II = dyn_cast<IntrinsicInst>(CI);
    if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
      auto *HandleType = cast<TargetExtType>(II->getOperand(0)->getType());
      if (HandleType->getTargetExtName() == "spirv.Image" ||
          HandleType->getTargetExtName() == "spirv.SignedImage") {
        for (User *U : II->users()) {
          Ty = cast<Instruction>(U)->getAccessType();
          if (Ty)
            break;
        }
      } else if (HandleType->getTargetExtName() == "spirv.VulkanBuffer") {
        // This call is supposed to index into an array
        Ty = HandleType->getTypeParameter(0);
        if (Ty->isArrayTy())
          Ty = Ty->getArrayElementType();
        else {
          assert(Ty && Ty->isStructTy());
          uint32_t Index = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
          Ty = cast<StructType>(Ty)->getElementType(Index);
        }
      } else {
        llvm_unreachable("Unknown handle type for spv_resource_getpointer.");
      }
    } else if (II && II->getIntrinsicID() ==
                         Intrinsic::spv_generic_cast_to_ptr_explicit) {
      Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
                                   UnknownElemTypeI8);
    } else if (Function *CalledF = CI->getCalledFunction()) {
      std::string DemangledName =
          getOclOrSpirvBuiltinDemangledName(CalledF->getName());
      if (DemangledName.length() > 0)
        DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
      auto AsArgIt = ResTypeByArg.find(DemangledName);
      if (AsArgIt != ResTypeByArg.end())
        Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
                                     Visited, UnknownElemTypeI8);
      else if (Type *KnownRetTy = GR->findDeducedElementType(CalledF))
        Ty = KnownRetTy;
    }
  }

  // remember the found relationship
  if (Ty && !IgnoreKnownType) {
    // specify nested types if needed, otherwise return unchanged
    GR->addDeducedElementType(I, normalizeType(Ty));
  }

  return Ty;
}

// Re-create a type of the value if it has untyped pointer fields, also nested.
// Return the original value type if no corrections of untyped pointer
// information are found or needed.
Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceNestedTypeHelper(U, U->getType(), Visited, UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
    User *U, Type *OrigTy, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  if (!U)
    return OrigTy;

  // maybe already known
  if (Type *KnownTy = GR->findDeducedCompositeType(U))
    return KnownTy;

  // maybe a cycle
  if (!Visited.insert(U).second)
    return OrigTy;

  if (isa<StructType>(OrigTy)) {
    SmallVector<Type *> Tys;
    bool Change = false;
    for (unsigned i = 0; i < U->getNumOperands(); ++i) {
      Value *Op = U->getOperand(i);
      assert(Op && "Operands should not be null.");
      Type *OpTy = Op->getType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      Tys.push_back(Ty);
      Change |= Ty != OpTy;
    }
    if (Change) {
      Type *NewTy = StructType::create(Tys);
      GR->addDeducedCompositeType(U, NewTy);
      return NewTy;
    }
  } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = ArrTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  } else if (auto *VecTy = dyn_cast<VectorType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = VecTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  }

  return OrigTy;
}

Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
  if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
    return Ty;
  if (!UnknownElemTypeI8)
    return nullptr;
  insertTodoType(I);
  return IntegerType::getInt8Ty(I->getContext());
}

static Type *getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I,
                             Value *PointerOperand) {
  Type *PointeeTy = GR->findDeducedElementType(PointerOperand);
  if (PointeeTy && !isUntypedPointerTy(PointeeTy))
    return nullptr;
  auto *PtrTy = dyn_cast<PointerType>(I->getType());
  if (!PtrTy)
    return I->getType();
  if (Type *NestedTy = GR->findDeducedElementType(I))
    return getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
  return nullptr;
}

// Try to deduce element type for a call base. Returns false if this is an
// indirect function invocation, and true otherwise.
bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool &Incomplete) {
  Function *CalledF = CI->getCalledFunction();
  if (!CalledF)
    return false;
  std::string DemangledName =
      getOclOrSpirvBuiltinDemangledName(CalledF->getName());
  if (DemangledName.length() > 0 &&
      !StringRef(DemangledName).starts_with("llvm.")) {
    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*CalledF);
    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
        DemangledName, ST.getPreferredInstructionSet());
    if (Opcode == SPIRV::OpGroupAsyncCopy) {
      for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2; ++i) {
        Value *Op = CI->getArgOperand(i);
        if (!isPointerTy(Op->getType()))
          continue;
        ++PtrCnt;
        if (Type *ElemTy = GR->findDeducedElementType(Op))
          KnownElemTy = ElemTy; // src will rewrite dest if both are defined
        Ops.push_back(std::make_pair(Op, i));
      }
    } else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
      if (CI->arg_size() == 0)
        return true;
      Value *Op = CI->getArgOperand(0);
      if (!isPointerTy(Op->getType()))
        return true;
      switch (Opcode) {
      case SPIRV::OpAtomicFAddEXT:
      case SPIRV::OpAtomicFMinEXT:
      case SPIRV::OpAtomicFMaxEXT:
      case SPIRV::OpAtomicLoad:
      case SPIRV::OpAtomicCompareExchangeWeak:
      case SPIRV::OpAtomicCompareExchange:
      case SPIRV::OpAtomicExchange:
      case SPIRV::OpAtomicIAdd:
      case SPIRV::OpAtomicISub:
      case SPIRV::OpAtomicOr:
      case SPIRV::OpAtomicXor:
      case SPIRV::OpAtomicAnd:
      case SPIRV::OpAtomicUMin:
      case SPIRV::OpAtomicUMax:
      case SPIRV::OpAtomicSMin:
      case SPIRV::OpAtomicSMax: {
        KnownElemTy = isPointerTy(CI->getType()) ? getAtomicElemTy(GR, CI, Op)
                                                 : CI->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      case SPIRV::OpAtomicStore: {
        if (CI->arg_size() < 4)
          return true;
        Value *ValOp = CI->getArgOperand(3);
        KnownElemTy = isPointerTy(ValOp->getType())
                          ? getAtomicElemTy(GR, CI, Op)
                          : ValOp->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      }
    }
  }
  return true;
}

// Try to deduce element type for a function pointer.
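// E.g. (an illustration, not mandated by the source): for an indirect call
//   %r = call spir_func i32 %fp(ptr %a)
// where %a's pointee was deduced as i32, the sketched KnownElemTy becomes a
// function type whose pointer parameters carry their deduced pointee types,
// and it stays marked incomplete until every pointer argument and the return
// type are resolved.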
void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool IsPostprocessing) {
  Value *Op = CI->getCalledOperand();
  if (!Op || !isPointerTy(Op->getType()))
    return;
  Ops.push_back(std::make_pair(Op, std::numeric_limits<unsigned>::max()));
  FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
  bool IsNewFTy = false, IsIncomplete = false;
  SmallVector<Type *, 4> ArgTys;
  for (auto &&[ParmIdx, Arg] : llvm::enumerate(CI->args())) {
    Type *ArgTy = Arg->getType();
    if (ArgTy->isPointerTy()) {
      if (Type *ElemTy = GR->findDeducedElementType(Arg)) {
        IsNewFTy = true;
        ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
        if (isTodoType(Arg))
          IsIncomplete = true;
      } else {
        IsIncomplete = true;
      }
    } else {
      ArgTy = FTy->getFunctionParamType(ParmIdx);
    }
    ArgTys.push_back(ArgTy);
  }
  Type *RetTy = FTy->getReturnType();
  if (CI->getType()->isPointerTy()) {
    if (Type *ElemTy = GR->findDeducedElementType(CI)) {
      IsNewFTy = true;
      RetTy =
          getTypedPointerWrapper(ElemTy, getPointerAddressSpace(CI->getType()));
      if (isTodoType(CI))
        IsIncomplete = true;
    } else {
      IsIncomplete = true;
    }
  }
  if (!IsPostprocessing && IsIncomplete)
    insertTodoType(Op);
  KnownElemTy =
      IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
}

bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
    Type *&KnownElemTy, Value *Op, Function *F) {
  KnownElemTy = GR->findDeducedElementType(F);
  if (KnownElemTy)
    return false;
  if (Type *OpElemTy = GR->findDeducedElementType(Op)) {
    OpElemTy = normalizeType(OpElemTy);
    GR->addDeducedElementType(F, OpElemTy);
    GR->addReturnType(
        F, TypedPointerType::get(OpElemTy,
                                 getPointerAddressSpace(F->getReturnType())));
    // non-recursive update of types in function uses
    DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(I, Op)};
    for (User *U : F->users()) {
      CallInst *CI = dyn_cast<CallInst>(U);
      if (!CI || CI->getCalledFunction() != F)
        continue;
      if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(CI)) {
        if (Type *PrevElemTy = GR->findDeducedElementType(CI)) {
          GR->updateAssignType(AssignCI, CI,
                               getNormalizedPoisonValue(OpElemTy));
          propagateElemType(CI, PrevElemTy, VisitedSubst);
        }
      }
    }
    // Non-recursive update of types in the function's incomplete returns.
    // This may happen just once per function; the latch is the pair of
    // findDeducedElementType(F) / addDeducedElementType(F, ...).
    // With or without the latch it is a non-recursive call due to
    // IncompleteRets set to nullptr in this call.
    if (IncompleteRets)
      for (Instruction *IncompleteRetI : *IncompleteRets)
        deduceOperandElementType(IncompleteRetI, nullptr, AskOps,
                                 IsPostprocessing);
  } else if (IncompleteRets) {
    IncompleteRets->insert(I);
  }
  TypeValidated.insert(I);
  return true;
}

// If the Instruction has Pointer operands with unresolved types, this function
// tries to deduce them. If the Instruction has Pointer operands with known
// types which differ from expected, this function tries to insert a bitcast to
// resolve the issue.
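// For instance (hypothetical IR, for intuition only): if %p was already
// validated with element type float but is then stored to as i32,
//   store i32 %v, ptr %p
// the pass inserts a pointer cast rather than re-typing %p:
//   %c = call ptr @llvm.spv.ptrcast.p0(ptr %p, metadata i32 poison, i32 0)
//   store i32 %v, ptr %c
// (the exact intrinsic mangling may differ from this sketch).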
void SPIRVEmitIntrinsics::deduceOperandElementType(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing) {
  SmallVector<std::pair<Value *, unsigned>> Ops;
  Type *KnownElemTy = nullptr;
  bool Incomplete = false;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<PHINode>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumIncomingValues(); i++) {
      Value *Op = Ref->getIncomingValue(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getPointerOperand(), 0));
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (!isPointerTy(I->getType()))
      return;
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getOperand(0), 0));
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    if (GR->findDeducedElementType(Ref->getPointerOperand()))
      return;
    KnownElemTy = Ref->getSourceElementType();
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 GetElementPtrInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    KnownElemTy = I->getType();
    if (isUntypedPointerTy(KnownElemTy))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 LoadInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<StoreInst>(I)) {
    if (!(KnownElemTy =
              reconstructType(Ref->getValueOperand(), false, IsPostprocessing)))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 StoreInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicCmpXchgInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicRMWInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumOperands(); i++) {
      Value *Op = Ref->getOperand(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<ReturnInst>(I)) {
    if (!isPointerTy(CurrF->getReturnType()))
      return;
    Value *Op = Ref->getReturnValue();
    if (!Op)
      return;
    if (deduceOperandElementTypeFunctionRet(I, IncompleteRets, AskOps,
                                            IsPostprocessing, KnownElemTy, Op,
                                            CurrF))
      return;
    Incomplete = isTodoType(CurrF);
    Ops.push_back(std::make_pair(Op, 0));
  } else if (auto *Ref = dyn_cast<ICmpInst>(I)) {
    if (!isPointerTy(Ref->getOperand(0)->getType()))
      return;
    Value *Op0 = Ref->getOperand(0);
    Value *Op1 = Ref->getOperand(1);
    bool Incomplete0 = isTodoType(Op0);
    bool Incomplete1 = isTodoType(Op1);
    Type *ElemTy1 = GR->findDeducedElementType(Op1);
    Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
                        ? nullptr
                        : GR->findDeducedElementType(Op0);
    if (ElemTy0) {
      KnownElemTy = ElemTy0;
      Incomplete = Incomplete0;
      Ops.push_back(std::make_pair(Op1, 1));
    } else if (ElemTy1) {
      KnownElemTy = ElemTy1;
      Incomplete = Incomplete1;
      Ops.push_back(std::make_pair(Op0, 0));
    }
  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
    if (!CI->isIndirectCall())
      deduceOperandElementTypeCalledFunction(CI, Ops, KnownElemTy, Incomplete);
    else if (HaveFunPtrs)
      deduceOperandElementTypeFunctionPointer(CI, Ops, KnownElemTy,
                                              IsPostprocessing);
  }

  // There is not enough info to deduce types, or everything is already valid.
  if (!KnownElemTy || Ops.size() == 0)
    return;

  LLVMContext &Ctx = CurrF->getContext();
  IRBuilder<> B(Ctx);
  for (auto &OpIt : Ops) {
    Value *Op = OpIt.first;
    if (AskOps && !AskOps->contains(Op))
      continue;
    Type *AskTy = nullptr;
    CallInst *AskCI = nullptr;
    if (IsPostprocessing && AskOps) {
      AskTy = GR->findDeducedElementType(Op);
      AskCI = GR->findAssignPtrTypeInstr(Op);
      assert(AskTy && AskCI);
    }
    Type *Ty = AskTy ? AskTy : GR->findDeducedElementType(Op);
    if (Ty == KnownElemTy)
      continue;
    Value *OpTyVal = getNormalizedPoisonValue(KnownElemTy);
    Type *OpTy = Op->getType();
    if (Op->hasUseList() &&
        (!Ty || AskTy || isUntypedPointerTy(Ty) || isTodoType(Op))) {
      Type *PrevElemTy = GR->findDeducedElementType(Op);
      GR->addDeducedElementType(Op, normalizeType(KnownElemTy));
      // check if KnownElemTy is complete
      if (!Incomplete)
        eraseTodoType(Op);
      else if (!IsPostprocessing)
        insertTodoType(Op);
      // check if there is existing Intrinsic::spv_assign_ptr_type instruction
      CallInst *AssignCI = AskCI ? AskCI : GR->findAssignPtrTypeInstr(Op);
      if (AssignCI == nullptr) {
        Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
        setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
        CallInst *CI =
            buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
                            {B.getInt32(getPointerAddressSpace(OpTy))}, B);
        GR->addAssignPtrTypeInstr(Op, CI);
      } else {
        GR->updateAssignType(AssignCI, Op, OpTyVal);
        DenseSet<std::pair<Value *, Value *>> VisitedSubst{
            std::make_pair(I, Op)};
        propagateElemTypeRec(Op, KnownElemTy, PrevElemTy, VisitedSubst);
      }
    } else {
      eraseTodoType(Op);
      CallInst *PtrCastI =
          buildSpvPtrcast(I->getParent()->getParent(), Op, KnownElemTy);
      if (OpIt.second == std::numeric_limits<unsigned>::max())
        dyn_cast<CallInst>(I)->setCalledOperand(PtrCastI);
      else
        I->setOperand(OpIt.second, PtrCastI);
    }
  }
  TypeValidated.insert(I);
}

void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
                                              Instruction *New,
                                              IRBuilder<> &B) {
  while (!Old->user_empty()) {
    auto *U = Old->user_back();
    if (isAssignTypeInstr(U)) {
      B.SetInsertPoint(U);
      SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
      CallInst *AssignCI =
          B.CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
      GR->addAssignPtrTypeInstr(New, AssignCI);
      U->eraseFromParent();
    } else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
               isa<CallInst>(U)) {
      U->replaceUsesOfWith(Old, New);
    } else {
      llvm_unreachable("illegal aggregate intrinsic user");
    }
  }
  New->copyMetadata(*Old);
  Old->eraseFromParent();
}

void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    Instruction *I = Worklist.front();
    bool BPrepared = false;
    Worklist.pop();

    for (auto &Op : I->operands()) {
      auto *AggrUndef = dyn_cast<UndefValue>(Op);
      if (!AggrUndef || !Op->getType()->isAggregateType())
        continue;

      if (!BPrepared) {
        setInsertPointSkippingPhis(B, I);
        BPrepared = true;
      }
      auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {});
      Worklist.push(IntrUndef);
      I->replaceUsesOfWith(Op, IntrUndef);
      AggrConsts[IntrUndef] = AggrUndef;
      AggrConstTypes[IntrUndef] = AggrUndef->getType();
    }
  }
}

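// Illustrative sketch of the rewrite below (assumed shapes, for intuition
// only): a constant aggregate operand such as [2 x i32] [i32 1, i32 2] is
// replaced by the result of
//   %cc = call i32 @llvm.spv.const.composite(i32 1, i32 2)
// where the i32 result type is the forced-int32 placeholder used for arrays
// and structs (see isAggrConstForceInt32); vectors keep their own type.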
void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    auto *I = Worklist.front();
    bool IsPhi = isa<PHINode>(I), BPrepared = false;
    assert(I);
    bool KeepInst = false;
    for (const auto &Op : I->operands()) {
      Constant *AggrConst = nullptr;
      Type *ResTy = nullptr;
      if (auto *COp = dyn_cast<ConstantVector>(Op)) {
        AggrConst = COp;
        ResTy = COp->getType();
      } else if (auto *COp = dyn_cast<ConstantArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantStruct>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantDataArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantAggregateZero>(Op)) {
        AggrConst = COp;
        ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
      }
      if (AggrConst) {
        SmallVector<Value *> Args;
        if (auto *COp = dyn_cast<ConstantDataSequential>(Op))
          for (unsigned i = 0; i < COp->getNumElements(); ++i)
            Args.push_back(COp->getElementAsConstant(i));
        else
          llvm::append_range(Args, AggrConst->operands());
        if (!BPrepared) {
          IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
                : B.SetInsertPoint(I);
          BPrepared = true;
        }
        auto *CI =
            B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {Args});
        Worklist.push(CI);
        I->replaceUsesOfWith(Op, CI);
        KeepInst = true;
        AggrConsts[CI] = AggrConst;
        AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst, false);
      }
    }
    if (!KeepInst)
      Worklist.pop();
  }
}

static void createDecorationIntrinsic(Instruction *I, MDNode *Node,
                                      IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  setInsertPointAfterDef(B, I);
  B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                    {I, MetadataAsValue::get(Ctx, MDNode::get(Ctx, {Node}))});
}

static void createRoundingModeDecoration(Instruction *I,
                                         unsigned RoundingModeDeco,
                                         IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *RoundingModeNode = MDNode::get(
      Ctx,
      {ConstantAsMetadata::get(
           ConstantInt::get(Int32Ty, SPIRV::Decoration::FPRoundingMode)),
       ConstantAsMetadata::get(ConstantInt::get(Int32Ty, RoundingModeDeco))});
  createDecorationIntrinsic(I, RoundingModeNode, B);
}

static void createSaturatedConversionDecoration(Instruction *I,
                                                IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *SaturatedConversionNode =
      MDNode::get(Ctx, {ConstantAsMetadata::get(ConstantInt::get(
                           Int32Ty, SPIRV::Decoration::SaturatedConversion))});
  createDecorationIntrinsic(I, SaturatedConversionNode, B);
}

static void createSaturatedConversionDecorations(Instruction *I,
                                                 IRBuilder<> &B) {
  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (Function *Fu = CI->getCalledFunction()) {
      if (Fu->isIntrinsic()) {
        unsigned const int IntrinsicId = Fu->getIntrinsicID();
        switch (IntrinsicId) {
        case Intrinsic::fptosi_sat:
        case Intrinsic::fptoui_sat:
          createSaturatedConversionDecoration(CI, B);
          break;
        default:
          break;
        }
      }
    }
  }
}

Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) {
  if (!Call.isInlineAsm())
    return &Call;

  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  LLVMContext &Ctx = CurrF->getContext();

  Constant *TyC = UndefValue::get(IA->getFunctionType());
  MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
  SmallVector<Value *> Args = {
      buildMD(TyC),
      MetadataAsValue::get(Ctx, MDNode::get(Ctx, ConstraintString))};
  for (unsigned OpIdx = 0; OpIdx < Call.arg_size(); OpIdx++)
    Args.push_back(Call.getArgOperand(OpIdx));

  IRBuilder<> B(Call.getParent());
  B.SetInsertPoint(&Call);
  B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args});
  return &Call;
}

// Use a tip about rounding mode to create a decoration.
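// E.g. a constrained intrinsic carrying the "round.towardzero" rounding
// argument ends up decorated with FPRoundingMode RTZ (a sketch; the metadata
// spelling is the standard constrained-FP one, not defined in this file).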
void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
                                          IRBuilder<> &B) {
  std::optional<RoundingMode> RM = FPI->getRoundingMode();
  if (!RM.has_value())
    return;
  unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
  switch (RM.value()) {
  default:
    // ignore unknown rounding modes
    break;
  case RoundingMode::NearestTiesToEven:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
    break;
  case RoundingMode::TowardNegative:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
    break;
  case RoundingMode::TowardPositive:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
    break;
  case RoundingMode::TowardZero:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
    break;
  case RoundingMode::Dynamic:
  case RoundingMode::NearestTiesToAway:
    // TODO: check if supported
    break;
  }
  if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
    return;
  // Convert the tip about rounding mode into a decoration record.
  createRoundingModeDecoration(FPI, RoundingModeDeco, B);
}

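// Sketch of the switch lowering below (hypothetical IR, for intuition only):
// a switch such as
//   switch i32 %x, label %def [ i32 1, label %bb1 ]
// is folded into
//   call void @llvm.spv.switch.i32(i32 %x, ptr blockaddress(@f, %def),
//                                  i32 1, ptr blockaddress(@f, %bb1))
// followed by a placeholder indirectbr over all successors that keeps the
// CFG intact until IR translation (exact mangling may differ).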
Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
  BasicBlock *ParentBB = I.getParent();
  Function *F = ParentBB->getParent();
  IRBuilder<> B(ParentBB);
  B.SetInsertPoint(&I);
  SmallVector<Value *, 4> Args;
  SmallVector<BasicBlock *> BBCases;
  Args.push_back(I.getCondition());
  BBCases.push_back(I.getDefaultDest());
  Args.push_back(BlockAddress::get(F, I.getDefaultDest()));
  for (auto &Case : I.cases()) {
    Args.push_back(Case.getCaseValue());
    BBCases.push_back(Case.getCaseSuccessor());
    Args.push_back(BlockAddress::get(F, Case.getCaseSuccessor()));
  }
  CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
                                     {I.getOperand(0)->getType()}, {Args});
  // remove switch to avoid its unneeded and undesirable unwrap into branches
  // and conditions
  replaceAllUsesWith(&I, NewI);
  I.eraseFromParent();
  // insert artificial and temporary instruction to preserve valid CFG,
  // it will be removed after IR translation pass
  B.SetInsertPoint(ParentBB);
  IndirectBrInst *BrI = B.CreateIndirectBr(
      Constant::getNullValue(PointerType::getUnqual(ParentBB->getContext())),
      BBCases.size());
  for (BasicBlock *BBCase : BBCases)
    BrI->addDestination(BBCase);
  return BrI;
}

static bool isFirstIndexZero(const GetElementPtrInst *GEP) {
  if (GEP->getNumIndices() == 0)
    return false;
  if (const auto *CI = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
    return CI->getZExtValue() == 0;
  }
  return false;
}

Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
  IRBuilder<> B(I.getParent());
  B.SetInsertPoint(&I);

  if (TM->getSubtargetImpl()->isLogicalSPIRV()) {
    // Logical SPIR-V cannot use the OpPtrAccessChain instruction. If the first
    // index of the GEP is not 0, then we need to try to adjust it.
    //
    // If the GEP is doing byte addressing, try to rebuild the full access
    // chain from the type of the pointer.
    if (I.getSourceElementType() ==
        IntegerType::getInt8Ty(CurrF->getContext())) {
      return buildLogicalAccessChainFromGEP(I);
    }

    // Look for the array-to-pointer decay. If this is the pattern,
    // we can adjust the types and prepend a 0 to the indices.
1616 Value *PtrOp = I.getPointerOperand();
1617 Type *SrcElemTy = I.getSourceElementType();
1618 Type *DeducedPointeeTy = deduceElementType(PtrOp, true);
1619
1620 if (auto *ArrTy = dyn_cast<ArrayType>(DeducedPointeeTy)) {
1621 if (ArrTy->getElementType() == SrcElemTy) {
1622 SmallVector<Value *> NewIndices;
1623 Type *FirstIdxType = I.getOperand(1)->getType();
1624 NewIndices.push_back(ConstantInt::get(FirstIdxType, 0));
1625 for (Value *Idx : I.indices())
1626 NewIndices.push_back(Idx);
1627
1628 SmallVector<Type *, 2> Types = {I.getType(), I.getPointerOperandType()};
1629 SmallVector<Value *, 4> Args;
1630 Args.push_back(B.getInt1(I.isInBounds()));
1631 Args.push_back(I.getPointerOperand());
1632 Args.append(NewIndices.begin(), NewIndices.end());
1633
1634 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
1635 replaceAllUsesWithAndErase(B, &I, NewI);
1636 return NewI;
1637 }
1638 }
1639 }
1640
1641 SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
1642 SmallVector<Value *, 4> Args;
1643 Args.push_back(B.getInt1(I.isInBounds()));
1644 llvm::append_range(Args, I.operands());
1645 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
1646 replaceAllUsesWithAndErase(B, &I, NewI);
1647 return NewI;
1648}
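// For illustration (IR shapes approximate): with %p deduced to point at
// [4 x i32], the decayed GEP
//   %q = getelementptr i32, ptr %p, i64 %i
// is rebuilt above with a leading zero index, roughly
//   %q = call ptr @llvm.spv.gep(i1 false, ptr %p, i64 0, i64 %i)
// so the access chain starts at the array type rather than at its element.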
1649
1650Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
1651 IRBuilder<> B(I.getParent());
1652 B.SetInsertPoint(&I);
1653 Value *Source = I.getOperand(0);
1654
1655 // SPIR-V, contrary to LLVM 17+ IR, supports bitcasts between pointers of
1656 // varying element types. In case of IR coming from older versions of LLVM,
1657 // such bitcasts do not provide sufficient information; they should just be
1658 // skipped here and handled in insertPtrCastOrAssignTypeInstr.
1659 if (isPointerTy(I.getType())) {
1660 replaceAllUsesWith(&I, Source);
1661 I.eraseFromParent();
1662 return nullptr;
1663 }
1664
1665 SmallVector<Type *, 2> Types = {I.getType(), Source->getType()};
1666 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1667 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
1668 replaceAllUsesWithAndErase(B, &I, NewI);
1669 return NewI;
1670}
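// For illustration (name mangling omitted): a non-pointer bitcast like
//   %r = bitcast <2 x i32> %v to i64
// becomes roughly
//   %r = call i64 @llvm.spv.bitcast(<2 x i32> %v)
// while pointer-to-pointer bitcasts are erased above and recovered later via
// pointee-type deduction.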
1671
1672void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1673 TargetExtType *AssignedType, Value *V, IRBuilder<> &B) {
1674 Type *VTy = V->getType();
1675
1676 // A couple of sanity checks.
1677 assert((isPointerTy(VTy)) && "Expect a pointer type!");
1678 if (Type *ElemTy = getPointeeType(VTy))
1679 if (ElemTy != AssignedType)
1680 report_fatal_error("Unexpected pointer element type!");
1681
1682 CallInst *AssignCI = GR->findAssignPtrTypeInstr(V);
1683 if (!AssignCI) {
1684 GR->buildAssignType(B, AssignedType, V);
1685 return;
1686 }
1687
1688 Type *CurrentType =
1689 cast<ValueAsMetadata>(
1690 cast<MetadataAsValue>(AssignCI->getOperand(1))->getMetadata())
1691 ->getType();
1692 if (CurrentType == AssignedType)
1693 return;
1694
1695 // Builtin types cannot be redeclared or casted.
1696 if (CurrentType->isTargetExtTy())
1697 report_fatal_error("Type mismatch " + CurrentType->getTargetExtName() +
1698 "/" + AssignedType->getTargetExtName() +
1699 " for value " + V->getName(),
1700 false);
1701
1702 // Our previous guess about the type seems to be wrong, let's update
1703 // inferred type according to a new, more precise type information.
1704 GR->updateAssignType(AssignCI, V, getNormalizedPoisonValue(AssignedType));
1705}
1706
1707void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1708 Instruction *I, Value *Pointer, Type *ExpectedElementType,
1709 unsigned OperandToReplace, IRBuilder<> &B) {
1710 TypeValidated.insert(I);
1711
1712 // Do not emit spv_ptrcast if Pointer's element type is ExpectedElementType
1713 Type *PointerElemTy = deduceElementTypeHelper(Pointer, false);
1714 if (PointerElemTy == ExpectedElementType ||
1715 isEquivalentTypes(PointerElemTy, ExpectedElementType))
1716 return;
1717
1718 setInsertPointSkippingPhis(B, I);
1719 Value *ExpectedElementVal = getNormalizedPoisonValue(ExpectedElementType);
1720 MetadataAsValue *VMD = buildMD(ExpectedElementVal);
1721 unsigned AddressSpace = getPointerAddressSpace(Pointer->getType());
1722 bool FirstPtrCastOrAssignPtrType = true;
1723
1724 // Do not emit new spv_ptrcast if equivalent one already exists or when
1725 // spv_assign_ptr_type already targets this pointer with the same element
1726 // type.
1727 if (Pointer->hasUseList()) {
1728 for (auto User : Pointer->users()) {
1729 auto *II = dyn_cast<IntrinsicInst>(User);
1730 if (!II ||
1731 (II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1732 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1733 II->getOperand(0) != Pointer)
1734 continue;
1735
1736 // There is some spv_ptrcast/spv_assign_ptr_type already targeting this
1737 // pointer.
1738 FirstPtrCastOrAssignPtrType = false;
1739 if (II->getOperand(1) != VMD ||
1740 dyn_cast<ConstantInt>(II->getOperand(2))->getSExtValue() !=
1741 AddressSpace)
1742 continue;
1743
1744 // The spv_ptrcast/spv_assign_ptr_type targeting this pointer is of the
1745 // same element type and address space.
1746 if (II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1747 return;
1748
1749 // This must be a spv_ptrcast, do not emit new if this one has the same BB
1750 // as I. Otherwise, search for other spv_ptrcast/spv_assign_ptr_type.
1751 if (II->getParent() != I->getParent())
1752 continue;
1753
1754 I->setOperand(OperandToReplace, II);
1755 return;
1756 }
1757 }
1758
1759 if (isa<Instruction>(Pointer) || isa<Argument>(Pointer)) {
1760 if (FirstPtrCastOrAssignPtrType) {
1761 // If this would be the first spv_ptrcast, do not emit spv_ptrcast and
1762 // emit spv_assign_ptr_type instead.
1763 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1764 return;
1765 } else if (isTodoType(Pointer)) {
1766 eraseTodoType(Pointer);
1767 if (!isa<CallInst>(Pointer) && !isa<GetElementPtrInst>(Pointer)) {
1768 // If this wouldn't be the first spv_ptrcast but the existing type info is
1769 // incomplete, update the spv_assign_ptr_type arguments.
1770 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Pointer)) {
1771 Type *PrevElemTy = GR->findDeducedElementType(Pointer);
1772 assert(PrevElemTy);
1773 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1774 std::make_pair(I, Pointer)};
1775 GR->updateAssignType(AssignCI, Pointer, ExpectedElementVal);
1776 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1777 } else {
1778 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1779 }
1780 return;
1781 }
1782 }
1783 }
1784
1785 // Emit spv_ptrcast
1786 SmallVector<Type *, 2> Types = {Pointer->getType(), Pointer->getType()};
1787 SmallVector<Value *, 2> Args = {Pointer, VMD, B.getInt32(AddressSpace)};
1788 auto *PtrCastI = B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
1789 I->setOperand(OperandToReplace, PtrCastI);
1790 // We need to set up a pointee type for the newly created spv_ptrcast.
1791 GR->buildAssignPtr(B, ExpectedElementType, PtrCastI);
1792}
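// For illustration (operand shapes approximate): when an instruction expects
// an i32 pointee but %p has no better known element type, the operand is
// rewritten roughly as
//   %c = call ptr @llvm.spv.ptrcast(ptr %p, metadata i32 poison, i32 0)
// followed by an spv_assign_ptr_type on %c that records the i32 pointee; the
// trailing i32 operand carries the address space.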
1793
1794void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
1795 IRBuilder<> &B) {
1796 // Handle basic instructions:
1797 StoreInst *SI = dyn_cast<StoreInst>(I);
1798 if (IsKernelArgInt8(CurrF, SI)) {
1799 replacePointerOperandWithPtrCast(
1800 I, SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->getContext()),
1801 0, B);
1802 }
1803 if (SI) {
1804 Value *Op = SI->getValueOperand();
1805 Value *Pointer = SI->getPointerOperand();
1806 Type *OpTy = Op->getType();
1807 if (auto *OpI = dyn_cast<Instruction>(Op))
1808 OpTy = restoreMutatedType(GR, OpI, OpTy);
1809 if (OpTy == Op->getType())
1810 OpTy = deduceElementTypeByValueDeep(OpTy, Op, false);
1811 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 1, B);
1812 return;
1813 }
1814 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1815 Value *Pointer = LI->getPointerOperand();
1816 Type *OpTy = LI->getType();
1817 if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
1818 if (Type *ElemTy = GR->findDeducedElementType(LI)) {
1819 OpTy = getTypedPointerWrapper(ElemTy, PtrTy->getAddressSpace());
1820 } else {
1821 Type *NewOpTy = OpTy;
1822 OpTy = deduceElementTypeByValueDeep(OpTy, LI, false);
1823 if (OpTy == NewOpTy)
1824 insertTodoType(Pointer);
1825 }
1826 }
1827 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1828 return;
1829 }
1830 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
1831 Value *Pointer = GEPI->getPointerOperand();
1832 Type *OpTy = nullptr;
1833
1834 // Logical SPIR-V is not allowed to use Op*PtrAccessChain instructions. If
1835 // the first index is 0, then we can trivially lower to OpAccessChain. If
1836 // not we need to try to rewrite the GEP. We avoid adding a pointer cast at
1837 // this time, and will rewrite the GEP when visiting it.
1838 if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(GEPI)) {
1839 return;
1840 }
1841
1842 // In all cases, fall back to the GEP type if type scavenging failed.
1843 if (!OpTy)
1844 OpTy = GEPI->getSourceElementType();
1845
1846 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1847 if (isNestedPointer(OpTy))
1848 insertTodoType(Pointer);
1849 return;
1850 }
1851
1852 // TODO: review and merge with existing logics:
1853 // Handle calls to builtins (non-intrinsics):
1854 CallInst *CI = dyn_cast<CallInst>(I);
1855 if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
1856 !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
1857 return;
1858
1859 // collect information about formal parameter types
1860 std::string DemangledName =
1861 getOclOrSpirvBuiltinDemangledName(CI->getCalledFunction()->getName());
1862 Function *CalledF = CI->getCalledFunction();
1863 SmallVector<Type *, 4> CalledArgTys;
1864 bool HaveTypes = false;
1865 for (unsigned OpIdx = 0; OpIdx < CalledF->arg_size(); ++OpIdx) {
1866 Argument *CalledArg = CalledF->getArg(OpIdx);
1867 Type *ArgType = CalledArg->getType();
1868 if (!isPointerTy(ArgType)) {
1869 CalledArgTys.push_back(nullptr);
1870 } else if (Type *ArgTypeElem = getPointeeType(ArgType)) {
1871 CalledArgTys.push_back(ArgTypeElem);
1872 HaveTypes = true;
1873 } else {
1874 Type *ElemTy = GR->findDeducedElementType(CalledArg);
1875 if (!ElemTy && hasPointeeTypeAttr(CalledArg))
1876 ElemTy = getPointeeTypeByAttr(CalledArg);
1877 if (!ElemTy) {
1878 ElemTy = getPointeeTypeByCallInst(DemangledName, CalledF, OpIdx);
1879 if (ElemTy) {
1880 GR->addDeducedElementType(CalledArg, normalizeType(ElemTy));
1881 } else {
1882 for (User *U : CalledArg->users()) {
1883 if (Instruction *Inst = dyn_cast<Instruction>(U)) {
1884 if ((ElemTy = deduceElementTypeHelper(Inst, false)) != nullptr)
1885 break;
1886 }
1887 }
1888 }
1889 }
1890 HaveTypes |= ElemTy != nullptr;
1891 CalledArgTys.push_back(ElemTy);
1892 }
1893 }
1894
1895 if (DemangledName.empty() && !HaveTypes)
1896 return;
1897
1898 for (unsigned OpIdx = 0; OpIdx < CI->arg_size(); OpIdx++) {
1899 Value *ArgOperand = CI->getArgOperand(OpIdx);
1900 if (!isPointerTy(ArgOperand->getType()))
1901 continue;
1902
1903 // Constants (nulls/undefs) are handled in insertAssignPtrTypeIntrs()
1904 if (!isa<Instruction>(ArgOperand) && !isa<Argument>(ArgOperand)) {
1905 // However, we may have assumptions about the formal argument's type and
1906 // may have a need to insert a ptr cast for the actual parameter of this
1907 // call.
1908 Argument *CalledArg = CalledF->getArg(OpIdx);
1909 if (!GR->findDeducedElementType(CalledArg))
1910 continue;
1911 }
1912
1913 Type *ExpectedType =
1914 OpIdx < CalledArgTys.size() ? CalledArgTys[OpIdx] : nullptr;
1915 if (!ExpectedType && !DemangledName.empty())
1916 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
1917 DemangledName, OpIdx, I->getContext());
1918 if (!ExpectedType || ExpectedType->isVoidTy())
1919 continue;
1920
1921 if (ExpectedType->isTargetExtTy() &&
1922 !isTypedPointerWrapper(cast<TargetExtType>(ExpectedType)))
1923 insertAssignPtrTypeTargetExt(cast<TargetExtType>(ExpectedType),
1924 ArgOperand, B);
1925 else
1926 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType, OpIdx, B);
1927 }
1928}
1929
1930Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
1931 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1932 // type in LLT and IRTranslator will replace it by the scalar.
1933 if (isVector1(I.getType()))
1934 return &I;
1935
1936 SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
1937 I.getOperand(1)->getType(),
1938 I.getOperand(2)->getType()};
1939 IRBuilder<> B(I.getParent());
1940 B.SetInsertPoint(&I);
1941 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1942 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
1943 replaceAllUsesWithAndErase(B, &I, NewI);
1944 return NewI;
1945}
1946
1947 Instruction *
1948 SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
1949 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1950 // type in LLT and IRTranslator will replace it by the scalar.
1951 if (isVector1(I.getVectorOperandType()))
1952 return &I;
1953
1954 IRBuilder<> B(I.getParent());
1955 B.SetInsertPoint(&I);
1956 SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperandType(),
1957 I.getIndexOperand()->getType()};
1958 SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
1959 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
1960 replaceAllUsesWithAndErase(B, &I, NewI);
1961 return NewI;
1962}
1963
1964Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
1965 IRBuilder<> B(I.getParent());
1966 B.SetInsertPoint(&I);
1967 SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
1968 SmallVector<Value *> Args;
1969 Value *AggregateOp = I.getAggregateOperand();
1970 if (isa<UndefValue>(AggregateOp))
1971 Args.push_back(UndefValue::get(B.getInt32Ty()));
1972 else
1973 Args.push_back(AggregateOp);
1974 Args.push_back(I.getInsertedValueOperand());
1975 for (auto &Op : I.indices())
1976 Args.push_back(B.getInt32(Op));
1977 Instruction *NewI =
1978 B.CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
1979 replaceMemInstrUses(&I, NewI, B);
1980 return NewI;
1981}
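// For illustration: "%r = insertvalue {i32, float} %agg, float %v, 1" becomes
// roughly
//   call @llvm.spv.insertv({i32, float} %agg, float %v, i32 1)
// with the constant indices appended as trailing i32 operands and aggregate
// uses rewired through replaceMemInstrUses().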
1982
1983Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
1984 if (I.getAggregateOperand()->getType()->isAggregateType())
1985 return &I;
1986 IRBuilder<> B(I.getParent());
1987 B.SetInsertPoint(&I);
1988 SmallVector<Value *> Args(I.operands());
1989 for (auto &Op : I.indices())
1990 Args.push_back(B.getInt32(Op));
1991 auto *NewI =
1992 B.CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
1993 replaceAllUsesWithAndErase(B, &I, NewI);
1994 return NewI;
1995}
1996
1997Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
1998 if (!I.getType()->isAggregateType())
1999 return &I;
2000 IRBuilder<> B(I.getParent());
2001 B.SetInsertPoint(&I);
2002 TrackConstants = false;
2003 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
2004 MachineMemOperand::Flags Flags =
2005 TLI->getLoadMemOperandFlags(I, CurrF->getDataLayout());
2006 auto *NewI =
2007 B.CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
2008 {I.getPointerOperand(), B.getInt16(Flags),
2009 B.getInt8(I.getAlign().value())});
2010 replaceMemInstrUses(&I, NewI, B);
2011 return NewI;
2012}
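// For illustration: an aggregate load such as
//   %a = load {i32, float}, ptr %p
// is replaced roughly by
//   call @llvm.spv.load(ptr %p, i16 <memop flags>, i8 <alignment>)
// with the aggregate's uses rewired through replaceMemInstrUses().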
2013
2014Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
2015 if (!AggrStores.contains(&I))
2016 return &I;
2017 IRBuilder<> B(I.getParent());
2018 B.SetInsertPoint(&I);
2019 TrackConstants = false;
2020 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
2021 MachineMemOperand::Flags Flags =
2022 TLI->getStoreMemOperandFlags(I, CurrF->getDataLayout());
2023 auto *PtrOp = I.getPointerOperand();
2024 auto *NewI = B.CreateIntrinsic(
2025 Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
2026 {I.getValueOperand(), PtrOp, B.getInt16(Flags),
2027 B.getInt8(I.getAlign().value())});
2028 NewI->copyMetadata(I);
2029 I.eraseFromParent();
2030 return NewI;
2031}
2032
2033Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
2034 Value *ArraySize = nullptr;
2035 if (I.isArrayAllocation()) {
2036 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I.getFunction());
2037 if (!STI->canUseExtension(
2038 SPIRV::Extension::SPV_INTEL_variable_length_array))
2040 "array allocation: this instruction requires the following "
2041 "SPIR-V extension: SPV_INTEL_variable_length_array",
2042 false);
2043 ArraySize = I.getArraySize();
2044 }
2045 IRBuilder<> B(I.getParent());
2046 B.SetInsertPoint(&I);
2047 TrackConstants = false;
2048 Type *PtrTy = I.getType();
2049 auto *NewI =
2050 ArraySize
2051 ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2052 {PtrTy, ArraySize->getType()},
2053 {ArraySize, B.getInt8(I.getAlign().value())})
2054 : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
2055 {B.getInt8(I.getAlign().value())});
2056 replaceAllUsesWithAndErase(B, &I, NewI);
2057 return NewI;
2058}
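// For illustration: "%a = alloca i32, align 4" becomes roughly
//   %a = call ptr @llvm.spv.alloca(i8 4)
// while a VLA "%a = alloca i32, i64 %n" maps to
//   %a = call ptr @llvm.spv.alloca.array(i64 %n, i8 4)
// which is gated above on the SPV_INTEL_variable_length_array extension.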
2059
2060Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2061 assert(I.getType()->isAggregateType() && "Aggregate result is expected");
2062 IRBuilder<> B(I.getParent());
2063 B.SetInsertPoint(&I);
2064 SmallVector<Value *> Args(I.operands());
2065 Args.push_back(B.getInt32(
2066 static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
2067 Args.push_back(B.getInt32(
2068 static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
2069 Args.push_back(B.getInt32(
2070 static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
2071 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2072 {I.getPointerOperand()->getType()}, {Args});
2073 replaceMemInstrUses(&I, NewI, B);
2074 return NewI;
2075}
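// For illustration (operand shapes approximate): a cmpxchg keeps its original
// operands and gains three trailing i32 constants, roughly
//   call @llvm.spv.cmpxchg(ptr %p, i32 %cmp, i32 %new,
//                          i32 <scope>, i32 <success sem>, i32 <failure sem>)
// whose values come from getMemScope() and getMemSemantics() above.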
2076
2077Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) {
2078 IRBuilder<> B(I.getParent());
2079 B.SetInsertPoint(&I);
2080 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2081 return &I;
2082}
2083
2084void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2085 IRBuilder<> &B) {
2086 // Skip special artificial variables.
2087 static const StringSet<> ArtificialGlobals{"llvm.global.annotations",
2088 "llvm.compiler.used"};
2089
2090 if (ArtificialGlobals.contains(GV.getName()))
2091 return;
2092
2093 Constant *Init = nullptr;
2094 if (hasInitializer(&GV)) {
2095 // Deduce element type and store results in Global Registry.
2096 // Result is ignored, because TypedPointerType is not supported
2097 // by llvm IR general logic.
2098 deduceElementTypeHelper(&GV, false);
2099 Init = GV.getInitializer();
2100 Type *Ty = isAggrConstForceInt32(Init) ? B.getInt32Ty() : Init->getType();
2101 Constant *Const = isAggrConstForceInt32(Init) ? B.getInt32(1) : Init;
2102 auto *InitInst = B.CreateIntrinsic(Intrinsic::spv_init_global,
2103 {GV.getType(), Ty}, {&GV, Const});
2104 InitInst->setArgOperand(1, Init);
2105 }
2106 if (!Init && GV.use_empty())
2107 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
2108}
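// For illustration: "@g = global i32 42" gains
//   call void @llvm.spv.init.global(ptr @g, i32 42)
// while a global with no initializer and no uses is kept alive via
//   call void @llvm.spv.unref.global(ptr @g)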
2109
2110 // Return true if we can't decide what the pointee type is now and will get
2111 // back to the question later. Return false if spv_assign_ptr_type is not
2112 // needed or can be inserted immediately.
2113bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
2114 IRBuilder<> &B,
2115 bool UnknownElemTypeI8) {
2116 reportFatalOnTokenType(I);
2117 if (!isPointerTy(I->getType()) || !requireAssignType(I))
2118 return false;
2119
2120 setInsertPointSkippingPhis(B, I->getNextNode());
2121 if (Type *ElemTy = deduceElementType(I, UnknownElemTypeI8)) {
2122 GR->buildAssignPtr(B, ElemTy, I);
2123 return false;
2124 }
2125 return true;
2126}
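// For illustration: for "%p = alloca i32" with a successfully deduced pointee
// this emits roughly
//   call void @llvm.spv.assign.ptr.type(ptr %p, metadata i32 poison, i32 0)
// where the metadata operand encodes the element type and the trailing i32
// the address space.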
2127
2128void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
2129 IRBuilder<> &B) {
2130 // TODO: extend the list of functions with known result types
2131 static StringMap<unsigned> ResTypeWellKnown = {
2132 {"async_work_group_copy", WellKnownTypes::Event},
2133 {"async_work_group_strided_copy", WellKnownTypes::Event},
2134 {"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2135
2136 reportFatalOnTokenType(I);
2137
2138 bool IsKnown = false;
2139 if (auto *CI = dyn_cast<CallInst>(I)) {
2140 if (!CI->isIndirectCall() && !CI->isInlineAsm() &&
2141 CI->getCalledFunction() && !CI->getCalledFunction()->isIntrinsic()) {
2142 Function *CalledF = CI->getCalledFunction();
2143 std::string DemangledName =
2144 getOclOrSpirvBuiltinDemangledName(CalledF->getName());
2145 FPDecorationId DecorationId = FPDecorationId::NONE;
2146 if (DemangledName.length() > 0)
2147 DemangledName =
2148 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2149 auto ResIt = ResTypeWellKnown.find(DemangledName);
2150 if (ResIt != ResTypeWellKnown.end()) {
2151 IsKnown = true;
2152 setInsertPointAfterDef(B, I);
2153 switch (ResIt->second) {
2154 case WellKnownTypes::Event:
2155 GR->buildAssignType(
2156 B, TargetExtType::get(I->getContext(), "spirv.Event"), I);
2157 break;
2158 }
2159 }
2160 // check if a floating rounding mode or saturation info is present
2161 switch (DecorationId) {
2162 default:
2163 break;
2164 case FPDecorationId::SAT:
2165 createSaturatedConversionDecoration(CI, B);
2166 break;
2167 case FPDecorationId::RTE:
2168 createRoundingModeDecoration(
2169 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE, B);
2170 break;
2171 case FPDecorationId::RTZ:
2172 createRoundingModeDecoration(
2173 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ, B);
2174 break;
2175 case FPDecorationId::RTP:
2176 createRoundingModeDecoration(
2177 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP, B);
2178 break;
2179 case FPDecorationId::RTN:
2180 createRoundingModeDecoration(
2181 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN, B);
2182 break;
2183 }
2184 }
2185 }
2186
2187 Type *Ty = I->getType();
2188 if (!IsKnown && !Ty->isVoidTy() && !isPointerTy(Ty) && requireAssignType(I)) {
2189 setInsertPointAfterDef(B, I);
2190 Type *TypeToAssign = Ty;
2191 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2192 if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2193 II->getIntrinsicID() == Intrinsic::spv_undef) {
2194 auto It = AggrConstTypes.find(II);
2195 if (It == AggrConstTypes.end())
2196 report_fatal_error("Unknown composite intrinsic type");
2197 TypeToAssign = It->second;
2198 }
2199 }
2200 TypeToAssign = restoreMutatedType(GR, I, TypeToAssign);
2201 GR->buildAssignType(B, TypeToAssign, I);
2202 }
2203 for (const auto &Op : I->operands()) {
2204 if (isa<ConstantPointerNull>(Op) || isa<UndefValue>(Op) ||
2205 // Check GetElementPtrConstantExpr case.
2206 (isa<ConstantExpr>(Op) &&
2207 (isa<GEPOperator>(Op) ||
2208 (cast<ConstantExpr>(Op)->getOpcode() == CastInst::IntToPtr)))) {
2209 setInsertPointSkippingPhis(B, I);
2210 Type *OpTy = Op->getType();
2211 if (isa<UndefValue>(Op) && OpTy->isAggregateType()) {
2212 CallInst *AssignCI =
2213 buildIntrWithMD(Intrinsic::spv_assign_type, {B.getInt32Ty()}, Op,
2214 UndefValue::get(B.getInt32Ty()), {}, B);
2215 GR->addAssignPtrTypeInstr(Op, AssignCI);
2216 } else if (!isa<Instruction>(Op)) {
2217 Type *OpTy = Op->getType();
2218 Type *OpTyElem = getPointeeType(OpTy);
2219 if (OpTyElem) {
2220 GR->buildAssignPtr(B, OpTyElem, Op);
2221 } else if (isPointerTy(OpTy)) {
2222 Type *ElemTy = GR->findDeducedElementType(Op);
2223 GR->buildAssignPtr(B, ElemTy ? ElemTy : deduceElementType(Op, true),
2224 Op);
2225 } else {
2226 Value *OpTyVal = Op;
2227 if (OpTy->isTargetExtTy()) {
2228 // We need to do this in order to be consistent with how target ext
2229 // types are handled in `processInstrAfterVisit`
2230 OpTyVal = getNormalizedPoisonValue(OpTy);
2231 }
2232 CallInst *AssignCI =
2233 buildIntrWithMD(Intrinsic::spv_assign_type, {OpTy},
2234 getNormalizedPoisonValue(OpTy), OpTyVal, {}, B);
2235 GR->addAssignPtrTypeInstr(OpTyVal, AssignCI);
2236 }
2237 }
2238 }
2239 }
2240}
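// For illustration: for "%x = fadd float %a, %b" this emits roughly
//   call void @llvm.spv.assign.type(float %x, metadata float poison)
// so the original LLVM type survives into the GlobalISel pipeline; constant
// operands of the instruction get analogous spv_assign_type records.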
2241
2242bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2243 Instruction *Inst) {
2244 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*Inst->getFunction());
2245 if (!STI->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2246 return false;
2247 // Add aliasing decorations to internal load and store intrinsics
2248 // and atomic instructions, skipping atomic stores as they won't have an ID
2249 // to attach the decoration to.
2250 CallInst *CI = dyn_cast<CallInst>(Inst);
2251 if (!CI)
2252 return false;
2253 if (Function *Fun = CI->getCalledFunction()) {
2254 if (Fun->isIntrinsic()) {
2255 switch (Fun->getIntrinsicID()) {
2256 case Intrinsic::spv_load:
2257 case Intrinsic::spv_store:
2258 return true;
2259 default:
2260 return false;
2261 }
2262 }
2263 std::string Name = getOclOrSpirvBuiltinDemangledName(Fun->getName());
2264 const std::string Prefix = "__spirv_Atomic";
2265 const bool IsAtomic = Name.find(Prefix) == 0;
2266
2267 if (!Fun->getReturnType()->isVoidTy() && IsAtomic)
2268 return true;
2269 }
2270 return false;
2271}
2272
2273void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
2274 IRBuilder<> &B) {
2275 if (MDNode *MD = I->getMetadata("spirv.Decorations")) {
2276 setInsertPointAfterDef(B, I);
2277 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
2278 {I, MetadataAsValue::get(I->getContext(), MD)});
2279 }
2280 // Lower alias.scope/noalias metadata
2281 {
2282 auto processMemAliasingDecoration = [&](unsigned Kind) {
2283 if (MDNode *AliasListMD = I->getMetadata(Kind)) {
2284 if (shouldTryToAddMemAliasingDecoration(I)) {
2285 uint32_t Dec = Kind == LLVMContext::MD_alias_scope
2286 ? SPIRV::Decoration::AliasScopeINTEL
2287 : SPIRV::Decoration::NoAliasINTEL;
2288 SmallVector<Value *, 3> Args = {
2289 I, ConstantInt::get(B.getInt32Ty(), Dec),
2290 MetadataAsValue::get(I->getContext(), AliasListMD)};
2291 setInsertPointAfterDef(B, I);
2292 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2293 {I->getType()}, {Args});
2294 }
2295 }
2296 };
2297 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2298 processMemAliasingDecoration(LLVMContext::MD_noalias);
2299 }
2300 // MD_fpmath
2301 if (MDNode *MD = I->getMetadata(LLVMContext::MD_fpmath)) {
2302 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I->getFunction());
2303 bool AllowFPMaxError =
2304 STI->canUseExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
2305 if (!AllowFPMaxError)
2306 return;
2307
2308 setInsertPointAfterDef(B, I);
2309 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2310 {I->getType()},
2311 {I, MetadataAsValue::get(I->getContext(), MD)});
2312 }
2313}
2314
2314 static SPIRV::FPFastMathDefaultInfoVector &
2315 getOrCreateFPFastMathDefaultInfoVec(
2316 const Module &M,
2317 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2318 &FPFastMathDefaultInfoMap,
2319 Function *F) {
2320 auto it = FPFastMathDefaultInfoMap.find(F);
2321 if (it != FPFastMathDefaultInfoMap.end())
2322 return it->second;
2323
2324 // If the map does not contain the entry, create a new one. Initialize it to
2325 // contain all 3 elements sorted by bit width of target type: {half, float,
2326 // double}.
2327 SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
2328 FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
2329 SPIRV::FPFastMathMode::None);
2330 FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
2331 SPIRV::FPFastMathMode::None);
2332 FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
2333 SPIRV::FPFastMathMode::None);
2334 return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
2335}
2336
2337 static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo(
2338 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
2339 const Type *Ty) {
2340 size_t BitWidth = Ty->getScalarSizeInBits();
2341 int Index =
2342 SPIRV::FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex(
2343 BitWidth);
2344 assert(Index >= 0 && Index < 3 &&
2345 "Expected FPFastMathDefaultInfo for half, float, or double");
2346 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2347 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2348 return FPFastMathDefaultInfoVec[Index];
2349}
2350
2351void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
2352 const SPIRVSubtarget *ST = TM->getSubtargetImpl();
2353 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2354 return;
2355
2356 // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
2357 // We need the entry point (function) as the key, and the target
2358 // type and flags as the value.
2359 // We also need to check ContractionOff and SignedZeroInfNanPreserve
2360 // execution modes, as they are now deprecated and must be replaced
2361 // with FPFastMathDefaultInfo.
2362 auto Node = M.getNamedMetadata("spirv.ExecutionMode");
2363 if (!Node) {
2364 if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
2365 // This requires emitting ContractionOff. However, because
2366 // ContractionOff is now deprecated, we need to replace it with
2367 // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0.
2368 // We need to create the constant for that.
2369
2370 // Create constant instruction with the bitmask flags.
2371 Constant *InitValue =
2372 ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
2373 // TODO: Reuse constant if there is one already with the required
2374 // value.
2375 [[maybe_unused]] GlobalVariable *GV =
2376 new GlobalVariable(M, // Module
2377 Type::getInt32Ty(M.getContext()), // Type
2378 true, // isConstant
2379 GlobalValue::InternalLinkage, // Linkage
2380 InitValue // Initializer
2381 );
2382 }
2383 return;
2384 }
2385
2386 // The table maps function pointers to their default FP fast math info. It
2387 // can be assumed that the SmallVector is sorted by the bit width of the
2388 // type. The first element is the smallest bit width, and the last element
2389 // is the largest bit width, therefore, we will have {half, float, double}
2390 // in the order of their bit widths.
2391 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2392 FPFastMathDefaultInfoMap;
2393
2394 for (unsigned i = 0; i < Node->getNumOperands(); i++) {
2395 MDNode *MDN = cast<MDNode>(Node->getOperand(i));
2396 assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
2397 Function *F = cast<Function>(
2398 cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
2399 const auto EM =
2400 cast<ConstantInt>(
2401 cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
2402 ->getZExtValue();
2403 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2404 assert(MDN->getNumOperands() == 4 &&
2405 "Expected 4 operands for FPFastMathDefault");
2406 const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
2407 unsigned Flags =
2408 cast<ConstantInt>(
2409 cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
2410 ->getZExtValue();
2411 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2412 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2413 SPIRV::FPFastMathDefaultInfo &Info =
2414 getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
2415 Info.FastMathFlags = Flags;
2416 Info.FPFastMathDefault = true;
2417 } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2418 assert(MDN->getNumOperands() == 2 &&
2419 "Expected no operands for ContractionOff");
2420
2421 // We need to save this info for every possible FP type, i.e. {half,
2422 // float, double, fp128}.
2423 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2424 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2425 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2426 Info.ContractionOff = true;
2427 }
2428 } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2429 assert(MDN->getNumOperands() == 3 &&
2430 "Expected 1 operand for SignedZeroInfNanPreserve");
2431 unsigned TargetWidth =
2432 cast<ConstantInt>(
2433 cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
2434 ->getZExtValue();
2435 // We need to save this info only for the FP type with TargetWidth.
2436 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2437 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2438 int Index = SPIRV::FPFastMathDefaultInfoVector::
2439 computeFPFastMathDefaultInfoVecIndex(TargetWidth);
2440 assert(Index >= 0 && Index < 3 &&
2441 "Expected FPFastMathDefaultInfo for half, float, or double");
2442 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2443 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2444 FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
2445 }
2446 }
2447
2448 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2449 for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2450 if (FPFastMathDefaultInfoVec.empty())
2451 continue;
2452
2453 for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2454 assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
2455 // Skip if none of the execution modes was used.
2456 unsigned Flags = Info.FastMathFlags;
2457 if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
2458 !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
2459 continue;
2460
2461 // Check if flags are compatible.
2462 if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2463 report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
2464 "and AllowContract");
2465
2466 if (Info.SignedZeroInfNanPreserve &&
2467 !(Flags &
2468 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2469 SPIRV::FPFastMathMode::NSZ))) {
2470 if (Info.FPFastMathDefault)
2471 report_fatal_error("Conflicting FPFastMathFlags: "
2472 "SignedZeroInfNanPreserve but at least one of "
2473 "NotNaN/NotInf/NSZ is enabled.");
2474 }
2475
2476 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2477 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2478 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2479 report_fatal_error("Conflicting FPFastMathFlags: "
2480 "AllowTransform requires AllowReassoc and "
2481 "AllowContract to be set.");
2482 }
2483
2484 auto it = GlobalVars.find(Flags);
2485 GlobalVariable *GV = nullptr;
2486 if (it != GlobalVars.end()) {
2487 // Reuse existing global variable.
2488 GV = it->second;
2489 } else {
2490 // Create constant instruction with the bitmask flags.
2491 Constant *InitValue =
2492 ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
2493 // TODO: Reuse constant if there is one already with the required
2494 // value.
2495 GV = new GlobalVariable(M, // Module
2496 Type::getInt32Ty(M.getContext()), // Type
2497 true, // isConstant
2498 GlobalValue::InternalLinkage, // Linkage
2499 InitValue // Initializer
2500 );
2501 GlobalVars[Flags] = GV;
2502 }
2503 }
2504 }
2505}
2506
2507void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
2508 IRBuilder<> &B) {
2509 auto *II = dyn_cast<IntrinsicInst>(I);
2510 bool IsConstComposite =
2511 II && II->getIntrinsicID() == Intrinsic::spv_const_composite;
2512 if (IsConstComposite && TrackConstants) {
2513 setInsertPointAfterDef(B, I);
2514 auto t = AggrConsts.find(I);
2515 assert(t != AggrConsts.end());
2516 auto *NewOp =
2517 buildIntrWithMD(Intrinsic::spv_track_constant,
2518 {II->getType(), II->getType()}, t->second, I, {}, B);
2519 replaceAllUsesWith(I, NewOp, false);
2520 NewOp->setArgOperand(0, I);
2521 }
2522 bool IsPhi = isa<PHINode>(I), BPrepared = false;
2523 for (const auto &Op : I->operands()) {
2524 if (isa<PHINode>(I) || isa<SwitchInst>(I) ||
2525 !(isa<ConstantData>(Op) || isa<ConstantExpr>(Op)))
2526 continue;
2527 unsigned OpNo = Op.getOperandNo();
2528 if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2529 (II->paramHasAttr(OpNo, Attribute::ImmArg))))
2530 continue;
2531
2532 if (!BPrepared) {
2533 IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
2534 : B.SetInsertPoint(I);
2535 BPrepared = true;
2536 }
2537 Type *OpTy = Op->getType();
2538 Type *OpElemTy = GR->findDeducedElementType(Op);
2539 Value *NewOp = Op;
2540 if (OpTy->isTargetExtTy()) {
2541 // Since this value is replaced by poison, we need to do the same in
2542 // `insertAssignTypeIntrs`.
2543 Value *OpTyVal = getNormalizedPoisonValue(OpTy);
2544 NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
2545 {OpTy, OpTyVal->getType()}, Op, OpTyVal, {}, B);
2546 }
2547 if (!IsConstComposite && isPointerTy(OpTy) && OpElemTy != nullptr &&
2548 OpElemTy != IntegerType::getInt8Ty(I->getContext())) {
2549 SmallVector<Type *, 2> Types = {OpTy, OpTy};
2550 SmallVector<Value *, 2> Args = {
2551 NewOp, buildMD(getNormalizedPoisonValue(OpElemTy)),
2552 B.getInt32(getPointerAddressSpace(OpTy))};
2553 CallInst *PtrCasted =
2554 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
2555 GR->buildAssignPtr(B, OpElemTy, PtrCasted);
2556 NewOp = PtrCasted;
2557 }
2558 if (NewOp != Op)
2559 I->setOperand(OpNo, NewOp);
2560 }
2561 if (Named.insert(I).second)
2562 emitAssignName(I, B);
2563}
2564
2565Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
2566 unsigned OpIdx) {
2567 std::unordered_set<Function *> FVisited;
2568 return deduceFunParamElementType(F, OpIdx, FVisited);
2569}
2570
2571Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2572 Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2573 // maybe a cycle
2574 if (!FVisited.insert(F).second)
2575 return nullptr;
2576
2577 std::unordered_set<Value *> Visited;
2578 SmallVector<std::pair<Function *, unsigned>> Lookup;
2579 // search in function's call sites
2580 for (User *U : F->users()) {
2581 CallInst *CI = dyn_cast<CallInst>(U);
2582 if (!CI || OpIdx >= CI->arg_size())
2583 continue;
2584 Value *OpArg = CI->getArgOperand(OpIdx);
2585 if (!isPointerTy(OpArg->getType()))
2586 continue;
2587 // maybe we already know operand's element type
2588 if (Type *KnownTy = GR->findDeducedElementType(OpArg))
2589 return KnownTy;
2590 // try to deduce from the operand itself
2591 Visited.clear();
2592 if (Type *Ty = deduceElementTypeHelper(OpArg, Visited, false))
2593 return Ty;
2594 // search in actual parameter's users
2595 for (User *OpU : OpArg->users()) {
2596 Instruction *Inst = dyn_cast<Instruction>(OpU);
2597 if (!Inst || Inst == CI)
2598 continue;
2599 Visited.clear();
2600 if (Type *Ty = deduceElementTypeHelper(Inst, Visited, false))
2601 return Ty;
2602 }
2603 // check if it's a formal parameter of the outer function
2604 if (!CI->getParent() || !CI->getParent()->getParent())
2605 continue;
2606 Function *OuterF = CI->getParent()->getParent();
2607 if (FVisited.find(OuterF) != FVisited.end())
2608 continue;
2609 for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
2610 if (OuterF->getArg(i) == OpArg) {
2611 Lookup.push_back(std::make_pair(OuterF, i));
2612 break;
2613 }
2614 }
2615 }
2616
2617 // search in function parameters
2618 for (auto &Pair : Lookup) {
2619 if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2620 return Ty;
2621 }
2622
2623 return nullptr;
2624}
2625
2626void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *F,
2627 IRBuilder<> &B) {
2628 B.SetInsertPointPastAllocas(F);
2629 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2630 Argument *Arg = F->getArg(OpIdx);
2631 if (!isUntypedPointerTy(Arg->getType()))
2632 continue;
2633 Type *ElemTy = GR->findDeducedElementType(Arg);
2634 if (ElemTy)
2635 continue;
2636 if (hasPointeeTypeAttr(Arg) &&
2637 (ElemTy = getPointeeTypeByAttr(Arg)) != nullptr) {
2638 GR->buildAssignPtr(B, ElemTy, Arg);
2639 continue;
2640 }
2641 // search in function's call sites
2642 for (User *U : F->users()) {
2643 CallInst *CI = dyn_cast<CallInst>(U);
2644 if (!CI || OpIdx >= CI->arg_size())
2645 continue;
2646 Value *OpArg = CI->getArgOperand(OpIdx);
2647 if (!isPointerTy(OpArg->getType()))
2648 continue;
2649 // maybe we already know operand's element type
2650 if ((ElemTy = GR->findDeducedElementType(OpArg)) != nullptr)
2651 break;
2652 }
2653 if (ElemTy) {
2654 GR->buildAssignPtr(B, ElemTy, Arg);
2655 continue;
2656 }
2657 if (HaveFunPtrs) {
2658 for (User *U : Arg->users()) {
2659 CallInst *CI = dyn_cast<CallInst>(U);
2660 if (CI && !isa<IntrinsicInst>(CI) && CI->isIndirectCall() &&
2661 CI->getCalledOperand() == Arg &&
2662 CI->getParent()->getParent() == CurrF) {
2663 SmallVector<std::pair<Value *, unsigned>> Ops;
2664 deduceOperandElementTypeFunctionPointer(CI, Ops, ElemTy, false);
2665 if (ElemTy) {
2666 GR->buildAssignPtr(B, ElemTy, Arg);
2667 break;
2668 }
2669 }
2670 }
2671 }
2672 }
2673}
2674
2675void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
2676 B.SetInsertPointPastAllocas(F);
2677 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2678 Argument *Arg = F->getArg(OpIdx);
2679 if (!isUntypedPointerTy(Arg->getType()))
2680 continue;
2681 Type *ElemTy = GR->findDeducedElementType(Arg);
2682 if (!ElemTy && (ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
2683 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Arg)) {
2684 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2685 GR->updateAssignType(AssignCI, Arg, getNormalizedPoisonValue(ElemTy));
2686 propagateElemType(Arg, IntegerType::getInt8Ty(F->getContext()),
2687 VisitedSubst);
2688 } else {
2689 GR->buildAssignPtr(B, ElemTy, Arg);
2690 }
2691 }
2692 }
2693}
2694
2695 static FunctionType *getFunctionPointerElemType(Function *F,
2696 SPIRVGlobalRegistry *GR) {
2697 FunctionType *FTy = F->getFunctionType();
2698 bool IsNewFTy = false;
2699 SmallVector<Type *, 4> ArgTys;
2700 for (Argument &Arg : F->args()) {
2701 Type *ArgTy = Arg.getType();
2702 if (ArgTy->isPointerTy())
2703 if (Type *ElemTy = GR->findDeducedElementType(&Arg)) {
2704 IsNewFTy = true;
2705 ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
2706 }
2707 ArgTys.push_back(ArgTy);
2708 }
2709 return IsNewFTy
2710 ? FunctionType::get(FTy->getReturnType(), ArgTys, FTy->isVarArg())
2711 : FTy;
2712}
2713
2714bool SPIRVEmitIntrinsics::processFunctionPointers(Module &M) {
2715 SmallVector<Function *> Worklist;
2716 for (auto &F : M) {
2717 if (F.isIntrinsic())
2718 continue;
2719 if (F.isDeclaration()) {
2720 for (User *U : F.users()) {
2721 CallInst *CI = dyn_cast<CallInst>(U);
2722 if (!CI || CI->getCalledFunction() != &F) {
2723 Worklist.push_back(&F);
2724 break;
2725 }
2726 }
2727 } else {
2728 if (F.user_empty())
2729 continue;
2730 Type *FPElemTy = GR->findDeducedElementType(&F);
2731 if (!FPElemTy)
2732 FPElemTy = getFunctionPointerElemType(&F, GR);
2733 for (User *U : F.users()) {
2734 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2735 if (!II || II->arg_size() != 3 || II->getOperand(0) != &F)
2736 continue;
2737 if (II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2738 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2739 GR->updateAssignType(II, &F, getNormalizedPoisonValue(FPElemTy));
2740 break;
2741 }
2742 }
2743 }
2744 }
2745 if (Worklist.empty())
2746 return false;
2747
2748 std::string ServiceFunName = SPIRV_BACKEND_SERVICE_FUN_NAME;
2749 if (!getVacantFunctionName(M, ServiceFunName))
2751 "cannot allocate a name for the internal service function");
2752 LLVMContext &Ctx = M.getContext();
2753 Function *SF =
2754 Function::Create(FunctionType::get(Type::getVoidTy(Ctx), {}, false),
2755 GlobalValue::PrivateLinkage, ServiceFunName, M);
2756 SF->addFnAttr(SPIRV_BACKEND_SERVICE_FUN_NAME, "");
2757 BasicBlock *BB = BasicBlock::Create(Ctx, "entry", SF);
2758 IRBuilder<> IRB(BB);
2759
2760 for (Function *F : Worklist) {
2761 SmallVector<Value *> Args;
2762 for (const auto &Arg : F->args())
2763 Args.push_back(getNormalizedPoisonValue(Arg.getType()));
2764 IRB.CreateCall(F, Args);
2765 }
2766 IRB.CreateRetVoid();
2767
2768 return true;
2769}
2770
2771// Apply types parsed from demangled function declarations.
2772void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(IRBuilder<> &B) {
2773 DenseMap<Function *, CallInst *> Ptrcasts;
2774 for (auto It : FDeclPtrTys) {
2775 Function *F = It.first;
2776 for (auto *U : F->users()) {
2777 CallInst *CI = dyn_cast<CallInst>(U);
2778 if (!CI || CI->getCalledFunction() != F)
2779 continue;
2780 unsigned Sz = CI->arg_size();
2781 for (auto [Idx, ElemTy] : It.second) {
2782 if (Idx >= Sz)
2783 continue;
2784 Value *Param = CI->getArgOperand(Idx);
2785 if (GR->findDeducedElementType(Param) || isa<GlobalValue>(Param))
2786 continue;
2787 if (Argument *Arg = dyn_cast<Argument>(Param)) {
2788 if (!hasPointeeTypeAttr(Arg)) {
2789 B.SetInsertPointPastAllocas(Arg->getParent());
2790 B.SetCurrentDebugLocation(DebugLoc());
2791 GR->buildAssignPtr(B, ElemTy, Arg);
2792 }
2793 } else if (isa<GetElementPtrInst>(Param)) {
2794 replaceUsesOfWithSpvPtrcast(Param, normalizeType(ElemTy), CI,
2795 Ptrcasts);
2796 } else if (isa<Instruction>(Param)) {
2797 GR->addDeducedElementType(Param, normalizeType(ElemTy));
2798 // insertAssignTypeIntrs() will complete buildAssignPtr()
2799 } else {
2800 B.SetInsertPoint(CI->getParent()
2801 ->getParent()
2802 ->getEntryBlock()
2803 .getFirstNonPHIOrDbgOrAlloca());
2804 GR->buildAssignPtr(B, ElemTy, Param);
2805 }
2806 CallInst *Ref = dyn_cast<CallInst>(Param);
2807 if (!Ref)
2808 continue;
2809 Function *RefF = Ref->getCalledFunction();
2810 if (!RefF || !isPointerTy(RefF->getReturnType()) ||
2811 GR->findDeducedElementType(RefF))
2812 continue;
2813 ElemTy = normalizeType(ElemTy);
2814 GR->addDeducedElementType(RefF, ElemTy);
2815 GR->addReturnType(
2816 RefF, TypedPointerType::get(
2817 ElemTy, getPointerAddressSpace(RefF->getReturnType())));
2818 }
2819 }
2820 }
2821}
2822
2823GetElementPtrInst *
2824SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
2825 // getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I.
2826 // If type is 0-length array and first index is 0 (zero), drop both the
2827 // 0-length array type and the first index. This is a common pattern in
2828 // the IR, e.g. when using a zero-length array as a placeholder for a
2829 // flexible array member of unbounded size.
2830 assert(GEP && "GEP is null");
2831 Type *SrcTy = GEP->getSourceElementType();
2832 SmallVector<Value *, 8> Indices(GEP->indices());
2833 ArrayType *ArrTy = dyn_cast<ArrayType>(SrcTy);
2834 if (ArrTy && ArrTy->getNumElements() == 0 &&
2835 isFirstIndexZero(GEP)) {
2836 Indices.erase(Indices.begin());
2837 SrcTy = ArrTy->getElementType();
2838 return GetElementPtrInst::Create(SrcTy, GEP->getPointerOperand(), Indices,
2839 GEP->getNoWrapFlags(), "",
2840 GEP->getIterator());
2841 }
2842 return nullptr;
2843}
2844
2845bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
2846 if (Func.isDeclaration())
2847 return false;
2848
2849 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
2850 GR = ST.getSPIRVGlobalRegistry();
2851
2852 if (!CurrF)
2853 HaveFunPtrs =
2854 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
2855
2856 CurrF = &Func;
2857 IRBuilder<> B(Func.getContext());
2858 AggrConsts.clear();
2859 AggrConstTypes.clear();
2860 AggrStores.clear();
2861
2862 // Fix GEP result types ahead of inference, and simplify if possible.
2863 // Data structure for dead instructions that were simplified and replaced.
2864 SmallPtrSet<Instruction *, 4> DeadInsts;
2865 for (auto &I : instructions(Func)) {
2866 GetElementPtrInst *Ref = dyn_cast<GetElementPtrInst>(&I);
2867 if (!Ref || GR->findDeducedElementType(Ref))
2868 continue;
2869
2870 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(Ref);
2871 if (NewGEP) {
2872 Ref->replaceAllUsesWith(NewGEP);
2873 DeadInsts.insert(Ref);
2874 Ref = NewGEP;
2875 }
2876 if (Type *GepTy = getGEPType(Ref))
2877 GR->addDeducedElementType(Ref, normalizeType(GepTy));
2878 }
2879 // Remove dead instructions that were simplified and replaced.
2880 for (auto *I : DeadInsts) {
2881 assert(I->use_empty() && "Dead instruction should not have any uses left");
2882 I->eraseFromParent();
2883 }
2884
2885 processParamTypesByFunHeader(CurrF, B);
2886
2887 // StoreInst's operand type can be changed during the next
2888 // transformations, so we need to store it in the set. Also store already
2889 // transformed types.
2890 for (auto &I : instructions(Func)) {
2891 StoreInst *SI = dyn_cast<StoreInst>(&I);
2892 if (!SI)
2893 continue;
2894 Type *ElTy = SI->getValueOperand()->getType();
2895 if (ElTy->isAggregateType() || ElTy->isVectorTy())
2896 AggrStores.insert(&I);
2897 }
2898
2899 B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
2900 for (auto &GV : Func.getParent()->globals())
2901 processGlobalValue(GV, B);
2902
2903 preprocessUndefs(B);
2904 preprocessCompositeConstants(B);
2905 SmallVector<Instruction *> Worklist(
2906 llvm::make_pointer_range(instructions(Func)));
2907
2908 applyDemangledPtrArgTypes(B);
2909
2910 // Pass forward: use operand to deduce instructions result.
2911 for (auto &I : Worklist) {
2912 // Don't emit intrinsics for convergence intrinsics.
2913 if (isConvergenceIntrinsic(I))
2914 continue;
2915
2916 bool Postpone = insertAssignPtrTypeIntrs(I, B, false);
2917 // if Postpone is true, we can't decide on pointee type yet
2918 insertAssignTypeIntrs(I, B);
2919 insertPtrCastOrAssignTypeInstr(I, B);
2921 // if instruction requires a pointee type set, let's check if we know it
2922 // already, and force it to be i8 if not
2923 if (Postpone && !GR->findAssignPtrTypeInstr(I))
2924 insertAssignPtrTypeIntrs(I, B, true);
2925
2926 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I))
2927 useRoundingMode(FPI, B);
2928 }
2929
2930 // Pass backward: use instructions results to specify/update/cast operands
2931 // where needed.
2932 SmallPtrSet<Instruction *, 4> IncompleteRets;
2933 for (auto &I : llvm::reverse(instructions(Func)))
2934 deduceOperandElementType(&I, &IncompleteRets);
2935
2936 // Pass forward for PHIs only, because their operands do not precede the
2937 // instruction in the order defined by `instructions(Func)`.
2938 for (BasicBlock &BB : Func)
2939 for (PHINode &Phi : BB.phis())
2940 if (isPointerTy(Phi.getType()))
2941 deduceOperandElementType(&Phi, nullptr);
2942
2943 for (auto *I : Worklist) {
2944 TrackConstants = true;
2945 if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
2946 setInsertPointAfterDef(B, I);
2947 // Visitors return either the original/newly created instruction for
2948 // further processing, nullptr otherwise.
2949 I = visit(*I);
2950 if (!I)
2951 continue;
2952
2953 // Don't emit intrinsics for convergence operations.
2954 if (isConvergenceIntrinsic(I))
2955 continue;
2956
2957 insertSpirvDecorations(I, B);
2958 processInstrAfterVisit(I, B);
2959 }
2960
2961 return true;
2962}
2963
2964// Try to deduce a better type for pointers to untyped ptr.
2965bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
2966 if (!GR || TodoTypeSz == 0)
2967 return false;
2968
2969 unsigned SzTodo = TodoTypeSz;
2970 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
2971 for (auto [Op, Enabled] : TodoType) {
2972 // TODO: add isa<CallInst>(Op) to continue
2973 if (!Enabled || isa<GetElementPtrInst>(Op))
2974 continue;
2975 CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
2976 Type *KnownTy = GR->findDeducedElementType(Op);
2977 if (!KnownTy || !AssignCI)
2978 continue;
2979 assert(Op == AssignCI->getArgOperand(0));
2980 // Try to improve the type deduced after all Functions are processed.
2981 if (auto *CI = dyn_cast<Instruction>(Op)) {
2982 CurrF = CI->getParent()->getParent();
2983 std::unordered_set<Value *> Visited;
2984 if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
2985 if (ElemTy != KnownTy) {
2986 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2987 propagateElemType(CI, ElemTy, VisitedSubst);
2988 eraseTodoType(Op);
2989 continue;
2990 }
2991 }
2992 }
2993
2994 if (Op->hasUseList()) {
2995 for (User *U : Op->users()) {
2996 Instruction *Inst = dyn_cast<Instruction>(U);
2997 if (Inst && !isa<IntrinsicInst>(Inst))
2998 ToProcess[Inst].insert(Op);
2999 }
3000 }
3001 }
3002 if (TodoTypeSz == 0)
3003 return true;
3004
3005 for (auto &F : M) {
3006 CurrF = &F;
3007 SmallPtrSet<Instruction *, 4> IncompleteRets;
3008 for (auto &I : llvm::reverse(instructions(F))) {
3009 auto It = ToProcess.find(&I);
3010 if (It == ToProcess.end())
3011 continue;
3012 It->second.remove_if([this](Value *V) { return !isTodoType(V); });
3013 if (It->second.size() == 0)
3014 continue;
3015 deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
3016 if (TodoTypeSz == 0)
3017 return true;
3018 }
3019 }
3020
3021 return SzTodo > TodoTypeSz;
3022}
3023
3024// Parse and store argument types of function declarations where needed.
3025void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
3026 for (auto &F : M) {
3027 if (!F.isDeclaration() || F.isIntrinsic())
3028 continue;
3029 // get the demangled name
3030 std::string DemangledName = getOclOrSpirvBuiltinDemangledName(F.getName());
3031 if (DemangledName.empty())
3032 continue;
3033 // allow only OpGroupAsyncCopy use case at the moment
3034 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(F);
3035 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3036 DemangledName, ST.getPreferredInstructionSet());
3037 if (Opcode != SPIRV::OpGroupAsyncCopy)
3038 continue;
3039 // find pointer arguments
3040 SmallVector<unsigned> Idxs;
3041 for (unsigned OpIdx = 0; OpIdx < F.arg_size(); ++OpIdx) {
3042 Argument *Arg = F.getArg(OpIdx);
3043 if (isPointerTy(Arg->getType()) && !hasPointeeTypeAttr(Arg))
3044 Idxs.push_back(OpIdx);
3045 }
3046 if (!Idxs.size())
3047 continue;
3048 // parse function arguments
3049 LLVMContext &Ctx = F.getContext();
3050 SmallVector<StringRef> TypeStrs;
3051 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3052 if (!TypeStrs.size())
3053 continue;
3054 // find type info for pointer arguments
3055 for (unsigned Idx : Idxs) {
3056 if (Idx >= TypeStrs.size())
3057 continue;
3058 if (Type *ElemTy =
3059 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3060 if (TypedPointerType::isValidElementType(ElemTy) &&
3061 !ElemTy->isTargetExtTy())
3062 FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
3063 }
3064 }
3065}
3066
3067bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3068 bool Changed = false;
3069
3070 parseFunDeclarations(M);
3071 insertConstantsForFPFastMathDefault(M);
3072
3073 TodoType.clear();
3074 for (auto &F : M)
3075 Changed |= runOnFunction(F);
3076
3077 // Specify function parameters after all functions were processed.
3078 for (auto &F : M) {
3079 // check if function parameter types are set
3080 CurrF = &F;
3081 if (!F.isDeclaration() && !F.isIntrinsic()) {
3082 IRBuilder<> B(F.getContext());
3083 processParamTypes(&F, B);
3084 }
3085 }
3086
3087 CanTodoType = false;
3088 Changed |= postprocessTypes(M);
3089
3090 if (HaveFunPtrs)
3091 Changed |= processFunctionPointers(M);
3092
3093 return Changed;
3094}
3095
3096 ModulePass *llvm::createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM) {
3097 return new SPIRVEmitIntrinsics(TM);
3098}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
Function * Fun
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static bool isFirstIndexZero(const GetElementPtrInst *GEP)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
#define SPIRV_BACKEND_SERVICE_FUN_NAME
Definition SPIRVUtils.h:523
StringSet - A set-like wrapper for the StringMap.
DEMANGLE_NAMESPACE_BEGIN bool starts_with(std::string_view self, char C) noexcept
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
const Function * getParent() const
Definition Argument.h:44
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:536
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate for the type of this constant.
Definition Constants.h:168
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
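A hedged usage sketch of these DenseMap operations, with V standing in for some Value*:
  llvm::DenseMap<llvm::Value *, bool> Flags;
  auto [It, Inserted] = Flags.try_emplace(V, true); // no-op if V already present
  if (!Inserted)
    It->second = true;                              // update the existing entry
  bool Known = Flags.find(V) != Flags.end();        // membership test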
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
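For example, the type of a function taking one pointer and returning void (Ctx is an assumed LLVMContext):
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      llvm::Type::getVoidTy(Ctx),                        // result type
      {llvm::PointerType::get(Ctx, /*AddressSpace=*/0)}, // parameter types
      /*isVarArg=*/false);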
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:640
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Definition Function.cpp:363
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intrinsic if the function is not an intrinsic, or if the pointer is null.
Definition Function.h:244
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:249
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
size_t arg_size() const
Definition Function.h:899
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
Argument * getArg(unsigned i) const
Definition Function.h:884
An instruction for type-safe pointer arithmetic to access elements of arrays and structs.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
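A hedged sketch of Create; ArrTy, Ptr, InsertPt, and Ctx are placeholders for a pointee array type, a pointer value, an insertion point, and a context:
  llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 0);
  llvm::GetElementPtrInst *GEP = llvm::GetElementPtrInst::Create(
      ArrTy, Ptr, {Zero, Zero}, "elt0", InsertPt); // address of element [0][0]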
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2788
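A minimal sketch: position a builder before an existing instruction I and emit a call (I, Callee, and Args are assumed names):
  llvm::IRBuilder<> B(I);                          // insertion point: before I
  llvm::CallInst *CI = B.CreateCall(Callee, Args);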
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
Definition InstVisitor.h:78
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
static unsigned getPointerOperandIndex()
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:608
Flags
Flags values. These may be or'd together.
static LLVM_ABI MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:104
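These pieces compose as follows in a hedged sketch, where Ctx is an assumed LLVMContext and C an assumed Constant*:
  llvm::MDNode *N = llvm::MDTuple::get(
      Ctx, {llvm::MDString::get(Ctx, "some.tag"),  // string leaf
            llvm::ConstantAsMetadata::get(C)});    // constant leaf
  llvm::Value *MDV = llvm::MetadataAsValue::get(Ctx, N); // usable as a call operand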
Metadata * getMetadata() const
Definition Metadata.h:201
ModulePass class - This class is used to implement unstructured interprocedural optimizations and analyses over the module.
Definition Pass.h:255
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
bool contains(StringRef key) const
Check if the set contains the given key.
Definition StringSet.h:60
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:619
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition Type.cpp:907
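A sketch only; the type name and parameters below are illustrative, not taken from this pass:
  llvm::TargetExtType *ExtTy = llvm::TargetExtType::get(
      Ctx, "spirv.Image",
      {llvm::Type::getFloatTy(Ctx)}, // type parameters
      {1, 0});                       // integer parameters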
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
Type * getArrayElementType() const
Definition Type.h:408
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
bool isTargetExtTy() const
Return true if this is a target extension type.
Definition Type.h:203
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:285
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:284
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:282
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
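For example, an i8 pointee in address space 0 (Ctx assumed):
  llvm::TypedPointerType *TPT =
      llvm::TypedPointerType::get(llvm::Type::getInt8Ty(Ctx), /*AddressSpace=*/0);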
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
op_range operands()
Definition User.h:292
void setOperand(unsigned i, Value *Val)
Definition User.h:237
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:24
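Unlike Value::replaceAllUsesWith, this rewrites only the operands of a single User; a sketch with assumed values U, Old, and New:
  bool AnyRewritten = U->replaceUsesOfWith(Old, New); // true if any operand matched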
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
bool user_empty() const
Definition Value.h:389
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
const ParentTy * getParent() const
Definition ilist_node.h:34
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
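A hedged sketch of the matcher, e.g. testing whether a GEP's first index is a zero constant (GEP is an assumed GetElementPtrInst*):
  using namespace llvm::PatternMatch;
  bool FirstIdxZero =
      GEP->getNumOperands() > 1 && match(GEP->getOperand(1), m_Zero());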
DenseSetImpl< ValueT, DenseMap< ValueT, DenseSetEmpty, ValueInfoT, DenseSetPair< ValueT > >, ValueInfoT > DenseSet
Definition DenseSet.h:264
ElementType
The element type of an SRV or UAV resource.
Definition DXILABI.h:60
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
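For instance, visiting a GEP's index operands while skipping its pointer operand (GEP as an assumed GetElementPtrInst*, visitIndex() a hypothetical callback):
  for (llvm::Value *Idx : llvm::drop_begin(GEP->operands()))
    visitIndex(Idx); // operand 0, the pointer, is skipped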
bool getVacantFunctionName(Module &M, std::string &Name)
@ Offset
Definition DWP.cpp:532
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
Definition SPIRVUtils.h:401
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index of the item in the sequence, and B, C, ... are the values from the original input ranges.
Definition STLExtras.h:2484
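A sketch pairing each formal argument of an assumed Function *F with its index:
  for (const auto &En : llvm::enumerate(F->args()))
    llvm::errs() << En.index() << ": " << En.value().getName() << "\n";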
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:365
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type, or null if the argument is not of the required type.
Definition Casting.h:643
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2148
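For example, collecting the parameter types of an assumed FunctionType *FTy:
  llvm::SmallVector<llvm::Type *> ParamTys;
  llvm::append_range(ParamTys, FTy->params()); // appends FTy's parameter list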
FPDecorationId
Definition SPIRVUtils.h:547
bool isNestedPointer(const Type *Ty)
MetadataAsValue * buildMD(Value *Arg)
Definition SPIRVUtils.h:511
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
Definition SPIRVUtils.h:396
bool isVector1(Type *Ty)
Definition SPIRVUtils.h:489
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:359
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
Type * getPointeeTypeByAttr(Argument *Arg)
Definition SPIRVUtils.h:378
bool hasPointeeTypeAttr(Argument *Arg)
Definition SPIRVUtils.h:373
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
Definition SPIRVUtils.h:451
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
bool hasInitializer(const GlobalVariable *GV)
Definition SPIRVUtils.h:344
Type * normalizeType(Type *Ty)
Definition SPIRVUtils.h:497
@ Enabled
Convert any .debug_str_offsets tables to DWARF64 if needed.
Definition DWP.h:27
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Type * getPointeeType(const Type *Ty)
Definition SPIRVUtils.h:428
PoisonValue * getNormalizedPoisonValue(Type *Ty)
Definition SPIRVUtils.h:507
bool isUntypedPointerTy(const Type *T)
Definition SPIRVUtils.h:354
Type * reconstitutePeeledArrayType(Type *Ty)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)
Definition SPIRVUtils.h:146