#include "llvm/IR/IntrinsicsSPIRV.h"
// Pack the next four characters of Str (starting at index i) into a single
// 32-bit word, least-significant byte first; indices past the end of the
// string contribute null padding bytes.
static uint32_t convertCharsToWord(const StringRef &Str, unsigned i) {
  uint32_t Word = 0u;
  for (unsigned WordIndex = 0; WordIndex < 4; ++WordIndex) {
    unsigned StrIndex = i + WordIndex;
    uint8_t CharToAdd = 0;       // Initialize char as padding/null.
    if (StrIndex < Str.size()) { // If it's within the string, get a real char.
      CharToAdd = Str[StrIndex];
    }
    Word |= (CharToAdd << (WordIndex * 8));
  }
  return Word;
}

// Get length including padding and null terminator.
static size_t getPaddedLen(const StringRef &Str) {
  return (Str.size() + 4) & ~3;
}

void addStringImm(const StringRef &Str, MCInst &Inst) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    Inst.addOperand(MCOperand::createImm(convertCharsToWord(Str, i)));
  }
}

void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    MIB.addImm(convertCharsToWord(Str, i));
  }
}

void addStringImm(const StringRef &Str, IRBuilder<> &B,
                  std::vector<Value *> &Args) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add a vector element for the 32-bits of chars or padding.
    Args.push_back(B.getInt32(convertCharsToWord(Str, i)));
  }
}
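// Worked example (a minimal standalone sketch, not part of the original file):
// "abc" has getPaddedLen("abc") == (3 + 4) & ~3 == 4, so it is emitted as
// exactly one 32-bit word holding 'a', 'b', 'c' and a null padding byte,
// least-significant byte first, mirroring convertCharsToWord above.
#include <cassert>
#include <cstdint>
#include <string>

static uint32_t packWordDemo(const std::string &Str, unsigned i) {
  uint32_t Word = 0u;
  for (unsigned WordIndex = 0; WordIndex < 4; ++WordIndex) {
    unsigned StrIndex = i + WordIndex;
    // Same packing rule as convertCharsToWord: real char or null padding.
    uint8_t CharToAdd = StrIndex < Str.size() ? Str[StrIndex] : 0;
    Word |= static_cast<uint32_t>(CharToAdd) << (WordIndex * 8);
  }
  return Word;
}

static void runPackWordDemo() {
  assert(((std::string("abc").size() + 4) & ~size_t(3)) == 4); // padded length
  assert(packWordDemo("abc", 0) == 0x00636261u); // 'a' = 0x61 in the low byte
}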
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB) {
  const auto Bitwidth = Imm.getBitWidth();
  if (Bitwidth == 1)
    return; // Already handled.
  else if (Bitwidth <= 32) {
    MIB.addImm(Imm.getZExtValue());
    // The asm printer needs this info to print 16-bit floats correctly.
    if (Bitwidth == 16)
      MIB.getInstr()->setAsmPrinterFlag(SPIRV::ASM_PRINTER_WIDTH16);
    return;
  } else if (Bitwidth <= 64) {
    uint64_t FullImm = Imm.getZExtValue();
    uint32_t LowBits = FullImm & 0xffffffff;
    uint32_t HighBits = (FullImm >> 32) & 0xffffffff;
    MIB.addImm(LowBits).addImm(HighBits);
    return;
  }
  report_fatal_error("Unsupported constant bitwidth");
}
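// Worked example (a standalone sketch, not part of the original file):
// addNumImm splits a 64-bit immediate into two 32-bit operands, low word
// first, matching SPIR-V's word order for wide literals.
static bool splitImmDemo() {
  uint64_t FullImm = 0x1122334455667788ull;
  uint32_t LowBits = FullImm & 0xffffffff;          // 0x55667788, emitted first
  uint32_t HighBits = (FullImm >> 32) & 0xffffffff; // 0x11223344, emitted second
  return LowBits == 0x55667788u && HighBits == 0x11223344u; // true
}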
void buildOpName(Register Target, const StringRef &Name, MachineInstr &I,
                 const SPIRVInstrInfo &TII) {
  if (!Name.empty()) {
    auto MIB =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpName))
            .addUse(Target);
    addStringImm(Name, MIB);
  }
}
static void finishBuildOpDecorate(MachineInstrBuilder &MIB,
                                  const std::vector<uint32_t> &DecArgs,
                                  StringRef StrImm) {
  if (!StrImm.empty())
    addStringImm(StrImm, MIB);
  for (const auto &DecArg : DecArgs)
    MIB.addImm(DecArg);
}

void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}
void buildOpDecorate(Register Reg, MachineInstr &I, const SPIRVInstrInfo &TII,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  MachineBasicBlock &MBB = *I.getParent();
  auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpDecorate))
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}
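// Usage sketch (hypothetical call site; `MIRBuilder` and `Reg` are assumed to
// exist, and the enumerator names are illustrative): decorating a register
// with LinkageAttributes, which takes a string followed by a linkage-type
// word, would look like
//
//   buildOpDecorate(Reg, MIRBuilder, SPIRV::Decoration::LinkageAttributes,
//                   {static_cast<uint32_t>(SPIRV::LinkageType::Export)},
//                   "exported_name");
//
// and emits: OpDecorate %Reg LinkageAttributes "exported_name" Export.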
void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
                             const MDNode *GVarMD) {
  for (unsigned I = 0, E = GVarMD->getNumOperands(); I != E; ++I) {
    auto *OpMD = dyn_cast<MDNode>(GVarMD->getOperand(I));
    if (!OpMD)
      report_fatal_error("Invalid decoration");
    if (OpMD->getNumOperands() == 0)
      report_fatal_error("Expect operand(s) of the decoration");
    ConstantInt *DecorationId =
        mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(0));
    if (!DecorationId)
      report_fatal_error("Expect SPIR-V <Decoration> operand to be the first "
                         "element of the decoration");
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                   .addUse(Reg)
                   .addImm(static_cast<uint32_t>(DecorationId->getZExtValue()));
    for (unsigned OpI = 1, OpE = OpMD->getNumOperands(); OpI != OpE; ++OpI) {
      if (ConstantInt *OpV =
              mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(OpI)))
        MIB.addImm(static_cast<uint32_t>(OpV->getZExtValue()));
      else if (MDString *OpV = dyn_cast<MDString>(OpMD->getOperand(OpI)))
        addStringImm(OpV->getString(), MIB);
      else
        report_fatal_error("Unexpected operand of the decoration");
    }
  }
}
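// Metadata shape consumed above (sketch; the names and values here are
// illustrative): each operand of a !spirv.Decorations node is itself an
// MDNode whose first operand is the SPIR-V decoration id:
//
//   @v = global i32 0, !spirv.Decorations !1
//   !1 = !{!2}
//   !2 = !{i32 44, i32 4}  ; 44 = Decoration::Alignment, align to 4 bytes
//
// Extra operands may be ConstantInt (emitted via addImm) or MDString
// (emitted via addStringImm), as the inner loop above shows.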
// SPIR-V requires OpVariables in the Function storage class to appear right
// after the function header (OpFunction/OpFunctionParameter) and the entry
// block's OpLabel; scan the entry block to find that insertion point.
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I) {
  MachineFunction *MF = I.getParent()->getParent();
  MachineBasicBlock *MBB = &MF->front();
  MachineBasicBlock::iterator It = MBB->SkipPHIsAndLabels(MBB->begin()),
                              E = MBB->end();
  bool IsHeader = false;
  unsigned Opcode;
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
      ++It;
      break;
    }
  }
  return It;
}
// Return an insertion point at the end of MBB that is still valid, i.e. one
// that precedes any terminator and debug-value instructions.
MachineBasicBlock::iterator getInsertPtValidEnd(MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator I = MBB->end();
  if (I == MBB->begin())
    return I;
  --I;
  while (I->isTerminator() || I->isDebugValue()) {
    if (I == MBB->begin())
      break;
    --I;
  }
  return I;
}
SPIRV::StorageClass::StorageClass
addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI) {
  switch (AddrSpace) {
  case 0:
    return SPIRV::StorageClass::Function;
  case 1:
    return SPIRV::StorageClass::CrossWorkgroup;
  case 2:
    return SPIRV::StorageClass::UniformConstant;
  case 3:
    return SPIRV::StorageClass::Workgroup;
  case 4:
    return SPIRV::StorageClass::Generic;
  case 5:
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::DeviceOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
  case 6:
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::HostOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
  case 7:
    return SPIRV::StorageClass::Input;
  case 8:
    return SPIRV::StorageClass::Output;
  case 9:
    return SPIRV::StorageClass::CodeSectionINTEL;
  case 10:
    return SPIRV::StorageClass::Private;
  default:
    report_fatal_error("Unknown address space");
  }
}
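// Example (sketch): an OpenCL `__local` pointer reaches the backend as
// `ptr addrspace(3)`, so addressSpaceToStorageClass(3, STI) selects
// SPIRV::StorageClass::Workgroup and the pointer type lowers to
// `OpTypePointer Workgroup %elt`.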
SPIRV::MemorySemantics::MemorySemantics
getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::StorageBuffer:
  case SPIRV::StorageClass::Uniform:
    return SPIRV::MemorySemantics::UniformMemory;
  case SPIRV::StorageClass::Workgroup:
    return SPIRV::MemorySemantics::WorkgroupMemory;
  case SPIRV::StorageClass::CrossWorkgroup:
    return SPIRV::MemorySemantics::CrossWorkgroupMemory;
  case SPIRV::StorageClass::AtomicCounter:
    return SPIRV::MemorySemantics::AtomicCounterMemory;
  case SPIRV::StorageClass::Image:
    return SPIRV::MemorySemantics::ImageMemory;
  default:
    return SPIRV::MemorySemantics::None;
  }
}
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
  switch (Ord) {
  case AtomicOrdering::Acquire:
    return SPIRV::MemorySemantics::Acquire;
  case AtomicOrdering::Release:
    return SPIRV::MemorySemantics::Release;
  case AtomicOrdering::AcquireRelease:
    return SPIRV::MemorySemantics::AcquireRelease;
  case AtomicOrdering::SequentiallyConsistent:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  case AtomicOrdering::Unordered:
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::NotAtomic:
    return SPIRV::MemorySemantics::None;
  }
  llvm_unreachable(nullptr);
}
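// Example (sketch): `store atomic i32 %v, ptr %p release` carries
// AtomicOrdering::Release and maps to SPIRV::MemorySemantics::Release (bit
// 0x4 in the SPIR-V encoding); relaxed ("monotonic") and non-atomic accesses
// map to MemorySemantics::None.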
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id) {
  // Named sync scopes are registered lazily; "singlethread" and the default
  // ("" / System) scope are predefined by LLVMContext, so no aliases are
  // needed for Invocation and CrossDevice.
  static const llvm::SyncScope::ID SubGroup =
      Ctx.getOrInsertSyncScopeID("subgroup");
  static const llvm::SyncScope::ID WorkGroup =
      Ctx.getOrInsertSyncScopeID("workgroup");
  static const llvm::SyncScope::ID Device =
      Ctx.getOrInsertSyncScopeID("device");

  if (Id == llvm::SyncScope::SingleThread)
    return SPIRV::Scope::Invocation;
  else if (Id == llvm::SyncScope::System)
    return SPIRV::Scope::CrossDevice;
  else if (Id == SubGroup)
    return SPIRV::Scope::Subgroup;
  else if (Id == WorkGroup)
    return SPIRV::Scope::Workgroup;
  else if (Id == Device)
    return SPIRV::Scope::Device;
  return SPIRV::Scope::CrossDevice;
}
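// Example (sketch): `fence syncscope("workgroup") acquire` in LLVM IR carries
// the "workgroup" sync-scope ID and maps to SPIRV::Scope::Workgroup, while a
// plain `fence acquire` uses SyncScope::System and maps to
// SPIRV::Scope::CrossDevice.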
MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
                                       const MachineRegisterInfo *MRI) {
  MachineInstr *MI = MRI->getVRegDef(ConstReg);
  // Look through truncates/extensions to find the tracked constant.
  MachineInstr *ConstInstr =
      MI->getOpcode() == SPIRV::G_TRUNC || MI->getOpcode() == SPIRV::G_ZEXT
          ? MRI->getVRegDef(MI->getOperand(1).getReg())
          : MI;
  if (auto *GI = dyn_cast<GIntrinsic>(ConstInstr)) {
    if (GI->is(Intrinsic::spv_track_constant)) {
      ConstReg = ConstInstr->getOperand(2).getReg();
      return MRI->getVRegDef(ConstReg);
    }
  } else if (ConstInstr->getOpcode() == SPIRV::ASSIGN_TYPE) {
    ConstReg = ConstInstr->getOperand(1).getReg();
    return MRI->getVRegDef(ConstReg);
  }
  return MRI->getVRegDef(ConstReg);
}
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) {
  const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
  assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
  return MI->getOperand(1).getCImm()->getValue().getZExtValue();
}
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID) {
  if (const auto *GI = dyn_cast<GIntrinsic>(&MI))
    return GI->is(IntrinsicID);
  return false;
}
Type *getMDOperandAsType(const MDNode *N, unsigned I) {
  Type *ElementTy = cast<ValueAsMetadata>(N->getOperand(I))->getType();
  return toTypedPointer(ElementTy);
}
// TODO: maybe use tablegen to implement this.
static bool isPipeOrAddressSpaceCastBI(const StringRef MangledName) {
  return MangledName == "write_pipe_2" || MangledName == "read_pipe_2" ||
         MangledName == "write_pipe_2_bl" || MangledName == "read_pipe_2_bl" ||
         MangledName == "write_pipe_4" || MangledName == "read_pipe_4" ||
         MangledName == "reserve_write_pipe" ||
         MangledName == "reserve_read_pipe" ||
         MangledName == "commit_write_pipe" ||
         MangledName == "commit_read_pipe" ||
         MangledName == "work_group_reserve_write_pipe" ||
         MangledName == "work_group_reserve_read_pipe" ||
         MangledName == "work_group_commit_write_pipe" ||
         MangledName == "work_group_commit_read_pipe" ||
         MangledName == "get_pipe_num_packets_ro" ||
         MangledName == "get_pipe_max_packets_ro" ||
         MangledName == "get_pipe_num_packets_wo" ||
         MangledName == "get_pipe_max_packets_wo" ||
         MangledName == "sub_group_reserve_write_pipe" ||
         MangledName == "sub_group_reserve_read_pipe" ||
         MangledName == "sub_group_commit_write_pipe" ||
         MangledName == "sub_group_commit_read_pipe" ||
         MangledName == "to_global" || MangledName == "to_local" ||
         MangledName == "to_private";
}
static bool isEnqueueKernelBI(const StringRef MangledName) {
  return MangledName == "__enqueue_kernel_basic" ||
         MangledName == "__enqueue_kernel_basic_events" ||
         MangledName == "__enqueue_kernel_varargs" ||
         MangledName == "__enqueue_kernel_events_varargs";
}
static bool isKernelQueryBI(const StringRef MangledName) {
  return MangledName == "__get_kernel_work_group_size_impl" ||
         MangledName == "__get_kernel_sub_group_count_for_ndrange_impl" ||
         MangledName == "__get_kernel_max_sub_group_size_for_ndrange_impl" ||
         MangledName == "__get_kernel_preferred_work_group_size_multiple_impl";
}
static bool isNonMangledOCLBuiltin(StringRef Name) {
  if (!Name.starts_with("__"))
    return false;

  return isEnqueueKernelBI(Name) || isKernelQueryBI(Name) ||
         isPipeOrAddressSpaceCastBI(Name.drop_front(2)) ||
         Name == "__translate_sampler_initializer";
}
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name) {
  bool IsNonMangledOCL = isNonMangledOCLBuiltin(Name);
  bool IsNonMangledSPIRV = Name.starts_with("__spirv_");
  bool IsNonMangledHLSL = Name.starts_with("__hlsl_");
  bool IsMangled = Name.starts_with("_Z");

  // Non-mangled names are returned as-is.
  if (IsNonMangledOCL || IsNonMangledSPIRV || IsNonMangledHLSL || !IsMangled)
    return Name.str();

  // Try to use the Itanium demangler first.
  if (char *DemangledName = itaniumDemangle(Name.data())) {
    std::string Result = DemangledName;
    free(DemangledName);
    return Result;
  }
  // Otherwise, parse the Itanium length-prefixed name manually. OpenCL C++
  // built-ins are declared in the ::cl::__spirv namespace.
  size_t Start, Len = 0;
  size_t DemangledNameLenStart = 2;
  if (Name.starts_with("_ZN")) {
    // Skip CV and ref qualifiers.
    size_t NameSpaceStart = Name.find_first_not_of("rVKRO", 3);
    // All built-ins are in the ::cl:: namespace.
    if (Name.substr(NameSpaceStart, 11) != "2cl7__spirv")
      return std::string();
    DemangledNameLenStart = NameSpaceStart + 11;
  }
  Start = Name.find_first_not_of("0123456789", DemangledNameLenStart);
  Name.substr(DemangledNameLenStart, Start - DemangledNameLenStart)
      .getAsInteger(10, Len);
  return Name.substr(Start, Len).str();
}
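// Worked example (sketch): for Name = "_Z18__spirv_ocl_printfPc", the digits
// after "_Z" give Len = 18 and Start = 4, so Name.substr(4, 18) recovers
// "__spirv_ocl_printf". For an "_ZN..." name, the "2cl7__spirv" namespace
// prefix is skipped before the length digits are read.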
bool hasBuiltinTypePrefix(StringRef Name) {
  if (Name.starts_with("opencl.") || Name.starts_with("ocl_") ||
      Name.starts_with("spirv."))
    return true;
  return false;
}
bool isSpecialOpaqueType(const Type *Ty) {
  if (const TargetExtType *ExtTy = dyn_cast<TargetExtType>(Ty))
    return isTypedPointerWrapper(ExtTy)
               ? false
               : hasBuiltinTypePrefix(ExtTy->getName());
  return false;
}
bool isEntryPoint(const Function &F) {
  // OpenCL handling: any function with the SPIR_KERNEL calling convention is
  // a potential entry point.
  if (F.getCallingConv() == CallingConv::SPIR_KERNEL)
    return true;
  // HLSL handling: the frontend emits a special attribute on shader entries.
  if (F.getFnAttribute("hlsl.shader").isValid())
    return true;
  return false;
}
Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx) {
  TypeName.consume_front("atomic_");
  if (TypeName.consume_front("void"))
    return Type::getVoidTy(Ctx);
  else if (TypeName.consume_front("bool") || TypeName.consume_front("_Bool"))
    return Type::getIntNTy(Ctx, 1);
  else if (TypeName.consume_front("char") ||
           TypeName.consume_front("signed char") ||
           TypeName.consume_front("unsigned char") ||
           TypeName.consume_front("uchar"))
    return Type::getInt8Ty(Ctx);
  else if (TypeName.consume_front("short") ||
           TypeName.consume_front("signed short") ||
           TypeName.consume_front("unsigned short") ||
           TypeName.consume_front("ushort"))
    return Type::getInt16Ty(Ctx);
  else if (TypeName.consume_front("int") ||
           TypeName.consume_front("signed int") ||
           TypeName.consume_front("unsigned int") ||
           TypeName.consume_front("uint"))
    return Type::getInt32Ty(Ctx);
  else if (TypeName.consume_front("long") ||
           TypeName.consume_front("signed long") ||
           TypeName.consume_front("unsigned long") ||
           TypeName.consume_front("ulong"))
    return Type::getInt64Ty(Ctx);
  else if (TypeName.consume_front("half") ||
           TypeName.consume_front("_Float16") ||
           TypeName.consume_front("__fp16"))
    return Type::getHalfTy(Ctx);
  else if (TypeName.consume_front("float"))
    return Type::getFloatTy(Ctx);
  else if (TypeName.consume_front("double"))
    return Type::getDoubleTy(Ctx);

  // Unable to recognize the SPIR-V type name.
  return nullptr;
}
std::unordered_set<BasicBlock *>
PartialOrderingVisitor::getReachableFrom(BasicBlock *Start) {
  std::queue<BasicBlock *> ToVisit;
  ToVisit.push(Start);

  std::unordered_set<BasicBlock *> Output;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    ToVisit.pop();

    if (Output.count(BB) != 0)
      continue;
    Output.insert(BB);

    for (BasicBlock *Successor : successors(BB)) {
      // Ignore back-edges.
      if (DT.dominates(Successor, BB))
        continue;
      ToVisit.push(Successor);
    }
  }

  return Output;
}
bool PartialOrderingVisitor::CanBeVisited(BasicBlock *BB) const {
  for (BasicBlock *P : predecessors(BB)) {
    // Ignore back-edges.
    if (DT.dominates(BB, P))
      continue;

    // One of the predecessors hasn't been visited. Not ready yet.
    if (BlockToOrder.count(P) == 0)
      return false;

    // If the block is a loop exit, the loop must be finished before
    // we can continue.
    Loop *L = LI.getLoopFor(P);
    if (L == nullptr || L->contains(BB))
      continue;

    // SPIR-V requires a single back-edge per loop, and the backend relies on
    // loops being in simplified form; more than one back-edge is a bug.
    assert(L->getNumBackEdges() <= 1);

    // If the loop has no latch, the loop's rank doesn't matter; proceed.
    BasicBlock *Latch = L->getLoopLatch();
    if (Latch == nullptr)
      continue;

    // The latch hasn't been visited yet, meaning the loop is not done.
    if (BlockToOrder.count(Latch) == 0)
      return false;
  }

  return true;
}
size_t PartialOrderingVisitor::GetNodeRank(BasicBlock *BB) const {
  auto It = BlockToOrder.find(BB);
  if (It != BlockToOrder.end())
    return It->second.Rank;

  size_t result = 0;
  for (BasicBlock *P : predecessors(BB)) {
    // Ignore back-edges.
    if (DT.dominates(BB, P))
      continue;

    auto Iterator = BlockToOrder.end();
    Loop *L = LI.getLoopFor(P);
    BasicBlock *Latch = L ? L->getLoopLatch() : nullptr;

    // If the predecessor is outside a loop, in the same loop as BB, or in a
    // loop with no latch, simply take its rank + 1 as a candidate.
    if (L == nullptr || L->contains(BB) || Latch == nullptr) {
      Iterator = BlockToOrder.find(P);
    } else {
      // Otherwise, base the rank on the loop's latch, which carries the
      // highest rank in the loop.
      Iterator = BlockToOrder.find(Latch);
    }

    assert(Iterator != BlockToOrder.end());
    result = std::max(result, Iterator->second.Rank + 1);
  }

  return result;
}
size_t PartialOrderingVisitor::visit(BasicBlock *BB, size_t Unused) {
  ToVisit.push(BB);
  Queued.insert(BB);

  size_t QueueIndex = 0;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    ToVisit.pop();

    if (!CanBeVisited(BB)) {
      // Requeue the block and try the next candidate.
      ToVisit.push(BB);
      if (QueueIndex >= ToVisit.size())
        llvm_unreachable(
            "No valid candidate in the queue. Is the graph reducible?");
      QueueIndex++;
      continue;
    }

    QueueIndex = 0;
    size_t Rank = GetNodeRank(BB);
    OrderInfo Info = {Rank, BlockToOrder.size()};
    BlockToOrder.emplace(BB, Info);

    for (BasicBlock *S : successors(BB)) {
      if (Queued.count(S) != 0)
        continue;
      ToVisit.push(S);
      Queued.insert(S);
    }
  }

  return 0;
}
PartialOrderingVisitor::PartialOrderingVisitor(Function &F) {
  DT.recalculate(F);
  LI = LoopInfo(DT);

  visit(&*F.begin(), 0);

  Order.reserve(F.size());
  for (auto &[BB, Info] : BlockToOrder)
    Order.emplace_back(BB);

  std::sort(Order.begin(), Order.end(), [&](const auto &LHS, const auto &RHS) {
    return compare(LHS, RHS);
  });
}
bool PartialOrderingVisitor::compare(const BasicBlock *LHS,
                                     const BasicBlock *RHS) const {
  const OrderInfo &InfoLHS = BlockToOrder.at(const_cast<BasicBlock *>(LHS));
  const OrderInfo &InfoRHS = BlockToOrder.at(const_cast<BasicBlock *>(RHS));
  if (InfoLHS.Rank != InfoRHS.Rank)
    return InfoLHS.Rank < InfoRHS.Rank;
  return InfoLHS.TraversalIndex < InfoRHS.TraversalIndex;
}
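// Example (sketch): for a diamond CFG A -> {B, C} -> D with no back-edges,
// GetNodeRank yields A=0, B=C=1, D=2; compare() then orders blocks by rank
// and breaks the B/C tie by traversal (discovery) index, giving A, B, C, D.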
void PartialOrderingVisitor::partialOrderVisit(
    BasicBlock &Start, std::function<bool(BasicBlock *)> Op) {
  std::unordered_set<BasicBlock *> Reachable = getReachableFrom(&Start);
  assert(BlockToOrder.count(&Start) != 0);

  // Skip blocks that come before |Start| in the order.
  auto It = Order.begin();
  while (It != Order.end() && *It != &Start)
    ++It;
  assert(It != Order.end());

  // Visit the remaining blocks; once Op returns false for a block, blocks
  // with a strictly higher rank are no longer visited.
  std::optional<size_t> EndRank = std::nullopt;
  for (; It != Order.end(); ++It) {
    if (EndRank.has_value() && BlockToOrder[*It].Rank > *EndRank)
      continue;

    if (Reachable.count(*It) == 0)
      continue;

    if (!Op(*It))
      EndRank = BlockToOrder[*It].Rank;
  }
}
bool sortBlocks(Function &F) {
  if (F.size() == 0)
    return false;

  bool Modified = false;
  std::vector<BasicBlock *> Order;
  Order.reserve(F.size());

  PartialOrderingVisitor Visitor(F);
  Visitor.partialOrderVisit(*F.begin(), [&Order](BasicBlock *Block) {
    Order.push_back(Block);
    return true;
  });

  assert(&*F.begin() == Order[0]);
  BasicBlock *LastBlock = &*F.begin();
  for (BasicBlock *BB : Order) {
    if (BB != LastBlock && &*LastBlock->getNextNode() != BB) {
      BB->moveAfter(LastBlock);
      Modified = true;
    }
    LastBlock = BB;
  }

  return Modified;
}
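// Usage sketch (hypothetical): calling sortBlocks(F) from a pass rewrites the
// textual block order to match the partial order computed above, e.g. a
// function laid out as [entry, merge, body] becomes [entry, body, merge]; the
// return value reports whether any block actually moved, for pass bookkeeping.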
MachineInstr *getVRegDef(MachineRegisterInfo &MRI, Register Reg) {
  MachineInstr *MaybeDef = MRI.getVRegDef(Reg);
  if (MaybeDef && MaybeDef->getOpcode() == SPIRV::ASSIGN_TYPE)
    MaybeDef = MRI.getVRegDef(MaybeDef->getOperand(1).getReg());
  return MaybeDef;
}
bool getVacantFunctionName(Module &M, std::string &Name) {
  // A bit of paranoia, but we don't want even a chance of this loop
  // running for too long.
  constexpr unsigned MaxIters = 1024;
  for (unsigned I = 0; I < MaxIters; ++I) {
    std::string OrdName = Name + Twine(I).str();
    if (!M.getFunction(OrdName)) {
      Name = OrdName;
      return true;
    }
  }
  return false;
}
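// Example (sketch): with "foo0" and "foo1" already defined in M and
// Name == "foo", the probes are "foo0", "foo1", "foo2"; Name is rewritten to
// "foo2" and the function returns true. After MaxIters (1024) collisions it
// gives up and returns false.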
// Assign the SPIR-V type to the register. If the register has no valid
// assigned class (or Force is set), also set its register class and LLT
// according to the SPIR-V type.
void setRegClassType(Register Reg, SPIRVType *SpvType, SPIRVGlobalRegistry *GR,
                     MachineRegisterInfo *MRI, const MachineFunction &MF,
                     bool Force) {
  GR->assignSPIRVTypeToVReg(SpvType, Reg, MF);
  if (!MRI->getRegClassOrNull(Reg) || Force) {
    MRI->setRegClass(Reg, GR->getRegClass(SpvType));
    MRI->setType(Reg, GR->getRegType(SpvType));
  }
}
bool isNestedPointer(const Type *Ty) {
  if (Ty->isPtrOrPtrVectorTy())
    return true;
  if (const FunctionType *RefTy = dyn_cast<FunctionType>(Ty)) {
    if (isNestedPointer(RefTy->getReturnType()))
      return true;
    for (const Type *ArgTy : RefTy->params())
      if (isNestedPointer(ArgTy))
        return true;
  }
  if (const ArrayType *RefTy = dyn_cast<ArrayType>(Ty))
    return isNestedPointer(RefTy->getElementType());
  return false;
}
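// Example (sketch): isNestedPointer is true for `ptr`, for a function type
// such as `void (ptr)` (pointer-typed parameter), and for `[4 x ptr]`
// (pointer element type), but false for plain `i32` or `[4 x i32]`.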
bool isSpvIntrinsic(const Value *Arg) {
  if (const auto *II = dyn_cast<IntrinsicInst>(Arg))
    if (Function *F = II->getCalledFunction())
      if (F->getName().starts_with("llvm.spv."))
        return true;
  return false;
}