#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TFLITE)

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/ModelUnderTrainingRunner.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include <optional>

using namespace llvm;

namespace {
// A tensor spec as read from the output specs JSON file, optionally paired
// with the name under which it should appear in the training log.
struct LoggedFeatureSpec {
  TensorSpec Spec;
  std::optional<std::string> LoggingName;
};
std::optional<std::vector<LoggedFeatureSpec>>
loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
                StringRef ModelPath, StringRef SpecFileOverride) {
  SmallVector<char, 128> OutputSpecsPath;
  StringRef FileName = SpecFileOverride;
  // By default, look for output_spec.json next to the model.
  if (FileName.empty()) {
    llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
    FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
  }
  auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
  if (!BufferOrError) {
    Ctx.emitError("Error opening output specs file: " + FileName + " : " +
                  BufferOrError.getError().message());
    return std::nullopt;
  }
  auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
  if (!ParsedJSONValues) {
    Ctx.emitError("Could not parse specs file: " + FileName);
    return std::nullopt;
  }
  auto ValuesArray = ParsedJSONValues->getAsArray();
  if (!ValuesArray) {
    Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
                  "logging_name:<name>} dictionaries");
    return std::nullopt;
  }
  std::vector<LoggedFeatureSpec> Ret;
  for (const auto &Value : *ValuesArray)
    if (const auto *Obj = Value.getAsObject())
      if (const auto *SpecPart = Obj->get("tensor_spec"))
        if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
          if (auto LoggingName = Obj->getString("logging_name")) {
            // Logging only supports numeric tensors.
            if (!TensorSpec->isElementType<int64_t>() &&
                !TensorSpec->isElementType<int32_t>() &&
                !TensorSpec->isElementType<float>()) {
              Ctx.emitError(
                  "Only int64, int32, and float tensors are supported. "
                  "Found unsupported type for tensor named " +
                  TensorSpec->name());
              return std::nullopt;
            }
            Ret.push_back({*TensorSpec, LoggingName->str()});
          }
  if (ValuesArray->size() != Ret.size()) {
    Ctx.emitError(
        "Unable to parse output spec. It should be a json file containing an "
        "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
        "with a json object describing a TensorSpec; and a 'logging_name' key, "
        "which is a string to use as name when logging this tensor in the "
        "training log.");
    return std::nullopt;
  }
  if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
    Ctx.emitError("The first output spec must describe the decision tensor, "
                  "and must have the logging_name " +
                  ExpectedDecisionName);
    return std::nullopt;
  }
  return {std::move(Ret)};
}
} // namespace
ModelUnderTrainingRunner::ModelUnderTrainingRunner(
    LLVMContext &Ctx, const std::string &ModelPath,
    const std::vector<TensorSpec> &InputSpecs,
    const std::vector<TensorSpec> &OutputSpecs,
    const std::vector<TensorSpec> &ExtraOutputsForLogging)
    : MLModelRunner(Ctx, MLModelRunner::Kind::Development, InputSpecs.size()),
      OutputSpecs(OutputSpecs), ExtraOutputsForLogging(ExtraOutputsForLogging) {
  Evaluator =
      std::make_unique<TFModelEvaluator>(ModelPath, InputSpecs, OutputSpecs);
  if (!Evaluator || !Evaluator->isValid()) {
    Ctx.emitError("Failed to create saved model evaluator");
    Evaluator.reset();
    return;
  }

  // Bind each input feature to the evaluator's corresponding input tensor,
  // so callers can write feature values directly into the model's buffers.
  for (size_t I = 0, E = InputSpecs.size(); I < E; ++I) {
    setUpBufferForTensor(I, InputSpecs[I], Evaluator->getUntypedInput(I));
  }
}
void *ModelUnderTrainingRunner::evaluateUntyped() {
  LastEvaluationResult = Evaluator->evaluate();
  if (!LastEvaluationResult.has_value()) {
    Ctx.emitError("Error evaluating model.");
    return nullptr;
  }
  return LastEvaluationResult->getUntypedTensorValue(0);
}
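// Callers normally reach this through MLModelRunner's typed wrapper rather
// than calling evaluateUntyped() directly. Assuming the decision tensor is a
// single int64 (as in the example spec above), that would look roughly like:
//
//   int64_t Decision = Runner->evaluate<int64_t>();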
std::unique_ptr<ModelUnderTrainingRunner>
ModelUnderTrainingRunner::createAndEnsureValid(
    LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
    const std::vector<TensorSpec> &InputSpecs,
    StringRef OutputSpecsPathOverride) {
  if (auto MaybeOutputSpecs = loadOutputSpecs(Ctx, DecisionName, ModelPath,
                                              OutputSpecsPathOverride)) {
    std::unique_ptr<ModelUnderTrainingRunner> MUTR;
    std::vector<TensorSpec> OutputSpecs;
    std::vector<TensorSpec> ExtraOutputsForLogging;
    // The model is evaluated against all the specs; everything but the first
    // (the decision tensor) is only used for the training log.
    append_range(OutputSpecs,
                 map_range(*MaybeOutputSpecs, [](const LoggedFeatureSpec &LFS) {
                   return LFS.Spec;
                 }));
    append_range(ExtraOutputsForLogging,
                 map_range(drop_begin(*MaybeOutputSpecs),
                           [](const LoggedFeatureSpec &LFS) {
                             return TensorSpec(LFS.LoggingName
                                                   ? *LFS.LoggingName
                                                   : LFS.Spec.name(),
                                               LFS.Spec);
                           }));

    MUTR.reset(new ModelUnderTrainingRunner(
        Ctx, ModelPath, InputSpecs, OutputSpecs, ExtraOutputsForLogging));
    if (MUTR && MUTR->isValid())
      return MUTR;
    Ctx.emitError("Could not load or create model evaluator.");
    return nullptr;
  }
  Ctx.emitError("Could not load the policy model from the provided path");
  return nullptr;
}

#endif // defined(LLVM_HAVE_TFLITE)