Add template for 1.3 types.hal and regenerate it
After this update, types.hal v1.3 no longer refers to API level 30.
Test: none needed
Bug: 140132458
Change-Id: If707dfbcade6be1a0885fc21f6ddf47f3c27b244
Merged-In: If707dfbcade6be1a0885fc21f6ddf47f3c27b244
(cherry picked from commit e98e6793be04416ead2c56c73baa25cdccd767e9)
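For reference, types.hal files like the one below are generated from the corresponding types.t templates by the API tooling mentioned in the template header (frameworks/ml/nn/tools/api/README.md); each %insert directive is replaced by a named section of shared text. A minimal sketch of that expansion step, assuming a simplified dictionary of named sections (the file names, helper names, and section format here are illustrative only, not the real tool's interface):

    import re

    def expand_template(template_text, sections):
        # Replace each '%insert <name>' line with the named section's body and
        # drop '%%' template-only comment lines; everything else is copied as-is.
        out = []
        for line in template_text.splitlines():
            m = re.match(r'%insert\s+(\S+)', line)
            if m:
                out.append(sections[m.group(1)])  # e.g. "Operand_1.3"
            elif line.startswith('%%'):
                continue
            else:
                out.append(line)
        return '\n'.join(out) + '\n'

    # Hypothetical usage for this change (section loading not shown):
    #   hal = expand_template(open('neuralnetworks/1.3/types.t').read(), sections)
    #   open('neuralnetworks/1.3/types.hal', 'w').write(hal)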
diff --git a/current.txt b/current.txt
index 4c5f155..ef1228f 100644
--- a/current.txt
+++ b/current.txt
@@ -587,7 +587,7 @@
07d0a252b2d8fa35887908a996ba395cf392968395fc30afab791f46e0c22a52 android.hardware.boot@1.1::IBootControl
74049a402be913963edfdd80828a53736570e9d8124a1bf18166b6ed46a6b0ab android.hardware.boot@1.1::types
34515afa2bb792d3c6d8495a5f5d907d179c8507ca5e55c10050d02ae1d516ef android.hardware.neuralnetworks@1.3::IDevice
-e2d20d4eb24f40b44a3766d05f77052581cb3f4df35fb48c0cc5d9cdcf5c872e android.hardware.neuralnetworks@1.3::types
+b74fe72cfe438f50e772e6a307657ff449d5bde83c15dd1f140ff2edbe73499c android.hardware.neuralnetworks@1.3::types
544049dcda3f943ad67d83d5277f06681a3782982a9af5a78b5d4e8d295d061a android.hardware.vibrator@1.4::IVibrator
5e1c12efbbba89c9143d10b1b90eceff8bc79aa079f5106215b528e104fef101 android.hardware.vibrator@1.4::IVibratorCallback
033eae03c09ebc75e82db37bc39995dfaa9086745577b44d9e14e9ccb48bd8cc android.hardware.vibrator@1.4::types
diff --git a/neuralnetworks/1.2/types.t b/neuralnetworks/1.2/types.t
index cab330d..d197f6b 100644
--- a/neuralnetworks/1.2/types.t
+++ b/neuralnetworks/1.2/types.t
@@ -41,27 +41,7 @@
enum OperandType : @1.0::OperandType {
%insert Operand_1.2
-
- /*
- * DEPRECATED. Since HAL version 1.2, extensions are the preferred
- * alternative to OEM operation and data types.
- *
- * OEM specific scalar value.
- * OEM = 10000,
- */
- /*
- * DEPRECATED. Since HAL version 1.2, extensions are the preferred
- * alternative to OEM operation and data types.
- *
- * A tensor of OEM specific values.
- * TENSOR_OEM_BYTE = 10001,
- */
- /* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
- * OperandTypeRange::FUNDAMENTAL_MAX.
- */
- /* ADDING A NEW OEM TYPE REQUIRES UPDATING THE VALUE OF
- * OperandTypeRange::OEM_MAX.
- */
+%insert OEMDeprecationAndOperandTypeRangeMaxComment
};
/**
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index db5dd51..86ab287 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -25,13 +25,6 @@
import android.hidl.safe_union@1.0::Monostate;
-/**
- * NOTE: Since NNAPI 1.2, OEM operation and data type are deprecated. Extensions
- * are the preferred alternative.
- *
- * NOTE: Adding a new fundamental type requires updating the value of
- * OperandTypeRange::FUNDAMENTAL_MAX.
- */
enum OperandType : @1.2::OperandType {
/**
* A tensor of 8 bit signed integers that represent real numbers.
@@ -43,10 +36,29 @@
*
* The formula is:
* real_value = (integer_value - zeroPoint) * scale.
- *
- * Available since API level 30.
*/
TENSOR_QUANT8_ASYMM_SIGNED = 14,
+
+ /*
+ * DEPRECATED. Since HAL version 1.2, extensions are the preferred
+ * alternative to OEM operation and data types.
+ *
+ * OEM specific scalar value.
+ * OEM = 10000,
+ */
+ /*
+ * DEPRECATED. Since HAL version 1.2, extensions are the preferred
+ * alternative to OEM operation and data types.
+ *
+ * A tensor of OEM specific values.
+ * TENSOR_OEM_BYTE = 10001,
+ */
+ /* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
+ * OperandTypeRange::FUNDAMENTAL_MAX.
+ */
+ /* ADDING A NEW OEM TYPE REQUIRES UPDATING THE VALUE OF
+ * OperandTypeRange::OEM_MAX.
+ */
};
/**
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
new file mode 100644
index 0000000..d41cfd2
--- /dev/null
+++ b/neuralnetworks/1.3/types.t
@@ -0,0 +1,344 @@
+%% template file for generating types.hal.
+%% see frameworks/ml/nn/tools/api/README.md.
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.3;
+
+import @1.0::DataLocation;
+import @1.0::OperandLifeTime;
+import @1.0::PerformanceInfo;
+import @1.2::OperandType;
+import @1.2::OperationType;
+import @1.2::SymmPerChannelQuantParams;
+
+import android.hidl.safe_union@1.0::Monostate;
+
+enum OperandType : @1.2::OperandType {
+%insert Operand_1.3
+%insert OEMDeprecationAndOperandTypeRangeMaxComment
+};
+
+/**
+ * The range of operand values in the OperandType enum.
+ */
+enum OperandTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+%insert Operand_1.3_MAX
+ OEM_MIN = 10000,
+ OEM_MAX = 10001,
+ BASE_MAX = 0xFFFF,
+};
+
+
+/**
+ * The capabilities of a driver.
+ *
+ * Performance of an operation comes from the type of its first operand.
+ * This represents performance for non extension operand types.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data but performing
+ * calculations with range and/or precision as low as that of the IEEE
+ * 754 16-bit floating-point format.
+ */
+ PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
+ PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;
+
+ /**
+ * Driver performance when operating on a particular data type.
+ * In the case of float32 data, this is used when the calculations
+ * are not relaxed.
+ */
+ struct OperandPerformance {
+ OperandType type;
+ PerformanceInfo info;
+ };
+
+ /**
+ * Performance by operand type. Must be sorted by OperandType.
+ * If a particular OperandType is not present in operandPerformance,
+ * its performance is treated as
+ * { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
+ */
+ vec<OperandPerformance> operandPerformance;
+};
+
+/**
+ * Describes one operand of the model's graph.
+ */
+struct Operand {
+ /**
+ * The data type.
+ *
+ * Besides the values listed in {@link OperandType}, any value above
+ * {@link OperandTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperandType type;
+
+ /**
+ * Dimensions of the operand.
+ *
+ * For a scalar operand, dimensions.size() must be 0.
+ *
+ * A tensor operand with all dimensions specified has "fully
+ * specified" dimensions. Whenever possible (i.e., whenever the
+ * dimensions are known at model construction time), a tensor
+ * operand should have (but is not required to have) fully
+ * specified dimensions, in order to enable the best possible
+ * performance.
+ *
+ * If a tensor operand's dimensions are not fully specified, the
+ * dimensions of the operand are deduced from the operand
+ * dimensions and values of the operation for which that operand
+ * is an output.
+ *
+ * In the following situations, a tensor operand's dimensions must
+ * be fully specified:
+ *
+ * . The operand has lifetime CONSTANT_COPY or
+ * CONSTANT_REFERENCE.
+ *
+ * . The operand has lifetime MODEL_INPUT. Fully
+ * specified dimensions must either be present in the
+ * Operand or they must be provided in the corresponding
+ * RequestArgument.
+ * EXCEPTION: If the input is optional and omitted
+ * (by setting the hasNoValue field of the corresponding
+ * RequestArgument to true) then it need not have fully
+ * specified dimensions.
+ *
+ * A tensor operand with some number of unspecified dimensions is
+ * represented by setting each unspecified dimension to 0.
+ *
+ * A tensor operand with unspecified rank is represented by providing
+ * an empty dimensions vector.
+ */
+ vec<uint32_t> dimensions;
+
+ /**
+ * The number of times this operand appears as an operation input.
+ *
+ * (For example, if this operand appears once in one operation's
+ * input list, and three times in another operation's input list,
+ * then numberOfConsumers = 4.)
+ */
+ uint32_t numberOfConsumers;
+
+ /**
+ * Quantized scale of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
+ * TENSOR_INT32.
+ */
+ float scale;
+
+ /**
+ * Quantized zero-point offset of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ */
+ int32_t zeroPoint;
+
+ /**
+ * How the operand is used.
+ */
+ OperandLifeTime lifetime;
+
+ /**
+ * Where to find the data for this operand.
+ * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
+ * NO_VALUE:
+ * - All the fields must be 0.
+ * If the lifetime is CONSTANT_COPY:
+ * - location.poolIndex is 0.
+ * - location.offset is the offset in bytes into Model.operandValues.
+ * - location.length is set.
+ * If the lifetime is CONSTANT_REFERENCE:
+ * - location.poolIndex is set.
+ * - location.offset is the offset in bytes into the specified pool.
+ * - location.length is set.
+ */
+ DataLocation location;
+
+ /**
+ * Additional parameters specific to a particular operand type.
+ */
+ safe_union ExtraParams {
+ /**
+ * No additional parameters.
+ */
+ Monostate none;
+
+ /**
+ * Symmetric per-channel quantization parameters.
+ *
+ * Only applicable to operands of type TENSOR_QUANT8_SYMM_PER_CHANNEL.
+ */
+ SymmPerChannelQuantParams channelQuant;
+
+ /**
+ * Extension operand parameters.
+ *
+ * The framework treats this as an opaque data blob.
+ * The format is up to individual extensions.
+ */
+ vec<uint8_t> extension;
+ } extraParams;
+};
+
+/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ */
+ OperationType type;
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> outputs;
+};
+
+/**
+ * A Neural Network Model.
+ *
+ * This includes not only the execution graph, but also constant data such as
+ * weights or scalars added at construction time. The only information that
+ * may not be known is the shape of the input tensors.
+ */
+struct Model {
+ /**
+ * All operands included in the model.
+ */
+ vec<Operand> operands;
+
+ /**
+ * All operations included in the model.
+ *
+ * The operations are sorted into execution order. Every operand
+ * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
+ * written before it is read.
+ */
+ vec<Operation> operations;
+
+ /**
+ * Input indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> inputIndexes;
+
+ /**
+ * Output indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> outputIndexes;
+
+ /**
+ * A byte buffer containing operand data that were copied into the model.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_COPY.
+ */
+ vec<uint8_t> operandValues;
+
+ /**
+ * A collection of shared memory pools containing operand values.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_REFERENCE.
+ */
+ vec<memory> pools;
+
+ /**
+ * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or
+ * precision as low as that of the IEEE 754 16-bit floating-point format.
+ * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the
+ * range and precision of the IEEE 754 32-bit floating-point format.
+ */
+ bool relaxComputationFloat32toFloat16;
+
+ /**
+ * The mapping between extension names and prefixes of operand and
+ * operation type values.
+ *
+ * An operand or operation whose numeric type value is above
+ * {@link OperandTypeRange::BASE_MAX} or
+ * {@link OperationTypeRange::BASE_MAX} respectively should be interpreted
+ * as an extension operand. The low
+ * {@link Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the value
+ * correspond to the type ID within the extension and the high
+ * {@link Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode
+ * the "prefix", which maps uniquely to the extension name.
+ *
+ * For example, if a model contains an operation whose value is
+ * 0xAAAABBBB and extensionNameToPrefix contains an entry with
+ * prefix=0xAAAA and name="vendor.test.test_extension", then
+ * the operation should be interpreted as the operation 0xBBBB
+ * of the extension named vendor.test.test_extension.
+ *
+ * This is a one-to-one correspondence. That is, there must be at most one
+ * prefix corresponding to each extension name and at most one extension
+ * name corresponding to each prefix.
+ */
+ vec<ExtensionNameAndPrefix> extensionNameToPrefix;
+
+ /**
+ * A correspondence between an extension name and a prefix of operand and
+ * operation type values.
+ */
+ struct ExtensionNameAndPrefix {
+ /**
+ * The extension name.
+ *
+ * See {@link Extension::name} for the format specification.
+ */
+ string name;
+
+ /**
+ * The unique extension identifier within the model.
+ *
+ * See {@link Model::extensionNameToPrefix}.
+ */
+ uint16_t prefix;
+ };
+
+ /**
+ * Numeric values of extension operand and operation types have the
+ * following structure:
+ * - 16 high bits represent the "prefix", which corresponds uniquely to the
+ * extension name.
+ * - 16 low bits represent the type ID within the extension.
+ */
+ enum ExtensionTypeEncoding : uint8_t {
+ HIGH_BITS_PREFIX = 16,
+ LOW_BITS_TYPE = 16,
+ };
+};
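A small, illustrative check of the ExtensionTypeEncoding scheme documented in the generated file above (the helper name is hypothetical; the bit widths and the 0xAAAABBBB example come from the comments on ExtensionTypeEncoding and Model::extensionNameToPrefix):

    HIGH_BITS_PREFIX = 16  # high 16 bits: extension prefix
    LOW_BITS_TYPE = 16     # low 16 bits: type ID within the extension

    def decode_extension_type(value):
        prefix = value >> LOW_BITS_TYPE
        type_within_extension = value & ((1 << LOW_BITS_TYPE) - 1)
        return prefix, type_within_extension

    # Matches the example given in the Model::extensionNameToPrefix comment:
    assert decode_extension_type(0xAAAABBBB) == (0xAAAA, 0xBBBB)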