Merge "[AWARE] Developer documentation on HAL usage" into oc-mr1-dev
diff --git a/audio/2.0/vts/functional/ValidateAudioConfiguration.cpp b/audio/2.0/vts/functional/ValidateAudioConfiguration.cpp
index ee49023..ec3259a 100644
--- a/audio/2.0/vts/functional/ValidateAudioConfiguration.cpp
+++ b/audio/2.0/vts/functional/ValidateAudioConfiguration.cpp
@@ -28,6 +28,7 @@
const auto configPath = folder + '/' + configName;
if (access(configPath.c_str(), R_OK) == 0) {
ASSERT_VALID_XML(configPath.c_str(), configSchemaPath);
+ return; // The framework does not read past the first config file found
}
}
}
diff --git a/broadcastradio/1.1/default/BroadcastRadio.cpp b/broadcastradio/1.1/default/BroadcastRadio.cpp
index 38b4b99..1bcfd82 100644
--- a/broadcastradio/1.1/default/BroadcastRadio.cpp
+++ b/broadcastradio/1.1/default/BroadcastRadio.cpp
@@ -64,7 +64,7 @@
}),
AmFmBandConfig({
Band::FM_HD,
- 87900, // lowerLimit
+ 87700, // lowerLimit
107900, // upperLimit
{200}, // spacings
}),
diff --git a/camera/device/1.0/default/CameraDevice.cpp b/camera/device/1.0/default/CameraDevice.cpp
index c53c0d8..a03bbc8 100644
--- a/camera/device/1.0/default/CameraDevice.cpp
+++ b/camera/device/1.0/default/CameraDevice.cpp
@@ -377,10 +377,14 @@
hidl_handle hidlHandle = mem->mHidlHandle;
MemoryId id = object->mDeviceCallback->registerMemory(hidlHandle, buf_size, num_bufs);
mem->handle.mId = id;
- if (object->mMemoryMap.count(id) != 0) {
- ALOGE("%s: duplicate MemoryId %d returned by client!", __FUNCTION__, id);
+
+ {
+ Mutex::Autolock _l(object->mMemoryMapLock);
+ if (object->mMemoryMap.count(id) != 0) {
+ ALOGE("%s: duplicate MemoryId %d returned by client!", __FUNCTION__, id);
+ }
+ object->mMemoryMap[id] = mem;
}
- object->mMemoryMap[id] = mem;
mem->handle.mDevice = object;
return &mem->handle;
}
@@ -398,7 +402,10 @@
ALOGE("%s: camera HAL return memory while camera is not opened!", __FUNCTION__);
}
device->mDeviceCallback->unregisterMemory(mem->handle.mId);
- device->mMemoryMap.erase(mem->handle.mId);
+ {
+ Mutex::Autolock _l(device->mMemoryMapLock);
+ device->mMemoryMap.erase(mem->handle.mId);
+ }
mem->decStrong(mem);
}
@@ -826,7 +833,16 @@
return;
}
if (mDevice->ops->release_recording_frame) {
- CameraHeapMemory* camMemory = mMemoryMap.at(memId);
+ CameraHeapMemory* camMemory;
+ {
+ Mutex::Autolock _l(mMemoryMapLock);
+ auto it = mMemoryMap.find(memId);
+ if (it == mMemoryMap.end() || it->second == nullptr) {
+ ALOGE("%s unknown memoryId %d", __FUNCTION__, memId);
+ return;
+ }
+ camMemory = it->second;
+ }
if (bufferIndex >= camMemory->mNumBufs) {
ALOGE("%s: bufferIndex %d exceeds number of buffers %d",
__FUNCTION__, bufferIndex, camMemory->mNumBufs);
diff --git a/camera/device/1.0/default/CameraDevice_1_0.h b/camera/device/1.0/default/CameraDevice_1_0.h
index c078596..2c980f0 100644
--- a/camera/device/1.0/default/CameraDevice_1_0.h
+++ b/camera/device/1.0/default/CameraDevice_1_0.h
@@ -165,6 +165,8 @@
sp<ICameraDeviceCallback> mDeviceCallback = nullptr;
+ mutable Mutex mMemoryMapLock; // guards access to mMemoryMap
+ // mLock must not be held once this lock is acquired
std::unordered_map<MemoryId, CameraHeapMemory*> mMemoryMap;
bool mMetadataMode = false;
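The new mMemoryMapLock is meant to be the innermost lock: every access to mMemoryMap happens inside a short critical section, and no other lock is taken while it is held. A minimal sketch of the intended pattern, reusing the member names from this patch (lookupMemory is a hypothetical helper, not part of the HAL):

    // Sketch only: mMemoryMap accesses are confined to a short
    // Mutex::Autolock scope; mLock is never acquired inside it.
    CameraHeapMemory* CameraDevice::lookupMemory(MemoryId id) const {
        Mutex::Autolock _l(mMemoryMapLock);
        auto it = mMemoryMap.find(id);
        return (it == mMemoryMap.end()) ? nullptr : it->second;
    }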
diff --git a/configstore/1.0/default/seccomp_policy/configstore@1.0-arm64.policy b/configstore/1.0/default/seccomp_policy/configstore@1.0-arm64.policy
index 7e3dfe0c..43bf1fa 100644
--- a/configstore/1.0/default/seccomp_policy/configstore@1.0-arm64.policy
+++ b/configstore/1.0/default/seccomp_policy/configstore@1.0-arm64.policy
@@ -39,6 +39,7 @@
rt_sigreturn: 1
getrlimit: 1
madvise: 1
+clock_gettime: 1
# used during process crash by crash_dump to dump process info
rt_sigprocmask: 1
diff --git a/current.txt b/current.txt
index f045fd8..5207c6a 100644
--- a/current.txt
+++ b/current.txt
@@ -244,5 +244,6 @@
86ba9c03978b79a742e990420bc5ced0673d25a939f82572996bef92621e2014 android.hardware.cas@1.0::IMediaCasService
503da837d1a67cbdb7c08a033e927e5430ae1b159d98bf72c6336b4dcc5e76f5 android.hardware.cas.native@1.0::types
619600109232ed64b827c8a11beed8070b1827ae464547d7aa146cf0473b4bca android.hardware.cas.native@1.0::IDescrambler
+0a159f81359cd4f71bbe00972ee8403ea79351fb7c0cd48be72ebb3e424dbaef android.hardware.radio@1.0::types
f4945e397b5dea41bb64518dfde59be71245d8a125fd1e0acffeb57ac7b08fed android.hardware.thermal@1.1::IThermal
c8bc853546dd55584611def2a9fa1d99f657e3366c976d2f60fe6b8aa6d2cb87 android.hardware.thermal@1.1::IThermalCallback
diff --git a/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp b/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp
index 411d97b..3d78f45 100644
--- a/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp
+++ b/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp
@@ -3907,7 +3907,7 @@
* Verifies that the addRngEntropy method doesn't blow up when given a largish amount of data.
*/
TEST_F(AddEntropyTest, AddLargeEntropy) {
- EXPECT_EQ(ErrorCode::OK, keymaster().addRngEntropy(HidlBuf(string(16 * 1024, 'a'))));
+ EXPECT_EQ(ErrorCode::OK, keymaster().addRngEntropy(HidlBuf(string(2 * 1024, 'a'))));
}
typedef KeymasterHidlTest AttestationTest;
diff --git a/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp b/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp
index 1d4fd67..a5b5524 100644
--- a/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp
+++ b/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp
@@ -839,7 +839,7 @@
OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
*eColorFormat = OMX_COLOR_FormatUnused;
portFormat.nIndex = 0;
- while (1) {
+ while (portFormat.nIndex < 512) {
status = getPortParam(omxNode, OMX_IndexParamVideoPortFormat,
kPortIndexOutput, &portFormat);
if (status != ::android::hardware::media::omx::V1_0::Status::OK) break;
@@ -853,7 +853,9 @@
break;
}
if (OMX_COLOR_FormatYUV420SemiPlanar == portFormat.eColorFormat ||
- OMX_COLOR_FormatYUV420Planar == portFormat.eColorFormat) {
+ OMX_COLOR_FormatYUV420Planar == portFormat.eColorFormat ||
+ OMX_COLOR_FormatYUV420PackedPlanar == portFormat.eColorFormat ||
+ OMX_COLOR_FormatYUV420PackedSemiPlanar == portFormat.eColorFormat) {
*eColorFormat = portFormat.eColorFormat;
break;
}
diff --git a/neuralnetworks/1.0/IDevice.hal b/neuralnetworks/1.0/IDevice.hal
index b826b23..b6f9433 100644
--- a/neuralnetworks/1.0/IDevice.hal
+++ b/neuralnetworks/1.0/IDevice.hal
@@ -18,14 +18,66 @@
package android.hardware.neuralnetworks@1.0;
+import IEvent;
import IPreparedModel;
+/**
+ * This interface represents a device driver.
+ */
interface IDevice {
- initialize() generates(Capabilities capabilities);
+ /**
+ * Gets the capabilities of a driver.
+ *
+ * @return status ErrorStatus::NONE if successful.
+ * @return capabilities Capabilities of the driver.
+ */
+ getCapabilities() generates (ErrorStatus status, Capabilities capabilities);
- getSupportedSubgraph(Model model) generates(vec<bool> supported);
+ /**
+ * Gets the supported operations in a model.
+ *
+ * getSupportedOperations provides a more nuanced indication of whether a
+ * model is able to be compiled by the driver. Having the entire model
+ * allows for additional information such as tensor shapes to inputs or
+ * tensor strides, information which is not known in getCapabilities.
+ *
+ * @param model A model whose operations--and their corresponding
+ * operands--are to be verified by the driver.
+ * @return status ErrorStatus::NONE if successful.
+ * @return supportedOperations A list of supported operations, where true
+ * indicates the operation is supported and
+ * false indicates the operation is not
+ * supported. The index of "supported"
+ * corresponds with the index of the operation
+ * it is describing.
+ */
+ getSupportedOperations(Model model)
+ generates (ErrorStatus status, vec<bool> supportedOperations);
- prepareModel(Model model) generates(IPreparedModel preparedModel);
+ /**
+ * Prepares a model for execution.
+ *
+ * prepareModel is used to make any necessary transformations or alternative
+ * representations to a model for execution, possibly including
+ * transformations on the constant data, optimizations on the model's graph,
+ * or compilation into the device's native binary.
+ *
+ * The only information that may be unknown to the model at this stage is
+ * the shape of the tensors, which may only be known at execution time.
+ *
+ * @param model The model to be prepared for execution.
+ * @param event A synchronization callback that must be signaled once the
+ * execution has finished.
+ * @return status ErrorStatus::NONE if successful.
+ * @return preparedModel A handle to the resultant prepared model.
+ */
+ prepareModel(Model model, IEvent event)
+ generates (ErrorStatus status, IPreparedModel preparedModel);
- getStatus() generates(DeviceStatus status);
+ /**
+ * Returns the current status of a driver.
+ *
+ * @return status Status of the driver.
+ */
+ getStatus() generates (DeviceStatus status);
};
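Taken together, the documented flow is: query getCapabilities, filter the model through getSupportedOperations, then prepareModel and wait for the event. A client-side sketch, assuming the default service instance and a hypothetical IEvent implementation named SampleEvent:

    // Sketch of the documented IDevice call sequence; error handling elided.
    sp<IDevice> device = IDevice::getService();  // assumed default instance
    device->getCapabilities([](ErrorStatus status, const Capabilities& caps) {
        // inspect caps.supportedOperationTuples, performance numbers, ...
    });
    device->getSupportedOperations(model,
        [](ErrorStatus status, const hidl_vec<bool>& supported) {
            // supported[i] corresponds to model.operations[i]
        });
    sp<SampleEvent> event = new SampleEvent();  // hypothetical IEvent impl
    sp<IPreparedModel> prepared;
    device->prepareModel(model, event,
        [&](ErrorStatus status, const sp<IPreparedModel>& m) { prepared = m; });
    event->wait();  // returns once the driver calls IEvent::notify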
diff --git a/neuralnetworks/1.0/IEvent.hal b/neuralnetworks/1.0/IEvent.hal
index 63afeaf..2ebda58 100644
--- a/neuralnetworks/1.0/IEvent.hal
+++ b/neuralnetworks/1.0/IEvent.hal
@@ -29,21 +29,15 @@
* indicate to the Neuralnetworks runtime whether the computation was
* successful or not, and that the corresponding output is ready to be
* consumed if the execution was successful.
- *
- * TODO: Mention that "notify" is also called by a runtime thread
- * during CPU fallback execution? Depends on whether the HIDL comments
- * are strictly for vendors or not.
*/
interface IEvent {
/**
- * IEvent::notify is called by the server thread (i.e. the thread doing the
- * work) to mark the event as completed so that any threads requiring the
- * corresponding resources can continue executing.
+ * IEvent::notify is called by the server thread (i.e., the thread doing
+ * the work) to mark the event as completed so that any threads requiring
+ * the corresponding output can continue executing.
*
- * @param status Status of the execution associated with the Event.
- * Should be SUCCESS or ERROR.
+ * @param status ErrorStatus::NONE if successful.
*/
- oneway notify(Status status);
-
+ oneway notify(ErrorStatus status);
};
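A minimal sketch of an IEvent implementation that satisfies this contract, built on a plain mutex/condition-variable pair (the VTS Event class updated later in this patch has the same shape; WaitableEvent and its members are illustrative names):

    #include <condition_variable>
    #include <mutex>

    // Sketch: notify() records the status and wakes any thread in wait().
    struct WaitableEvent : public IEvent {
        Return<void> notify(ErrorStatus status) override {
            {
                std::lock_guard<std::mutex> lock(mMutex);
                mStatus = status;
                mNotified = true;
            }
            mCondition.notify_all();
            return Void();
        }
        ErrorStatus wait() {  // blocks until notify() has been called
            std::unique_lock<std::mutex> lock(mMutex);
            mCondition.wait(lock, [this] { return mNotified; });
            return mStatus;
        }
      private:
        std::mutex mMutex;
        std::condition_variable mCondition;
        bool mNotified = false;
        ErrorStatus mStatus = ErrorStatus::GENERAL_FAILURE;
    };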
diff --git a/neuralnetworks/1.0/IPreparedModel.hal b/neuralnetworks/1.0/IPreparedModel.hal
index 428ddc7..a7c3342 100644
--- a/neuralnetworks/1.0/IPreparedModel.hal
+++ b/neuralnetworks/1.0/IPreparedModel.hal
@@ -14,13 +14,33 @@
* limitations under the License.
*/
-/* This HAL is a work in progress */
-
package android.hardware.neuralnetworks@1.0;
import IEvent;
+/**
+ * IPreparedModel describes a model that has been prepared for execution and
+ * is used to launch executions.
+ */
interface IPreparedModel {
- // Multiple threads can call this execute function concurrently.
- execute(Request request, IEvent event) generates(bool success);
+ /**
+ * Spawns an asynchronous execution on a prepared model.
+ *
+ * Executions are asynchronous with respect to the Neuralnetworks runtime.
+ * To support this, IPreparedModel::execute must spawn a new task and return
+ * whether the task was successfully launched. The asynchronous task which
+ * performs the execution must call event's IEvent::notify with the status
+ * of the execution immediately after the execution has finished.
+ *
+ * Multiple threads can call this execute function concurrently.
+ *
+ * @param request The input and output information on which the prepared
+ * model is to be executed.
+ * @param event A callback used for synchronization that must be signaled
+ * once the execution has finished.
+ * @return status ErrorStatus::NONE if the asynchronous task was
+ * successfully launched.
+ */
+ execute(Request request, IEvent event) generates (ErrorStatus status);
};
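The contract separates the launch status from the execution status: execute must return quickly, and the spawned task reports the real outcome through the event. A driver-side sketch, where SamplePreparedModel and its synchronous executeInternal are hypothetical:

    #include <thread>

    // Sketch: asynchronous execute per the contract above.
    Return<ErrorStatus> SamplePreparedModel::execute(const Request& request,
                                                     const sp<IEvent>& event) {
        if (event == nullptr) return ErrorStatus::INVALID_ARGUMENT;
        std::thread([this, request, event] {
            ErrorStatus status = executeInternal(request);  // runs the model
            event->notify(status);  // report the execution status
        }).detach();
        return ErrorStatus::NONE;  // the task was successfully launched
    }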
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 844c44c..39e3d34 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -14,68 +14,945 @@
* limitations under the License.
*/
-/* This HAL is a work in progress */
-
package android.hardware.neuralnetworks@1.0;
-// The types an operand can have.
-// These values are the same as found in the NeuralNetworks.h file.
-// When modifying, be sure to update HAL_NUM_OPERAND_TYPES in HalIntefaces.h.
-enum OperandType : uint32_t {
- FLOAT16 = 0,
- FLOAT32 = 1,
- INT8 = 2,
- UINT8 = 3,
- INT16 = 4,
- UINT16 = 5,
- INT32 = 6,
- UINT32 = 7,
- TENSOR_FLOAT16 = 8,
- TENSOR_FLOAT32 = 9,
- TENSOR_INT32 = 10,
- TENSOR_QUANT8_ASYMM = 11,
+/**
+ * Operand types.
+ *
+ * The type of an operand in a model.
+ *
+ * Types prefaced with TENSOR_* must be used for tensor data (i.e., tensors
+ * with at least one dimension). Types not prefaced by TENSOR_* represent
+ * scalar values and must have no dimensions.
+ */
+enum OperandType : int32_t {
+ /**
+ * The following entries are used to declare scalars.
+ */
+ FLOAT32 = 0,
+ INT32 = 1,
+ UINT32 = 2,
+
+ /**
+ * The following entries are used to declare tensors.
+ */
+ TENSOR_FLOAT32 = 3,
+ TENSOR_INT32 = 4,
+
+ /**
+ * A tensor of 8 bit integers that represent real numbers.
+ *
+ * Attached to this tensor are two numbers that can be used to convert the
+ * 8 bit integer to the real value and vice versa. These two numbers are:
+ * - scale: a 32 bit floating point value
+ * - zero_value: a 32 bit integer
+ *
+ * The formula is:
+ * real_value = (integer_value - zero_value) * scale.
+ */
+ TENSOR_QUANT8_ASYMM = 5,
+
+ /**
+ * The following entries are OEM specific operand types.
+ */
+ OEM = 10000,
+ TENSOR_OEM_BYTE = 10001,
};
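The TENSOR_QUANT8_ASYMM formula above maps directly onto a pair of conversion helpers; a sketch, assuming round-to-nearest and clamping to the 8-bit range:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // real_value = (integer_value - zero_value) * scale, per the comment above.
    float dequantize(uint8_t value, float scale, int32_t zeroValue) {
        return (static_cast<int32_t>(value) - zeroValue) * scale;
    }
    uint8_t quantize(float real, float scale, int32_t zeroValue) {
        int32_t q = static_cast<int32_t>(std::round(real / scale)) + zeroValue;
        return static_cast<uint8_t>(std::min(255, std::max(0, q)));  // clamp
    }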
-// The type of operations. Unlike the operation types found in
-// NeuralNetworks.h file, these specify the data type they operate on.
-// This is done to simplify the work of drivers.
-// TODO: Currently they are the same. Add a conversion when finalizing the model.
-// When modifying, be sure to update HAL_NUM_OPERATION_TYPES in HalIntefaces.h.
-enum OperationType : uint32_t {
- OEM_OPERATION = 0,
- ADD = 1,
- AVERAGE_POOL_2D = 2,
- CONCATENATION = 3,
- CONV_2D = 4,
- DEPTHWISE_CONV_2D = 5,
- DEPTH_TO_SPACE = 6,
- DEQUANTIZE = 7,
- EMBEDDING_LOOKUP = 8,
- FAKE_QUANT = 9,
- FLOOR = 10,
- FULLY_CONNECTED = 11,
- HASHTABLE_LOOKUP = 12,
- L2_NORMALIZATION = 13,
- L2_POOL_2D = 14,
- LOCAL_RESPONSE_NORMALIZATION = 15,
- LOGISTIC = 16,
- LSH_PROJECTION = 17,
- LSTM = 18,
- MAX_POOL_2D = 19,
- MUL = 20,
- RELU = 21,
- RELU1 = 22,
- RELU6 = 23,
- RESHAPE = 24,
- RESIZE_BILINEAR = 25,
- RNN = 26,
- SOFTMAX = 27,
- SPACE_TO_DEPTH = 28,
- SVDF = 29,
- TANH = 30,
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+ /**
+ * Adds two tensors, element-wise.
+ *
+ * Takes two input tensors of identical type and compatible dimensions. The output
+ * is the sum of both input tensors, optionally modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the input operands.
+ * It starts with the trailing dimensions, and works its way forward.
+ *
+ * Example:
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * 0: A tensor.
+ * 1: A tensor of the same type, and compatible dimensions as input0.
+ * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * 0: The sum, a tensor of the same type as input0.
+ */
+ ADD = 0,
+
+ /**
+ * Performs a 2-D average pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and padding.
+ *
+ * The values in the output tensor are computed as:
+ * output[batch, row, col, channel] =
+ * sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1)
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: 4, with "NHWC" data layout.
+ *
+ * Inputs:
+ * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
+ * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
+ * 7: An INT32 value, specifying the filter width.
+ * 8: An INT32 value, specifying the filter height.
+ * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ */
+ AVERAGE_POOL_2D = 1,
+
+ /**
+ * Concatenates the input tensors along the given dimension.
+ *
+ * The input tensors must have identical type and the same dimensions except the
+ * dimension along the concatenation axis.
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * 0 ~ n: The list of n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm]
+ * n+1: An INT32 value, specifying the concatenation axis.
+ * n+2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * 0: The output, a tensor of the same type as the input tensors.
+ *    The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+ */
+ CONCATENATION = 2,
+
+ /**
+ * Performs a 2-D convolution operation.
+ *
+ * The CONV_2D op sweeps a 2-D filter that can mix channels together over a batch of
+ * images, applying the filter to each window of each image of the appropriate size.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and padding.
+ *
+ * The values in the output tensor are computed as:
+ * output[batch, row, col, channel] =
+ * sum_{i, j, k} (
+ * input[batch, row + i, col + j, k] *
+ * filter[channel, i, j, k]
+ * ) + bias[channel]
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: 4, with "NHWC" data layout.
+ *
+ * Inputs:
+ * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+ * specifying the filter.
+ * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}.
+ * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * 4: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * 7: An INT32 value, specifying the output stride in the ‘width’ dimension.
+ * 8: An INT32 value, specifying the output stride in the ‘height’ dimension.
+ * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+ */
+ CONV_2D = 3,
+
+ /**
+ * Performs a depthwise 2-D convolution operation.
+ *
+ * Given an input tensor of shape [batches, height, width, depth_in] and a filter
+ * tensor of shape [depth_out, filter_height, filter_width, depth_in] containing
+ * in_channels convolutional filters of depth 1, DEPTHWISE_CONV applies a different
+ * filter to each input channel (expanding from 1 channel to channel_multiplier channels
+ * for each), then concatenates the results together.
+ *
+ * The output has depth_out = depth_in * depth_multiplier channels.
+ * The output dimensions are functions of the filter dimensions, stride, and padding.
+ *
+ * The values in the output tensor are computed as:
+ * output[b, i, j, k * channel_multiplier + q] =
+ * sum_{di, dj} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ * filter[di, dj, k, q]
+ * )
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: 4, with "NHWC" data layout.
+ *
+ * Inputs:
+ * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+ * specifying the filter.
+ * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}.
+ * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * 4: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * 7: An INT32 value, specifying the output stride in the ‘width’ dimension.
+ * 8: An INT32 value, specifying the output stride in the ‘height’ dimension.
+ * 9: An INT32 value, specifying the depthwise multiplier.
+ * 10: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+ */
+ DEPTHWISE_CONV_2D = 4,
+
+ /**
+ * Rearranges data from depth into blocks of spatial data.
+ *
+ * More specifically, this op outputs a copy of the input tensor where values from
+ * the depth dimension are moved in spatial blocks to the height and width dimensions.
+ * The value block_size indicates the input block size and how the data is moved.
+ *
+ * Chunks of data of size block_size * block_size from depth are rearranged into
+ * non-overlapping blocks of size block_size x block_size.
+ *
+ * The width of the output tensor is input_width * block_size, whereas the height is
+ * input_height * block_size.
+ * The depth of the input tensor must be divisible by block_size * block_size.
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: 4, with "NHWC" data layout.
+ *
+ * Inputs:
+ * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
+ * block_size * block_size must be a divisor of the input depth.
+ *
+ * Outputs:
+ * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size,
+ * depth/(block_size*block_size)].
+ */
+ DEPTH_TO_SPACE = 5,
+
+ /**
+ * Dequantizes the input tensor.
+ *
+ * The formula is:
+ * output = (input - zero_value) * scale.
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * 0: A tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}.
+ *
+ * Outputs:
+ * 0: The output tensor of same shape as input0, but with type
+ *    {@link OperandType::TENSOR_FLOAT32}.
+ */
+ DEQUANTIZE = 6,
+
+ /**
+ * Looks up items from a given tensor.
+ *
+ * Each item in the output is a raw copy of the corresponding item in
+ * the input “values”. If the given “lookup” indices are out of bounds,
+ * the op will fail and an error will be reported.
+ *
+ * Inputs:
+ * * 0: Values. An n-D tensor of any type X (where n >= 2). E.g., if n is 2,
+ * then the shape would be [lookup_dimension, values_dimension], where
+ * “lookup_dimension” corresponds to the indexing dimension in the lookup
+ * table, and “values_dimension” to the contents.
+ * * 1: Lookups. A 1-D tensor of type T, of shape [lookup_size], where
+ * “lookup_size” is the number of elements to look for, and each entry
+ * corresponds to the first dimension of the “values” tensor.
+ *
+ * Output:
+ * * 0: An n-D tensor of type X and the same rank and shape as the “values”
+ * tensor, except for the first dimension which has size “lookup_size”.
+ */
+ EMBEDDING_LOOKUP = 7,
+
+ /**
+ * Computes element-wise floor() on the input tensor.
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * 0: A tensor.
+ *
+ * Outputs:
+ * 0: The output, a tensor of the same type and dimensions as input0.
+ */
+ FLOOR = 8,
+
+ /**
+ * Denotes a fully (densely) connected layer, which connects all elements in the input
+ * tensor with each element in the output tensor.
+ *
+ * This layer implements the operation:
+ * outputs = activation(inputs * weights’ + bias)
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to
+ * a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape
+ * [batch_size, input_size], where “batch_size” corresponds to the batching dimension,
+ * and “input_size” is the size of the input.
+ * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of output nodes.
+ * 2: A 1-D tensor, of shape [num_units], specifying the bias.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+ * should be of {@link OperandType::TENSOR_INT32}.
+ * 3: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * 0: The output tensor, of shape [batch_size, num_units].
+ */
+ FULLY_CONNECTED = 9,
+
+ /**
+ * Looks up values of a hash table with given keys.
+ *
+ * Inputs:
+ * * 0: Lookups. A 1-D int32 tensor with shape [ k ].
+ * * 1: Keys. A 1-D int32 tensor with shape [ n ], *MUST* be sorted in
+ * ascending order.
+ * * 2: Values. A tensor with shape [ n … ].
+ *
+ * Outputs:
+ * * 0: Output. A tensor with shape [ k …].
+ * * 1: Hits. A uint8 tensor with shape [ k ] indicates whether the lookup
+ * hits or not.
+ */
+ HASHTABLE_LOOKUP = 10,
+
+ /**
+ * Applies L2 normalization along the depth dimension.
+ *
+ * The values in the output tensor are computed as:
+ * output[batch, row, col, channel] =
+ * input[batch, row, col, channel] /
+ * sqrt(sum_{c} pow(input[batch, row, col, c], 2))
+ *
+ * For x with more dimensions, independently normalizes each 1-D slice along dimension dim.
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor rank: 4, with "NHWC" data layout.
+ *
+ * Inputs:
+ * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ *
+ * Outputs:
+ * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ */
+ L2_NORMALIZATION = 11,
+
+ /**
+ * Performs a 2-D L2 pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and padding.
+ *
+ * The values in the output tensor are computed as:
+ * output[batch, row, col, channel] =
+ * sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) / sum(1))
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor rank: 4, with "NHWC" data layout.
+ *
+ * Inputs:
+ * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
+ * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
+ * 7: An INT32 value, specifying the filter width.
+ * 8: An INT32 value, specifying the filter height.
+ * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ */
+ L2_POOL_2D = 12,
+
+ /**
+ * Applies Local Response Normalization along the depth dimension.
+ *
+ * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last
+ * dimension), and each vector is normalized independently. Within a given vector,
+ * each component is divided by the weighted, squared sum of inputs within depth_radius.
+ *
+ * In detail:
+ * sqr_sum[a, b, c, d] =
+ * sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2)
+ * output = input / pow((bias + alpha * sqr_sum), beta)
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor rank: 4, with "NHWC" data layout.
+ *
+ * Inputs:
+ * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * 1: An INT32 value, specifying the radius of the normalization window.
+ * 2: A FLOAT32 value, specifying the bias, must not be zero.
+ * 3: A FLOAT32 value, specifying the scale factor, alpha.
+ * 4: A FLOAT32 value, specifying the exponent, beta.
+ *
+ * Outputs:
+ * 0: The output tensor of same shape as input0.
+ */
+ LOCAL_RESPONSE_NORMALIZATION = 13,
+
+ /**
+ * Computes sigmoid activation on the input tensor element-wise.
+ *
+ * In detail:
+ * output = 1 / (1 + exp(-input))
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * 0: A tensor, specifying the input.
+ *
+ * Outputs:
+ * 0: The output tensor of same shape as input0.
+ */
+ LOGISTIC = 14,
+
+ /**
+ * Projects an input to a bit vector via locality-sensitive hashing.
+ *
+ * Inputs:
+ * * 0: Hash functions. Dim.size == 2, DataType: Float.
+ * Tensor[0].Dim[0]: Number of hash functions.
+ * Tensor[0].Dim[1]: Number of seeds per hash function.
+ * Tensor[0].Dim[1] <= 32 in sparse case.
+ *
+ * * 1: Input. Dim.size >= 1, no restriction on DataType.
+ * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
+ * If not set, each input element is considered to have the same weight of
+ * 1.0.
+ * Tensor[1].Dim[0] == Tensor[2].Dim[0]
+ * * 3: Type:
+ * Sparse: Value LSHProjectionType_SPARSE(=1).
+ * Computed bit vector is considered to be sparse.
+ * Each output element is an int32 made up of multiple bits computed from
+ * hash functions.
+ *
+ * Dense: Value LSHProjectionType_DENSE(=2).
+ * Computed bit vector is considered to be dense. Each output element
+ * represents a bit and can take the value of either 0 or 1.
+ *
+ * Outputs:
+ * * 0: If the projection type is sparse:
+ * Output.Dim == { Tensor[0].Dim[0] }
+ * A tensor of int32 that represents hash signatures.
+ * If the projection type is Dense:
+ * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
+ * A flattened tensor that represents projected bit vectors.
+ */
+ LSH_PROJECTION = 15,
+
+ /**
+ * Long short-term memory unit (LSTM) recurrent network layer.
+ *
+ * The default non-peephole implementation is based on:
+ * http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
+ * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
+ * Computation, 9(8):1735-1780, 1997.
+ *
+ * The peephole implementation is based on:
+ * https://research.google.com/pubs/archive/43905.pdf
+ * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
+ * recurrent neural network architectures for large scale acoustic modeling."
+ * INTERSPEECH, 2014.
+ *
+ * The coupling of input and forget gate (CIFG) is based on:
+ * http://arxiv.org/pdf/1503.04069.pdf
+ * Greff et al. "LSTM: A Search Space Odyssey"
+ *
+ * The class has the following independently optional inputs:
+ * * If input gate (if CIFG): “input_to_input_weights”,
+ * “recurrent_to_input_weights”, “cell_to_input_weights”, “input_gate_bias”.
+ * * If no peephole connections: “cell_to_input_weights”,
+ * “cell_to_forget_weights”, “cell_to_output_weights”.
+ * * If no projection layer: “projection_weights” and “projection_bias”.
+ * * If no projection bias: “projection_bias”.
+ *
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: Input.
+ * A 2-D tensor of type T, of shape [batch_size, input_size], where
+ * “batch_size” corresponds to the batching dimension, and “input_size”
+ * is the size of the input.
+ * * 1: input_to_input_weights.
+ * A 2-D tensor of type T, of shape [num_units, input_size], where
+ * “num_units” corresponds to the number of cell units.
+ * * 2: input_to_forget_weights.
+ * A 2-D tensor of type T, of shape [num_units, input_size].
+ * * 3: input_to_cell_weights.
+ * A 2-D tensor of type T, of shape [num_units, input_size].
+ * * 4: input_to_output_weights.
+ * A 2-D tensor of type T, of shape [num_units, input_size].
+ * * 5: recurrent_to_input_weights.
+ * A 2-D tensor of type T, of shape [num_units, output_size], where
+ * “output_size” corresponds to either the number of cell units (i.e.,
+ * “num_units”), or the second dimension of the “projection_weights”, if
+ * defined.
+ * * 6: recurrent_to_forget_weights.
+ * A 2-D tensor of type T, of shape [num_units, output_size].
+ * * 7: recurrent_to_cell_weights.
+ * A 2-D tensor of type T, of shape [num_units, output_size].
+ * * 8: recurrent_to_output_weights.
+ * A 2-D tensor of type T, of shape [num_units, output_size].
+ * * 9: cell_to_input_weights.
+ * A 1-D tensor of type T, of shape [num_units].
+ * * 10:cell_to_forget_weights.
+ * A 1-D tensor of type T, of shape [num_units].
+ * * 11:cell_to_output_weights.
+ * A 1-D tensor of type T, of shape [num_units].
+ * * 12:input_gate_bias.
+ * A 1-D tensor of type T, of shape [num_units].
+ * * 13:forget_gate_bias.
+ * A 1-D tensor of type T, of shape [num_units].
+ * * 14:cell_bias.
+ * A 1-D tensor of type T, of shape [num_units].
+ * * 15:output_gate_bias.
+ * A 1-D tensor of type T, of shape [num_units].
+ * * 16:projection_weights.
+ * A 2-D tensor of type T, of shape [output_size, num_units].
+ * * 17:projection_bias.
+ * A 1-D tensor of type T, of shape [output_size].
+ *
+ * Parameters:
+ * * 18:fused_activation_function.
+ * An (optional) ActivationFunctionType indicating the activation
+ * function.
+ * If “NONE” is specified then it results in a linear activation.
+ * * 19:cell_clip.
+ * A clipping threshold for the cell state, such that values are bound
+ * within [-cell_clip, cell_clip]. If set to 0.0 then clipping is
+ * disabled.
+ * * 20:proj_clip.
+ * A clipping threshold for the output from the projection layer, such
+ * that values are bound within [-proj_clip, proj_clip]. If set to 0.0
+ * then clipping is disabled.
+ *
+ * Outputs:
+ * * 0: scratch_buffer.
+ * A 3-D tensor of type T, of shape [batch_size, num_cell, 4].
+ * * 1: output_state.
+ * A 2-D tensor of type T, of shape [batch_size, output_size].
+ * * 2: cell_state.
+ * A 2-D tensor of type T, of shape [batch_size, num_units].
+ * * 3: output.
+ * A 2-D tensor of type T, of shape [batch_size, output_size]. This is
+ * effectively the same as the current “output_state” value.
+ */
+ LSTM = 16,
+
+ /**
+ * Performs a 2-D max pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and padding.
+ *
+ * The values in the output tensor are computed as:
+ * output[batch, row, col, channel] =
+ * max_{i, j} (input[batch, row + i, col + j, channel])
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: 4, with "NHWC" data layout.
+ *
+ * Inputs:
+ * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+ * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+ * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+ * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+ * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
+ * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
+ * 7: An INT32 value, specifying the filter width.
+ * 8: An INT32 value, specifying the filter height.
+ * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ */
+ MAX_POOL_2D = 17,
+
+ /**
+ * Multiplies two tensors, element-wise.
+ *
+ * Takes two input tensors of identical type and compatible dimensions. The output
+ * is the product of both input tensors, optionally modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the resulting output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way forward.
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * 0: A tensor.
+ * 1: A tensor of the same type, and compatible dimensions as input0.
+ * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+ * Specifies the activation to invoke on the result of each addition.
+ *
+ * Outputs:
+ * 0: The product, a tensor of the same type as input0.
+ */
+ MUL = 18,
+
+ /**
+ * Computes rectified linear activation on the input tensor element-wise.
+ *
+ * In detail:
+ * output = max(0, input)
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * 0: A tensor, specifying the input.
+ *
+ * Outputs:
+ * 0: The output tensor of same shape as input0.
+ */
+ RELU = 19,
+
+ /**
+ * Computes rectified linear 1 activation on the input tensor element-wise.
+ *
+ * In detail:
+ * output = min(1.f, max(-1.f, input))
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * 0: A tensor, specifying the input.
+ *
+ * Outputs:
+ * 0: The output tensor of same shape as input0.
+ */
+ RELU1 = 20,
+
+ /**
+ * Computes rectified linear 6 activation on the input tensor element-wise.
+ *
+ * In detail:
+ * output = min(6, max(0, input))
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * 0: A tensor, specifying the input.
+ *
+ * Outputs:
+ * 0: The output tensor of same shape as input0.
+ */
+ RELU6 = 21,
+
+ /**
+ * Reshapes a tensor.
+ *
+ * Given a tensor, this operation returns a tensor that has the same values,
+ * but with a newly specified shape.
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * 0: A tensor, specifying the tensor to be reshaped.
+ * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32}, defining the shape
+ * of the output tensor. The number of elements implied by shape must be the same
+ * as the number of elements in the input tensor.
+ *
+ * Outputs:
+ * 0: The output tensor, of shape specified by the input shape.
+ */
+ RESHAPE = 22,
+
+ /**
+ * Resizes images to a given size using bilinear interpolation.
+ *
+ * Resized images will be distorted if their original aspect ratio is not the same as the input's.
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor rank: 4, with "NHWC" data layout.
+ *
+ * Inputs:
+ * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+ * 1: An INT32 value, specifying the output width of the output tensor.
+ * 2: An INT32 value, specifying the output height of the output tensor.
+ *
+ * Outputs:
+ * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth].
+ */
+ RESIZE_BILINEAR = 23,
+
+ /**
+ * A basic recurrent neural network layer.
+ *
+ * This layer implements the operation:
+ * outputs = state = activation(inputs * input_weights + state * recurrent_weights + bias)
+ *
+ * Where:
+ * * “input_weights” is a weight matrix that multiplies the inputs;
+ * * “recurrent_weights” is a weight matrix that multiplies the current
+ * “state” which itself is the output from the previous time step
+ * computation;
+ * * “bias” is a bias vector (added to each output vector in the batch);
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: input.
+ * A 2-D tensor of type T, of shape [batch_size, input_size], where
+ * “batch_size” corresponds to the batching dimension, and “input_size” is
+ * the size of the input.
+ * * 1: weights.
+ * A 2-D tensor of type T, of shape [num_units, input_size], where
+ * “num_units” corresponds to the number of units.
+ * * 2: recurrent_weights.
+ * A 2-D tensor of type T, of shape [num_units, num_units], with columns
+ * corresponding to the weights from each unit.
+ * * 3: bias.
+ * A 1-D tensor of type T, of shape [num_units].
+ *
+ * For FLOAT32 input tensor, bias must also be FLOAT32.
+ * For UINT8 input tensor, bias must be INT32.
+ *
+ * Parameters:
+ * * 4: fused_activation_function.
+ * An (optional) ActivationFunctionType indicating the activation
+ * function. If “NONE” is specified then it results in a linear
+ * activation.
+ *
+ * * 5: Hidden state.
+ * A 2-D tensor of type T, of shape [batch_size, num_units].
+ *
+ * Outputs:
+ * * 0: output.
+ * A 2-D tensor of type T, of shape [batch_size, num_units]. This is
+ * effectively the same as the current state value.
+ */
+ RNN = 24,
+
+ /**
+ * Computes the softmax activation on the input tensor element-wise, per batch, by
+ * normalizing the input vector so the maximum coefficient is zero.
+ *
+ * In detail:
+ * output[batch, i] =
+ * exp((input[batch, i] - max(input[batch, :])) * beta) /
+ * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: 2 or 4.
+ *
+ * Inputs:
+ * 0: A 2-D or 4-D tensor, specifying the input.
+ * 1: A FLOAT32 value, specifying the scaling factor for the exponent, beta.
+ *
+ * Outputs:
+ * 0: The output tensor of same shape as input0.
+ */
+ SOFTMAX = 25,
+
+ /**
+ * Rearranges blocks of spatial data into depth.
+ *
+ * More specifically, this op outputs a copy of the input tensor where values from
+ * the height and width dimensions are moved to the depth dimension.
+ * The value block_size indicates the input block size and how the data is moved.
+ *
+ * Chunks of spatial data of size block_size x block_size are rearranged into
+ * non-overlapping chunks along the depth dimension.
+ *
+ * The depth of the output tensor is input_depth * block_size * block_size.
+ * The input tensor's height and width must be divisible by block_size.
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor rank: 4, with "NHWC" data layout.
+ *
+ * Inputs:
+ * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
+ * block_size must be a divisor of both the input height and width.
+ *
+ * Outputs:
+ * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size,
+ * depth*block_size*block_size].
+ */
+ SPACE_TO_DEPTH = 26,
+
+ /**
+ * SVDF op is a kind of stateful layer derived from the notion that a
+ * densely connected layer that's processing a sequence of input frames can
+ * be approximated by using a singular value decomposition of each of its
+ * nodes. The implementation is based on:
+ *
+ * https://research.google.com/pubs/archive/43813.pdf
+ *
+ * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
+ * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
+ * INTERSPEECH, 2015.
+ *
+ * It processes the incoming input using a 2-stage filtering mechanism:
+ * * stage 1 performs filtering on the "features" dimension, whose outputs get
+ * pushed into a memory of fixed-size memory_size.
+ * * stage 2 performs filtering on the "time" dimension of the memory_size
+ * memoized outputs of stage 1.
+ *
+ * Specifically, for rank 1, this layer implements the operation:
+ *
+ * memory = push(conv1d(inputs, weights_feature, feature_dim, "VALID"));
+ * outputs = activation(memory * weights_time + bias);
+ *
+ * Where:
+ * * “weights_feature” is a weights matrix that processes the inputs (by
+ * convolving the input with every “feature filter”), and whose outputs get
+ * pushed, stacked in order, into the fixed-size “memory” (the oldest entry
+ * gets dropped);
+ * * “weights_time” is a weights matrix that processes the “memory” (by a
+ * batched matrix multiplication on the num_units);
+ * * “bias” is an optional bias vector (added to each output vector in the
+ * batch); and
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * Each rank adds a dimension to the weights matrices by means of stacking
+ * the filters.
+ *
+ * Supported tensor types:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: input.
+ * A 2-D tensor of type T, of shape [batch_size, input_size], where
+ * “batch_size” corresponds to the batching dimension, and “input_size” is
+ * the size of the input.
+ * * 1: weights_feature.
+ * A 2-D tensor of type T, of shape [num_units, input_size], where
+ * “num_units” corresponds to the number of units.
+ * * 2: weights_time.
+ * A 2-D tensor of type T, of shape [num_units, memory_size], where
+ * “memory_size” corresponds to the fixed-size of the memory.
+ * * 3: bias.
+ * An optional 1-D tensor of type T, of shape [num_units].
+ *
+ * For FLOAT32 input tensor, bias must also be FLOAT32.
+ * For UINT8 input tensor, bias must be INT32.
+ *
+ * Parameters:
+ * * 4: rank.
+ * The rank of the SVD approximation.
+ * * 5: fused_activation_function.
+ * An (optional) ActivationFunctionType indicating the activation function.
+ * If “NONE” is specified then it results in a linear activation.
+ *
+ * Outputs:
+ * * 0: state.
+ * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank].
+ * * 1: output.
+ * A 2-D tensor of type T, of shape [batch_size, num_units].
+ */
+ SVDF = 27,
+
+ /**
+ * Computes hyperbolic tangent of input tensor element-wise.
+ *
+ * In detail:
+ * output = tanh(input)
+ *
+ * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * 0: A tensor, specifying the input.
+ *
+ * Outputs:
+ * 0: The output tensor of same shape as input0.
+ */
+ TANH = 28,
+
+ /**
+ * OEM specific operation.
+ *
+ * This operation is OEM specific. It should only be used for OEM applications.
+ */
+ OEM_OPERATION = 10000,
};
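ADD and MUL share the same broadcast rule: align trailing dimensions, and each aligned pair must be equal or contain a 1. A sketch of the resulting shape computation:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Broadcast output shape per the ADD/MUL rule above; empty on mismatch.
    std::vector<uint32_t> broadcastShape(const std::vector<uint32_t>& a,
                                         const std::vector<uint32_t>& b) {
        size_t rank = std::max(a.size(), b.size());
        std::vector<uint32_t> out(rank);
        for (size_t i = 0; i < rank; ++i) {  // walk from trailing dimension
            uint32_t da = i < a.size() ? a[a.size() - 1 - i] : 1;
            uint32_t db = i < b.size() ? b[b.size() - 1 - i] : 1;
            if (da != db && da != 1 && db != 1) return {};  // incompatible
            out[rank - 1 - i] = std::max(da, db);
        }
        return out;  // e.g. {4,1,2} and {5,4,3,1} yield {5,4,3,2}
    }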
-// Fused activation functions
+/**
+ * Fused activation function types.
+ */
enum FusedActivationFunc : int32_t {
NONE = 0,
RELU = 1,
@@ -83,134 +960,313 @@
RELU6 = 3,
};
-// How an operand is used.
-enum OperandLifeTime : uint32_t {
- // The operand is internal to the model. It's created by an operation
- // and consumed by other operations.
+/**
+ * How an operand is used.
+ */
+enum OperandLifeTime : int32_t {
+ /**
+ * The operand is internal to the model. It's created by an operation
+ * and consumed by other operations.
+ */
TEMPORARY_VARIABLE,
- // The operand is an input of the model. An operand can't be both
- // input and output of a model.
+
+ /**
+ * The operand is an input of the model. An operand can't be both
+ * input and output of a model.
+ */
MODEL_INPUT,
- // The operand is an output of the model.
+
+ /**
+ * The operand is an output of the model.
+ */
MODEL_OUTPUT,
- // The operand is a constant found in Model.operandValues.
+
+ /**
+ * The operand is a constant found in Model.operandValues.
+ */
CONSTANT_COPY,
- // The operand is a constant that was specified via a Memory object.
- CONSTANT_REFERENCE
+
+ /**
+ * The operand is a constant that was specified via a Memory object.
+ */
+ CONSTANT_REFERENCE,
};
-// Status of a device.
-enum DeviceStatus : uint32_t {
+/**
+ * Status of a device.
+ */
+enum DeviceStatus : int32_t {
AVAILABLE,
BUSY,
OFFLINE,
- UNKNOWN // Do we need this?
+ UNKNOWN,
};
-// For the reference workload
-// Used by a driver to report its performance characteristics.
-// TODO revisit the data types and scales.
-struct PerformanceInfo {
- float execTime; // in nanoseconds
- float powerUsage; // in picoJoules
-};
-
+/**
+ * A typed operation.
+ */
struct OperationTuple {
- // The type of operation.
+ /**
+ * The type of operation.
+ */
OperationType operationType;
- // The input data type of operation.
+
+ /**
+ * The input data type of operation.
+ */
OperandType operandType;
};
-// The capabilities of a driver.
+/**
+ * Performance information for the reference workload.
+ *
+ * Used by a driver to report its performance characteristics.
+ */
+struct PerformanceInfo {
+ /**
+ * Execution time in nanoseconds.
+ */
+ float execTime;
+
+ /**
+ * Power usage in picoJoules.
+ */
+ float powerUsage;
+};
+
+/**
+ * The capabilities of a driver.
+ */
struct Capabilities {
+ /**
+ * A collection of typed operations supported by the driver.
+ */
vec<OperationTuple> supportedOperationTuples;
- // TODO Do the same for baseline model IDs
+
+ /**
+ * Indicates whether a driver caches its prepared model for reuse the next
+ * time the application begins. This is useful because the model may have
+ * been prepared in a previous run.
+ *
+ * True if caching is supported, false otherwise.
+ */
bool cachesCompilation;
- // TODO revisit the data types and scales.
- float bootupTime; // in nanoseconds
- PerformanceInfo float16Performance;
+
+ /**
+ * Driver performance when operating on float32 data.
+ */
PerformanceInfo float32Performance;
+
+ /**
+ * Driver performance when operating on asymmetric 8-bit quantized data.
+ */
PerformanceInfo quantized8Performance;
};
-// Describes the location of a data object.
+/**
+ * Describes the location of a data object.
+ */
struct DataLocation {
- // The index of the memory pool where this location is found.
- // Two special values can also be used. See the LOCATION_* constants above.
+ /**
+ * The index of the memory pool where this location is found.
+ */
uint32_t poolIndex;
- // Offset in bytes from the start of the pool.
+
+ /**
+ * Offset in bytes from the start of the pool.
+ */
uint32_t offset;
- // The length of the data, in bytes.
+
+ /**
+ * The length of the data in bytes.
+ */
uint32_t length;
};
+/**
+ * Describes one operand of the model's graph.
+ */
struct Operand {
+ /**
+ * Data type of the operand.
+ */
OperandType type;
+
+ /**
+ * Dimensions of the operand.
+ */
vec<uint32_t> dimensions;
- // The number of operations that uses this operand as input.
- // TODO It would be nice to track the actual consumers, e.g. vec<uint32_t> consumers;
+ /**
+ * The number of operations that use this operand as input.
+ */
uint32_t numberOfConsumers;
+ /**
+ * Quantized scale of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ */
float scale;
+
+ /**
+ * Quantized zero-point offset of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ */
int32_t zeroPoint;
- // How the operand is used.
+ /**
+ * How the operand is used.
+ */
OperandLifeTime lifetime;
- // Where to find the data for this operand.
- // If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, or MODEL_OUTPUT:
- // - All the fields will be 0.
- // If the lifetime is CONSTANT_COPY:
- // - location.poolIndex is 0.
- // - location.offset is the offset in bytes into Model.operandValues.
- // - location.length is set.
- // If the lifetime is CONSTANT_REFERENCE:
- // - location.poolIndex is set.
- // - location.offset is the offset in bytes into the specified pool.
- // - location.length is set.
+ /**
+ * Where to find the data for this operand.
+ * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, or MODEL_OUTPUT:
+ * - All the fields will be 0.
+ * If the lifetime is CONSTANT_COPY:
+ * - location.poolIndex is 0.
+ * - location.offset is the offset in bytes into Model.operandValues.
+ * - location.length is set.
+ * If the lifetime is CONSTANT_REFERENCE:
+ * - location.poolIndex is set.
+ * - location.offset is the offset in bytes into the specified pool.
+ * - location.length is set.
+ */
DataLocation location;
};
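The lifetime rules above fully determine where an operand's bytes live. A sketch of a reader that follows them, assuming the caller has already mapped each entry of Model.pools to a base address in mappedPools:

    // Sketch: locate constant operand data per the lifetime rules above.
    const uint8_t* getConstantData(const Operand& operand, const Model& model,
                                   const std::vector<const uint8_t*>& mappedPools) {
        const DataLocation& loc = operand.location;
        switch (operand.lifetime) {
            case OperandLifeTime::CONSTANT_COPY:
                return model.operandValues.data() + loc.offset;
            case OperandLifeTime::CONSTANT_REFERENCE:
                return mappedPools[loc.poolIndex] + loc.offset;
            default:
                return nullptr;  // inputs/outputs/temporaries carry no static data
        }
    }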
-// Describes one operation of the graph.
+/**
+ * Describes one operation of the model's graph.
+ */
struct Operation {
- // The tuple describing the operation type and input type.
+ /**
+ * The tuple describing the operation type and input type.
+ */
OperationTuple opTuple;
- // Describes the table that contains the indexes of the inputs of the
- // operation. The offset is the index in the operandIndexes table.
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
vec<uint32_t> inputs;
- // Describes the table that contains the indexes of the outputs of the
- // operation. The offset is the index in the operandIndexes table.
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
vec<uint32_t> outputs;
};
+/**
+ * A Neural Network Model.
+ *
+ * This includes not only the execution graph, but also constant data such as
+ * weights or scalars added at construction time. The only information that
+ * might not be known is the shape of the input tensors.
+ */
struct Model {
+ /**
+ * All operands included in the model.
+ */
vec<Operand> operands;
+
+ /**
+ * All operations included in the model.
+ *
+ * The operations are sorted into execution order.
+ */
vec<Operation> operations;
+
+ /**
+ * Input indexes of the model.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
vec<uint32_t> inputIndexes;
+
+ /**
+ * Output indexes of the model.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
vec<uint32_t> outputIndexes;
+
+ /**
+ * A byte buffer containing operand data that were copied into the model.
+ */
vec<uint8_t> operandValues;
+
+ /**
+ * A collection of shared memory pools containing operand data that were
+ * registered by the model.
+ */
vec<memory> pools;
};
+/**
+ * Metadata information specifying the location of the input or output data and
+ * any updates to the input or output operand.
+ */
struct RequestArgument {
- // The location within one of the memory pools
+ /**
+ * The location within one of the memory pools passed in the Request.
+ */
DataLocation location;
- // If dimensions.size() > 0, dimension information was provided along with the
- // argument. This can be the case for models that accept inputs of varying size.
- // This can't change the rank, just the value of the dimensions that were
- // unspecified in the model.
+
+ /**
+ * Updated dimension information.
+ *
+ * If dimensions.size() > 0, dimension information was provided along with the
+ * argument. This can be the case for models that accept inputs of varying size.
+ * This can't change the rank, only the values of dimensions that were
+ * left unspecified in the model.
+ */
vec<uint32_t> dimensions;
};
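The GeneratedTestHarness.cpp introduced below packs all arguments back to back in a single pool; here is the same offset bookkeeping in isolation, as a sketch of one possible layout (the HAL does not mandate any particular packing):

#include <vector>
#include <android/hardware/neuralnetworks/1.0/types.h>

using ::android::hardware::neuralnetworks::V1_0::RequestArgument;

// Lay arguments out contiguously in pool 0. Each location.length must
// already be set; offsets then become a running sum, exactly as in the
// test harness below.
void packSequentially(std::vector<RequestArgument>* args) {
    uint32_t offset = 0;
    for (RequestArgument& arg : *args) {
        arg.location.poolIndex = 0;
        arg.location.offset = offset;
        offset += arg.location.length;
    }
}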
+/**
+ * Inputs to be sent to and outputs to be retrieved from a prepared model.
+ *
+ * A Request serves two primary purposes:
+ * 1) Provides the input and output data to be used when executing the model.
+ * 2) Specifies any updates to the input operand metadata that were left
+ * unspecified at model preparation time.
+ */
struct Request {
+ /**
+ * Input data and information to be used in the execution of a prepared
+ * model.
+ *
+ * The index of the input corresponds to the index in Model.inputIndexes.
+ * E.g., input[i] corresponds to Model.inputIndexes[i].
+ */
vec<RequestArgument> inputs;
+
+ /**
+ * Output data and information to be used in the execution of a prepared
+ * model.
+ *
+ * The index of the output corresponds to the index in Model.outputIndexes.
+ * E.g., output[i] corresponds to Model.outputIndexes[i].
+ */
vec<RequestArgument> outputs;
+
+ /**
+ * A collection of shared memory pools containing operand data for both the
+ * inputs and the outputs to a model.
+ */
vec<memory> pools;
};
-enum Status : uint32_t {
- SUCCESS,
- ERROR,
+/**
+ * Return status of a function.
+ */
+enum ErrorStatus : int32_t {
+ NONE,
+ DEVICE_UNAVAILABLE,
+ GENERAL_FAILURE,
+ OUTPUT_INSUFFICIENT_SIZE,
+ INVALID_ARGUMENT,
};
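Unlike the binary Status it replaces, ErrorStatus lets an implementation say why something failed; a hypothetical driver-side sketch of choosing a value:

#include <android/hardware/neuralnetworks/1.0/types.h>

using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;

// Hypothetical check before writing an output operand: a too-small output
// buffer is reported distinctly so the caller can resize and retry.
ErrorStatus checkOutputBuffer(uint32_t providedBytes, uint32_t requiredBytes) {
    return providedBytes < requiredBytes ? ErrorStatus::OUTPUT_INSUFFICIENT_SIZE
                                         : ErrorStatus::NONE;
}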
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index 1efff0e..2318430 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -18,6 +18,7 @@
name: "VtsHalNeuralnetworksV1_0TargetTest",
srcs: [
"Event.cpp",
+ "GeneratedTestHarness.cpp",
"VtsHalNeuralnetworksV1_0TargetTest.cpp",
],
defaults: ["VtsHalTargetTestDefaults"],
@@ -27,4 +28,8 @@
"android.hidl.memory@1.0",
"libhidlmemory",
],
+ header_libs: [
+ "libneuralnetworks_generated_test_harness_headers",
+ "libneuralnetworks_generated_tests",
+ ],
}
diff --git a/neuralnetworks/1.0/vts/functional/Event.cpp b/neuralnetworks/1.0/vts/functional/Event.cpp
index 67de4f5..efaacb3 100644
--- a/neuralnetworks/1.0/vts/functional/Event.cpp
+++ b/neuralnetworks/1.0/vts/functional/Event.cpp
@@ -21,10 +21,10 @@
// thread::join failed: Resource deadlock would occur
}
-Return<void> Event::notify(ReturnedStatus status) {
+Return<void> Event::notify(ErrorStatus status) {
{
std::lock_guard<std::mutex> lock(mMutex);
- mStatus = status == ReturnedStatus::SUCCESS ? Status::SUCCESS : Status::ERROR;
+ mStatus = status == ErrorStatus::NONE ? Status::SUCCESS : Status::ERROR;
if (mStatus == Status::SUCCESS && mCallback != nullptr) {
bool success = mCallback();
if (!success) {
diff --git a/neuralnetworks/1.0/vts/functional/Event.h b/neuralnetworks/1.0/vts/functional/Event.h
index 4f7f2f6..7dd4070 100644
--- a/neuralnetworks/1.0/vts/functional/Event.h
+++ b/neuralnetworks/1.0/vts/functional/Event.h
@@ -24,8 +24,6 @@
using ::android::hardware::Void;
using ::android::sp;
-using ReturnedStatus = ::android::hardware::neuralnetworks::V1_0::Status;
-
/**
* The Event class is used internally by the Neuralnetworks runtime to
* synchronize between different threads. An asynchronous task is launched
@@ -77,9 +75,9 @@
*
* IEvent::notify can be called at most once on a given event.
*
- * @param neuralnetworks::V1_0::Status SUCCESS or ERROR
+ * @param status neuralnetworks::V1_0::ErrorStatus; ErrorStatus::NONE on success
*/
- Return<void> notify(ReturnedStatus status) override;
+ Return<void> notify(ErrorStatus status) override;
/**
* Event::poll returns the current status of the event.
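Under the hood this is an ordinary condition-variable rendezvous; a self-contained analog of what Event provides (simplified — the real class also offers poll and an optional callback):

#include <condition_variable>
#include <mutex>

// Simplified stand-in for the VTS Event class: one-shot notify + blocking
// wait, which is all the tests in this patch rely on.
class SimpleEvent {
  public:
    void notify(bool success) {
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mSuccess = success;
            mNotified = true;
        }
        mCondition.notify_all();
    }
    bool wait() {  // returns true if notify() reported success
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mNotified; });
        return mSuccess;
    }
  private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    bool mNotified = false;
    bool mSuccess = false;
};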
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
new file mode 100644
index 0000000..db90ac2
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Event.h"
+#include "TestHarness.h"
+#include "VtsHalNeuralnetworksV1_0TargetTest.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+// allocator helper
+hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem");
+
+namespace generated_tests {
+using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
+using ::generated_tests::filter;
+using ::generated_tests::for_all;
+using ::generated_tests::for_each;
+using ::generated_tests::resize_accordingly;
+using ::generated_tests::MixedTyped;
+using ::generated_tests::MixedTypedExampleType;
+using ::generated_tests::Float32Operands;
+using ::generated_tests::Int32Operands;
+using ::generated_tests::Quant8Operands;
+// Top level driver for models and examples generated by test_generator.py
+// Test driver for those generated from ml/nn/runtime/test/spec
+void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
+ std::function<bool(int)> is_ignored,
+ const std::vector<MixedTypedExampleType>& examples) {
+ Model model = create_model();
+ sp<IPreparedModel> preparedModel;
+ sp<Event> preparationEvent = new Event();
+ ASSERT_NE(nullptr, preparationEvent.get());
+ Return<void> prepareRet = device->prepareModel(
+ model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ preparedModel = prepared;
+ });
+ ASSERT_TRUE(prepareRet.isOk());
+ ASSERT_NE(nullptr, preparedModel.get());
+ Event::Status preparationStatus = preparationEvent->wait();
+ EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);
+
+ const uint32_t INPUT = 0;
+ const uint32_t OUTPUT = 1;
+
+ int example_no = 1;
+ for (auto& example : examples) {
+ SCOPED_TRACE(example_no++);
+
+ const MixedTyped& inputs = example.first;
+ const MixedTyped& golden = example.second;
+
+ std::vector<RequestArgument> inputs_info, outputs_info;
+ uint32_t inputSize = 0, outputSize = 0;
+
+ // This function only partially specifies the metadata (vector of RequestArguments).
+ // The contents are copied over below.
+ for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+ if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+ RequestArgument arg = {
+ .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
+ };
+ inputs_info[index] = arg;
+ inputSize += s;
+ });
+ // Compute the offset of each input: arguments are packed back to back
+ {
+ size_t offset = 0;
+ for (auto& i : inputs_info) {
+ i.location.offset = offset;
+ offset += i.location.length;
+ }
+ }
+
+ MixedTyped test; // holding test results
+
+ // Go through all outputs, initialize RequestArgument descriptors
+ resize_accordingly<float>(golden, test);
+ resize_accordingly<int32_t>(golden, test);
+ resize_accordingly<uint8_t>(golden, test);
+ for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
+ if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+ RequestArgument arg = {
+ .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+ .dimensions = {},
+ };
+ outputs_info[index] = arg;
+ outputSize += s;
+ });
+ // Compute the offset of each output: arguments are packed back to back
+ {
+ size_t offset = 0;
+ for (auto& i : outputs_info) {
+ i.location.offset = offset;
+ offset += i.location.length;
+ }
+ }
+ std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
+ allocateSharedMemory(outputSize)};
+ ASSERT_NE(0ull, pools[INPUT].size());
+ ASSERT_NE(0ull, pools[OUTPUT].size());
+
+ // load data
+ sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+ sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
+ ASSERT_NE(nullptr, inputMemory.get());
+ ASSERT_NE(nullptr, outputMemory.get());
+ char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+ char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
+ ASSERT_NE(nullptr, inputPtr);
+ ASSERT_NE(nullptr, outputPtr);
+ inputMemory->update();
+ outputMemory->update();
+
+ // Go through all inputs, copy the values
+ for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+ char* begin = (char*)p;
+ char* end = begin + s;
+ // TODO: handle more than one input
+ std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+ });
+
+ inputMemory->commit();
+ outputMemory->commit();
+ // execute request
+ sp<Event> executionEvent = new Event();
+ ASSERT_NE(nullptr, executionEvent.get());
+ Return<ErrorStatus> executeStatus = preparedModel->execute(
+ {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionEvent);
+ ASSERT_TRUE(executeStatus.isOk());
+ EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
+ Event::Status eventStatus = executionEvent->wait();
+ EXPECT_EQ(Event::Status::SUCCESS, eventStatus);
+
+ // validate results
+ outputMemory->read();
+// Copy each typed output region from the mapped output pool back into 'test'
+#define COPY_BACK(ty) \
+ for_each<ty>(test, [&outputs_info, outputPtr](int index, std::vector<ty>& m) { \
+ RequestArgument& i = outputs_info[index]; \
+ ASSERT_EQ(m.size(), i.location.length / sizeof(ty)); \
+ char* begin = outputPtr + i.location.offset; \
+ memcpy(m.data(), begin, i.location.length); \
+ });
+ COPY_BACK(float);
+ COPY_BACK(int32_t);
+ COPY_BACK(uint8_t);
+#undef COPY_BACK
+ outputMemory->commit();
+ // Filter out don't cares
+ MixedTyped filtered_golden;
+ MixedTyped filtered_test;
+ filter<float>(golden, &filtered_golden, is_ignored);
+ filter<float>(test, &filtered_test, is_ignored);
+ filter<int32_t>(golden, &filtered_golden, is_ignored);
+ filter<int32_t>(test, &filtered_test, is_ignored);
+ filter<uint8_t>(golden, &filtered_golden, is_ignored);
+ filter<uint8_t>(test, &filtered_test, is_ignored);
+
+ // We want "close-enough" results for float
+ for_each<float>(filtered_golden, [&filtered_test](int index, auto& golden_float) {
+ auto& test_float_operands = std::get<Float32Operands>(filtered_test);
+ auto& test_float = test_float_operands[index];
+ for (unsigned int i = 0; i < golden_float.size(); i++) {
+ SCOPED_TRACE(i);
+ EXPECT_NEAR(golden_float[i], test_float[i], 1.e-5);
+ }
+ });
+ EXPECT_EQ(std::get<Int32Operands>(filtered_golden), std::get<Int32Operands>(filtered_test));
+ EXPECT_EQ(std::get<Quant8Operands>(filtered_golden),
+ std::get<Quant8Operands>(filtered_test));
+ }
+}
+
+} // namespace generated_tests
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_0
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
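Each test pulled in through all_generated_vts_tests.cpp (included further down) is expected to call this driver roughly as follows; the add:: names are only illustrative of what test_generator.py emits:

// Illustrative shape of one generated test; createTestModel, is_ignored,
// and examples are assumed to come from the generated sources.
TEST_F(NeuralnetworksHidlTest, add) {
    generated_tests::Execute(device,
                             add::createTestModel,  // Model factory
                             add::is_ignored,       // filters don't-care outputs
                             add::examples);        // (input, golden) pairs
}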
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
index 90ccd06..453e3e5 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -16,12 +16,13 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include "Event.h"
#include "VtsHalNeuralnetworksV1_0TargetTest.h"
+#include "Event.h"
+#include "TestHarness.h"
+
#include <android-base/logging.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
-#include <string>
namespace android {
namespace hardware {
@@ -31,6 +32,11 @@
namespace functional {
using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
+using ::generated_tests::MixedTypedExampleType;
+namespace generated_tests {
+extern void Execute(const sp<IDevice>&, std::function<Model(void)>, std::function<bool(int)>,
+ const std::vector<MixedTypedExampleType>&);
+}
// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
@@ -64,24 +70,24 @@
// status test
TEST_F(NeuralnetworksHidlTest, StatusTest) {
- DeviceStatus status = device->getStatus();
- EXPECT_EQ(DeviceStatus::AVAILABLE, status);
+ Return<DeviceStatus> status = device->getStatus();
+ ASSERT_TRUE(status.isOk());
+ EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}
// initialization
-TEST_F(NeuralnetworksHidlTest, InitializeTest) {
- Return<void> ret = device->initialize([](const Capabilities& capabilities) {
- EXPECT_NE(nullptr, capabilities.supportedOperationTuples.data());
- EXPECT_NE(0ull, capabilities.supportedOperationTuples.size());
- EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
- EXPECT_LT(0.0f, capabilities.bootupTime);
- EXPECT_LT(0.0f, capabilities.float16Performance.execTime);
- EXPECT_LT(0.0f, capabilities.float16Performance.powerUsage);
- EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
- EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
- EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
- EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
- });
+TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+ Return<void> ret =
+ device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ EXPECT_NE(nullptr, capabilities.supportedOperationTuples.data());
+ EXPECT_NE(0ull, capabilities.supportedOperationTuples.size());
+ EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
+ EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
+ EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
+ EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
+ EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
+ });
EXPECT_TRUE(ret.isOk());
}
@@ -104,9 +110,7 @@
.scale = 0.0f,
.zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0,
- .offset = 0,
- .length = 0},
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
},
{
.type = OperandType::TENSOR_FLOAT32,
@@ -115,9 +119,7 @@
.scale = 0.0f,
.zeroPoint = 0,
.lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0,
- .offset = 0,
- .length = size},
+ .location = {.poolIndex = 0, .offset = 0, .length = size},
},
{
.type = OperandType::INT32,
@@ -126,9 +128,7 @@
.scale = 0.0f,
.zeroPoint = 0,
.lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0,
- .offset = size,
- .length = sizeof(int32_t)},
+ .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)},
},
{
.type = OperandType::TENSOR_FLOAT32,
@@ -137,9 +137,7 @@
.scale = 0.0f,
.zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0,
- .offset = 0,
- .length = 0},
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
},
};
@@ -169,6 +167,7 @@
.pools = pools,
};
}
+} // anonymous namespace
// allocator helper
hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
@@ -189,16 +188,16 @@
return memory;
}
-} // anonymous namespace
// supported subgraph test
-TEST_F(NeuralnetworksHidlTest, SupportedSubgraphTest) {
+TEST_F(NeuralnetworksHidlTest, SupportedOperationsTest) {
Model model = createTestModel();
- std::vector<bool> supported;
- Return<void> ret = device->getSupportedSubgraph(
- model, [&](const hidl_vec<bool>& hidl_supported) { supported = hidl_supported; });
- ASSERT_TRUE(ret.isOk());
- EXPECT_EQ(/*model.operations.size()*/ 0ull, supported.size());
+ Return<void> ret = device->getSupportedOperations(
+ model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ EXPECT_EQ(model.operations.size(), supported.size());
+ });
+ EXPECT_TRUE(ret.isOk());
}
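The execute() path rewritten in the next hunk returns Return<ErrorStatus>, a single object carrying both a transport status and the HAL's own code; condensed, the unwrap performed there looks like this (request and event stand in for the locals the test builds):

// isOk() reports binder/transport health; only then is the cast to the
// HAL-level ErrorStatus meaningful.
Return<ErrorStatus> ret = preparedModel->execute(request, event);
ASSERT_TRUE(ret.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(ret));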
// execute simple graph
@@ -209,10 +208,20 @@
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
- // prpeare request
+ // prepare request
Model model = createTestModel();
- sp<IPreparedModel> preparedModel = device->prepareModel(model);
+ sp<IPreparedModel> preparedModel;
+ sp<Event> preparationEvent = new Event();
+ ASSERT_NE(nullptr, preparationEvent.get());
+ Return<void> prepareRet = device->prepareModel(
+ model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ preparedModel = prepared;
+ });
+ ASSERT_TRUE(prepareRet.isOk());
ASSERT_NE(nullptr, preparedModel.get());
+ Event::Status preparationStatus = preparationEvent->wait();
+ EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);
// prepare inputs
uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
@@ -245,13 +254,14 @@
outputMemory->commit();
// execute request
- sp<Event> event = sp<Event>(new Event());
- ASSERT_NE(nullptr, event.get());
- bool success = preparedModel->execute({.inputs = inputs, .outputs = outputs, .pools = pools},
- event);
- EXPECT_TRUE(success);
- Event::Status status = event->wait();
- EXPECT_EQ(Event::Status::SUCCESS, status);
+ sp<Event> executionEvent = new Event();
+ ASSERT_NE(nullptr, executionEvent.get());
+ Return<ErrorStatus> executeStatus = preparedModel->execute(
+ {.inputs = inputs, .outputs = outputs, .pools = pools}, executionEvent);
+ ASSERT_TRUE(executeStatus.isOk());
+ EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
+ Event::Status eventStatus = executionEvent->wait();
+ EXPECT_EQ(Event::Status::SUCCESS, eventStatus);
// validate results { 1+5, 2+6, 3+7, 4+8 }
outputMemory->read();
@@ -260,8 +270,15 @@
EXPECT_EQ(expectedData, outputData);
}
+// Mixed-typed examples
+typedef MixedTypedExampleType MixedTypedExample;
+
+// in frameworks/ml/nn/runtime/tests/generated/
+#include "all_generated_vts_tests.cpp"
+
// TODO: Add tests for execution failure, or wait_for/wait_until timeout.
-// Discussion: https://googleplex-android-review.git.corp.google.com/#/c/platform/hardware/interfaces/+/2654636/5/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp@222
+// Discussion:
+// https://googleplex-android-review.git.corp.google.com/#/c/platform/hardware/interfaces/+/2654636/5/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp@222
} // namespace functional
} // namespace vts
diff --git a/power/1.1/default/Android.bp b/power/1.1/default/Android.bp
deleted file mode 100644
index 0b3598b..0000000
--- a/power/1.1/default/Android.bp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-cc_binary {
- proprietary: true,
- defaults: ["hidl_defaults"],
- relative_install_path: "hw",
- name: "android.hardware.power@1.1-service",
- init_rc: ["android.hardware.power@1.1-service.rc"],
- srcs: ["service.cpp" , "Power.cpp"],
-
- shared_libs: [
- "liblog",
- "libdl",
- "libutils",
- "libhardware",
- "libhidlbase",
- "libhidltransport",
- "android.hardware.power@1.0",
- "android.hardware.power@1.1",
- ],
-}
diff --git a/power/1.1/default/Power.cpp b/power/1.1/default/Power.cpp
deleted file mode 100644
index b5d0c84..0000000
--- a/power/1.1/default/Power.cpp
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "android.hardware.power@1.1-impl"
-
-#include <log/log.h>
-
-#include <hardware/hardware.h>
-#include <hardware/power.h>
-
-#include "Power.h"
-
-namespace android {
-namespace hardware {
-namespace power {
-namespace V1_1 {
-namespace implementation {
-
-using ::android::hardware::power::V1_0::Feature;
-using ::android::hardware::power::V1_0::PowerHint;
-using ::android::hardware::power::V1_0::PowerStatePlatformSleepState;
-using ::android::hardware::power::V1_0::Status;
-using ::android::hardware::power::V1_1::PowerStateSubsystem;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-
-Power::Power(power_module_t *module) : mModule(module) {
- if (mModule)
- mModule->init(mModule);
-}
-
-Power::~Power() {
- delete(mModule);
-}
-
-// Methods from ::android::hardware::power::V1_0::IPower follow.
-Return<void> Power::setInteractive(bool interactive) {
- if (mModule->setInteractive)
- mModule->setInteractive(mModule, interactive ? 1 : 0);
- return Void();
-}
-
-Return<void> Power::powerHint(PowerHint hint, int32_t data) {
- int32_t param = data;
- if (mModule->powerHint) {
- if (data)
- mModule->powerHint(mModule, static_cast<power_hint_t>(hint), &param);
- else
- mModule->powerHint(mModule, static_cast<power_hint_t>(hint), NULL);
- }
- return Void();
-}
-
-Return<void> Power::setFeature(Feature feature, bool activate) {
- if (mModule->setFeature)
- mModule->setFeature(mModule, static_cast<feature_t>(feature),
- activate ? 1 : 0);
- return Void();
-}
-
-Return<void> Power::getPlatformLowPowerStats(getPlatformLowPowerStats_cb _hidl_cb) {
- hidl_vec<PowerStatePlatformSleepState> states;
- ssize_t number_platform_modes;
- size_t *voters = nullptr;
- power_state_platform_sleep_state_t *legacy_states = nullptr;
- int ret;
-
- if (mModule->get_number_of_platform_modes == nullptr ||
- mModule->get_voter_list == nullptr ||
- mModule->get_platform_low_power_stats == nullptr)
- {
- _hidl_cb(states, Status::SUCCESS);
- return Void();
- }
-
- number_platform_modes = mModule->get_number_of_platform_modes(mModule);
- if (number_platform_modes)
- {
- if ((ssize_t) (SIZE_MAX / sizeof(size_t)) <= number_platform_modes) // overflow
- goto done;
- voters = new (std::nothrow) size_t [number_platform_modes];
- if (voters == nullptr)
- goto done;
-
- ret = mModule->get_voter_list(mModule, voters);
- if (ret != 0)
- goto done;
-
- if ((ssize_t) (SIZE_MAX / sizeof(power_state_platform_sleep_state_t))
- <= number_platform_modes) // overflow
- goto done;
- legacy_states = new (std::nothrow)
- power_state_platform_sleep_state_t [number_platform_modes];
- if (legacy_states == nullptr)
- goto done;
-
- for (int i = 0; i < number_platform_modes; i++)
- {
- legacy_states[i].voters = nullptr;
- legacy_states[i].voters = new power_state_voter_t [voters[i]];
- if (legacy_states[i].voters == nullptr)
- goto done;
- }
-
- ret = mModule->get_platform_low_power_stats(mModule, legacy_states);
- if (ret != 0)
- goto done;
-
- states.resize(number_platform_modes);
- for (int i = 0; i < number_platform_modes; i++)
- {
- power_state_platform_sleep_state_t& legacy_state = legacy_states[i];
- PowerStatePlatformSleepState& state = states[i];
- state.name = legacy_state.name;
- state.residencyInMsecSinceBoot = legacy_state.residency_in_msec_since_boot;
- state.totalTransitions = legacy_state.total_transitions;
- state.supportedOnlyInSuspend = legacy_state.supported_only_in_suspend;
- state.voters.resize(voters[i]);
- for(size_t j = 0; j < voters[i]; j++)
- {
- state.voters[j].name = legacy_state.voters[j].name;
- state.voters[j].totalTimeInMsecVotedForSinceBoot = legacy_state.voters[j].total_time_in_msec_voted_for_since_boot;
- state.voters[j].totalNumberOfTimesVotedSinceBoot = legacy_state.voters[j].total_number_of_times_voted_since_boot;
- }
- }
- }
-done:
- if (legacy_states)
- {
- for (int i = 0; i < number_platform_modes; i++)
- {
- if(legacy_states[i].voters)
- delete(legacy_states[i].voters);
- }
- }
- delete[] legacy_states;
- delete[] voters;
- _hidl_cb(states, Status::SUCCESS);
- return Void();
-}
-
-// Methods from ::android::hardware::power::V1_1::IPower follow.
-Return<void> Power::getSubsystemLowPowerStats(getSubsystemLowPowerStats_cb _hidl_cb) {
- hidl_vec<PowerStateSubsystem> subsystems;
- ssize_t number_subsystems = 0;
-
- //This API will report zero subsystems to support older devices
- //For devices that support this API, they will have their own implementation
- subsystems.resize(number_subsystems);
- _hidl_cb(subsystems, Status::SUCCESS);
- return Void();
-}
-
-Return<void> Power::powerHintAsync(PowerHint hint, int32_t data) {
- // just call the normal power hint in this oneway function
- return powerHint(hint, data);
-}
-
-} // namespace implementation
-} // namespace V1_1
-} // namespace power
-} // namespace hardware
-} // namespace android
diff --git a/power/1.1/default/Power.h b/power/1.1/default/Power.h
deleted file mode 100644
index e779d64..0000000
--- a/power/1.1/default/Power.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_POWER_V1_1_POWER_H
-#define ANDROID_HARDWARE_POWER_V1_1_POWER_H
-
-#include <android/hardware/power/1.1/IPower.h>
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
-#include <hardware/power.h>
-
-namespace android {
-namespace hardware {
-namespace power {
-namespace V1_1 {
-namespace implementation {
-
-using ::android::hardware::power::V1_0::Feature;
-using ::android::hardware::power::V1_0::PowerHint;
-using ::android::hardware::power::V1_1::IPower;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-
-struct Power : public IPower {
- Power(power_module_t* module);
- ~Power();
-
- // Methods from ::android::hardware::power::V1_0::IPower follow
- Return<void> setInteractive(bool interactive) override;
- Return<void> powerHint(PowerHint hint, int32_t data) override;
- Return<void> setFeature(Feature feature, bool activate) override;
- Return<void> getPlatformLowPowerStats(getPlatformLowPowerStats_cb _hidl_cb) override;
-
- // Methods from ::android::hardware::power::V1_1::IPower follow.
- Return<void> getSubsystemLowPowerStats(getSubsystemLowPowerStats_cb _hidl_cb) override;
- Return<void> powerHintAsync(PowerHint hint, int32_t data) override;
-
- // Methods from ::android::hidl::base::V1_0::IBase follow.
-
- private:
- power_module_t* mModule;
-};
-
-} // namespace implementation
-} // namespace V1_1
-} // namespace power
-} // namespace hardware
-} // namespace android
-
-#endif // ANDROID_HARDWARE_POWER_V1_1_POWER_H
diff --git a/power/1.1/default/android.hardware.power@1.1-service.rc b/power/1.1/default/android.hardware.power@1.1-service.rc
deleted file mode 100644
index f2512f1..0000000
--- a/power/1.1/default/android.hardware.power@1.1-service.rc
+++ /dev/null
@@ -1,4 +0,0 @@
-service power-hal-1-1 /vendor/bin/hw/android.hardware.power@1.1-service
- class hal
- user system
- group system
diff --git a/power/1.1/default/service.cpp b/power/1.1/default/service.cpp
deleted file mode 100644
index 571db2f..0000000
--- a/power/1.1/default/service.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "android.hardware.power@1.1-service"
-
-#include <android/log.h>
-#include <hidl/HidlTransportSupport.h>
-#include <android/hardware/power/1.1/IPower.h>
-#include <hardware/power.h>
-#include "Power.h"
-
-using android::sp;
-using android::status_t;
-using android::OK;
-
-// libhwbinder:
-using android::hardware::configureRpcThreadpool;
-using android::hardware::joinRpcThreadpool;
-
-// Generated HIDL files
-using android::hardware::power::V1_1::IPower;
-using android::hardware::power::V1_1::implementation::Power;
-
-int main() {
-
- status_t status;
- android::sp<IPower> service = nullptr;
- const hw_module_t* hw_module = nullptr;
- power_module_t* power_module = nullptr;
- int err;
-
- ALOGI("Power HAL Service 1.1 (Default) is starting.");
-
- err = hw_get_module(POWER_HARDWARE_MODULE_ID, &hw_module);
- if (err) {
- ALOGE("hw_get_module %s failed: %d", POWER_HARDWARE_MODULE_ID, err);
- goto shutdown;
- }
-
- if (!hw_module->methods || !hw_module->methods->open) {
- power_module = reinterpret_cast<power_module_t*>(
- const_cast<hw_module_t*>(hw_module));
- } else {
- err = hw_module->methods->open(hw_module, POWER_HARDWARE_MODULE_ID,
- reinterpret_cast<hw_device_t**>(&power_module));
- if (err) {
- ALOGE("Passthrough failed to load legacy HAL.");
- goto shutdown;
- }
- }
-
- service = new Power(power_module);
- if (service == nullptr) {
- ALOGE("Can not create an instance of Power HAL Iface, exiting.");
-
- goto shutdown;
- }
-
- configureRpcThreadpool(1, true /*callerWillJoin*/);
-
- status = service->registerAsService();
- if (status != OK) {
- ALOGE("Could not register service for Power HAL Iface (%d).", status);
- goto shutdown;
- }
-
- ALOGI("Power Service is ready");
- joinRpcThreadpool();
- //Should not pass this line
-
-shutdown:
- // In normal operation, we don't expect the thread pool to exit
-
- ALOGE("Power Service is shutting down");
- return 1;
-}
diff --git a/power/Android.bp b/power/Android.bp
index 7a315fa..a5415df 100644
--- a/power/Android.bp
+++ b/power/Android.bp
@@ -4,6 +4,5 @@
"1.0/default",
"1.0/vts/functional",
"1.1",
- "1.1/default",
"1.1/vts/functional",
]
diff --git a/radio/1.0/types.hal b/radio/1.0/types.hal
index c5d7f8a..4d22bc0 100644
--- a/radio/1.0/types.hal
+++ b/radio/1.0/types.hal
@@ -1507,8 +1507,8 @@
int32_t lac; // 16-bit Location Area Code, 0..65535, INT_MAX if unknown
int32_t cid; // 16-bit GSM Cell Identity described in
// TS 27.007, 0..65535, INT_MAX if unknown
- int32_t arfcn; // 16-bit GSM Absolute RF channel number, INT_MAX if
- // unknown
+ int32_t arfcn; // 16-bit GSM Absolute RF channel number; this value must
+ // be valid
uint8_t bsic; // 6-bit Base Station Identity Code, 0xFF if unknown
};
@@ -1520,9 +1520,9 @@
int32_t cid; // 28-bit UMTS Cell Identity described in
// TS 25.331, 0..268435455, INT_MAX if unknown
int32_t psc; // 9-bit UMTS Primary Scrambling Code described in
- // TS 25.331, 0..511, INT_MAX if unknown
- int32_t uarfcn; // 16-bit UMTS Absolute RF Channel Number, INT_MAX if
- // unknown
+ // TS 25.331, 0..511; this value must be valid
+ int32_t uarfcn; // 16-bit UMTS Absolute RF Channel Number; this value must
+ // be valid
};
struct CellIdentityCdma {
@@ -1547,10 +1547,10 @@
// unknown
int32_t ci; // 28-bit Cell Identity described in TS TS 27.007, INT_MAX
// if unknown
- int32_t pci; // physical cell id 0..503, INT_MAX if unknown
+ int32_t pci; // physical cell id 0..503; this value must be valid
int32_t tac; // 16-bit tracking area code, INT_MAX if unknown
- int32_t earfcn; // 18-bit LTE Absolute RC Channel Number, INT_MAX if
- // unknown
+ int32_t earfcn; // 18-bit LTE Absolute RF Channel Number; this value must
+ // be valid
};
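These comment changes tighten the reporting contract: the channel-number and PCI fields may no longer be reported as INT_MAX. A hedged sketch of the range check a reporting layer might now apply to CellIdentityLte (ranges taken from the comments above; the helper is hypothetical):

#include <cstdint>

// Hypothetical validity check per the tightened comments: pci must be
// 0..503 and earfcn must fit in 18 bits. tac and ci may still be INT_MAX
// when unknown.
bool isValidLteIdentity(int32_t pci, int32_t earfcn) {
    const int32_t kMaxEarfcn = (1 << 18) - 1;  // 18-bit channel number
    return pci >= 0 && pci <= 503 && earfcn >= 0 && earfcn <= kMaxEarfcn;
}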
struct CellIdentityTdscdma {
diff --git a/tests/bar/1.0/.hidl_for_test b/tests/bar/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/bar/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/bar/1.0/Android.bp b/tests/bar/1.0/Android.bp
index 44ae7a5..2dbfb0f 100644
--- a/tests/bar/1.0/Android.bp
+++ b/tests/bar/1.0/Android.bp
@@ -67,7 +67,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.bar@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.bar@1.0_genc++"],
diff --git a/tests/baz/1.0/.hidl_for_test b/tests/baz/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/baz/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/baz/1.0/Android.bp b/tests/baz/1.0/Android.bp
index 7fa8b27..cec3039 100644
--- a/tests/baz/1.0/Android.bp
+++ b/tests/baz/1.0/Android.bp
@@ -60,7 +60,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.baz@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.baz@1.0_genc++"],
diff --git a/tests/expression/1.0/.hidl_for_test b/tests/expression/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/expression/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/expression/1.0/Android.bp b/tests/expression/1.0/Android.bp
index 0ea0acf..093b660 100644
--- a/tests/expression/1.0/Android.bp
+++ b/tests/expression/1.0/Android.bp
@@ -42,7 +42,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.expression@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.expression@1.0_genc++"],
diff --git a/tests/extension/light/2.0/.hidl_for_test b/tests/extension/light/2.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/extension/light/2.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/extension/light/2.0/Android.bp b/tests/extension/light/2.0/Android.bp
index e8a5017..52117b4 100644
--- a/tests/extension/light/2.0/Android.bp
+++ b/tests/extension/light/2.0/Android.bp
@@ -39,7 +39,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.extension.light@2.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.extension.light@2.0_genc++"],
diff --git a/tests/foo/1.0/.hidl_for_test b/tests/foo/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/foo/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/foo/1.0/Android.bp b/tests/foo/1.0/Android.bp
index b5de12e..d0038ab 100644
--- a/tests/foo/1.0/Android.bp
+++ b/tests/foo/1.0/Android.bp
@@ -67,7 +67,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.foo@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.foo@1.0_genc++"],
diff --git a/tests/hash/1.0/.hidl_for_test b/tests/hash/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/hash/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/hash/1.0/Android.bp b/tests/hash/1.0/Android.bp
index d4d1d1a..336963e 100644
--- a/tests/hash/1.0/Android.bp
+++ b/tests/hash/1.0/Android.bp
@@ -35,7 +35,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.hash@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.hash@1.0_genc++"],
diff --git a/tests/inheritance/1.0/.hidl_for_test b/tests/inheritance/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/inheritance/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/inheritance/1.0/Android.bp b/tests/inheritance/1.0/Android.bp
index 93a8ad5..a8c0e6c 100644
--- a/tests/inheritance/1.0/Android.bp
+++ b/tests/inheritance/1.0/Android.bp
@@ -56,7 +56,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.inheritance@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.inheritance@1.0_genc++"],
diff --git a/tests/libhwbinder/1.0/.hidl_for_test b/tests/libhwbinder/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/libhwbinder/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/libhwbinder/1.0/Android.bp b/tests/libhwbinder/1.0/Android.bp
index 4f3beb0..6132628 100644
--- a/tests/libhwbinder/1.0/Android.bp
+++ b/tests/libhwbinder/1.0/Android.bp
@@ -42,7 +42,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.libhwbinder@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.libhwbinder@1.0_genc++"],
diff --git a/tests/libhwbinder/aidl/.hidl_for_test b/tests/libhwbinder/aidl/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/libhwbinder/aidl/.hidl_for_test
+++ /dev/null
diff --git a/tests/memory/1.0/.hidl_for_test b/tests/memory/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/memory/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/memory/1.0/Android.bp b/tests/memory/1.0/Android.bp
index d39ba28..c5cc4a0 100644
--- a/tests/memory/1.0/Android.bp
+++ b/tests/memory/1.0/Android.bp
@@ -35,7 +35,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.memory@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.memory@1.0_genc++"],
diff --git a/tests/msgq/1.0/.hidl_for_test b/tests/msgq/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/msgq/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/msgq/1.0/Android.bp b/tests/msgq/1.0/Android.bp
index 7758ee8..017e0d4 100644
--- a/tests/msgq/1.0/Android.bp
+++ b/tests/msgq/1.0/Android.bp
@@ -42,7 +42,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.msgq@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.msgq@1.0_genc++"],
diff --git a/tests/multithread/1.0/.hidl_for_test b/tests/multithread/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/multithread/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/multithread/1.0/Android.bp b/tests/multithread/1.0/Android.bp
index 76ad2c1..68c19aa 100644
--- a/tests/multithread/1.0/Android.bp
+++ b/tests/multithread/1.0/Android.bp
@@ -35,7 +35,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.multithread@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.multithread@1.0_genc++"],
diff --git a/tests/pointer/1.0/.hidl_for_test b/tests/pointer/1.0/.hidl_for_test
deleted file mode 100644
index e69de29..0000000
--- a/tests/pointer/1.0/.hidl_for_test
+++ /dev/null
diff --git a/tests/pointer/1.0/Android.bp b/tests/pointer/1.0/Android.bp
index 178f165..55598ca 100644
--- a/tests/pointer/1.0/Android.bp
+++ b/tests/pointer/1.0/Android.bp
@@ -42,7 +42,7 @@
],
}
-cc_test_library {
+cc_library {
name: "android.hardware.tests.pointer@1.0",
defaults: ["hidl-module-defaults"],
generated_sources: ["android.hardware.tests.pointer@1.0_genc++"],