Merge "Camera: Add default implementation of camera.device@3.3" into oc-mr1-dev
diff --git a/audio/2.0/vts/functional/ValidateAudioConfiguration.cpp b/audio/2.0/vts/functional/ValidateAudioConfiguration.cpp
index ee49023..ec3259a 100644
--- a/audio/2.0/vts/functional/ValidateAudioConfiguration.cpp
+++ b/audio/2.0/vts/functional/ValidateAudioConfiguration.cpp
@@ -28,6 +28,7 @@
         const auto configPath = folder + '/' + configName;
         if (access(configPath.c_str(), R_OK) == 0) {
             ASSERT_VALID_XML(configPath.c_str(), configSchemaPath);
+            return; // The framework does not read past the first config file found
         }
     }
 }
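
The framework only loads the first audio policy configuration file it finds on
its search path, which is why the test above can stop at the first readable
candidate. A minimal sketch of that first-match-wins lookup (the folder list
and file name here are illustrative, not the test's actual parameters):

    // First readable config wins; later folders are never consulted.
    #include <unistd.h>
    #include <string>
    #include <vector>

    std::string findFirstReadableConfig() {
        const std::vector<std::string> folders = {
                "/odm/etc", "/vendor/etc", "/system/etc"};  // assumed order
        for (const auto& folder : folders) {
            const std::string path = folder + "/audio_policy_configuration.xml";
            if (access(path.c_str(), R_OK) == 0) {
                return path;  // mirrors the early return added above
            }
        }
        return "";
    }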
diff --git a/camera/device/1.0/default/CameraDevice.cpp b/camera/device/1.0/default/CameraDevice.cpp
index c53c0d8..a03bbc8 100644
--- a/camera/device/1.0/default/CameraDevice.cpp
+++ b/camera/device/1.0/default/CameraDevice.cpp
@@ -377,10 +377,14 @@
     hidl_handle hidlHandle = mem->mHidlHandle;
     MemoryId id = object->mDeviceCallback->registerMemory(hidlHandle, buf_size, num_bufs);
     mem->handle.mId = id;
-    if (object->mMemoryMap.count(id) != 0) {
-        ALOGE("%s: duplicate MemoryId %d returned by client!", __FUNCTION__, id);
+
+    {
+        Mutex::Autolock _l(object->mMemoryMapLock);
+        if (object->mMemoryMap.count(id) != 0) {
+            ALOGE("%s: duplicate MemoryId %d returned by client!", __FUNCTION__, id);
+        }
+        object->mMemoryMap[id] = mem;
     }
-    object->mMemoryMap[id] = mem;
     mem->handle.mDevice = object;
     return &mem->handle;
 }
@@ -398,7 +402,10 @@
         ALOGE("%s: camera HAL return memory while camera is not opened!", __FUNCTION__);
     }
     device->mDeviceCallback->unregisterMemory(mem->handle.mId);
-    device->mMemoryMap.erase(mem->handle.mId);
+    {
+        Mutex::Autolock _l(device->mMemoryMapLock);
+        device->mMemoryMap.erase(mem->handle.mId);
+    }
     mem->decStrong(mem);
 }
 
@@ -826,7 +833,16 @@
         return;
     }
     if (mDevice->ops->release_recording_frame) {
-        CameraHeapMemory* camMemory = mMemoryMap.at(memId);
+        CameraHeapMemory* camMemory;
+        {
+            Mutex::Autolock _l(mMemoryMapLock);
+            auto it = mMemoryMap.find(memId);
+            if (it == mMemoryMap.end() || it->second == nullptr) {
+                ALOGE("%s unknown memoryId %d", __FUNCTION__, memId);
+                return;
+            }
+            camMemory = it->second;
+        }
         if (bufferIndex >= camMemory->mNumBufs) {
             ALOGE("%s: bufferIndex %d exceeds number of buffers %d",
                     __FUNCTION__, bufferIndex, camMemory->mNumBufs);
diff --git a/camera/device/1.0/default/CameraDevice_1_0.h b/camera/device/1.0/default/CameraDevice_1_0.h
index c078596..2c980f0 100644
--- a/camera/device/1.0/default/CameraDevice_1_0.h
+++ b/camera/device/1.0/default/CameraDevice_1_0.h
@@ -165,6 +165,8 @@
 
     sp<ICameraDeviceCallback> mDeviceCallback = nullptr;
 
+    mutable Mutex mMemoryMapLock; // guards access to mMemoryMap
+                                  // mLock must not be acquired while this lock is held
     std::unordered_map<MemoryId, CameraHeapMemory*> mMemoryMap;
 
     bool mMetadataMode = false;
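
A minimal sketch of the access pattern the hunks above enforce with this lock:
find the entry and copy the raw pointer out while holding mMemoryMapLock, then
use it only after the lock is dropped. std::mutex and the simplified types are
stand-ins for the HAL's Mutex, MemoryId, and CameraHeapMemory:

    #include <mutex>
    #include <unordered_map>

    struct CameraHeapMemory { int mNumBufs; };  // simplified stand-in
    using MemoryId = int;

    class Device {
        std::mutex mMemoryMapLock;  // guards mMemoryMap only
        std::unordered_map<MemoryId, CameraHeapMemory*> mMemoryMap;

    public:
        CameraHeapMemory* lookup(MemoryId id) {
            std::lock_guard<std::mutex> lock(mMemoryMapLock);
            auto it = mMemoryMap.find(id);
            // The caller uses the pointer after the lock is released, so
            // entries must stay registered for the lifetime of that use.
            return it == mMemoryMap.end() ? nullptr : it->second;
        }
    };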
diff --git a/configstore/1.0/default/seccomp_policy/configstore@1.0-arm64.policy b/configstore/1.0/default/seccomp_policy/configstore@1.0-arm64.policy
index 7e3dfe0c..43bf1fa 100644
--- a/configstore/1.0/default/seccomp_policy/configstore@1.0-arm64.policy
+++ b/configstore/1.0/default/seccomp_policy/configstore@1.0-arm64.policy
@@ -39,6 +39,7 @@
 rt_sigreturn: 1
 getrlimit: 1
 madvise: 1
+clock_gettime: 1
 
 # used during process crash by crash_dump to dump process info
 rt_sigprocmask: 1
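
Without this entry, the first call in the sandboxed configstore process that
reaches the kernel's clock_gettime (rather than being served from the vDSO)
is terminated with SIGSYS. A minimal example of the kind of call involved:

    #include <cstdint>
    #include <ctime>

    int64_t nowNanos() {
        timespec ts{};
        clock_gettime(CLOCK_BOOTTIME, &ts);  // syscall now allowed by policy
        return static_cast<int64_t>(ts.tv_sec) * 1000000000LL + ts.tv_nsec;
    }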
diff --git a/current.txt b/current.txt
index 5207c6a..52fbfd1 100644
--- a/current.txt
+++ b/current.txt
@@ -245,5 +245,6 @@
 503da837d1a67cbdb7c08a033e927e5430ae1b159d98bf72c6336b4dcc5e76f5 android.hardware.cas.native@1.0::types
 619600109232ed64b827c8a11beed8070b1827ae464547d7aa146cf0473b4bca android.hardware.cas.native@1.0::IDescrambler
 0a159f81359cd4f71bbe00972ee8403ea79351fb7c0cd48be72ebb3e424dbaef android.hardware.radio@1.0::types
+09342041e17c429fce0034b9096d17849122111436a5f0053e7e59500e1cb89c android.hardware.media.omx@1.0::IOmxStore
 f4945e397b5dea41bb64518dfde59be71245d8a125fd1e0acffeb57ac7b08fed android.hardware.thermal@1.1::IThermal
 c8bc853546dd55584611def2a9fa1d99f657e3366c976d2f60fe6b8aa6d2cb87 android.hardware.thermal@1.1::IThermalCallback
diff --git a/media/omx/1.0/IOmxStore.hal b/media/omx/1.0/IOmxStore.hal
index a224b0e..3ec0535 100644
--- a/media/omx/1.0/IOmxStore.hal
+++ b/media/omx/1.0/IOmxStore.hal
@@ -39,7 +39,7 @@
      *   string:                arbitrary string
      *   size:                  <num>x<num>
      *   ratio:                 <num>:<num>
-     *   range<type>:           <type>-<type>
+     *   range<type>:           <type> | <type>-<type>
      *   list<type>:            <type> | <type>,<list<type>>
      */
     struct Attribute {
@@ -97,7 +97,7 @@
      *
      * Required node attributes for video nodes that are required by Android to
      * describe measured values for this device:
-     *   key: 'measured-frame-rate-<width>-<height>-range',
+     *   key: 'measured-frame-rate-<width>x<height>-range',
      *     value-type: range<num>; where width: num, height: num
      *
      * Optional node attributes for decoders to describe supported values:
@@ -111,7 +111,7 @@
      * Optional node attributes for encoders to describe supported values:
      *   key: 'complexity-default', value-type: num
      *   key: 'complexity-range', value-type: range<num>
-     *   key: 'feature-bitrate-control', value-type: list<enum<VBR,CBR,CQ>>
+     *   key: 'feature-bitrate-modes', value-type: list<enum<VBR,CBR,CQ>>
      *   key: 'feature-intra-refresh', value-type: enum<0,1>
      *   key: 'quality-default', value-type: num
      *   key: 'quality-range', value-type: range<num>
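
With the relaxed grammar above, a range<num> value may be a single number,
denoting the degenerate range whose endpoints coincide. A sketch of a parser
accepting both forms (the function name and return type are illustrative, not
part of the HAL):

    #include <string>
    #include <utility>

    std::pair<long, long> parseRange(const std::string& s) {
        // Search from index 1 so a leading '-' reads as a sign, not a dash.
        const auto dash = s.find('-', 1);
        if (dash == std::string::npos) {
            const long v = std::stol(s);  // single value: "<num>"
            return {v, v};
        }
        return {std::stol(s.substr(0, dash)),   // "<num>-<num>"
                std::stol(s.substr(dash + 1))};
    }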
diff --git a/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp b/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp
index 1d4fd67..a5b5524 100644
--- a/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp
+++ b/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp
@@ -839,7 +839,7 @@
     OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
     *eColorFormat = OMX_COLOR_FormatUnused;
     portFormat.nIndex = 0;
-    while (1) {
+    while (portFormat.nIndex < 512) {
         status = getPortParam(omxNode, OMX_IndexParamVideoPortFormat,
                               kPortIndexOutput, &portFormat);
         if (status != ::android::hardware::media::omx::V1_0::Status::OK) break;
@@ -853,7 +853,9 @@
             break;
         }
         if (OMX_COLOR_FormatYUV420SemiPlanar == portFormat.eColorFormat ||
-            OMX_COLOR_FormatYUV420Planar == portFormat.eColorFormat) {
+            OMX_COLOR_FormatYUV420Planar == portFormat.eColorFormat ||
+            OMX_COLOR_FormatYUV420PackedPlanar == portFormat.eColorFormat ||
+            OMX_COLOR_FormatYUV420PackedSemiPlanar == portFormat.eColorFormat) {
             *eColorFormat = portFormat.eColorFormat;
             break;
         }
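
Bounding the loop at 512 iterations keeps a misbehaving component, one that
keeps returning OK for ever-growing nIndex values, from hanging the test. The
general shape of the guard, with hypothetical helpers standing in for
getPortParam and the color-format check:

    #include <cstdint>

    bool getFormatAt(uint32_t index, uint32_t* format);  // hypothetical
    bool isSupported(uint32_t format);                   // hypothetical

    bool findPreferredFormat(uint32_t* out) {
        constexpr uint32_t kMaxIndex = 512;  // defends against components
                                             // that never signal "no more"
        for (uint32_t index = 0; index < kMaxIndex; ++index) {
            uint32_t format;
            if (!getFormatAt(index, &format)) break;  // enumeration done
            if (isSupported(format)) {
                *out = format;
                return true;
            }
        }
        return false;
    }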
diff --git a/neuralnetworks/1.0/IDevice.hal b/neuralnetworks/1.0/IDevice.hal
index ec3b27f..b6f9433 100644
--- a/neuralnetworks/1.0/IDevice.hal
+++ b/neuralnetworks/1.0/IDevice.hal
@@ -21,14 +21,63 @@
 import IEvent;
 import IPreparedModel;
 
+/**
+ * This interface represents a device driver.
+ */
 interface IDevice {
+    /**
+     * Gets the capabilities of a driver.
+     *
+     * @return status ErrorStatus::NONE if successful.
+     * @return capabilities Capabilities of the driver.
+     */
     getCapabilities() generates (ErrorStatus status, Capabilities capabilities);
 
+    /**
+     * Gets the supported operations in a model.
+     *
+     * getSupportedOperations provides a more nuanced indication of whether a
+     * model can be compiled by the driver. Having the entire model allows
+     * access to additional information, such as the tensor shapes of the
+     * inputs or the tensor strides, which is not known at initialization time.
+     *
+     * @param model A model whose operations--and their corresponding
+     *              operands--are to be verified by the driver.
+     * @return status ErrorStatus::NONE if successful.
+     * @return supportedOperations A list of supported operations, where true
+     *                             indicates the operation is supported and
+     *                             false indicates the operation is not
+     *                             supported. The index of "supported"
+     *                             corresponds with the index of the operation
+     *                             it is describing.
+     */
     getSupportedOperations(Model model)
             generates (ErrorStatus status, vec<bool> supportedOperations);
 
+    /**
+     * Prepares a model for execution.
+     *
+     * prepareModel is used to make any necessary transformations or alternative
+     * representations to a model for execution, possibly including
+     * transformations on the constant data, optimizations on the model's graph,
+     * or compilation into the device's native binary.
+     *
+     * The only information that may be unknown to the model at this stage is
+     * the shape of the tensors, which may only be known at execution time.
+     *
+     * @param model The model to be prepared for execution.
+     * @param event A synchronization callback that must be signaled once the
+     *              preparation has finished.
+     * @return status ErrorStatus::NONE if successful.
+     * @return preparedModel A handle to the resultant prepared model.
+     */
     prepareModel(Model model, IEvent event)
             generates (ErrorStatus status, IPreparedModel preparedModel);
 
+    /**
+     * Returns the current status of a driver.
+     *
+     * @return status Status of the driver.
+     */
     getStatus() generates (DeviceStatus status);
 };
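
A simplified stand-in for the sequence a client drives against this
interface: query per-operation support, then compile only when every
operation is supported. The real types come from the generated
android.hardware.neuralnetworks@1.0 headers; this is a sketch, not the
framework's actual fallback logic:

    #include <functional>
    #include <vector>

    struct Model {};
    enum class ErrorStatus { NONE, GENERAL_FAILURE };

    ErrorStatus compileIfFullySupported(
            const Model& model,
            const std::function<std::vector<bool>(const Model&)>& getSupportedOperations,
            const std::function<ErrorStatus(const Model&)>& prepareModel) {
        for (bool supported : getSupportedOperations(model)) {
            if (!supported) {
                return ErrorStatus::GENERAL_FAILURE;  // caller falls back
            }
        }
        return prepareModel(model);
    }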
diff --git a/neuralnetworks/1.0/IEvent.hal b/neuralnetworks/1.0/IEvent.hal
index cf71bbc..2ebda58 100644
--- a/neuralnetworks/1.0/IEvent.hal
+++ b/neuralnetworks/1.0/IEvent.hal
@@ -29,21 +29,15 @@
  * indicate to the Neuralnetworks runtime whether the computation was
  * successful or not, and that the corresponding output is ready to be
  * consumed if the execution was successful.
- *
- * TODO: Mention that "notify" is also called by a runtime thread
- * during CPU fallback execution? Depends on whether the HIDL comments
- * are strictly for vendors or not.
  */
 interface IEvent {
 
     /**
-     * IEvent::notify is called by the server thread (i.e. the thread doing the
-     * work) to mark the event as completed so that any threads requiring the
-     * corresponding resources can continue executing.
+     * IEvent::notify is called by the server thread (i.e., the thread doing
+     * the work) to mark the event as completed so that any threads requiring
+     * the corresponding output can continue executing.
      *
-     * @param status Status of the execution associated with the Event.
-     *               Should be SUCCESS or ERROR.
+     * @param status ErrorStatus::NONE if successful.
      */
     oneway notify(ErrorStatus status);
-
 };
diff --git a/neuralnetworks/1.0/IPreparedModel.hal b/neuralnetworks/1.0/IPreparedModel.hal
index 1b82610..a7c3342 100644
--- a/neuralnetworks/1.0/IPreparedModel.hal
+++ b/neuralnetworks/1.0/IPreparedModel.hal
@@ -14,13 +14,33 @@
  * limitations under the License.
  */
 
-/* This HAL is a work in progress */
-
 package android.hardware.neuralnetworks@1.0;
 
 import IEvent;
 
+/**
+ * IPreparedModel describes a model that has been prepared for execution and
+ * is used to launch executions.
+ */
 interface IPreparedModel {
-    // Multiple threads can call this execute function concurrently.
+    /**
+     * Spawns an asynchronous execution on a prepared model.
+     *
+     * Executions are asynchronous with respect to the Neuralnetworks runtime.
+     * To support this, IPreparedModel::execute must spawn a new task and return
+     * whether the task was successfully launched. The asynchronous task which
+     * performs the execution must call event's IEvent::notify with the status
+     * of the execution immediately after the execution has finished.
+     *
+     * Multiple threads can call this execute function concurrently.
+     *
+     * @param request The input and output information on which the prepared
+     *                model is to be executed.
+     * @param event A callback used for synchronization that must be signaled
+     *              once the execution has finished.
+     * @return status ErrorStatus::NONE if the asynchronous task was
+     *                successfully launched.
+     */
     execute(Request request, IEvent event) generates (ErrorStatus status);
 };
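
A minimal sketch of the launch-then-notify contract: execute returns as soon
as the task is spawned, and the worker signals the event once the execution
finishes. Event and runInference are illustrative stand-ins for IEvent and a
driver's inference routine:

    #include <thread>

    enum class ErrorStatus { NONE, GENERAL_FAILURE };
    struct Request {};
    struct Event { void notify(ErrorStatus) {} };  // stand-in for IEvent

    ErrorStatus runInference(const Request&) {  // hypothetical worker
        return ErrorStatus::NONE;
    }

    ErrorStatus execute(const Request& request, Event* event) {
        std::thread([request, event] {
            event->notify(runInference(request));  // right after finishing
        }).detach();
        return ErrorStatus::NONE;  // status of the launch, not the result
    }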
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 870c067..39e3d34 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -14,64 +14,945 @@
  * limitations under the License.
  */
 
-/* This HAL is a work in progress */
-
 package android.hardware.neuralnetworks@1.0;
 
-// The types an operand can have.
-// These values are the same as found in the NeuralNetworks.h file.
-// When modifying, be sure to update HAL_NUM_OPERAND_TYPES in HalIntefaces.h.
-enum OperandType : uint32_t {
-    OEM                       = 0,
-    FLOAT32                   = 1,
-    INT32                     = 2, // TODO: is this needed?
-    UINT32                    = 3,
-    TENSOR_OEM_BYTE           = 4,
-    TENSOR_FLOAT32            = 5,
-    TENSOR_INT32              = 6,
-    TENSOR_QUANT8_ASYMM       = 7,
+/**
+ * Operand types.
+ *
+ * The type of an operand in a model.
+ *
+ * Types prefaced with TENSOR_* must be used for tensor data (i.e., tensors
+ * with at least one dimension). Types not prefaced by TENSOR_* represent
+ * scalar values and must have no dimensions.
+ */
+enum OperandType : int32_t {
+    /**
+     * The following entries are used to declare scalars.
+     */
+    FLOAT32             = 0,
+    INT32               = 1,
+    UINT32              = 2,
+
+    /**
+     * The following entries are used to declare tensors.
+     */
+    TENSOR_FLOAT32      = 3,
+    TENSOR_INT32        = 4,
+
+    /**
+     * A tensor of 8 bit integers that represent real numbers.
+     *
+     * Attached to this tensor are two numbers that can be used to convert the
+     * 8 bit integer to the real value and vice versa. These two numbers are:
+     * - scale: a 32 bit floating point value
+     * - zero_value: a 32 bit integer
+     *
+     * The formula is:
+     * real_value = (integer_value - zero_value) * scale.
+     */
+    TENSOR_QUANT8_ASYMM = 5,
+
+    /**
+     * The following entries are OEM specific operand types.
+     */
+    OEM                 = 10000,
+    TENSOR_OEM_BYTE     = 10001,
 };
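
A worked example of the mapping above: with scale = 0.5 and zero_value = 128,
the stored byte 130 represents (130 - 128) * 0.5 = 1.0. A sketch of both
directions of the conversion:

    #include <cmath>
    #include <cstdint>

    float dequantize(uint8_t q, float scale, int32_t zeroValue) {
        return (static_cast<int32_t>(q) - zeroValue) * scale;
    }

    uint8_t quantize(float real, float scale, int32_t zeroValue) {
        long q = std::lround(real / scale) + zeroValue;
        if (q < 0) q = 0;      // clamp to the representable
        if (q > 255) q = 255;  // 8-bit range
        return static_cast<uint8_t>(q);
    }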
 
-// The type of operations.  Unlike the operation types found in
-// NeuralNetworks.h file, these specify the data type they operate on.
-// This is done to simplify the work of drivers.
-// TODO: Currently they are the same.  Add a conversion when finalizing the model.
-// When modifying, be sure to update HAL_NUM_OPERATION_TYPES in HalIntefaces.h.
-enum OperationType : uint32_t {
-    OEM_OPERATION                = 0,
-    ADD                          = 1,
-    AVERAGE_POOL_2D              = 2,
-    CONCATENATION                = 3,
-    CONV_2D                      = 4,
-    DEPTHWISE_CONV_2D            = 5,
-    DEPTH_TO_SPACE               = 6,
-    DEQUANTIZE                   = 7,
-    EMBEDDING_LOOKUP             = 8,
-    FAKE_QUANT                   = 9,
-    FLOOR                        = 10,
-    FULLY_CONNECTED              = 11,
-    HASHTABLE_LOOKUP             = 12,
-    L2_NORMALIZATION             = 13,
-    L2_POOL_2D                   = 14,
-    LOCAL_RESPONSE_NORMALIZATION = 15,
-    LOGISTIC                     = 16,
-    LSH_PROJECTION               = 17,
-    LSTM                         = 18,
-    MAX_POOL_2D                  = 19,
-    MUL                          = 20,
-    RELU                         = 21,
-    RELU1                        = 22,
-    RELU6                        = 23,
-    RESHAPE                      = 24,
-    RESIZE_BILINEAR              = 25,
-    RNN                          = 26,
-    SOFTMAX                      = 27,
-    SPACE_TO_DEPTH               = 28,
-    SVDF                         = 29,
-    TANH                         = 30,
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+    /**
+     * Adds two tensors, element-wise.
+     *
+     * Takes two input tensors of identical type and compatible dimensions.  The output
+     * is the sum of both input tensors, optionally modified by an activation function.
+     *
+     * Two dimensions are compatible when:
+     *     1. they are equal, or
+     *     2. one of them is 1
+     *
+     * The size of the output is the maximum size along each dimension of the input operands.
+     * It starts with the trailing dimensions, and works its way forward.
+     *
+     * Example:
+     *     input1.dimension =    {4, 1, 2}
+     *     input2.dimension = {5, 4, 3, 1}
+     *     output.dimension = {5, 4, 3, 2}
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * 0: A tensor.
+     * 1: A tensor of the same type, and compatible dimensions as input0.
+     * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *    Specifies the activation to invoke on the result of each addition.
+     *
+     * Outputs:
+     * 0: The sum, a tensor of the same type as input0.
+     */
+    ADD = 0,
+
+    /**
+     * Performs a 2-D average pooling operation.
+     *
+     * The output dimensions are functions of the filter dimensions, stride, and padding.
+     *
+     * The values in the output tensor are computed as:
+     *     output[batch, row, col, channel] =
+     *         sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1)
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: 4, with "NHWC" data layout.
+     *
+     * Inputs:
+     * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+     * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+     * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+     * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+     * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
+     * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
+     * 7: An INT32 value, specifying the filter width.
+     * 8: An INT32 value, specifying the filter height.
+     * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *    Specifies the activation to invoke on the result.
+     *
+     * Outputs:
+     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+     */
+    AVERAGE_POOL_2D = 1,
+
+    /**
+     * Concatenates the input tensors along the given dimension.
+     *
+     * The input tensors must have identical type and the same dimensions except the
+     * dimension along the concatenation axis.
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * 0 ~ n-1: The list of n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm]
+     * n: An INT32 value, specifying the concatenation axis.
+     * n+1: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *    Specifies the activation to invoke on the result.
+     *
+     * Outputs:
+     * 0: The output, a tensor of the same type as the input tensors.
+     *    The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+     */
+    CONCATENATION = 2,
+
+    /**
+     * Performs a 2-D convolution operation.
+     *
+     * The CONV_2D op sweeps a 2-D filter that can mix channels together over a batch of
+     * images, applying the filter to each window of each image of the appropriate size.
+     *
+     * The output dimensions are functions of the filter dimensions, stride, and padding.
+     *
+     * The values in the output tensor are computed as:
+     *     output[batch, row, col, channel] =
+     *         sum_{i, j} (
+     *             input[batch, row + i, col + j, k] *
+     *             filter[channel, row + i, col + j, k] +
+     *             bias[channel]
+     *         )
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: 4, with "NHWC" data layout.
+     *
+     * Inputs:
+     * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+     *    specifying the filter.
+     * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+     *    For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+     *    also be of {@link OperandType::TENSOR_FLOAT32}.
+     *    For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+     *    should be of {@link OperandType::TENSOR_INT32}.
+     * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+     * 4: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+     * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+     * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+     * 7: An INT32 value, specifying the output stride in the ‘width’ dimension.
+     * 8: An INT32 value, specifying the output stride in the ‘height’ dimension.
+     * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *    Specifies the activation to invoke on the result of each addition.
+     *
+     * Outputs:
+     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+     */
+    CONV_2D = 3,
+
+    /**
+     * Performs a depthwise 2-D convolution operation.
+     *
+     * Given an input tensor of shape [batches, height, width, depth_in] and a filter
+     * tensor of shape [depth_out, filter_height, filter_width, depth_in] containing
+     * in_channels convolutional filters of depth 1, DEPTHWISE_CONV applies a different
+     * filter to each input channel (expanding from 1 channel to channel_multiplier channels
+     * for each), then concatenates the results together.
+     *
+     * The output has depth_out = depth_in * depth_multiplier channels.
+     * The output dimensions are functions of the filter dimensions, stride, and padding.
+     *
+     * The values in the output tensor are computed as:
+     *     output[b, i, j, k * channel_multiplier + q] =
+     *         sum_{di, dj} (
+     *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+     *             filter[di, dj, k, q]
+     *         )
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: 4, with "NHWC" data layout.
+     *
+     * Inputs:
+     * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+     *    specifying the filter.
+     * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+     *    For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+     *    also be of {@link OperandType::TENSOR_FLOAT32}.
+     *    For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+     *    should be of {@link OperandType::TENSOR_INT32}.
+     * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+     * 4: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+     * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+     * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+     * 7: An INT32 value, specifying the output stride in the ‘width’ dimension.
+     * 8: An INT32 value, specifying the output stride in the ‘height’ dimension.
+     * 9: An INT32 value, specifying the depthwise multiplier.
+     * 10: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *    Specifies the activation to invoke on the result of each addition.
+     *
+     * Outputs:
+     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+     */
+    DEPTHWISE_CONV_2D = 4,
+
+    /**
+     * Rearranges data from depth into blocks of spatial data.
+     *
+     * More specifically, this op outputs a copy of the input tensor where values from
+     * the depth dimension are moved in spatial blocks to the height and width dimensions.
+     * The value block_size indicates the input block size and how the data is moved.
+     *
+     * Chunks of data of size block_size * block_size from depth are rearranged into
+     * non-overlapping blocks of size block_size x block_size.
+     *
+     * The width of the output tensor is input_width * block_size, whereas the height is
+     * input_height * block_size.
+     * The depth of the input tensor must be divisible by block_size * block_size
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: 4, with "NHWC" data layout.
+     *
+     * Inputs:
+     * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
+     *    block_size * block_size must be a divisor of the input depth.
+     *
+     * Outputs:
+     * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size,
+     *    depth/(block_size*block_size)].
+     */
+    DEPTH_TO_SPACE = 5,
+
+    /**
+     * Dequantizes the input tensor.
+     *
+     * The formula is:
+     *     output = (input - zero_value) * scale.
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * 0: A tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}.
+     *
+     * Outputs:
+     * 0: The output tensor of same shape as input0, but with type
+     *    {@link OperandType::TENSOR_FLOAT32}.
+     */
+    DEQUANTIZE = 6,
+
+    /**
+     * Looks up items from a given tensor.
+     *
+     * Each item in the output is a raw copy of the corresponding item in
+     * the input “values”. If the given “lookup” indices are out of bounds,
+     * the op will fail and an error will be reported.
+     *
+     * Inputs:
+     * * 0: Values. An n-D tensor of any type X (where n >= 2). E.g., if n is 2,
+     *      then the shape would be [lookup_dimension, values_dimension], where
+     *      “lookup_dimension” corresponds to the indexing dimension in the lookup
+     *      table, and “values_dimension” to the contents.
+     * * 1: Lookups. A 1-D tensor of type T, of shape [lookup_size], where
+     *      “lookup_size” is the number of elements to look for, and each entry
+     *      corresponds to the first dimension of the “values” tensor.
+     *
+     * Output:
+     * * 0: An n-D tensor of type X and the same rank and shape as the “values”
+     *      tensor, except for the first dimension which has size “lookup_size”.
+     */
+    EMBEDDING_LOOKUP = 7,
+
+    /**
+     * Computes element-wise floor() on the input tensor.
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * 0: A tensor.
+     *
+     * Outputs:
+     * 0: The output, a tensor of the same type and dimensions as input0.
+     */
+    FLOOR = 8,
+
+    /**
+     * Denotes a fully (densely) connected layer, which connects all elements in the input
+     * tensor with each element in the output tensor.
+     *
+     * This layer implements the operation:
+     *     outputs = activation(inputs * weights’ + bias)
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to
+     *    a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape
+     *    [batch_size, input_size], where “batch_size” corresponds to the batching dimension,
+     *    and “input_size” is the size of the input.
+     * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where “num_units”
+     *    corresponds to the number of output nodes.
+     * 2: A 1-D tensor, of shape [num_units], specifying the bias.
+     *    For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+     *    also be of {@link OperandType::TENSOR_FLOAT32}.
+     *    For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+     *    should be of {@link OperandType::TENSOR_INT32}.
+     * 3: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *    Specifies the activation to invoke on the result of each addition.
+     *
+     * Outputs:
+     * 0: The output tensor, of shape [batch_size, num_units].
+     */
+    FULLY_CONNECTED = 9,
+
+    /**
+     * Looks up values of a hash table with given keys.
+     *
+     * Inputs:
+     * * 0: Lookups. A 1-D int32 tensor with shape [ k ].
+     * * 1: Keys. A 1-D int32 tensor with shape [ n ], *MUST* be sorted in
+     *      ascending order.
+     * * 2: Values. A tensor with shape [ n … ].
+     *
+     * Outputs:
+     * * 0: Output. A tensor with shape [ k …].
+     * * 1: Hits. A uint8 tensor with shape [ k ] indicates whether the lookup
+     *      hits or not.
+     */
+    HASHTABLE_LOOKUP = 10,
+
+    /**
+     * Applies L2 normalization along the depth dimension.
+     *
+     * The values in the output tensor are computed as:
+     *     output[batch, row, col, channel] =
+     *         input[batch, row, col, channel] /
+     *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
+     *
+     * For x with more dimensions, independently normalizes each 1-D slice along dimension dim.
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor rank: 4, with "NHWC" data layout.
+     *
+     * Inputs:
+     * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     *
+     * Outputs:
+     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+     */
+    L2_NORMALIZATION = 11,
+
+    /**
+     * Performs a 2-D L2 pooling operation.
+     *
+     * The output dimensions are functions of the filter dimensions, stride, and padding.
+     *
+     * The values in the output tensor are computed as:
+     *     output[batch, row, col, channel] =
+     *         sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) / sum(1))
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor rank: 4, with "NHWC" data layout.
+     *
+     * Inputs:
+     * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+     * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+     * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+     * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+     * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
+     * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
+     * 7: An INT32 value, specifying the filter width.
+     * 8: An INT32 value, specifying the filter height.
+     * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *    Specifies the activation to invoke on the result.
+     *
+     * Outputs:
+     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+     */
+    L2_POOL_2D = 12,
+
+    /**
+     * Applies Local Response Normalization along the depth dimension.
+     *
+     * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last
+     * dimension), and each vector is normalized independently. Within a given vector,
+     * each component is divided by the weighted, squared sum of inputs within depth_radius.
+     *
+     * In detail:
+     *     sqr_sum[a, b, c, d] =
+     *         sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
+     *     output = input / pow((bias + alpha * sqr_sum), beta)
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor rank: 4, with "NHWC" data layout.
+     *
+     * Inputs:
+     * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     * 1: An INT32 value, specifying the radius of the normalization window.
+     * 2: A FLOAT32 value, specifying the bias, must not be zero.
+     * 3: A FLOAT32 value, specifying the scale factor, alpha.
+     * 4: A FLOAT32 value, specifying the exponent, beta.
+     *
+     * Outputs:
+     * 0: The output tensor of same shape as input0.
+     */
+    LOCAL_RESPONSE_NORMALIZATION = 13,
+
+    /**
+     * Computes sigmoid activation on the input tensor element-wise.
+     *
+     * In detail:
+     *     output = 1 / (1 + exp(-input))
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * 0: A tensor, specifying the input.
+     *
+     * Outputs:
+     * 0: The output tensor of same shape as input0.
+     */
+    LOGISTIC = 14,
+
+    /**
+     * Projects an input to a bit vector via locality-sensitive hashing.
+     *
+     * Inputs:
+     * * 0: Hash functions. Dim.size == 2, DataType: Float.
+     *            Tensor[0].Dim[0]: Number of hash functions.
+     *            Tensor[0].Dim[1]: Number of seeds per hash function.
+     *            Tensor[0].Dim[1] <= 32 in sparse case.
+     *
+     * * 1: Input. Dim.size >= 1, no restriction on DataType.
+     * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
+     *     If not set, each input element is considered to have the same weight of
+     *     1.0.
+     *     Tensor[1].Dim[0] == Tensor[2].Dim[0]
+     * * 3: Type:
+     *        Sparse: Value LSHProjectionType_SPARSE(=1).
+     *          Computed bit vector is considered to be sparse.
+     *          Each output element is an int32 made up of multiple bits computed from
+     *          hash functions.
+     *
+     *        Dense: Value LSHProjectionType_DENSE(=2).
+     *          Computed bit vector is considered to be dense. Each output element
+     *          represents a bit and can take the value of either 0 or 1.
+     *
+     * Outputs:
+     * * 0: If the projection type is sparse:
+     *        Output.Dim == { Tensor[0].Dim[0] }
+     *        A tensor of int32 that represents hash signatures.
+     *      If the projection type is Dense:
+     *        Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
+     *        A flattened tensor that represents projected bit vectors.
+     */
+    LSH_PROJECTION = 15,
+
+    /**
+     * Long short-term memory unit (LSTM) recurrent network layer.
+     *
+     * The default non-peephole implementation is based on:
+     * http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
+     * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
+     * Computation, 9(8):1735-1780, 1997.
+     *
+     * The peephole implementation is based on:
+     * https://research.google.com/pubs/archive/43905.pdf
+     * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
+     * recurrent neural network architectures for large scale acoustic modeling."
+     * INTERSPEECH, 2014.
+     *
+     * The coupling of input and forget gate (CIFG) is based on:
+     * http://arxiv.org/pdf/1503.04069.pdf
+     * Greff et al. "LSTM: A Search Space Odyssey"
+     *
+     * The class has the following independently optional inputs:
+     * * If no input gate (i.e., CIFG): “input_to_input_weights”,
+     *   “recurrent_to_input_weights”, “cell_to_input_weights”, “input_gate_bias”.
+     * * If no peephole connections: “cell_to_input_weights”,
+     *   “cell_to_forget_weights”, “cell_to_output_weights”.
+     * * If no projection layer: “projection_weights” and “projection_bias”.
+     * * If no projection bias: “projection_bias”.
+     *
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Inputs:
+     * * 0: Input.
+     *      A 2-D tensor of type T, of shape [batch_size, input_size], where
+     *      “batch_size” corresponds to the batching dimension, and “input_size”
+     *      is the size of the input.
+     * * 1: input_to_input_weights.
+     *      A 2-D tensor of type T, of shape [num_units, input_size], where
+     *      “num_units” corresponds to the number of cell units.
+     * * 2: input_to_forget_weights.
+     *      A 2-D tensor of type T, of shape [num_units, input_size].
+     * * 3: input_to_cell_weights.
+     *      A 2-D tensor of type T, of shape [num_units, input_size].
+     * * 4: input_to_output_weights.
+     *      A 2-D tensor of type T, of shape [num_units, input_size].
+     * * 5: recurrent_to_input_weights.
+     *      A 2-D tensor of type T, of shape [num_units, output_size], where
+     *      “output_size” corresponds to either the number of cell units (i.e.,
+     *      “num_units”), or the second dimension of the “projection_weights”, if
+     *      defined.
+     * * 6: recurrent_to_forget_weights.
+     *      A 2-D tensor of type T, of shape [num_units, output_size].
+     * * 7: recurrent_to_cell_weights.
+     *      A 2-D tensor of type T, of shape [num_units, output_size].
+     * * 8: recurrent_to_output_weights.
+     *      A 2-D tensor of type T, of shape [num_units, output_size].
+     * * 9: cell_to_input_weights.
+     *      A 1-D tensor of type T, of shape [num_units].
+     * * 10:cell_to_forget_weights.
+     *      A 1-D tensor of type T, of shape [num_units].
+     * * 11:cell_to_output_weights.
+     *      A 1-D tensor of type T, of shape [num_units].
+     * * 12:input_gate_bias.
+     *      A 1-D tensor of type T, of shape [num_units].
+     * * 13:forget_gate_bias.
+     *      A 1-D tensor of type T, of shape [num_units].
+     * * 14:cell_bias.
+     *      A 1-D tensor of type T, of shape [num_units].
+     * * 15:output_gate_bias.
+     *      A 1-D tensor of type T, of shape [num_units].
+     * * 16:projection_weights.
+     *      A 2-D tensor of type T, of shape [output_size, num_units].
+     * * 17:projection_bias.
+     *      A 1-D tensor of type T, of shape [output_size].
+     *
+     * Parameters:
+     * * 18:fused_activation_function.
+     *      An (optional) ActivationFunctionType indicating the activation
+     *      function.
+     *      If “NONE” is specified then it results in a linear activation.
+     * * 19:cell_clip.
+     *      A clipping threshold for the cell state, such that values are bound
+     *      within [-cell_clip, cell_clip]. If set to 0.0 then clipping is
+     *      disabled.
+     * * 20:proj_clip.
+     *      A clipping threshold for the output from the projection layer, such
+     *      that values are bound within [-proj_clip, proj_clip]. If set to 0.0
+     *      then clipping is disabled.
+     *
+     * Outputs:
+     * * 0: scratch_buffer.
+     *      A 3-D tensor of type T, of shape [batch_size, num_cell, 4].
+     * * 1: output_state.
+     *      A 2-D tensor of type T, of shape [batch_size, output_size].
+     * * 2: cell_state.
+     *      A 2-D tensor of type T, of shape [batch_size, num_units].
+     * * 3: output.
+     *      A 2-D tensor of type T, of shape [batch_size, output_size]. This is
+     *      effectively the same as the current “output_state” value.
+     */
+    LSTM = 16,
+
+    /**
+     * Performs a 2-D max pooling operation.
+     *
+     * The output dimensions are functions of the filter dimensions, stride, and padding.
+     *
+     * The values in the output tensor are computed as:
+     *     output[batch, row, col, channel] =
+     *         max_{i, j} (input[batch, row + i, col + j, channel])
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: 4, with "NHWC" data layout.
+     *
+     * Inputs:
+     * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+     * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
+     * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+     * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+     * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
+     * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
+     * 7: An INT32 value, specifying the filter width.
+     * 8: An INT32 value, specifying the filter height.
+     * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *    Specifies the activation to invoke on the result.
+     *
+     * Outputs:
+     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+     */
+    MAX_POOL_2D = 17,
+
+    /**
+     * Multiplies two tensors, element-wise.
+     *
+     * Takes two input tensors of identical type and compatible dimensions.  The output
+     * is the product of both input tensors, optionally modified by an activation function.
+     *
+     * Two dimensions are compatible when:
+     *     1. they are equal, or
+     *     2. one of them is 1
+     *
+     * The size of the resulting output is the maximum size along each dimension of the
+     * input operands. It starts with the trailing dimensions, and works its way forward.
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor rank: up to 4
+     *
+     * Inputs:
+     * 0: A tensor.
+     * 1: A tensor of the same type, and compatible dimensions as input0.
+     * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *    Specifies the activation to invoke on the result of each multiplication.
+     *
+     * Outputs:
+     * 0: The product, a tensor of the same type as input0.
+     */
+    MUL = 18,
+
+    /**
+     * Computes rectified linear activation on the input tensor element-wise.
+     *
+     * In detail:
+     *     output = max(0, input)
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * 0: A tensor, specifying the input.
+     *
+     * Outputs:
+     * 0: The output tensor of same shape as input0.
+     */
+    RELU = 19,
+
+    /**
+     * Computes rectified linear 1 activation on the input tensor element-wise.
+     *
+     * In detail:
+     *     output = min(1.f, max(-1.f, input))
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * 0: A tensor, specifying the input.
+     *
+     * Outputs:
+     * 0: The output tensor of same shape as input0.
+     */
+    RELU1 = 20,
+
+    /**
+     * Computes rectified linear 6 activation on the input tensor element-wise.
+     *
+     * In detail:
+     *     output = min(6, max(0, input))
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * 0: A tensor, specifying the input.
+     *
+     * Outputs:
+     * 0: The output tensor of same shape as input0.
+     */
+    RELU6 = 21,
+
+    /**
+     * Reshapes a tensor.
+     *
+     * Given a tensor, this operation returns a tensor that has the same values as the input,
+     * but with a newly specified shape.
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * 0: A tensor, specifying the tensor to be reshaped.
+     * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32}, defining the shape
+     *    of the output tensor. The number of elements implied by shape must be the same
+     *    as the number of elements in the input tensor.
+     *
+     * Outputs:
+     * 0: The output tensor, of shape specified by the input shape.
+     */
+    RESHAPE = 22,
+
+    /**
+     * Resizes images to a given size using bilinear interpolation.
+     *
+     * Resized images will be distorted if their original aspect ratio differs from that of the input.
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor rank: 4, with "NHWC" data layout.
+     *
+     * Inputs:
+     * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     * 1: An INT32 value, specifying the output width of the output tensor.
+     * 2: An INT32 value, specifying the output height of the output tensor.
+     *
+     * Outputs:
+     * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth].
+     */
+    RESIZE_BILINEAR = 23,
+
+    /**
+     * A basic recurrent neural network layer.
+     *
+     * This layer implements the operation:
+     * outputs = state = activation(inputs * input_weights + state * recurrent_weights + bias)
+     *
+     * Where:
+     * * “input_weights” is a weight matrix that multiplies the inputs;
+     * * “recurrent_weights” is a weight matrix that multiplies the current
+     *    “state” which itself is the output from the previous time step
+     *    computation;
+     * * “bias” is a bias vector (added to each output vector in the batch);
+     * * “activation” is the function passed as the “fused_activation_function”
+     *   argument (if not “NONE”).
+     *
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Inputs:
+     * * 0: input.
+     *      A 2-D tensor of type T, of shape [batch_size, input_size], where
+     *      “batch_size” corresponds to the batching dimension, and “input_size” is
+     *      the size of the input.
+     * * 1: weights.
+     *      A 2-D tensor of type T, of shape [num_units, input_size], where
+     *      “num_units” corresponds to the number of units.
+     * * 2: recurrent_weights.
+     *      A 2-D tensor of type T, of shape [num_units, num_units], with columns
+     *      corresponding to the weights from each unit.
+     * * 3: bias.
+     *      A 1-D tensor of type T, of shape [num_units].
+     *
+     *    For FLOAT32 input tensor, bias must also be FLOAT32.
+     *    For UINT8 input tensor, bias must be INT32.
+     *
+     * Parameters
+     * * 4: fused_activation_function.
+     *      An (optional) ActivationFunctionType indicating the activation
+     *      function. If “NONE” is specified then it results in a linear
+     *      activation.
+     *
+     * * 5: Hidden state.
+     *      A 2-D tensor of type T, of shape [batch_size, num_units].
+     *
+     * Outputs:
+     * * 0: output.
+     *      A 2-D tensor of type T, of shape [batch_size, num_units]. This is
+     *      effectively the same as the current state value.
+     */
+    RNN = 24,
+
+    /**
+     * Computes the softmax activation on the input tensor element-wise, per batch, by
+     * normalizing the input vector so the maximum coefficient is zero.
+     *
+     * In detail:
+     *     output[batch, i] =
+     *         exp((input[batch, i] - max(input[batch, :])) * beta) /
+     *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: 2 or 4.
+     *
+     * Inputs:
+     * 0: A 2-D or 4-D tensor, specifying the input.
+     * 1: A FLOAT32 value, specifying the scaling factor for the exponent, beta.
+     *
+     * Outputs:
+     * 0: The output tensor of same shape as input0.
+     */
+    SOFTMAX = 25,
+
+    /**
+     * Rearranges blocks of spatial data into depth.
+     *
+     * More specifically, this op outputs a copy of the input tensor where values from
+     * the height and width dimensions are moved to the depth dimension.
+     * The value block_size indicates the input block size and how the data is moved.
+     *
+     * Non-overlapping blocks of spatial data of size block_size x block_size are
+     * rearranged into chunks along the depth dimension.
+     *
+     * The depth of the output tensor is input_depth * block_size * block_size.
+     * The input tensor's height and width must be divisible by block_size.
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor rank: 4, with "NHWC" data layout.
+     *
+     * Inputs:
+     * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
+     *    block_size must be a divisor of both the input height and width.
+     *
+     * Outputs:
+     * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size,
+     *    depth*block_size*block_size].
+     */
+    SPACE_TO_DEPTH = 26,
+
+    /**
+     * SVDF op is a kind of stateful layer derived from the notion that a
+     * densely connected layer that's processing a sequence of input frames can
+     * be approximated by using a singular value decomposition of each of its
+     * nodes. The implementation is based on:
+     *
+     * https://research.google.com/pubs/archive/43813.pdf
+     *
+     * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
+     * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
+     * INTERSPEECH, 2015.
+     *
+     * It processes the incoming input using a 2-stage filtering mechanism:
+     * * stage 1 performs filtering on the "features" dimension, whose outputs get
+     *   pushed into a memory of fixed-size memory_size.
+     * * stage 2 performs filtering on the "time" dimension of the memory_size
+     *   memoized outputs of stage 1.
+     *
+     * Specifically, for rank 1, this layer implements the operation:
+     *
+     *    memory = push(conv1d(inputs, weights_feature, feature_dim, "VALID"));
+     *    outputs = activation(memory * weights_time + bias);
+     *
+     * Where:
+     * * “weights_feature” is a weights matrix that processes the inputs (by
+     *   convolving the input with every “feature filter”), and whose outputs get
+     *   pushed, stacked in order, into the fixed-size “memory” (the oldest entry
+     *   gets dropped);
+     * * “weights_time” is a weights matrix that processes the “memory” (by a
+     *   batched matrix multiplication on the num_units);
+     * * “bias” is an optional bias vector (added to each output vector in the
+     *   batch); and
+     * * “activation” is the function passed as the “fused_activation_function”
+     *   argument (if not “NONE”).
+     *
+     * Each rank adds a dimension to the weights matrices by means of stacking
+     * the filters.
+     *
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Inputs:
+     * * 0: input.
+     *      A 2-D tensor of type T, of shape [batch_size, input_size], where
+     *      “batch_size” corresponds to the batching dimension, and “input_size” is
+     *      the size of the input.
+     * * 1: weights_feature.
+     *      A 2-D tensor of type T, of shape [num_units, input_size], where
+     *      “num_units” corresponds to the number of units.
+     * * 2: weights_time.
+     *      A 2-D tensor of type T, of shape [num_units, memory_size], where
+     *      “memory_size” corresponds to the fixed-size of the memory.
+     * * 3: bias.
+     *      An optional 1-D tensor of type T, of shape [num_units].
+     *
+     *    For FLOAT32 input tensor, bias must also be FLOAT32.
+     *    For UINT8 input tensor, bias must be INT32.
+     *
+     * Parameters:
+     * * 4: rank.
+     *      The rank of the SVD approximation.
+     * * 5: fused_activation_function.
+     *      An (optional) ActivationFunctionType indicating the activation function.
+     *      If “NONE” is specified then it results in a linear activation.
+     *
+     * Outputs:
+     * * 0: state.
+     *      A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank].
+     * * 1: output.
+     *      A 2-D tensor of type T, of shape [batch_size, num_units].
+     */
+    SVDF = 27,
+
+    /**
+     * Computes hyperbolic tangent of input tensor element-wise.
+     *
+     * In detail:
+     *     output = tanh(input)
+     *
+     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor rank: up to 4.
+     *
+     * Inputs:
+     * 0: A tensor, specifying the input.
+     *
+     * Outputs:
+     * 0: The output tensor of same shape as input0.
+     */
+    TANH = 28,
+
+    /**
+     * OEM specific operation.
+     *
+     * This operation is OEM specific. It should only be used for OEM applications.
+     */
+    OEM_OPERATION = 10000,
 };
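
A sketch of the broadcast rule shared by ADD and MUL: align the two shapes at
their trailing dimensions; each aligned pair must be equal or contain a 1,
and the output takes the larger extent (validation of incompatible pairs is
omitted here):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> broadcastShape(std::vector<uint32_t> a,
                                         std::vector<uint32_t> b) {
        if (a.size() < b.size()) std::swap(a, b);
        std::vector<uint32_t> out = a;  // {5,4,3,1} vs {4,1,2} -> {5,4,3,2}
        for (size_t i = 0; i < b.size(); ++i) {
            uint32_t& x = out[out.size() - 1 - i];
            const uint32_t y = b[b.size() - 1 - i];
            x = std::max(x, y);  // valid when x == y or one of them is 1
        }
        return out;
    }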
 
-// Fused activation functions
+/**
+ * Fused activation function types.
+ */
 enum FusedActivationFunc : int32_t {
     NONE  = 0,
     RELU  = 1,
@@ -79,134 +960,310 @@
     RELU6 = 3,
 };
 
-// How an operand is used.
-enum OperandLifeTime : uint32_t {
-    // The operand is internal to the model.  It's created by an operation
-    // and consumed by other operations.
+/**
+ * How an operand is used.
+ */
+enum OperandLifeTime : int32_t {
+    /**
+     * The operand is internal to the model.  It's created by an operation
+     * and consumed by other operations.
+     */
     TEMPORARY_VARIABLE,
-    // The operand is an input of the model. An operand can't be both
-    // input and output of a model.
+
+    /**
+     * The operand is an input of the model. An operand can't be both
+     * input and output of a model.
+     */
     MODEL_INPUT,
-    // The operand is an output of the model.
+
+    /**
+     * The operand is an output of the model.
+     */
     MODEL_OUTPUT,
-    // The operand is a constant found in Model.operandValues.
+
+    /**
+     * The operand is a constant found in Model.operandValues.
+     */
     CONSTANT_COPY,
-    // The operand is a constant that was specified via a Memory object.
-    CONSTANT_REFERENCE
+
+    /**
+     * The operand is a constant that was specified via a Memory object.
+     */
+    CONSTANT_REFERENCE,
 };
 
-// Status of a device.
-enum DeviceStatus : uint32_t {
+/**
+ * Status of a device.
+ */
+enum DeviceStatus : int32_t {
     AVAILABLE,
     BUSY,
     OFFLINE,
-    UNKNOWN  // Do we need this?
+    UNKNOWN,
 };
 
-// For the reference workload
-// Used by a driver to report its performance characteristics.
-// TODO revisit the data types and scales.
-struct PerformanceInfo {
-    float execTime;    // in nanoseconds
-    float powerUsage;  // in picoJoules
-};
-
+/**
+ * A typed operation.
+ */
 struct OperationTuple {
-    // The type of operation.
+    /**
+     * The type of operation.
+     */
     OperationType operationType;
-    // The input data type of operation.
+
+    /**
+     * The input data type of operation.
+     */
     OperandType operandType;
 };
 
-// The capabilities of a driver.
+/**
+ * Performance information for the reference workload.
+ *
+ * Used by a driver to report its performance characteristics.
+ */
+struct PerformanceInfo {
+    /**
+     * Execution time in nanoseconds.
+     */
+    float execTime;
+
+    /**
+     * Power usage in picoJoules.
+     */
+    float powerUsage;
+};
+
+/**
+ * The capabilities of a driver.
+ */
 struct Capabilities {
+    /**
+     * A collection of typed operations supported by the driver.
+     */
     vec<OperationTuple> supportedOperationTuples;
-    // TODO Do the same for baseline model IDs
+
+    /**
+     * Indicates whether a driver caches its prepared model for reuse the next
+     * time the application runs. This is useful because a model prepared in a
+     * previous run can then be reused instead of being prepared again.
+     *
+     * True if caching is supported, false otherwise.
+     */
     bool cachesCompilation;
-    // TODO revisit the data types and scales.
-    float bootupTime;  // in nanoseconds
-    PerformanceInfo float16Performance;
+
+    /**
+     * Driver performance when operating on float32 data.
+     */
     PerformanceInfo float32Performance;
+
+    /**
+     * Driver performance when operating on asymmetric 8-bit quantized data.
+     */
     PerformanceInfo quantized8Performance;
 };
 
-// Describes the location of a data object.
+/**
+ * Describes the location of a data object.
+ */
 struct DataLocation {
-    // The index of the memory pool where this location is found.
-    // Two special values can also be used.  See the LOCATION_* constants above.
+    /**
+     * The index of the memory pool where this location is found.
+     */
     uint32_t poolIndex;
-    // Offset in bytes from the start of the pool.
+
+    /**
+     * Offset in bytes from the start of the pool.
+     */
     uint32_t offset;
-    // The length of the data, in bytes.
+
+    /**
+     * The length of the data in bytes.
+     */
     uint32_t length;
 };
 
+/**
+ * Describes one operand of the model's graph.
+ */
 struct Operand {
+    /**
+     * Data type of the operand.
+     */
     OperandType type;
+
+    /**
+     * Dimensions of the operand.
+     */
     vec<uint32_t> dimensions;
 
-    // The number of operations that uses this operand as input.
-    // TODO It would be nice to track the actual consumers, e.g. vec<uint32_t> consumers;
+    /**
+     * The number of operations that use this operand as input.
+     */
     uint32_t numberOfConsumers;
 
+    /**
+     * Quantized scale of the operand.
+     *
+     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+     */
     float scale;
+
+    /**
+     * Quantized zero-point offset of the operand.
+     *
+     * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+     */
     int32_t zeroPoint;
 
-    // How the operand is used.
+    /**
+     * How the operand is used.
+     */
     OperandLifeTime lifetime;
 
-    // Where to find the data for this operand.
-    // If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, or MODEL_OUTPUT:
-    // - All the fields will be 0.
-    // If the lifetime is CONSTANT_COPY:
-    // - location.poolIndex is 0.
-    // - location.offset is the offset in bytes into Model.operandValues.
-    // - location.length is set.
-    // If the lifetime is CONSTANT_REFERENCE:
-    // - location.poolIndex is set.
-    // - location.offset is the offset in bytes into the specified pool.
-    // - location.length is set.
+    /**
+     * Where to find the data for this operand.
+     * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, or MODEL_OUTPUT:
+     * - All the fields will be 0.
+     * If the lifetime is CONSTANT_COPY:
+     * - location.poolIndex is 0.
+     * - location.offset is the offset in bytes into Model.operandValues.
+     * - location.length is set.
+     * If the lifetime is CONSTANT_REFERENCE:
+     * - location.poolIndex is set.
+     * - location.offset is the offset in bytes into the specified pool.
+     * - location.length is set.
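+     *
+     * For example (illustrative values): a CONSTANT_COPY operand whose 128
+     * bytes of data start at byte 256 of Model.operandValues would have
+     * location = {.poolIndex = 0, .offset = 256, .length = 128}.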
+     */
     DataLocation location;
 };
 
-// Describes one operation of the graph.
+/**
+ * Describes one operation of the model's graph.
+ */
 struct Operation {
-    // The tuple describing the operation type and input type.
+    /**
+     * The tuple describing the operation type and input type.
+     */
     OperationTuple opTuple;
-    // Describes the table that contains the indexes of the inputs of the
-    // operation. The offset is the index in the operandIndexes table.
+
+    /**
+     * The indexes of the operands used as inputs by this operation. Each
+     * value is the index of an operand in the model's operand table.
+     */
     vec<uint32_t> inputs;
-    // Describes the table that contains the indexes of the outputs of the
-    // operation. The offset is the index in the operandIndexes table.
+
+    /**
+     * The indexes of the operands produced as outputs by this operation. Each
+     * value is the index of an operand in the model's operand table.
+     */
     vec<uint32_t> outputs;
 };
 
+/**
+ * A Neural Network Model.
+ *
+ * This includes not only the execution graph, but also constant data such as
+ * weights or scalars added at construction time. The only information that
+ * might not be known is the shape of the input tensors.
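+ *
+ * For example (illustrative): a model computing out = TANH(in) would contain
+ * two operands (in and out), a single TANH operation, inputIndexes = [0] and
+ * outputIndexes = [1].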
+ */
 struct Model {
+    /**
+     * All operands included in the model.
+     */
     vec<Operand> operands;
+
+    /**
+     * All operations included in the model.
+     *
+     * The operations are sorted into execution order.
+     */
     vec<Operation> operations;
+
+    /**
+     * Input indexes of the model.
+     *
+     * Each value corresponds to the index of the operand in "operands".
+     */
     vec<uint32_t> inputIndexes;
+
+    /**
+     * Output indexes of the model.
+     *
+     * Each value corresponds to the index of the operand in "operands".
+     */
     vec<uint32_t> outputIndexes;
+
+    /**
+     * A byte buffer containing operand data that were copied into the model.
+     */
     vec<uint8_t> operandValues;
+
+    /**
+     * A collection of shared memory pools containing operand data that were
+     * registered by the model.
+     */
     vec<memory> pools;
 };
 
+/**
+ * Metadata specifying where the data of an input or output is located and
+ * describing any updates to that operand's dimensions.
+ */
 struct RequestArgument {
-    // The location within one of the memory pools
+    /**
+     * The location within one of the memory pools passed in the Request.
+     */
     DataLocation location;
-    // If dimensions.size() > 0, dimension information was provided along with the
-    // argument.  This can be the case for models that accept inputs of varying size.
-    // This can't change the rank, just the value of the dimensions that were
-    // unspecified in the model.
+
+    /**
+     * Updated dimension information.
+     *
+     * If dimensions.size() > 0, dimension information was provided along with the
+     * argument.  This can be the case for models that accept inputs of varying size.
+     * This can't change the rank, just the value of the dimensions that were
+     * unspecified in the model.
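+     *
+     * For example (illustrative shapes): a 4-D input whose batch dimension was
+     * left unspecified in the model could be given dimensions = [2, 224, 224, 3]
+     * here, but it could not be given a 3-D shape, since that would change the
+     * rank.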
+     */
     vec<uint32_t> dimensions;
 };
 
+/**
+ * Inputs to be sent to and outputs to be retrieved from a prepared model.
+ *
+ * A Request serves two primary tasks:
+ * 1) Provides the input and output data to be used when executing the model.
+ * 2) Specifies any updates to the input operand metadata that were left
+ *    unspecified at model preparation time.
+ */
 struct Request {
+    /**
+     * Input data and information to be used in the execution of a prepared
+     * model.
+     *
+     * The index of the input corresponds to the index in Model.inputIndexes.
+     *   E.g., input[i] corresponds to Model.inputIndexes[i].
+     */
     vec<RequestArgument> inputs;
+
+    /**
+     * Output data and information to be used in the execution of a prepared
+     * model.
+     *
+     * The index of the output corresponds to the index in Model.outputIndexes.
+     *   E.g., output[i] corresponds to Model.outputIndexes[i].
+     */
     vec<RequestArgument> outputs;
+
+    /**
+     * A collection of shared memory pools containing operand data for both the
+     * inputs and the outputs to a model.
+     */
     vec<memory> pools;
 };
 
-enum ErrorStatus : uint32_t {
+/**
+ * Return status of a function.
+ */
+enum ErrorStatus : int32_t {
     NONE,
     DEVICE_UNAVAILABLE,
     GENERAL_FAILURE,
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 2f557f8..db90ac2 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -33,6 +33,7 @@
 
 namespace generated_tests {
 using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
+using ::generated_tests::filter;
 using ::generated_tests::for_all;
 using ::generated_tests::for_each;
 using ::generated_tests::resize_accordingly;
@@ -44,6 +45,7 @@
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
 void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
+             std::function<bool(int)> is_ignored,
              const std::vector<MixedTypedExampleType>& examples) {
     Model model = create_model();
     sp<IPreparedModel> preparedModel;
@@ -166,18 +168,28 @@
         COPY_BACK(uint8_t);
 #undef COPY_BACK
         outputMemory->commit();
+        // Filter out the outputs marked as ignored ("don't care" values) so
+        // they are excluded from the comparisons below.
+        MixedTyped filtered_golden;
+        MixedTyped filtered_test;
+        filter<float>(golden, &filtered_golden, is_ignored);
+        filter<float>(test, &filtered_test, is_ignored);
+        filter<int32_t>(golden, &filtered_golden, is_ignored);
+        filter<int32_t>(test, &filtered_test, is_ignored);
+        filter<uint8_t>(golden, &filtered_golden, is_ignored);
+        filter<uint8_t>(test, &filtered_test, is_ignored);
+
         // We want "close-enough" results for float
-        for_each<float>(golden, [&test](int index, auto& golden_float) {
-            auto& test_float_operands = std::get<Float32Operands>(test);
+        for_each<float>(filtered_golden, [&filtered_test](int index, auto& golden_float) {
+            auto& test_float_operands = std::get<Float32Operands>(filtered_test);
             auto& test_float = test_float_operands[index];
             for (unsigned int i = 0; i < golden_float.size(); i++) {
                 SCOPED_TRACE(i);
-                EXPECT_FLOAT_EQ(golden_float[i], test_float[i]);
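+                // Compare with a small absolute tolerance rather than exact
+                // floating-point equality.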
+                EXPECT_NEAR(golden_float[i], test_float[i], 1.e-5);
             }
         });
-
-        EXPECT_EQ(std::get<Int32Operands>(golden), std::get<Int32Operands>(test));
-        EXPECT_EQ(std::get<Quant8Operands>(golden), std::get<Quant8Operands>(test));
+        EXPECT_EQ(std::get<Int32Operands>(filtered_golden), std::get<Int32Operands>(filtered_test));
+        EXPECT_EQ(std::get<Quant8Operands>(filtered_golden),
+                  std::get<Quant8Operands>(filtered_test));
     }
 }
 
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
index 5a20f44..453e3e5 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -34,7 +34,7 @@
 using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
 using ::generated_tests::MixedTypedExampleType;
 namespace generated_tests {
-extern void Execute(const sp<IDevice>&, std::function<Model(void)>,
+extern void Execute(const sp<IDevice>&, std::function<Model(void)>, std::function<bool(int)>,
                     const std::vector<MixedTypedExampleType>&);
 }
 
@@ -83,9 +83,6 @@
             EXPECT_NE(nullptr, capabilities.supportedOperationTuples.data());
             EXPECT_NE(0ull, capabilities.supportedOperationTuples.size());
             EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
-            EXPECT_LT(0.0f, capabilities.bootupTime);
-            EXPECT_LT(0.0f, capabilities.float16Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.float16Performance.powerUsage);
             EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
             EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
             EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
diff --git a/sensors/1.0/default/android.hardware.sensors@1.0-service.rc b/sensors/1.0/default/android.hardware.sensors@1.0-service.rc
index 059e5db..6e78082 100644
--- a/sensors/1.0/default/android.hardware.sensors@1.0-service.rc
+++ b/sensors/1.0/default/android.hardware.sensors@1.0-service.rc
@@ -1,5 +1,5 @@
 service sensors-hal-1-0 /vendor/bin/hw/android.hardware.sensors@1.0-service
     class hal
     user system
-    group system
-    capabilities SYS_NICE
+    group system wakelock
+    capabilities BLOCK_SUSPEND SYS_NICE
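+    # The wakelock group and BLOCK_SUSPEND capability let the service hold
+    # kernel wakelocks (e.g. while wake-up sensor events are in flight).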
diff --git a/tests/bar/1.0/Android.bp b/tests/bar/1.0/Android.bp
index 44ae7a5..b6ee042 100644
--- a/tests/bar/1.0/Android.bp
+++ b/tests/bar/1.0/Android.bp
@@ -67,16 +67,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.bar@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.bar@1.0_genc++"],
     generated_headers: ["android.hardware.tests.bar@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.bar@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/baz/1.0/Android.bp b/tests/baz/1.0/Android.bp
index 7fa8b27..ef68149 100644
--- a/tests/baz/1.0/Android.bp
+++ b/tests/baz/1.0/Android.bp
@@ -60,16 +60,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.baz@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.baz@1.0_genc++"],
     generated_headers: ["android.hardware.tests.baz@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.baz@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/expression/1.0/Android.bp b/tests/expression/1.0/Android.bp
index 0ea0acf..bc389b0 100644
--- a/tests/expression/1.0/Android.bp
+++ b/tests/expression/1.0/Android.bp
@@ -42,16 +42,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.expression@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.expression@1.0_genc++"],
     generated_headers: ["android.hardware.tests.expression@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.expression@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/extension/light/2.0/Android.bp b/tests/extension/light/2.0/Android.bp
index e8a5017..c5987a7 100644
--- a/tests/extension/light/2.0/Android.bp
+++ b/tests/extension/light/2.0/Android.bp
@@ -39,16 +39,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.extension.light@2.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.extension.light@2.0_genc++"],
     generated_headers: ["android.hardware.tests.extension.light@2.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.extension.light@2.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/foo/1.0/Android.bp b/tests/foo/1.0/Android.bp
index b5de12e..6387950 100644
--- a/tests/foo/1.0/Android.bp
+++ b/tests/foo/1.0/Android.bp
@@ -67,16 +67,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.foo@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.foo@1.0_genc++"],
     generated_headers: ["android.hardware.tests.foo@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.foo@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/hash/1.0/Android.bp b/tests/hash/1.0/Android.bp
index d4d1d1a..505a4ad 100644
--- a/tests/hash/1.0/Android.bp
+++ b/tests/hash/1.0/Android.bp
@@ -35,16 +35,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.hash@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.hash@1.0_genc++"],
     generated_headers: ["android.hardware.tests.hash@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.hash@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/inheritance/1.0/Android.bp b/tests/inheritance/1.0/Android.bp
index 93a8ad5..5d8d53d 100644
--- a/tests/inheritance/1.0/Android.bp
+++ b/tests/inheritance/1.0/Android.bp
@@ -56,16 +56,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.inheritance@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.inheritance@1.0_genc++"],
     generated_headers: ["android.hardware.tests.inheritance@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.inheritance@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/libhwbinder/1.0/Android.bp b/tests/libhwbinder/1.0/Android.bp
index 4f3beb0..338a72b 100644
--- a/tests/libhwbinder/1.0/Android.bp
+++ b/tests/libhwbinder/1.0/Android.bp
@@ -42,16 +42,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.libhwbinder@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.libhwbinder@1.0_genc++"],
     generated_headers: ["android.hardware.tests.libhwbinder@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.libhwbinder@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/memory/1.0/Android.bp b/tests/memory/1.0/Android.bp
index d39ba28..a753824 100644
--- a/tests/memory/1.0/Android.bp
+++ b/tests/memory/1.0/Android.bp
@@ -35,16 +35,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.memory@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.memory@1.0_genc++"],
     generated_headers: ["android.hardware.tests.memory@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.memory@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/msgq/1.0/Android.bp b/tests/msgq/1.0/Android.bp
index 7758ee8..0937545 100644
--- a/tests/msgq/1.0/Android.bp
+++ b/tests/msgq/1.0/Android.bp
@@ -42,16 +42,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.msgq@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.msgq@1.0_genc++"],
     generated_headers: ["android.hardware.tests.msgq@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.msgq@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/multithread/1.0/Android.bp b/tests/multithread/1.0/Android.bp
index 76ad2c1..5f4c44c 100644
--- a/tests/multithread/1.0/Android.bp
+++ b/tests/multithread/1.0/Android.bp
@@ -35,16 +35,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.multithread@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.multithread@1.0_genc++"],
     generated_headers: ["android.hardware.tests.multithread@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.multithread@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/tests/pointer/1.0/Android.bp b/tests/pointer/1.0/Android.bp
index 178f165..a765ae7 100644
--- a/tests/pointer/1.0/Android.bp
+++ b/tests/pointer/1.0/Android.bp
@@ -42,16 +42,13 @@
     ],
 }
 
-cc_test_library {
+cc_library {
     name: "android.hardware.tests.pointer@1.0",
     defaults: ["hidl-module-defaults"],
     generated_sources: ["android.hardware.tests.pointer@1.0_genc++"],
     generated_headers: ["android.hardware.tests.pointer@1.0_genc++_headers"],
     export_generated_headers: ["android.hardware.tests.pointer@1.0_genc++_headers"],
     vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libhidlbase",
         "libhidltransport",
diff --git a/wifi/1.0/README-NAN.md b/wifi/1.0/README-NAN.md
new file mode 100644
index 0000000..f4b3320
--- /dev/null
+++ b/wifi/1.0/README-NAN.md
@@ -0,0 +1,221 @@
+Copyright 2017 The Android Open Source Project
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+# Wi-Fi Aware (NAN) HAL API Usage
+
+The Wi-Fi Aware (NAN) HAL API is defined in (<i>hardware/interfaces/wifi/\<version\>/</i>):
+
+* IWifiNanIface.hal
+* IWifiNanIfaceEventCallback.hal
+* types.hal (structure definitions)
+
+The Wi-Fi Aware (NAN) HAL API surface is very large - only a subset of it is used by the framework.
+
+The subset of the HAL API which is actively used by the Android framework can be deduced
+by reviewing the framework code, specifically (<i>frameworks/opt/net/wifi/</i>):
+
+* WifiAwareNativeApi.java
+* WifiAwareNativeCallback.java
+
+The above framework files determine the API usage and should be consulted as the authoritative
+reference. Please consult the primary HAL files for documentation - it is not replicated
+in this document. APIs which are present in the HAL but not listed in this README are not used by
+the framework.
+
+Note: the HAL API is translated to the legacy HAL API (<i>wifi_nan.h</i>). This README file covers
+the new HAL API only. To understand the mapping between new and legacy HALs please consult
+<i>hardware/interfaces/wifi/\<version\>/default/hidl_struct_util.cpp</i>.
+
+## IWifiNanIface
+
+Format:
+* Hard-coded values are in <b>bold</b>, e.g. <b>true</b> or <b>5</b>
+* Assigned but not fixed values are specified using the <i>variable</i> keyword, possibly with some
+details/constraints
+* Unassigned values are specified using the <i>N/A</i> keyword. Unassigned usually means initialized
+to 0.
+
+APIs:
+
+* registerEventCallback(IWifiNanIfaceEventCallback callback)
+* getCapabilitiesRequest
+* enableRequest (see the illustrative sketch after this API list)
+  * NanEnableRequest
+    * bool[2] operateInBand
+        * Index [NanBandIndex.NAN_BAND_24GHZ] = <b>true</b>
+        * Index [NanBandIndex.NAN_BAND_5GHZ] = <i>variable</i>
+    * uint8_t hopCountMax = <b>2</b>
+    * NanConfigRequest configParams
+        * uint8_t masterPref = <i>variable</i>
+        * bool disableDiscoveryAddressChangeIndication = <i>variable</i>
+        * bool disableStartedClusterIndication = <i>variable</i>
+        * bool disableJoinedClusterIndication = <i>variable</i>
+        * bool includePublishServiceIdsInBeacon = <b>true</b>
+        * uint8_t numberOfPublishServiceIdsInBeacon = <b>0</b>
+        * bool includeSubscribeServiceIdsInBeacon = <b>true</b>
+        * uint8_t numberOfSubscribeServiceIdsInBeacon = <b>0</b>
+        * uint16_t rssiWindowSize = <b>8</b>
+        * uint32_t macAddressRandomizationIntervalSec = <i>variable</i>
+            * Normal run-time: set to <b>1800</b> (30 minutes)
+            * Tests: set to <b>120</b> (2 minutes)
+        * NanBandSpecificConfig[2] bandSpecificConfig
+            * Index [NanBandIndex.NAN_BAND_24GHZ]
+                * uint8_t rssiClose = <b>60</b>
+                * uint8_t rssiMiddle = <b>70</b>
+                * uint8_t rssiCloseProximity = <b>60</b>
+                * uint8_t dwellTimeMs = <b>200</b>
+                * uint16_t scanPeriodSec = <b>20</b>
+                * bool validDiscoveryWindowIntervalVal = <i>variable</i>
+                * uint8_t discoveryWindowIntervalVal = <i>variable</i>
+            * Index [NanBandIndex.NAN_BAND_5GHZ]
+                * uint8_t rssiClose = <b>60</b>
+                * uint8_t rssiMiddle = <b>75</b>
+                * uint8_t rssiCloseProximity = <b>60</b>
+                * uint8_t dwellTimeMs = <b>200</b>
+                * uint16_t scanPeriodSec = <b>20</b>
+                * bool validDiscoveryWindowIntervalVal = <i>variable</i>
+                * uint8_t discoveryWindowIntervalVal = <i>variable</i>
+    * NanDebugConfig debugConfigs
+        * bool validClusterIdVals = <b>true</b>
+        * uint16_t clusterIdBottomRangeVal = <i>variable</i>
+        * uint16_t clusterIdTopRangeVal = <i>variable</i>
+        * bool validIntfAddrVal = <b>false</b>
+        * MacAddress intfAddrVal = <i>N/A</i>
+        * bool validOuiVal = <b>false</b>
+        * uint32_t ouiVal = <i>N/A</i>
+        * bool validRandomFactorForceVal = <b>false</b>
+        * uint8_t randomFactorForceVal = <i>N/A</i>
+        * bool validHopCountForceVal = <b>false</b>
+        * uint8_t hopCountForceVal = <i>N/A</i>
+        * bool validDiscoveryChannelVal = <b>false</b>
+        * WifiChannelInMhz[2] discoveryChannelMhzVal = <i>N/A</i>
+        * bool validUseBeaconsInBandVal = <b>false</b>
+        * bool[2] useBeaconsInBandVal = <i>N/A</i>
+        * bool validUseSdfInBandVal = <b>false</b>
+        * bool[2] useSdfInBandVal = <i>N/A</i>
+* configRequest
+    * NanConfigRequest: same as for <i>enableRequest</i>
+* disableRequest
+* startPublishRequest
+    * NanPublishRequest
+        * NanDiscoveryCommonConfig baseConfigs
+            * uint8_t sessionId = <i>variable</i>
+            * uint16_t ttlSec = <i>variable</i>
+            * uint16_t discoveryWindowPeriod = <b>1</b>
+            * uint8_t discoveryCount = <b>0</b>
+            * vec<uint8_t> serviceName = <i>variable</i>
+            * NanMatchAlg discoveryMatchIndicator = <b>NanMatchAlg.MATCH_NEVER</b>
+            * vec<uint8_t> serviceSpecificInfo = <i>variable</i>
+            * vec<uint8_t> extendedServiceSpecificInfo = <i>N/A</i>
+            * vec<uint8_t> rxMatchFilter = <i>variable</i>
+            * vec<uint8_t> txMatchFilter = <i>variable</i>
+            * bool useRssiThreshold = <b>false</b>
+            * bool disableDiscoveryTerminationIndication = <i>variable</i>
+            * bool disableMatchExpirationIndication = <b>true</b>
+            * bool disableFollowupReceivedIndication = <b>false</b>
+            * NanDataPathSecurityConfig securityConfig = <b>NanDataPathSecurityType.OPEN</b>
+            * bool rangingRequired = <b>false</b>
+            * uint32_t rangingIntervalMsec = <i>N/A</i>
+            * bitfield<NanRangingIndication> configRangingIndications = <i>N/A</i>
+            * uint16_t distanceIngressCm = <i>N/A</i>
+            * uint16_t distanceEgressCm = <i>N/A</i>
+        * NanPublishType publishType = <i>variable</i>
+        * NanTxType txType = <b>NanTxType.BROADCAST</b>
+        * bool autoAcceptDataPathRequests = <b>false</b>
+* stopPublishRequest
+* startSubscribeRequest
+    * NanSubscribeRequest
+        * NanDiscoveryCommonConfig baseConfigs
+            * Mostly same as <i>publish</i> above except:
+            * NanMatchAlg discoveryMatchIndicator = <b>NanMatchAlg.MATCH_ONCE</b>
+        * NanSubscribeType subscribeType = <i>variable</i>
+        * NanSrfType srfType = <i>N/A</i>
+        * bool srfRespondIfInAddressSet = <i>N/A</i>
+        * bool shouldUseSrf = <i>N/A</i>
+        * bool isSsiRequiredForMatch = <i>N/A</i>
+        * vec<MacAddress> intfAddr = <i>N/A</i>
+* stopSubscribeRequest
+* transmitFollowupRequest
+    * NanTransmitFollowupRequest
+        * uint8_t discoverySessionId = <i>variable</i>
+        * uint32_t peerId = <i>variable</i>
+        * MacAddress addr = <i>variable</i>
+        * bool isHighPriority = <b>false</b>
+        * bool shouldUseDiscoveryWindow = <b>true</b>
+        * vec<uint8_t> serviceSpecificInfo = <i>variable</i>
+        * vec<uint8_t> extendedServiceSpecificInfo = <i>N/A</i>
+        * bool disableFollowupResultIndication = <b>false</b>
+* createDataInterfaceRequest
+* deleteDataInterfaceRequest
+* initiateDataPathRequest
+    * NanInitiateDataPathRequest
+        * uint32_t peerId = <i>variable</i>
+        * MacAddress peerDiscMacAddr = <i>variable</i>
+        * NanDataPathChannelCfg channelRequestType =
+        <b>NanDataPathChannelCfg.CHANNEL_NOT_REQUESTED</b>
+        * WifiChannelInMhz channel = <b>2437</b> (ignored in practice, since the channel
+        request type is CHANNEL_NOT_REQUESTED)
+        * string ifaceName = <i>variable</i>
+        * NanDataPathSecurityConfig securityConfig = <i>variable</i>
+        * vec<uint8_t> appInfo = <i>N/A</i>
+        * vec<uint8_t> serviceNameOutOfBand = <i>variable</i>
+* respondToDataPathIndicationRequest
+    * NanRespondToDataPathIndicationRequest
+        * bool acceptRequest = <i>variable</i>
+        * uint32_t ndpInstanceId = <i>variable</i>
+        * string ifaceName = <i>variable</i>
+        * NanDataPathSecurityConfig securityConfig = <i>variable</i>
+        * vec<uint8_t> appInfo = <i>N/A</i>
+        * vec<uint8_t> serviceNameOutOfBand = <i>variable</i>
+* terminateDataPathRequest
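+
+As a rough illustration of the notation above, the <i>enableRequest</i> values could be
+populated as in the following C++ sketch. This is illustrative only - the field names come
+from types.hal and the values from the list above, but the variable names (e.g.
+<i>dualBand</i>, <i>masterPref</i>) are hypothetical and this is not the framework
+implementation (see WifiAwareNativeApi.java for that):
+
+```cpp
+// Sketch only: hard-coded vs. variable fields of NanEnableRequest.
+NanEnableRequest req = {};
+req.operateInBand[(size_t) NanBandIndex::NAN_BAND_24GHZ] = true;     // hard-coded
+req.operateInBand[(size_t) NanBandIndex::NAN_BAND_5GHZ] = dualBand;  // variable (hypothetical)
+req.hopCountMax = 2;                                                 // hard-coded
+req.configParams.masterPref = masterPref;                            // variable (hypothetical)
+req.configParams.rssiWindowSize = 8;                                 // hard-coded
+req.configParams.macAddressRandomizationIntervalSec = 1800;          // 30 minutes at run-time
+```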
+
+## IWifiNanIfaceEventCallback
+
+Format:
+* Parameters whose values are <i>ignored</i> will be flagged, otherwise the parameter value is used
+by the framework.
+
+API:
+
+* notifyXxxResponse: all callbacks are used by the framework
+* eventClusterEvent
+* eventDisabled
+* eventPublishTerminated
+* eventSubscribeTerminated
+* eventMatch
+    * NanMatchInd (all parameters are used except those listed below)
+        * vec<uint8_t> extendedServiceSpecificInfo: <i>ignored</i>
+        * bool matchOccuredInBeaconFlag: <i>ignored</i>
+        * bool outOfResourceFlag: <i>ignored</i>
+        * uint8_t rssiValue: <i>ignored</i>
+        * NanCipherSuiteType peerCipherType: <i>ignored</i>
+        * bool peerRequiresSecurityEnabledInNdp: <i>ignored</i>
+        * bool peerRequiresRanging: <i>ignored</i>
+        * uint32_t rangingMeasurementInCm: <i>ignored</i>
+        * bitfield<NanRangingIndication> rangingIndicationType: <i>ignored</i>
+* eventMatchExpired: <i>ignored</i>
+* eventFollowupReceived
+    * NanFollowupReceivedInd (all parameters are used except those listed below)
+        * bool receivedInFaw: <i>ignored</i>
+        * vec<uint8_t> extendedServiceSpecificInfo: <i>ignored</i>
+* eventTransmitFollowup
+* eventDataPathRequest
+    * NanDataPathRequestInd (all parameters are used except those listed below)
+        * bool securityRequired: <i>ignored</i>
+        * vec<uint8_t> appInfo: <i>ignored</i>
+* eventDataPathConfirm
+    * NanDataPathConfirmInd (all parameters are used except those listed below)
+        * vec<uint8_t> appInfo: <i>ignored</i>
+* eventDataPathTerminated
+