Merge "Add FusedActivationFunc enum and renamed certain operations." into oc-mr1-dev
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 61af70b..5fe1513 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -44,38 +44,43 @@
enum OperationType : uint32_t {
OEM_OPERATION = 0,
ADD = 1,
- AVERAGE_POOL = 2,
- CAST = 3,
- CONCATENATION = 4,
- CONV = 5,
- DEPTHWISE_CONV = 6,
- DEPTH_TO_SPACE = 7,
- DEQUANTIZE = 8,
- EMBEDDING_LOOKUP = 9,
- FAKE_QUANT = 10,
- FLOOR = 11,
- FULLY_CONNECTED = 12,
- GATHER = 13,
- HASHTABLE_LOOKUP = 14,
- L2_NORMALIZATION = 15,
- L2_POOL = 16,
- LOCAL_RESPONSE_NORMALIZATION = 17,
- LOGISTIC = 18,
- LSH_PROJECTION = 19,
- LSTM = 20,
- MAX_POOL = 21,
- MUL = 22,
- RELU = 23,
- RELU1 = 24,
- RELU6 = 25,
- RESHAPE = 26,
- RESIZE_BILINEAR = 27,
- RNN = 28,
- SOFTMAX = 29,
- SPACE_TO_DEPTH = 30,
- SPLIT = 31,
- SVDF = 32,
- TANH = 33,
+ AVERAGE_POOL_2D = 2,
+ CONCATENATION = 3,
+ CONV_2D = 4,
+ DEPTHWISE_CONV_2D = 5,
+ DEPTH_TO_SPACE = 6,
+ DEQUANTIZE = 7,
+ EMBEDDING_LOOKUP = 8,
+ FAKE_QUANT = 9,
+ FLOOR = 10,
+ FULLY_CONNECTED = 11,
+ HASHTABLE_LOOKUP = 12,
+ L2_NORMALIZATION = 13,
+ L2_POOL_2D = 14,
+ LOCAL_RESPONSE_NORMALIZATION = 15,
+ LOGISTIC = 16,
+ LSH_PROJECTION = 17,
+ LSTM = 18,
+ MAX_POOL_2D = 19,
+ MUL = 20,
+ RELU = 21,
+ RELU1 = 22,
+ RELU6 = 23,
+ RESHAPE = 24,
+ RESIZE_BILINEAR = 25,
+ RNN = 26,
+ SOFTMAX = 27,
+ SPACE_TO_DEPTH = 28,
+ SVDF = 29,
+ TANH = 30,
+};
+
+// Fused activation functions
+enum FusedActivationFunc : int32_t {
+ NONE = 0,
+ RELU = 1,
+ RELU1 = 2,
+ RELU6 = 3,
};
// Two special values that can be used instead of a regular poolIndex.
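
Note: the hunk above only introduces the FusedActivationFunc values; this change does not spell out what each value does to an operation's output. The snippet below is a minimal sketch assuming the conventional NNAPI/TFLite fused-activation semantics (NONE passes the value through, RELU clamps below at 0, RELU1 clamps to [-1, 1], RELU6 clamps to [0, 6]); the helper applyFusedActivation is hypothetical and not part of this patch.

    // Sketch only: applyFusedActivation is a hypothetical helper, not part of the HAL.
    #include <algorithm>
    #include <cstdint>

    // Mirrors the FusedActivationFunc values added to types.hal above.
    enum class FusedActivationFunc : int32_t { NONE = 0, RELU = 1, RELU1 = 2, RELU6 = 3 };

    // Assumed clamping behaviour (standard NNAPI/TFLite convention): NONE is a pass-through,
    // RELU clamps below at 0, RELU1 clamps to [-1, 1], RELU6 clamps to [0, 6].
    inline float applyFusedActivation(float x, FusedActivationFunc act) {
        switch (act) {
            case FusedActivationFunc::NONE:  return x;
            case FusedActivationFunc::RELU:  return std::max(0.0f, x);
            case FusedActivationFunc::RELU1: return std::min(std::max(x, -1.0f), 1.0f);
            case FusedActivationFunc::RELU6: return std::min(std::max(x, 0.0f), 6.0f);
        }
        return x;  // Unreachable for valid enum values.
    }
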
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
index 6655ad3..bb9f942 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -150,7 +150,7 @@
std::vector<uint8_t> operandValues(
reinterpret_cast<const uint8_t*>(operand2Data.data()),
reinterpret_cast<const uint8_t*>(operand2Data.data()) + size);
- int32_t activation[1] = {0};
+ int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
reinterpret_cast<const uint8_t*>(&activation[1]));
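
Note: the hunk above replaces the bare literal 0 with an explicit cast of FusedActivationFunc::NONE when the test writes the activation scalar into the model's operand value pool. The snippet below is a standalone sketch of that same byte-for-byte pattern, assuming a plain std::vector<uint8_t> pool as in the test; appendActivation is a hypothetical name and is not part of the VTS sources.

    // Sketch only: appendActivation is a hypothetical helper, not part of the VTS test.
    #include <cstdint>
    #include <vector>

    enum class FusedActivationFunc : int32_t { NONE = 0, RELU = 1, RELU1 = 2, RELU6 = 3 };

    // Appends the scalar INT32 activation code to an operand value pool, byte for byte,
    // matching what the test does with its activation[] array.
    inline void appendActivation(std::vector<uint8_t>* operandValues, FusedActivationFunc act) {
        const int32_t value = static_cast<int32_t>(act);
        const uint8_t* bytes = reinterpret_cast<const uint8_t*>(&value);
        operandValues->insert(operandValues->end(), bytes, bytes + sizeof(value));
    }

For example, appendActivation(&operandValues, FusedActivationFunc::NONE); reproduces the bytes the test appends for an operation with no fused activation.
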
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
index fdd3b0b..9c56e6a 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
@@ -31,6 +31,7 @@
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
+using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
using ::android::hardware::neuralnetworks::V1_0::Model;
using ::android::hardware::neuralnetworks::V1_0::OperationType;
using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;