Merge "Increase thread pool for VHAL 2.1" into oc-mr1-dev
diff --git a/drm/1.0/vts/functional/drm_hal_clearkey_test.cpp b/drm/1.0/vts/functional/drm_hal_clearkey_test.cpp
index 5564513..3966eac 100644
--- a/drm/1.0/vts/functional/drm_hal_clearkey_test.cpp
+++ b/drm/1.0/vts/functional/drm_hal_clearkey_test.cpp
@@ -76,9 +76,12 @@
#define ASSERT_OK(ret) ASSERT_TRUE(ret.isOk())
#define EXPECT_OK(ret) EXPECT_TRUE(ret.isOk())
-static const uint8_t kClearKeyUUID[16] = {
- 0x10, 0x77, 0xEF, 0xEC, 0xC0, 0xB2, 0x4D, 0x02,
- 0xAC, 0xE3, 0x3C, 0x1E, 0x52, 0xE2, 0xFB, 0x4B};
+static const uint8_t kCommonPsshBoxUUID[16] = {0x10, 0x77, 0xEF, 0xEC, 0xC0, 0xB2, 0x4D, 0x02,
+ 0xAC, 0xE3, 0x3C, 0x1E, 0x52, 0xE2, 0xFB, 0x4B};
+
+// UUID used in the MPD to specify the DRM scheme for players
+static const uint8_t kClearKeyUUID[16] = {0xE2, 0x71, 0x9D, 0x58, 0xA9, 0x85, 0xB3, 0xC9,
+ 0x78, 0x1A, 0xB0, 0x30, 0xAF, 0x78, 0xD3, 0x0E};
static const uint8_t kInvalidUUID[16] = {
0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80,
@@ -111,6 +114,9 @@
* Ensure the factory supports the clearkey scheme UUID
*/
TEST_F(DrmHalClearkeyFactoryTest, ClearKeyPluginSupported) {
+ EXPECT_TRUE(drmFactory->isCryptoSchemeSupported(kCommonPsshBoxUUID));
+ EXPECT_TRUE(cryptoFactory->isCryptoSchemeSupported(kCommonPsshBoxUUID));
+
EXPECT_TRUE(drmFactory->isCryptoSchemeSupported(kClearKeyUUID));
EXPECT_TRUE(cryptoFactory->isCryptoSchemeSupported(kClearKeyUUID));
}
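
The kClearKeyUUID bytes above correspond to the DASH ClearKey scheme identifier urn:uuid:e2719d58-a985-b3c9-781a-b030af78d30e that an MPD advertises in its ContentProtection element. A minimal standalone sketch (not part of the VTS test; the helper name is illustrative) of rendering such a 16-byte UUID as that scheme-id string:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Hypothetical helper: format a 16-byte DRM scheme UUID as the
    // "urn:uuid:..." string used in a DASH MPD <ContentProtection> element.
    static std::string toSchemeIdUri(const uint8_t (&uuid)[16]) {
        char buf[37];
        std::snprintf(buf, sizeof(buf),
                      "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
                      uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
                      uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
        return std::string("urn:uuid:") + buf;
    }
    // toSchemeIdUri(kClearKeyUUID) yields "urn:uuid:e2719d58-a985-b3c9-781a-b030af78d30e".
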
diff --git a/drm/1.0/vts/functional/drm_hal_vendor_test.cpp b/drm/1.0/vts/functional/drm_hal_vendor_test.cpp
index 61f3014..47c6950 100644
--- a/drm/1.0/vts/functional/drm_hal_vendor_test.cpp
+++ b/drm/1.0/vts/functional/drm_hal_vendor_test.cpp
@@ -1595,9 +1595,8 @@
#endif
gVendorModules = new drm_vts::VendorModules(kModulePath);
if (gVendorModules->getPathList().size() == 0) {
- std::cerr << "No vendor modules found in " << kModulePath <<
- ", exiting" << std::endl;
- exit(-1);
+ std::cerr << "WARNING: No vendor modules found in " << kModulePath <<
+ ", all vendor tests will be skipped" << std::endl;
}
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index ccc17f1..0adebb8 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -32,7 +32,7 @@
UINT32 = 7,
TENSOR_FLOAT16 = 8,
TENSOR_FLOAT32 = 9,
- TENSOR_SYMMETRICAL_QUANT8 = 10,
+ TENSOR_QUANT8_ASYMM = 10,
};
// The type of operations. Unlike the operation types found in
@@ -41,39 +41,39 @@
// TODO: Currently they are the same. Add a conversion when finalizing the model.
// When modifying, be sure to update HAL_NUM_OPERATION_TYPES in HalInterfaces.h.
enum OperationType : uint32_t {
- AVERAGE_POOL_FLOAT32 = 0,
- CONCATENATION_FLOAT32 = 1,
- CONV_FLOAT32 = 2,
- DEPTHWISE_CONV_FLOAT32 = 3,
- MAX_POOL_FLOAT32 = 4,
- L2_POOL_FLOAT32 = 5,
- DEPTH_TO_SPACE_FLOAT32 = 6,
- SPACE_TO_DEPTH_FLOAT32 = 7,
- LOCAL_RESPONSE_NORMALIZATION_FLOAT32 = 8,
- SOFTMAX_FLOAT32 = 9,
- RESHAPE_FLOAT32 = 10,
- SPLIT_FLOAT32 = 11,
- FAKE_QUANT_FLOAT32 = 12,
- ADD_FLOAT32 = 13,
- FULLY_CONNECTED_FLOAT32 = 14,
- CAST_FLOAT32 = 15,
- MUL_FLOAT32 = 16,
- L2_NORMALIZATION_FLOAT32 = 17,
- LOGISTIC_FLOAT32 = 18,
- RELU_FLOAT32 = 19,
- RELU6_FLOAT32 = 20,
- RELU1_FLOAT32 = 21,
- TANH_FLOAT32 = 22,
- DEQUANTIZE_FLOAT32 = 23,
- FLOOR_FLOAT32 = 24,
- GATHER_FLOAT32 = 25,
- RESIZE_BILINEAR_FLOAT32 = 26,
- LSH_PROJECTION_FLOAT32 = 27,
- LSTM_FLOAT32 = 28,
- SVDF_FLOAT32 = 29,
- RNN_FLOAT32 = 30,
- N_GRAM_FLOAT32 = 31,
- LOOKUP_FLOAT32 = 32,
+ AVERAGE_POOL = 0,
+ CONCATENATION = 1,
+ CONV = 2,
+ DEPTHWISE_CONV = 3,
+ MAX_POOL = 4,
+ L2_POOL = 5,
+ DEPTH_TO_SPACE = 6,
+ SPACE_TO_DEPTH = 7,
+ LOCAL_RESPONSE_NORMALIZATION = 8,
+ SOFTMAX = 9,
+ RESHAPE = 10,
+ SPLIT = 11,
+ FAKE_QUANT = 12,
+ ADD = 13,
+ FULLY_CONNECTED = 14,
+ CAST = 15,
+ MUL = 16,
+ L2_NORMALIZATION = 17,
+ LOGISTIC = 18,
+ RELU = 19,
+ RELU6 = 20,
+ RELU1 = 21,
+ TANH = 22,
+ DEQUANTIZE = 23,
+ FLOOR = 24,
+ GATHER = 25,
+ RESIZE_BILINEAR = 26,
+ LSH_PROJECTION = 27,
+ LSTM = 28,
+ SVDF = 29,
+ RNN = 30,
+ N_GRAM = 31,
+ LOOKUP = 32,
};
// Two special values that can be used instead of a regular poolIndex.
@@ -102,9 +102,16 @@
float powerUsage; // in picoJoules
};
+struct OperationTuple {
+ // The type of operation.
+ OperationType operationType;
+ // The input data type of operation.
+ OperandType operandType;
+};
+
// The capabilities of a driver.
struct Capabilities {
- vec<OperationType> supportedOperationTypes;
+ vec<OperationTuple> supportedOperationTuples;
// TODO Do the same for baseline model IDs
bool cachesCompilation;
// TODO revisit the data types and scales.
@@ -142,8 +149,8 @@
// Describes one operation of the graph.
struct Operation {
- // The type of operation.
- OperationType type;
+ // The tuple describing the operation type and input type.
+ OperationTuple opTuple;
// Describes the table that contains the indexes of the inputs of the
// operation. The offset is the index in the operandIndexes table.
vec<uint32_t> inputs;
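
Capabilities reports OperationTuple pairs, i.e. which (operation, operand type) combinations a driver handles, instead of per-type operation enums. A minimal sketch of how a driver-side implementation might populate such a vector with the generated V1_0 C++ types; the particular tuples and the helper name are illustrative, not taken from any real driver:

    #include <android/hardware/neuralnetworks/1.0/types.h>

    using ::android::hardware::hidl_vec;
    using ::android::hardware::neuralnetworks::V1_0::OperandType;
    using ::android::hardware::neuralnetworks::V1_0::OperationType;
    using ::android::hardware::neuralnetworks::V1_0::OperationTuple;

    // Illustrative only: a driver that accelerates ADD and CONV on float32
    // tensors, plus ADD on asymmetric quantized 8-bit tensors.
    static hidl_vec<OperationTuple> makeSupportedOperationTuples() {
        return {
            {.operationType = OperationType::ADD, .operandType = OperandType::TENSOR_FLOAT32},
            {.operationType = OperationType::CONV, .operandType = OperandType::TENSOR_FLOAT32},
            {.operationType = OperationType::ADD, .operandType = OperandType::TENSOR_QUANT8_ASYMM},
        };
    }
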
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
index 9fa694d..5e6b1bd 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -66,8 +66,8 @@
// initialization
TEST_F(NeuralnetworksHidlTest, InitializeTest) {
Return<void> ret = device->initialize([](const Capabilities& capabilities) {
- EXPECT_NE(nullptr, capabilities.supportedOperationTypes.data());
- EXPECT_NE(0ull, capabilities.supportedOperationTypes.size());
+ EXPECT_NE(nullptr, capabilities.supportedOperationTuples.data());
+ EXPECT_NE(0ull, capabilities.supportedOperationTuples.size());
EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
EXPECT_LT(0.0f, capabilities.bootupTime);
EXPECT_LT(0.0f, capabilities.float16Performance.execTime);
@@ -92,7 +92,7 @@
const std::vector<Operand> operands = {
{
- .type = OperandType::FLOAT32,
+ .type = OperandType::TENSOR_FLOAT32,
.dimensions = {1, 2, 2, 1},
.numberOfConsumers = 1,
.scale = 0.0f,
@@ -102,7 +102,7 @@
.length = 0},
},
{
- .type = OperandType::FLOAT32,
+ .type = OperandType::TENSOR_FLOAT32,
.dimensions = {1, 2, 2, 1},
.numberOfConsumers = 1,
.scale = 0.0f,
@@ -112,7 +112,7 @@
.length = size},
},
{
- .type = OperandType::FLOAT32,
+ .type = OperandType::TENSOR_FLOAT32,
.dimensions = {1, 2, 2, 1},
.numberOfConsumers = 0,
.scale = 0.0f,
@@ -124,7 +124,9 @@
};
const std::vector<Operation> operations = {{
- .type = OperationType::ADD_FLOAT32, .inputs = {operand1, operand2}, .outputs = {operand3},
+ .opTuple = {OperationType::ADD, OperandType::TENSOR_FLOAT32},
+ .inputs = {operand1, operand2},
+ .outputs = {operand3},
}};
const std::vector<uint32_t> inputIndexes = {operand1};
diff --git a/renderscript/1.0/default/Device.cpp b/renderscript/1.0/default/Device.cpp
index 3aae060..4831a8b 100644
--- a/renderscript/1.0/default/Device.cpp
+++ b/renderscript/1.0/default/Device.cpp
@@ -1,6 +1,9 @@
#include "Context.h"
#include "Device.h"
+#include <android/dlext.h>
+#include <dlfcn.h>
+
namespace android {
namespace hardware {
namespace renderscript {
@@ -39,7 +42,25 @@
static_assert(sizeof(size_t) <= sizeof(uint64_t), "RenderScript HIDL Error: sizeof(size_t) > sizeof(uint64_t)");
const char* filename = "libRS_internal.so";
- void* handle = dlopen(filename, RTLD_LAZY | RTLD_LOCAL);
+ // Try to load libRS_internal.so from the "rs" namespace directly.
+ typedef struct android_namespace_t* (*GetExportedNamespaceFnPtr)(const char*);
+ GetExportedNamespaceFnPtr getExportedNamespace =
+ (GetExportedNamespaceFnPtr)dlsym(RTLD_DEFAULT, "android_get_exported_namespace");
+ void* handle = nullptr;
+ if (getExportedNamespace != nullptr) {
+ android_namespace_t* rs_namespace = getExportedNamespace("rs");
+ if (rs_namespace != nullptr) {
+ const android_dlextinfo dlextinfo = {
+ .flags = ANDROID_DLEXT_USE_NAMESPACE, .library_namespace = rs_namespace,
+ };
+ handle = android_dlopen_ext(filename, RTLD_LAZY | RTLD_LOCAL, &dlextinfo);
+ }
+ }
+ if (handle == nullptr) {
+ // If there is no "rs" namespace (for example, when this HAL implementation
+ // is loaded into a vendor process), fall back to a plain dlopen.
+ handle = dlopen(filename, RTLD_LAZY | RTLD_LOCAL);
+ }
dispatchTable dispatchHal = {
.SetNativeLibDir = (SetNativeLibDirFnPtr) nullptr,
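
The loading logic above generalizes to any library that should come from a named, exported linker namespace when one is available; android_get_exported_namespace is resolved at runtime with dlsym, and a plain dlopen remains as the fallback. A self-contained sketch of the same pattern (the helper name and error handling are illustrative, not part of this HAL implementation):

    #include <android/dlext.h>
    #include <dlfcn.h>

    // Prefer loading from a named, exported linker namespace; fall back to a
    // plain dlopen when the namespace (or android_get_exported_namespace
    // itself) is unavailable.
    static void* dlopenFromExportedNamespace(const char* libname, const char* nsname) {
        typedef struct android_namespace_t* (*GetExportedNamespaceFnPtr)(const char*);
        GetExportedNamespaceFnPtr getExportedNamespace =
            (GetExportedNamespaceFnPtr)dlsym(RTLD_DEFAULT, "android_get_exported_namespace");
        if (getExportedNamespace != nullptr) {
            android_namespace_t* ns = getExportedNamespace(nsname);
            if (ns != nullptr) {
                const android_dlextinfo dlextinfo = {
                    .flags = ANDROID_DLEXT_USE_NAMESPACE, .library_namespace = ns,
                };
                void* handle = android_dlopen_ext(libname, RTLD_LAZY | RTLD_LOCAL, &dlextinfo);
                if (handle != nullptr) {
                    return handle;
                }
            }
        }
        return dlopen(libname, RTLD_LAZY | RTLD_LOCAL);
    }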