Merge "Update documentation of gnssRequestLocationCb"
diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
index 7938b73..c1c511f 100644
--- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
+++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
@@ -30,6 +30,7 @@
//
// Some handy constants to avoid conversions from enum to int.
constexpr int ABS_ACTIVE = (int)VehicleProperty::ABS_ACTIVE;
+constexpr int AP_POWER_STATE = (int)VehicleProperty::AP_POWER_STATE;
constexpr int OBD2_LIVE_FRAME = (int)VehicleProperty::OBD2_LIVE_FRAME;
constexpr int OBD2_FREEZE_FRAME = (int)VehicleProperty::OBD2_FREEZE_FRAME;
constexpr int OBD2_FREEZE_FRAME_INFO = (int)VehicleProperty::OBD2_FREEZE_FRAME_INFO;
@@ -342,12 +343,6 @@
},
.initialValue = {.int32Values = {toInt(VehicleGear::GEAR_PARK)}}},
- {.config = {.prop = toInt(VehicleProperty::DISPLAY_BRIGHTNESS),
- .access = VehiclePropertyAccess::READ_WRITE,
- .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
- .areaConfigs = {VehicleAreaConfig{.minInt32Value = 0, .maxInt32Value = 10}}},
- .initialValue = {.int32Values = {7}}},
-
{.config =
{
.prop = toInt(VehicleProperty::IGNITION_STATE),
@@ -420,6 +415,23 @@
},
},
+ {.config = {.prop = toInt(VehicleProperty::AP_POWER_STATE),
+ .access = VehiclePropertyAccess::READ_WRITE,
+ .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ .configArray = {3}},
+ .initialValue = {.int32Values = {toInt(VehicleApPowerState::ON_FULL), 0}}},
+
+ {.config = {.prop = toInt(VehicleProperty::DISPLAY_BRIGHTNESS),
+ .access = VehiclePropertyAccess::READ_WRITE,
+ .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ .areaConfigs = {VehicleAreaConfig{.minInt32Value = 0, .maxInt32Value = 100}}},
+ .initialValue = {.int32Values = {100}}},
+
+ {.config = {.prop = toInt(VehicleProperty::AP_POWER_BOOTUP_REASON),
+ .access = VehiclePropertyAccess::READ,
+ .changeMode = VehiclePropertyChangeMode::ON_CHANGE},
+ .initialValue = {.int32Values = {toInt(VehicleApPowerBootupReason::USER_POWER_ON)}}},
+
{
.config = {.prop = OBD2_LIVE_FRAME,
.access = VehiclePropertyAccess::READ,
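
The DISPLAY_BRIGHTNESS entry above now spans 0-100 (initial value 100) instead of 0-10, and AP_POWER_STATE/AP_POWER_BOOTUP_REASON are newly configured. For reference, a minimal client-side sketch of how these defaults are exercised through the stock @2.0::IVehicle set() call; the helper below is illustrative and not part of this change.

    #include <android/hardware/automotive/vehicle/2.0/IVehicle.h>

    using android::sp;
    using namespace android::hardware::automotive::vehicle::V2_0;

    // Illustrative only: write a brightness level within the 0..100 range
    // configured above, using the standard IVehicle::set() signature.
    bool setBrightness(const sp<IVehicle>& vehicle, int32_t level) {
        VehiclePropValue value{};
        value.prop = static_cast<int32_t>(VehicleProperty::DISPLAY_BRIGHTNESS);
        value.value.int32Values.resize(1);
        value.value.int32Values[0] = level;  // must stay within minInt32Value..maxInt32Value
        auto ret = vehicle->set(value);
        return ret.isOk() && static_cast<StatusCode>(ret) == StatusCode::OK;
    }
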
diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp b/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp
index 5118b18..764bebd 100644
--- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp
+++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp
@@ -146,12 +146,23 @@
&& hvacPowerOn->value.int32Values[0] == 0) {
return StatusCode::NOT_AVAILABLE;
}
- } else if (propValue.prop == OBD2_FREEZE_FRAME_CLEAR) {
- return clearObd2FreezeFrames(propValue);
- } else if (propValue.prop == VEHICLE_MAP_SERVICE) {
- // Placeholder for future implementation of VMS property in the default hal. For now, just
- // returns OK; otherwise, hal clients crash with property not supported.
- return StatusCode::OK;
+ } else {
+ // Handle property specific code
+ switch (propValue.prop) {
+ case OBD2_FREEZE_FRAME_CLEAR:
+ return clearObd2FreezeFrames(propValue);
+ case VEHICLE_MAP_SERVICE:
+ // Placeholder for a future implementation of the VMS property in the default HAL.
+ // For now, just return OK; otherwise, HAL clients crash with "property not supported".
+ return StatusCode::OK;
+ case AP_POWER_STATE:
+ // This property has different behavior between get/set. When it is set, the value
+ // goes to the vehicle but is NOT updated in the property store back to Android.
+ // Commented out for now, because it may mess up automated testing that uses the
+ // emulator interface.
+ // getEmulatorOrDie()->doSetValueFromClient(propValue);
+ return StatusCode::OK;
+ }
}
if (!mPropStore->writeValue(propValue)) {
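
The rewritten block keeps property-specific handling (freeze-frame clear, the VMS placeholder, AP_POWER_STATE forwarding) in one switch and only falls through to the generic mPropStore->writeValue() path for everything else. A standalone sketch of that control-flow shape, using stand-in IDs and status values rather than the real VHAL types:

    // Stand-in illustration of the dispatch shape above (not the real VHAL code).
    enum class Status { OK, INVALID_ARG };

    constexpr int kFreezeFrameClear = 1;
    constexpr int kVehicleMapService = 2;
    constexpr int kApPowerState = 3;

    Status setProperty(int prop, bool storeWriteOk) {
        switch (prop) {
            case kFreezeFrameClear:
                return Status::OK;  // property-specific action, no store write
            case kVehicleMapService:
                return Status::OK;  // accepted but intentionally ignored for now
            case kApPowerState:
                return Status::OK;  // forwarded to the vehicle, not written back to the store
        }
        return storeWriteOk ? Status::OK : Status::INVALID_ARG;  // generic store write
    }
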
diff --git a/boot/1.0/vts/functional/VtsHalBootV1_0TargetTest.cpp b/boot/1.0/vts/functional/VtsHalBootV1_0TargetTest.cpp
index d1d7f73..2f2052c 100644
--- a/boot/1.0/vts/functional/VtsHalBootV1_0TargetTest.cpp
+++ b/boot/1.0/vts/functional/VtsHalBootV1_0TargetTest.cpp
@@ -24,6 +24,8 @@
#include <VtsHalHidlTargetTestBase.h>
#include <VtsHalHidlTargetTestEnvBase.h>
+#include <unordered_set>
+
using ::android::hardware::boot::V1_0::IBootControl;
using ::android::hardware::boot::V1_0::CommandResult;
using ::android::hardware::boot::V1_0::BoolResult;
@@ -32,6 +34,7 @@
using ::android::hardware::Return;
using ::android::sp;
using std::string;
+using std::unordered_set;
using std::vector;
// Test environment for Boot HIDL HAL.
@@ -168,14 +171,18 @@
// Sanity check Boot::getSuffix() on good and bad inputs.
TEST_F(BootHidlTest, GetSuffix) {
string suffixStr;
- vector<string> correctSuffixes = {"_a", "_b"};
+ unordered_set<string> suffixes;
auto cb = [&](hidl_string suffix) { suffixStr = suffix.c_str(); };
- for (Slot i = 0; i < 2; i++) {
+ for (Slot i = 0; i < boot->getNumberSlots(); i++) {
CommandResult cr;
Return<void> result = boot->getSuffix(i, cb);
EXPECT_TRUE(result.isOk());
- ASSERT_EQ(0, suffixStr.compare(correctSuffixes[i]));
+ ASSERT_LE((unsigned)2, suffixStr.size());
+ ASSERT_EQ('_', suffixStr[0]);
+ suffixes.insert(suffixStr);
}
+ // All suffixes should be unique
+ ASSERT_EQ(boot->getNumberSlots(), suffixes.size());
{
string emptySuffix = "";
Return<void> result = boot->getSuffix(boot->getNumberSlots(), cb);
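
The relaxed checks above (leading '_', at least two characters, uniqueness across all slots) no longer hard-code "_a"/"_b". For reference, a minimal conforming implementation sketch, assuming the generated @1.0::IBootControl stub signature; real HALs may use any unique, non-empty suffixes.

    // Illustrative two-slot implementation; "_a"/"_b" is just the common A/B naming.
    Return<void> BootControl::getSuffix(uint32_t slot, getSuffix_cb _hidl_cb) {
        static const char* kSuffixes[] = {"_a", "_b"};
        // Out-of-range slots (e.g. slot == getNumberSlots()) report an empty
        // suffix, which the bad-input case below relies on.
        _hidl_cb(slot < 2 ? hidl_string(kSuffixes[slot]) : hidl_string(""));
        return Void();
    }
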
diff --git a/camera/provider/2.4/vts/functional/Android.bp b/camera/provider/2.4/vts/functional/Android.bp
index 7bc4253..08b9222 100644
--- a/camera/provider/2.4/vts/functional/Android.bp
+++ b/camera/provider/2.4/vts/functional/Android.bp
@@ -23,6 +23,7 @@
shared_libs: [
"libbinder",
"libcamera_metadata",
+ "libcutils",
"libfmq",
"libgui",
"libui",
diff --git a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
index e78dbe8..abd875a 100644
--- a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
+++ b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
@@ -35,6 +35,7 @@
#include <binder/MemoryHeapBase.h>
#include <CameraMetadata.h>
#include <CameraParameters.h>
+#include <cutils/properties.h>
#include <fmq/MessageQueue.h>
#include <grallocusage/GrallocUsageConversion.h>
#include <gui/BufferItemConsumer.h>
@@ -1100,6 +1101,22 @@
return cameraDeviceNames;
}
+// Verify that devices with first_api_level >= P do not advertise device@1.0
+TEST_F(CameraHidlTest, noHal1AfterP) {
+ constexpr int32_t HAL1_PHASE_OUT_API_LEVEL = 28;
+ int32_t firstApiLevel = property_get_int32("ro.product.first_api_level", /*default*/-1);
+ ASSERT_GT(firstApiLevel, 0); // first_api_level must exist
+
+ if (firstApiLevel >= HAL1_PHASE_OUT_API_LEVEL) {
+ hidl_vec<hidl_string> cameraDeviceNames = getCameraDeviceNames(mProvider);
+ for (const auto& name : cameraDeviceNames) {
+ int deviceVersion = getCameraDeviceVersion(name, mProviderType);
+ ASSERT_NE(deviceVersion, 0); // Must be a valid device version
+ ASSERT_NE(deviceVersion, CAMERA_DEVICE_API_VERSION_1_0); // Must not be device@1.0
+ }
+ }
+}
+
// Test if ICameraProvider::isTorchModeSupported returns Status::OK
TEST_F(CameraHidlTest, isTorchModeSupported) {
Return<void> ret;
@@ -1119,9 +1136,6 @@
ALOGI("Camera Id[%zu] is %s", i, idList[i].c_str());
}
ASSERT_EQ(Status::OK, status);
- // This is true for internal camera provider.
- // Not necessary hold for external cameras providers
- ASSERT_GT(idList.size(), 0u);
});
ASSERT_TRUE(ret.isOk());
}
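
noHal1AfterP gates on ro.product.first_api_level, so devices that first shipped before P may keep advertising device@1.0. A hedged variant that names the cutoff instead of hard-coding 28, assuming <android/api-level.h> is available to this test:

    #include <android/api-level.h>
    #include <cutils/properties.h>

    // Sketch only: __ANDROID_API_P__ is defined as 28, matching HAL1_PHASE_OUT_API_LEVEL.
    static bool shippedWithAtLeastP() {
        int32_t firstApiLevel = property_get_int32("ro.product.first_api_level", /*default*/ -1);
        return firstApiLevel >= __ANDROID_API_P__;
    }
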
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index f0ce938..8646a4c 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -186,35 +186,29 @@
// see if service can handle model
bool fullySupportsModel = false;
- ErrorStatus supportedStatus;
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- ASSERT_NE(nullptr, preparedModelCallback.get());
-
Return<void> supportedCall = device->getSupportedOperations(
- model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
- supportedStatus = status;
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel =
std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
- ASSERT_EQ(ErrorStatus::NONE, supportedStatus);
+
+ // launch prepare model
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
- if (fullySupportsModel) {
- EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
- } else {
- EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
- prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
- }
// early termination if vendor service cannot fully prepare model
- if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
+ if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel.get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
@@ -223,6 +217,7 @@
<< std::endl;
return;
}
+ EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel.get());
EvaluatePreparedModel(preparedModel, is_ignored, examples);
@@ -235,36 +230,30 @@
// see if service can handle model
bool fullySupportsModel = false;
- ErrorStatus supportedStatus;
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- ASSERT_NE(nullptr, preparedModelCallback.get());
-
Return<void> supportedCall = device->getSupportedOperations_1_1(
- model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
- supportedStatus = status;
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
ASSERT_NE(0ul, supported.size());
fullySupportsModel =
std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
});
ASSERT_TRUE(supportedCall.isOk());
- ASSERT_EQ(ErrorStatus::NONE, supportedStatus);
+
+ // launch prepare model
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_1(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
- if (fullySupportsModel) {
- EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
- } else {
- EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
- prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
- }
// early termination if vendor service cannot fully prepare model
- if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
+ if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel.get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
@@ -273,6 +262,7 @@
<< std::endl;
return;
}
+ EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel.get());
// If in relaxed mode, set the error range to be 5ULP of FP16.
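
Both the 1.0 and 1.1 paths now repeat the same probe: call getSupportedOperations, derive fullySupportsModel, and tolerate a prepare failure only when the model is not fully supported. A hypothetical helper (not part of this change) that factors out the probe for the V1_0 interface, using the types already in scope in GeneratedTestHarness.cpp:

    // Returns true only if the driver reports every operation in the model as supported.
    static bool isModelFullySupported(const sp<IDevice>& device, const Model& model) {
        bool fullySupported = false;
        Return<void> ret = device->getSupportedOperations(
            model, [&fullySupported](ErrorStatus status, const hidl_vec<bool>& supported) {
                fullySupported = status == ErrorStatus::NONE && supported.size() != 0 &&
                                 std::all_of(supported.begin(), supported.end(),
                                             [](bool ok) { return ok; });
            });
        return ret.isOk() && fullySupported;
    }
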
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp
index e838997..59e5b80 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp
@@ -52,26 +52,51 @@
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
-inline sp<IPreparedModel> doPrepareModelShortcut(sp<IDevice>& device) {
+static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
+ ASSERT_NE(nullptr, preparedModel);
Model model = createValidTestModel_1_0();
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- if (preparedModelCallback == nullptr) {
- return nullptr;
- }
- Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
- if (!prepareLaunchStatus.isOk() || prepareLaunchStatus != ErrorStatus::NONE) {
- return nullptr;
- }
+ // see if service can handle model
+ bool fullySupportsModel = false;
+ Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_NE(0ul, supported.size());
+ fullySupportsModel =
+ std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
+ });
+ ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+ // launch prepare model
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ ASSERT_NE(nullptr, preparedModelCallback.get());
+ Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+ // retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
- if (prepareReturnStatus != ErrorStatus::NONE || preparedModel == nullptr) {
- return nullptr;
- }
+ *preparedModel = preparedModelCallback->getPreparedModel();
- return preparedModel;
+ // The getSupportedOperations call returns a list of operations that are
+ // guaranteed not to fail if prepareModel is called, and
+ // 'fullySupportsModel' is true if and only if the entire model is
+ // guaranteed to be supported. If a driver has any doubt that it can prepare
+ // an operation, it must return false. So if a driver isn't sure whether it
+ // can support an operation but still reports that it prepared the model
+ // successfully, the test can continue.
+ if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+ ASSERT_EQ(nullptr, preparedModel->get());
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ return;
+ }
+ ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+ ASSERT_NE(nullptr, preparedModel->get());
}
// create device test
@@ -132,18 +157,8 @@
// prepare simple model positive test
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
- Model model = createValidTestModel_1_0();
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- ASSERT_NE(nullptr, preparedModelCallback.get());
- Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
- ASSERT_TRUE(prepareLaunchStatus.isOk());
- EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
- preparedModelCallback->wait();
- ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
- sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
- EXPECT_NE(nullptr, preparedModel.get());
+ sp<IPreparedModel> preparedModel;
+ doPrepareModelShortcut(device, &preparedModel);
}
// prepare simple model negative test 1
@@ -184,8 +199,11 @@
std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
const uint32_t OUTPUT = 1;
- sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
- ASSERT_NE(nullptr, preparedModel.get());
+ sp<IPreparedModel> preparedModel;
+ ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+ if (preparedModel == nullptr) {
+ return;
+ }
Request request = createValidTestRequest();
auto postWork = [&] {
@@ -218,8 +236,11 @@
// execute simple graph negative test 1
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
- sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
- ASSERT_NE(nullptr, preparedModel.get());
+ sp<IPreparedModel> preparedModel;
+ ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+ if (preparedModel == nullptr) {
+ return;
+ }
Request request = createInvalidTestRequest1();
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
@@ -235,8 +256,11 @@
// execute simple graph negative test 2
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
- sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
- ASSERT_NE(nullptr, preparedModel.get());
+ sp<IPreparedModel> preparedModel;
+ ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+ if (preparedModel == nullptr) {
+ return;
+ }
Request request = createInvalidTestRequest2();
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
diff --git a/neuralnetworks/1.1/IDevice.hal b/neuralnetworks/1.1/IDevice.hal
index 9d3fc31..ca22555 100644
--- a/neuralnetworks/1.1/IDevice.hal
+++ b/neuralnetworks/1.1/IDevice.hal
@@ -25,6 +25,20 @@
*/
interface IDevice extends @1.0::IDevice {
/**
+ * Gets the capabilities of a driver.
+ *
+ * Note that @1.1::Capabilities provides performance information
+ * on relaxed calculations, whereas @1.0::Capabilities does not.
+ *
+ * @return status Error status of the call, must be:
+ * - NONE if successful
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * @return capabilities Capabilities of the driver.
+ */
+ getCapabilities_1_1() generates (ErrorStatus status, Capabilities capabilities);
+
+ /**
* Gets the supported operations in a model.
*
* getSupportedOperations_1_1 indicates which operations of a model are fully
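
The calling pattern for getCapabilities_1_1 mirrors @1.0::getCapabilities: the result arrives through a synchronous HIDL callback, as the updated GetCapabilitiesTest below exercises. A minimal client-side sketch, assuming the usual generated C++ proxy types:

    // Illustrative usage; 'device' is a sp<V1_1::IDevice> obtained via getService().
    void logCapabilities(const sp<IDevice>& device) {
        Return<void> ret = device->getCapabilities_1_1(
                [](ErrorStatus status, const Capabilities& capabilities) {
                    if (status != ErrorStatus::NONE) {
                        return;  // DEVICE_UNAVAILABLE or GENERAL_FAILURE
                    }
                    // e.g. inspect capabilities.relaxedFloat32toFloat16Performance.execTime
                });
        if (!ret.isOk()) {
            // transport error (dead or unresponsive service)
        }
    }
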
diff --git a/neuralnetworks/1.1/types.hal b/neuralnetworks/1.1/types.hal
index fae5dd0..1d470d6 100644
--- a/neuralnetworks/1.1/types.hal
+++ b/neuralnetworks/1.1/types.hal
@@ -18,6 +18,7 @@
import @1.0::Operand;
import @1.0::OperationType;
+import @1.0::PerformanceInfo;
/**
* Operation types.
@@ -259,6 +260,28 @@
};
/**
+ * The capabilities of a driver.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data.
+ */
+ PerformanceInfo float32Performance;
+
+ /**
+ * Driver performance when operating on asymmetric 8-bit quantized data.
+ */
+ PerformanceInfo quantized8Performance;
+
+ /**
+ * Driver performance when operating on float32 data but performing
+ * calculations with range and/or precision as low as that of the IEEE
+ * 754 16-bit floating-point format.
+ */
+ PerformanceInfo relaxedFloat32toFloat16Performance;
+};
+
+/**
* Describes one operation of the model's graph.
*/
struct Operation {
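
The new relaxedFloat32toFloat16Performance field only applies when a model opts into relaxed computation. A hedged consumer-side sketch, assuming the relaxComputationFloat32toFloat16 flag on @1.1::Model introduced alongside these types:

    // Pick the performance numbers that apply to a given model (sketch only).
    float expectedFloat32ExecTime(const Capabilities& caps, const Model& model) {
        return model.relaxComputationFloat32toFloat16
                   ? caps.relaxedFloat32toFloat16Performance.execTime
                   : caps.float32Performance.execTime;
    }
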
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
index 51eff2a..17f6744 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
@@ -29,7 +29,6 @@
#include <hidlmemory/mapping.h>
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
@@ -37,6 +36,7 @@
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::OperandType;
using ::android::hardware::neuralnetworks::V1_0::Request;
+using ::android::hardware::neuralnetworks::V1_1::Capabilities;
using ::android::hardware::neuralnetworks::V1_1::IDevice;
using ::android::hardware::neuralnetworks::V1_1::Model;
using ::android::hardware::neuralnetworks::V1_1::Operation;
@@ -59,27 +59,52 @@
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
-inline sp<IPreparedModel> doPrepareModelShortcut(sp<IDevice>& device) {
+static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
+ ASSERT_NE(nullptr, preparedModel);
Model model = createValidTestModel_1_1();
+ // see if service can handle model
+ bool fullySupportsModel = false;
+ Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_NE(0ul, supported.size());
+ fullySupportsModel =
+ std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
+ });
+ ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+ // launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- if (preparedModelCallback == nullptr) {
- return nullptr;
- }
+ ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus =
device->prepareModel_1_1(model, preparedModelCallback);
- if (!prepareLaunchStatus.isOk() || prepareLaunchStatus != ErrorStatus::NONE) {
- return nullptr;
- }
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+ // retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
- if (prepareReturnStatus != ErrorStatus::NONE || preparedModel == nullptr) {
- return nullptr;
- }
+ *preparedModel = preparedModelCallback->getPreparedModel();
- return preparedModel;
+ // The getSupportedOperations_1_1 call returns a list of operations that are
+ // guaranteed not to fail if prepareModel_1_1 is called, and
+ // 'fullySupportsModel' is true if and only if the entire model is
+ // guaranteed to be supported. If a driver has any doubt that it can prepare
+ // an operation, it must return false. So if a driver isn't sure whether it
+ // can support an operation but still reports that it prepared the model
+ // successfully, the test can continue.
+ if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+ ASSERT_EQ(nullptr, preparedModel->get());
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ return;
+ }
+ ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+ ASSERT_NE(nullptr, preparedModel->get());
}
// create device test
@@ -95,12 +120,14 @@
// initialization
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
Return<void> ret =
- device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
+ device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
EXPECT_EQ(ErrorStatus::NONE, status);
EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
+ EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
+ EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
});
EXPECT_TRUE(ret.isOk());
}
@@ -140,19 +167,8 @@
// prepare simple model positive test
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
- Model model = createValidTestModel_1_1();
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- ASSERT_NE(nullptr, preparedModelCallback.get());
- Return<ErrorStatus> prepareLaunchStatus =
- device->prepareModel_1_1(model, preparedModelCallback);
- ASSERT_TRUE(prepareLaunchStatus.isOk());
- EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
- preparedModelCallback->wait();
- ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
- sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
- EXPECT_NE(nullptr, preparedModel.get());
+ sp<IPreparedModel> preparedModel;
+ doPrepareModelShortcut(device, &preparedModel);
}
// prepare simple model negative test 1
@@ -195,8 +211,11 @@
std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
const uint32_t OUTPUT = 1;
- sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
- ASSERT_NE(nullptr, preparedModel.get());
+ sp<IPreparedModel> preparedModel;
+ ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+ if (preparedModel == nullptr) {
+ return;
+ }
Request request = createValidTestRequest();
auto postWork = [&] {
@@ -229,8 +248,11 @@
// execute simple graph negative test 1
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
- sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
- ASSERT_NE(nullptr, preparedModel.get());
+ sp<IPreparedModel> preparedModel;
+ ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+ if (preparedModel == nullptr) {
+ return;
+ }
Request request = createInvalidTestRequest1();
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
@@ -246,8 +268,11 @@
// execute simple graph negative test 2
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
- sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
- ASSERT_NE(nullptr, preparedModel.get());
+ sp<IPreparedModel> preparedModel;
+ ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+ if (preparedModel == nullptr) {
+ return;
+ }
Request request = createInvalidTestRequest2();
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
diff --git a/radio/1.2/IRadioIndication.hal b/radio/1.2/IRadioIndication.hal
index 3d93b98..4caddd9 100644
--- a/radio/1.2/IRadioIndication.hal
+++ b/radio/1.2/IRadioIndication.hal
@@ -57,6 +57,8 @@
/**
* Indicates physical channel configurations.
*
+ * An empty configs list indicates that the radio is in idle mode.
+ *
* @param type Type of radio indication
* @param configs Vector of PhysicalChannelConfigs
*/
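
The added sentence pins down the semantics: an empty list means idle, not "unknown". A hedged client-side sketch of a handler honoring that contract; the handler and helper names are illustrative, not from the radio HAL:

    // Illustrative handler; PhysicalChannelConfig comes from radio@1.2 types.
    void onPhysicalChannelConfigChanged(const hidl_vec<PhysicalChannelConfig>& configs) {
        if (configs.size() == 0) {
            markRadioIdle();  // hypothetical: per the documentation above, the radio is idle
            return;
        }
        for (const auto& config : configs) {
            updateChannelState(config);  // hypothetical per-channel bookkeeping
        }
    }
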
diff --git a/tv/input/1.0/vts/functional/VtsHalTvInputV1_0TargetTest.cpp b/tv/input/1.0/vts/functional/VtsHalTvInputV1_0TargetTest.cpp
index 0d5110e..573a1d6 100644
--- a/tv/input/1.0/vts/functional/VtsHalTvInputV1_0TargetTest.cpp
+++ b/tv/input/1.0/vts/functional/VtsHalTvInputV1_0TargetTest.cpp
@@ -42,11 +42,27 @@
#define WAIT_FOR_EVENT_TIMEOUT 5
#define DEFAULT_ID INT32_MIN
+// Test environment for TvInput HIDL HAL.
+class TvInputHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+ public:
+ // get the test environment singleton
+ static TvInputHidlEnvironment* Instance() {
+ static TvInputHidlEnvironment* instance = new TvInputHidlEnvironment;
+ return instance;
+ }
+
+ virtual void registerTestServices() override { registerTestService<ITvInput>(); }
+
+ private:
+ TvInputHidlEnvironment() {}
+};
+
/* The main test class for TV Input HIDL HAL. */
class TvInputHidlTest : public ::testing::VtsHalHidlTargetTestBase {
public:
virtual void SetUp() override {
- tv_input_ = ::testing::VtsHalHidlTargetTestBase::getService<ITvInput>();
+ tv_input_ = ::testing::VtsHalHidlTargetTestBase::getService<ITvInput>(
+ TvInputHidlEnvironment::Instance()->getServiceName<ITvInput>());
ASSERT_NE(tv_input_, nullptr);
tv_input_callback_ = new TvInputCallback(*this);
ASSERT_NE(tv_input_callback_, nullptr);
@@ -187,15 +203,6 @@
};
-/* A class for test environment setup. */
-class TvInputHidlEnvironment : public ::testing::Environment {
- public:
- virtual void SetUp() {}
- virtual void TearDown() {}
-
- private:
-};
-
/*
* GetStreamConfigTest:
* Calls updateStreamConfigurations() for each existing device
@@ -354,8 +361,9 @@
}
int main(int argc, char **argv) {
- ::testing::AddGlobalTestEnvironment(new TvInputHidlEnvironment);
+ ::testing::AddGlobalTestEnvironment(TvInputHidlEnvironment::Instance());
::testing::InitGoogleTest(&argc, argv);
+ TvInputHidlEnvironment::Instance()->init(&argc, argv);
int status = RUN_ALL_TESTS();
ALOGI("Test result = %d", status);
return status;
diff --git a/wifi/supplicant/1.1/vts/functional/Android.bp b/wifi/supplicant/1.1/vts/functional/Android.bp
index 188dba3..3efe15d 100644
--- a/wifi/supplicant/1.1/vts/functional/Android.bp
+++ b/wifi/supplicant/1.1/vts/functional/Android.bp
@@ -40,6 +40,7 @@
srcs: [
"VtsHalWifiSupplicantV1_1TargetTest.cpp",
"supplicant_hidl_test.cpp",
+ "supplicant_sta_network_hidl_test.cpp",
],
static_libs: [
"VtsHalWifiV1_0TargetTestUtil",
diff --git a/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test_utils_1_1.cpp b/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test_utils_1_1.cpp
index 8cc4a9f..3f17740 100644
--- a/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test_utils_1_1.cpp
+++ b/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test_utils_1_1.cpp
@@ -21,8 +21,13 @@
#include "supplicant_hidl_test_utils_1_1.h"
using ::android::hardware::wifi::supplicant::V1_1::ISupplicant;
+using ::android::hardware::wifi::supplicant::V1_1::ISupplicantStaNetwork;
using ::android::sp;
sp<ISupplicant> getSupplicant_1_1() {
return ISupplicant::castFrom(getSupplicant());
}
+
+sp<ISupplicantStaNetwork> createSupplicantStaNetwork_1_1() {
+ return ISupplicantStaNetwork::castFrom(createSupplicantStaNetwork());
+}
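
createSupplicantStaNetwork_1_1() relies on castFrom(), which yields nullptr when the running supplicant only serves the @1.0 interface. A short caller-side sketch; the new fixture below instead asserts a non-null result in SetUp():

    // Always check the castFrom() result before using 1.1-only methods.
    sp<ISupplicantStaNetwork> network = createSupplicantStaNetwork_1_1();
    if (network == nullptr) {
        // The device's supplicant does not expose @1.1::ISupplicantStaNetwork;
        // skip calls such as sendNetworkEapIdentityResponse_1_1().
        return;
    }
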
diff --git a/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test_utils_1_1.h b/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test_utils_1_1.h
index c42a35b..e7ce54a 100644
--- a/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test_utils_1_1.h
+++ b/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test_utils_1_1.h
@@ -18,8 +18,12 @@
#define SUPPLICANT_HIDL_TEST_UTILS_1_1_H
#include <android/hardware/wifi/supplicant/1.1/ISupplicant.h>
+#include <android/hardware/wifi/supplicant/1.1/ISupplicantStaNetwork.h>
android::sp<android::hardware::wifi::supplicant::V1_1::ISupplicant>
getSupplicant_1_1();
+android::sp<android::hardware::wifi::supplicant::V1_1::ISupplicantStaNetwork>
+ createSupplicantStaNetwork_1_1();
+
#endif /* SUPPLICANT_HIDL_TEST_UTILS_1_1_H */
diff --git a/wifi/supplicant/1.1/vts/functional/supplicant_sta_network_hidl_test.cpp b/wifi/supplicant/1.1/vts/functional/supplicant_sta_network_hidl_test.cpp
new file mode 100644
index 0000000..fa52556
--- /dev/null
+++ b/wifi/supplicant/1.1/vts/functional/supplicant_sta_network_hidl_test.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+
+#include <VtsHalHidlTargetTestBase.h>
+#include <android/hardware/wifi/supplicant/1.1/ISupplicantStaNetwork.h>
+
+#include "supplicant_hidl_test_utils.h"
+#include "supplicant_hidl_test_utils_1_1.h"
+
+using ::android::sp;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatus;
+using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatusCode;
+using ::android::hardware::wifi::supplicant::V1_1::ISupplicantStaNetwork;
+namespace {
+constexpr uint8_t kTestIdentity[] = {0x45, 0x67, 0x98, 0x67, 0x56};
+constexpr uint8_t kTestEncryptedIdentity[] = {0x35, 0x37, 0x58, 0x57, 0x26};
+} // namespace
+
+class SupplicantStaNetworkHidlTest
+ : public ::testing::VtsHalHidlTargetTestBase {
+ public:
+ virtual void SetUp() override {
+ startSupplicantAndWaitForHidlService();
+ EXPECT_TRUE(turnOnExcessiveLogging());
+ sta_network_ = createSupplicantStaNetwork_1_1();
+ ASSERT_NE(sta_network_.get(), nullptr);
+ }
+
+ virtual void TearDown() override { stopSupplicant(); }
+
+ protected:
+ // ISupplicantStaNetwork object used for all tests in this fixture.
+ sp<ISupplicantStaNetwork> sta_network_;
+};
+
+/*
+ * Create:
+ * Ensures that an instance of the ISupplicantStaNetwork proxy object is
+ * successfully created.
+ */
+TEST(SupplicantStaNetworkHidlTestNoFixture, Create) {
+ startSupplicantAndWaitForHidlService();
+ EXPECT_NE(nullptr, createSupplicantStaNetwork_1_1().get());
+ stopSupplicant();
+}
+
+/*
+ * Ensure that the encrypted IMSI identity is set successfully.
+ */
+TEST_F(SupplicantStaNetworkHidlTest, setEapEncryptedImsiIdentity) {
+ std::vector<uint8_t> encrypted_identity(
+ kTestEncryptedIdentity,
+ kTestEncryptedIdentity + sizeof(kTestEncryptedIdentity));
+ sta_network_->setEapEncryptedImsiIdentity(
+ encrypted_identity, [](const SupplicantStatus &status) {
+ EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
+ });
+}
+
+/*
+ * Ensure that the identity and the encrypted IMSI identity are sent
+ * successfully.
+ */
+TEST_F(SupplicantStaNetworkHidlTest, SendNetworkEapIdentityResponse_1_1) {
+ sta_network_->sendNetworkEapIdentityResponse_1_1(
+ std::vector<uint8_t>(kTestIdentity,
+ kTestIdentity + sizeof(kTestIdentity)),
+ std::vector<uint8_t>(kTestEncryptedIdentity,
+ kTestEncryptedIdentity + sizeof(kTestEncryptedIdentity)),
+ [](const SupplicantStatus &status) {
+ EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
+ });
+}